/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2007 Red Hat, Inc.  All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

/*
 * Quota change tags are associated with each transaction that allocates or
 * deallocates space.  Those changes are accumulated locally to each node (in a
 * per-node file) and then are periodically synced to the quota file.  This
 * avoids the bottleneck of constantly touching the quota file, but introduces
 * fuzziness in the current usage value of IDs that are being used on different
 * nodes in the cluster simultaneously.  So, it is possible for a user on
 * multiple nodes to overrun their quota, but that overrun is controllable.
 * Since quota tags are part of transactions, there is no need for a quota check
 * program to be run on node crashes or anything like that.
 *
 * There are a couple of knobs that let the administrator manage the quota
 * fuzziness.  "quota_quantum" sets the maximum time a quota change can be
 * sitting on one node before being synced to the quota file.  (The default is
 * 60 seconds.)  Another knob, "quota_scale", controls how quickly the frequency
 * of quota file syncs increases as the user moves closer to their limit.  The
 * more frequent the syncs, the more accurate the quota enforcement, but that
 * means that there is more contention between the nodes for the quota file.
 * The default value is one.  This sets the maximum theoretical quota overrun
 * (with an infinite number of nodes and infinite bandwidth) to twice the user's limit.  (In
 * practice, the maximum overrun you see should be much less.)  A "quota_scale"
 * number greater than one makes quota syncs more frequent and reduces the
 * maximum overrun.  Numbers less than one (but greater than zero) make quota
 * syncs less frequent.
 *
 * GFS quotas also use per-ID Lock Value Blocks (LVBs) to cache the contents of
 * the quota file, so it is not being constantly read.
 */
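
/*
 * A rough worked example of the scaling described above (illustrative
 * numbers only): need_sync() treats a locally cached change as due for
 * an early sync once
 *
 *	change * journals * quota_scale + cached_value >= limit
 *
 * On a four-journal cluster with a limit of 1000 blocks, a cached value
 * of 900 and the default quota_scale of 1, a local change of 30 blocks
 * gives 30 * 4 + 900 = 1020 >= 1000, so the change is pushed to the
 * quota file early instead of waiting out quota_quantum.
 */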

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/sort.h>
#include <linux/fs.h>
#include <linux/bio.h>
#include <linux/gfs2_ondisk.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/dqblk_xfs.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "glock.h"
#include "glops.h"
#include "log.h"
#include "meta_io.h"
#include "quota.h"
#include "rgrp.h"
#include "super.h"
#include "trans.h"
#include "inode.h"
#include "util.h"

#define QUOTA_USER 1
#define QUOTA_GROUP 0

struct gfs2_quota_host {
	u64 qu_limit;
	u64 qu_warn;
	s64 qu_value;
	u32 qu_ll_next;
};

struct gfs2_quota_change_host {
	u64 qc_change;
	u32 qc_flags; /* GFS2_QCF_... */
	u32 qc_id;
};

static LIST_HEAD(qd_lru_list);
static atomic_t qd_lru_count = ATOMIC_INIT(0);
static DEFINE_SPINLOCK(qd_lru_lock);

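/*
 * gfs2_shrink_qd_memory - trim the global quota-data LRU
 *
 * Frees up to @nr unreferenced gfs2_quota_data items from the LRU and
 * returns the remaining count scaled by sysctl_vfs_cache_pressure, as
 * shrinker callbacks conventionally do.  Bails out (returning -1) when
 * the caller cannot re-enter filesystem code (no __GFP_FS).
 */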
int gfs2_shrink_qd_memory(int nr, gfp_t gfp_mask)
{
	struct gfs2_quota_data *qd;
	struct gfs2_sbd *sdp;

	if (nr == 0)
		goto out;

	if (!(gfp_mask & __GFP_FS))
		return -1;

	spin_lock(&qd_lru_lock);
	while (nr && !list_empty(&qd_lru_list)) {
		qd = list_entry(qd_lru_list.next,
				struct gfs2_quota_data, qd_reclaim);
		sdp = qd->qd_gl->gl_sbd;

		/* Free from the filesystem-specific list */
		list_del(&qd->qd_list);

		gfs2_assert_warn(sdp, !qd->qd_change);
		gfs2_assert_warn(sdp, !qd->qd_slot_count);
		gfs2_assert_warn(sdp, !qd->qd_bh_count);

		gfs2_glock_put(qd->qd_gl);
		atomic_dec(&sdp->sd_quota_count);

		/* Delete it from the common reclaim list */
		list_del_init(&qd->qd_reclaim);
		atomic_dec(&qd_lru_count);
		spin_unlock(&qd_lru_lock);
		kmem_cache_free(gfs2_quotad_cachep, qd);
		spin_lock(&qd_lru_lock);
		nr--;
	}
	spin_unlock(&qd_lru_lock);

out:
	return (atomic_read(&qd_lru_count) * sysctl_vfs_cache_pressure) / 100;
}

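/*
 * qd2offset - byte offset of an ID's entry in the quota file
 *
 * User and group quotas are interleaved: user ID n occupies slot 2n and
 * group ID n slot 2n + 1, each slot sizeof(struct gfs2_quota) bytes, so
 * e.g. user ID 1000 sits at 2000 * sizeof(struct gfs2_quota).
 */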
static u64 qd2offset(struct gfs2_quota_data *qd)
{
	u64 offset;

	offset = 2 * (u64)qd->qd_id + !test_bit(QDF_USER, &qd->qd_flags);
	offset *= sizeof(struct gfs2_quota);

	return offset;
}

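/*
 * qd_alloc - create an in-core quota data item
 *
 * The new item starts with one reference held and no bitmap slot (-1);
 * its glock is keyed by 2 * id + !user, mirroring the quota file layout
 * so user and group IDs never collide.
 */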
static int qd_alloc(struct gfs2_sbd *sdp, int user, u32 id,
		    struct gfs2_quota_data **qdp)
{
	struct gfs2_quota_data *qd;
	int error;

	qd = kmem_cache_zalloc(gfs2_quotad_cachep, GFP_NOFS);
	if (!qd)
		return -ENOMEM;

	atomic_set(&qd->qd_count, 1);
	qd->qd_id = id;
	if (user)
		set_bit(QDF_USER, &qd->qd_flags);
	qd->qd_slot = -1;
	INIT_LIST_HEAD(&qd->qd_reclaim);

	error = gfs2_glock_get(sdp, 2 * (u64)id + !user,
			      &gfs2_quota_glops, CREATE, &qd->qd_gl);
	if (error)
		goto fail;

	*qdp = qd;

	return 0;

fail:
	kmem_cache_free(gfs2_quotad_cachep, qd);
	return error;
}

static int qd_get(struct gfs2_sbd *sdp, int user, u32 id,
		  struct gfs2_quota_data **qdp)
{
	struct gfs2_quota_data *qd = NULL, *new_qd = NULL;
	int error, found;

	*qdp = NULL;

	for (;;) {
		found = 0;
		spin_lock(&qd_lru_lock);
		list_for_each_entry(qd, &sdp->sd_quota_list, qd_list) {
			if (qd->qd_id == id &&
			    !test_bit(QDF_USER, &qd->qd_flags) == !user) {
				if (!atomic_read(&qd->qd_count) &&
				    !list_empty(&qd->qd_reclaim)) {
					/* Remove it from reclaim list */
					list_del_init(&qd->qd_reclaim);
					atomic_dec(&qd_lru_count);
				}
				atomic_inc(&qd->qd_count);
				found = 1;
				break;
			}
		}

		if (!found)
			qd = NULL;

		if (!qd && new_qd) {
			qd = new_qd;
			list_add(&qd->qd_list, &sdp->sd_quota_list);
			atomic_inc(&sdp->sd_quota_count);
			new_qd = NULL;
		}

		spin_unlock(&qd_lru_lock);

		if (qd) {
			if (new_qd) {
				gfs2_glock_put(new_qd->qd_gl);
				kmem_cache_free(gfs2_quotad_cachep, new_qd);
			}
			*qdp = qd;
			return 0;
		}

		error = qd_alloc(sdp, user, id, &new_qd);
		if (error)
			return error;
	}
}

static void qd_hold(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
	gfs2_assert(sdp, atomic_read(&qd->qd_count));
	atomic_inc(&qd->qd_count);
}

static void qd_put(struct gfs2_quota_data *qd)
{
	if (atomic_dec_and_lock(&qd->qd_count, &qd_lru_lock)) {
		/* Add to the reclaim list */
		list_add_tail(&qd->qd_reclaim, &qd_lru_list);
		atomic_inc(&qd_lru_count);
		spin_unlock(&qd_lru_lock);
	}
}

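/*
 * slot_get - reserve a slot in the per-node quota change file
 *
 * Scans the quota bitmap for the first clear bit and claims it; a
 * caller whose qd already owns a slot just bumps qd_slot_count.
 */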
static int slot_get(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
	unsigned int c, o = 0, b;
	unsigned char byte = 0;

	spin_lock(&qd_lru_lock);

	if (qd->qd_slot_count++) {
		spin_unlock(&qd_lru_lock);
		return 0;
	}

	for (c = 0; c < sdp->sd_quota_chunks; c++)
		for (o = 0; o < PAGE_SIZE; o++) {
			byte = sdp->sd_quota_bitmap[c][o];
			if (byte != 0xFF)
				goto found;
		}

	goto fail;

found:
	for (b = 0; b < 8; b++)
		if (!(byte & (1 << b)))
			break;
	qd->qd_slot = c * (8 * PAGE_SIZE) + o * 8 + b;

	if (qd->qd_slot >= sdp->sd_quota_slots)
		goto fail;

	sdp->sd_quota_bitmap[c][o] |= 1 << b;

	spin_unlock(&qd_lru_lock);

	return 0;

fail:
	qd->qd_slot_count--;
	spin_unlock(&qd_lru_lock);
	return -ENOSPC;
}

static void slot_hold(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;

	spin_lock(&qd_lru_lock);
	gfs2_assert(sdp, qd->qd_slot_count);
	qd->qd_slot_count++;
	spin_unlock(&qd_lru_lock);
}

static void slot_put(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;

	spin_lock(&qd_lru_lock);
	gfs2_assert(sdp, qd->qd_slot_count);
	if (!--qd->qd_slot_count) {
		gfs2_icbit_munge(sdp, sdp->sd_quota_bitmap, qd->qd_slot, 0);
		qd->qd_slot = -1;
	}
	spin_unlock(&qd_lru_lock);
}

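/*
 * bh_get - read in the quota change block backing this qd's slot
 *
 * Maps qd_slot to a block of sd_qc_inode, reads it, verifies the QC
 * metatype and points qd_bh_qc at this ID's gfs2_quota_change entry.
 */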
static int bh_get(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
	struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode);
	unsigned int block, offset;
	struct buffer_head *bh;
	int error;
	struct buffer_head bh_map = { .b_state = 0, .b_blocknr = 0 };

	mutex_lock(&sdp->sd_quota_mutex);

	if (qd->qd_bh_count++) {
		mutex_unlock(&sdp->sd_quota_mutex);
		return 0;
	}

	block = qd->qd_slot / sdp->sd_qc_per_block;
	offset = qd->qd_slot % sdp->sd_qc_per_block;

	bh_map.b_size = 1 << ip->i_inode.i_blkbits;
	error = gfs2_block_map(&ip->i_inode, block, &bh_map, 0);
	if (error)
		goto fail;
	error = gfs2_meta_read(ip->i_gl, bh_map.b_blocknr, DIO_WAIT, &bh);
	if (error)
		goto fail;
	error = -EIO;
	if (gfs2_metatype_check(sdp, bh, GFS2_METATYPE_QC))
		goto fail_brelse;

	qd->qd_bh = bh;
	qd->qd_bh_qc = (struct gfs2_quota_change *)
		(bh->b_data + sizeof(struct gfs2_meta_header) +
		 offset * sizeof(struct gfs2_quota_change));

	mutex_unlock(&sdp->sd_quota_mutex);

	return 0;

fail_brelse:
	brelse(bh);
fail:
	qd->qd_bh_count--;
	mutex_unlock(&sdp->sd_quota_mutex);
	return error;
}

static void bh_put(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;

	mutex_lock(&sdp->sd_quota_mutex);
	gfs2_assert(sdp, qd->qd_bh_count);
	if (!--qd->qd_bh_count) {
		brelse(qd->qd_bh);
		qd->qd_bh = NULL;
		qd->qd_bh_qc = NULL;
	}
	mutex_unlock(&sdp->sd_quota_mutex);
}

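/*
 * qd_fish - pick the next quota data item that needs syncing
 *
 * Finds an unlocked, changed qd not yet synced in this generation,
 * marks it QDF_LOCKED and takes the references that qd_unlock() later
 * drops.
 */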
static int qd_fish(struct gfs2_sbd *sdp, struct gfs2_quota_data **qdp)
{
	struct gfs2_quota_data *qd = NULL;
	int error;
	int found = 0;

	*qdp = NULL;

	if (sdp->sd_vfs->s_flags & MS_RDONLY)
		return 0;

	spin_lock(&qd_lru_lock);

	list_for_each_entry(qd, &sdp->sd_quota_list, qd_list) {
		if (test_bit(QDF_LOCKED, &qd->qd_flags) ||
		    !test_bit(QDF_CHANGE, &qd->qd_flags) ||
		    qd->qd_sync_gen >= sdp->sd_quota_sync_gen)
			continue;

		list_move_tail(&qd->qd_list, &sdp->sd_quota_list);

		set_bit(QDF_LOCKED, &qd->qd_flags);
		gfs2_assert_warn(sdp, atomic_read(&qd->qd_count));
		atomic_inc(&qd->qd_count);
		qd->qd_change_sync = qd->qd_change;
		gfs2_assert_warn(sdp, qd->qd_slot_count);
		qd->qd_slot_count++;
		found = 1;

		break;
	}

	if (!found)
		qd = NULL;

	spin_unlock(&qd_lru_lock);

	if (qd) {
		gfs2_assert_warn(sdp, qd->qd_change_sync);
		error = bh_get(qd);
		if (error) {
			clear_bit(QDF_LOCKED, &qd->qd_flags);
			slot_put(qd);
			qd_put(qd);
			return error;
		}
	}

	*qdp = qd;

	return 0;
}

static int qd_trylock(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;

	if (sdp->sd_vfs->s_flags & MS_RDONLY)
		return 0;

	spin_lock(&qd_lru_lock);

	if (test_bit(QDF_LOCKED, &qd->qd_flags) ||
	    !test_bit(QDF_CHANGE, &qd->qd_flags)) {
		spin_unlock(&qd_lru_lock);
		return 0;
	}

	list_move_tail(&qd->qd_list, &sdp->sd_quota_list);

	set_bit(QDF_LOCKED, &qd->qd_flags);
	gfs2_assert_warn(sdp, atomic_read(&qd->qd_count));
	atomic_inc(&qd->qd_count);
	qd->qd_change_sync = qd->qd_change;
	gfs2_assert_warn(sdp, qd->qd_slot_count);
	qd->qd_slot_count++;

	spin_unlock(&qd_lru_lock);

	gfs2_assert_warn(sdp, qd->qd_change_sync);
	if (bh_get(qd)) {
		clear_bit(QDF_LOCKED, &qd->qd_flags);
		slot_put(qd);
		qd_put(qd);
		return 0;
	}

	return 1;
}

static void qd_unlock(struct gfs2_quota_data *qd)
{
	gfs2_assert_warn(qd->qd_gl->gl_sbd,
			 test_bit(QDF_LOCKED, &qd->qd_flags));
	clear_bit(QDF_LOCKED, &qd->qd_flags);
	bh_put(qd);
	slot_put(qd);
	qd_put(qd);
}

static int qdsb_get(struct gfs2_sbd *sdp, int user, u32 id,
		    struct gfs2_quota_data **qdp)
{
	int error;

	error = qd_get(sdp, user, id, qdp);
	if (error)
		return error;

	error = slot_get(*qdp);
	if (error)
		goto fail;

	error = bh_get(*qdp);
	if (error)
		goto fail_slot;

	return 0;

fail_slot:
	slot_put(*qdp);
fail:
	qd_put(*qdp);
	return error;
}

static void qdsb_put(struct gfs2_quota_data *qd)
{
	bh_put(qd);
	slot_put(qd);
	qd_put(qd);
}

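/*
 * gfs2_quota_hold - attach the quota data an operation may touch
 *
 * Up to four items: the inode's owner uid and gid, plus, on ownership
 * changes, the new uid and/or gid when they differ from the current
 * ones.
 */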
int gfs2_quota_hold(struct gfs2_inode *ip, u32 uid, u32 gid)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_alloc *al = ip->i_alloc;
	struct gfs2_quota_data **qd = al->al_qd;
	int error;

	if (gfs2_assert_warn(sdp, !al->al_qd_num) ||
	    gfs2_assert_warn(sdp, !test_bit(GIF_QD_LOCKED, &ip->i_flags)))
		return -EIO;

	if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
		return 0;

	error = qdsb_get(sdp, QUOTA_USER, ip->i_inode.i_uid, qd);
	if (error)
		goto out;
	al->al_qd_num++;
	qd++;

	error = qdsb_get(sdp, QUOTA_GROUP, ip->i_inode.i_gid, qd);
	if (error)
		goto out;
	al->al_qd_num++;
	qd++;

	if (uid != NO_QUOTA_CHANGE && uid != ip->i_inode.i_uid) {
		error = qdsb_get(sdp, QUOTA_USER, uid, qd);
		if (error)
			goto out;
		al->al_qd_num++;
		qd++;
	}

	if (gid != NO_QUOTA_CHANGE && gid != ip->i_inode.i_gid) {
		error = qdsb_get(sdp, QUOTA_GROUP, gid, qd);
		if (error)
			goto out;
		al->al_qd_num++;
		qd++;
	}

out:
	if (error)
		gfs2_quota_unhold(ip);
	return error;
}

void gfs2_quota_unhold(struct gfs2_inode *ip)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_alloc *al = ip->i_alloc;
	unsigned int x;

	gfs2_assert_warn(sdp, !test_bit(GIF_QD_LOCKED, &ip->i_flags));

	for (x = 0; x < al->al_qd_num; x++) {
		qdsb_put(al->al_qd[x]);
		al->al_qd[x] = NULL;
	}
	al->al_qd_num = 0;
}

static int sort_qd(const void *a, const void *b)
{
	const struct gfs2_quota_data *qd_a = *(const struct gfs2_quota_data **)a;
	const struct gfs2_quota_data *qd_b = *(const struct gfs2_quota_data **)b;

	if (!test_bit(QDF_USER, &qd_a->qd_flags) !=
	    !test_bit(QDF_USER, &qd_b->qd_flags)) {
		if (test_bit(QDF_USER, &qd_a->qd_flags))
			return -1;
		else
			return 1;
	}
	if (qd_a->qd_id < qd_b->qd_id)
		return -1;
	if (qd_a->qd_id > qd_b->qd_id)
		return 1;

	return 0;
}

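/*
 * do_qc - apply a block count delta to the per-node quota change file
 *
 * Initialises this slot's gfs2_quota_change entry on first use, adds
 * @change to it, and takes or drops the QDF_CHANGE references as the
 * accumulated change becomes non-zero or returns to zero.
 */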
static void do_qc(struct gfs2_quota_data *qd, s64 change)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
	struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode);
	struct gfs2_quota_change *qc = qd->qd_bh_qc;
	s64 x;

	mutex_lock(&sdp->sd_quota_mutex);
	gfs2_trans_add_bh(ip->i_gl, qd->qd_bh, 1);

	if (!test_bit(QDF_CHANGE, &qd->qd_flags)) {
		qc->qc_change = 0;
		qc->qc_flags = 0;
		if (test_bit(QDF_USER, &qd->qd_flags))
			qc->qc_flags = cpu_to_be32(GFS2_QCF_USER);
		qc->qc_id = cpu_to_be32(qd->qd_id);
	}

	x = be64_to_cpu(qc->qc_change) + change;
	qc->qc_change = cpu_to_be64(x);

	spin_lock(&qd_lru_lock);
	qd->qd_change = x;
	spin_unlock(&qd_lru_lock);

	if (!x) {
		gfs2_assert_warn(sdp, test_bit(QDF_CHANGE, &qd->qd_flags));
		clear_bit(QDF_CHANGE, &qd->qd_flags);
		qc->qc_flags = 0;
		qc->qc_id = 0;
		slot_put(qd);
		qd_put(qd);
	} else if (!test_and_set_bit(QDF_CHANGE, &qd->qd_flags)) {
		qd_hold(qd);
		slot_hold(qd);
	}

	mutex_unlock(&sdp->sd_quota_mutex);
}

static void gfs2_quota_in(struct gfs2_quota_host *qu, const void *buf)
{
	const struct gfs2_quota *str = buf;

	qu->qu_limit = be64_to_cpu(str->qu_limit);
	qu->qu_warn = be64_to_cpu(str->qu_warn);
	qu->qu_value = be64_to_cpu(str->qu_value);
	qu->qu_ll_next = be32_to_cpu(str->qu_ll_next);
}

static void gfs2_quota_out(const struct gfs2_quota_host *qu, void *buf)
{
	struct gfs2_quota *str = buf;

	str->qu_limit = cpu_to_be64(qu->qu_limit);
	str->qu_warn = cpu_to_be64(qu->qu_warn);
	str->qu_value = cpu_to_be64(qu->qu_value);
	str->qu_ll_next = cpu_to_be32(qu->qu_ll_next);
	memset(&str->qu_reserved, 0, sizeof(str->qu_reserved));
}

/**
 * gfs2_adjust_quota
 *
 * This function was mostly borrowed from gfs2_block_truncate_page which was
 * in turn mostly borrowed from ext3
 */
static int gfs2_adjust_quota(struct gfs2_inode *ip, loff_t loc,
			     s64 change, struct gfs2_quota_data *qd)
{
	struct inode *inode = &ip->i_inode;
	struct address_space *mapping = inode->i_mapping;
	unsigned long index = loc >> PAGE_CACHE_SHIFT;
	unsigned offset = loc & (PAGE_CACHE_SIZE - 1);
	unsigned blocksize, iblock, pos;
	struct buffer_head *bh;
	struct page *page;
	void *kaddr;
	char *ptr;
	struct gfs2_quota_host qp;
	s64 value;
	int err = -EIO;

	if (gfs2_is_stuffed(ip))
		gfs2_unstuff_dinode(ip, NULL);

	page = grab_cache_page(mapping, index);
	if (!page)
		return -ENOMEM;

	blocksize = inode->i_sb->s_blocksize;
	iblock = index << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits);

	if (!page_has_buffers(page))
		create_empty_buffers(page, blocksize, 0);

	bh = page_buffers(page);
	pos = blocksize;
	while (offset >= pos) {
		bh = bh->b_this_page;
		iblock++;
		pos += blocksize;
	}

	if (!buffer_mapped(bh)) {
		gfs2_block_map(inode, iblock, bh, 1);
		if (!buffer_mapped(bh))
			goto unlock;
	}

	if (PageUptodate(page))
		set_buffer_uptodate(bh);

	if (!buffer_uptodate(bh)) {
		ll_rw_block(READ_META, 1, &bh);
		wait_on_buffer(bh);
		if (!buffer_uptodate(bh))
			goto unlock;
	}

	gfs2_trans_add_bh(ip->i_gl, bh, 0);

	kaddr = kmap_atomic(page, KM_USER0);
	ptr = kaddr + offset;
	gfs2_quota_in(&qp, ptr);
	qp.qu_value += change;
	value = qp.qu_value;
	gfs2_quota_out(&qp, ptr);
	flush_dcache_page(page);
	kunmap_atomic(kaddr, KM_USER0);
	err = 0;
	qd->qd_qb.qb_magic = cpu_to_be32(GFS2_MAGIC);
	qd->qd_qb.qb_value = cpu_to_be64(value);
	((struct gfs2_quota_lvb *)(qd->qd_gl->gl_lvb))->qb_magic = cpu_to_be32(GFS2_MAGIC);
	((struct gfs2_quota_lvb *)(qd->qd_gl->gl_lvb))->qb_value = cpu_to_be64(value);
unlock:
	unlock_page(page);
	page_cache_release(page);
	return err;
}

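/*
 * do_sync - fold accumulated per-node changes into the quota file
 *
 * Takes exclusive glocks on every qd being synced plus the quota inode,
 * reserves worst-case blocks for the writes, then applies each change
 * with gfs2_adjust_quota() and backs it out of the change file via
 * do_qc(qd, -qd->qd_change_sync).
 */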
static int do_sync(unsigned int num_qd, struct gfs2_quota_data **qda)
{
	struct gfs2_sbd *sdp = (*qda)->qd_gl->gl_sbd;
	struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
	unsigned int data_blocks, ind_blocks;
	struct gfs2_holder *ghs, i_gh;
	unsigned int qx, x;
	struct gfs2_quota_data *qd;
	loff_t offset;
	unsigned int nalloc = 0, blocks;
	struct gfs2_alloc *al = NULL;
	int error;

	gfs2_write_calc_reserv(ip, sizeof(struct gfs2_quota),
			      &data_blocks, &ind_blocks);

	ghs = kcalloc(num_qd, sizeof(struct gfs2_holder), GFP_NOFS);
	if (!ghs)
		return -ENOMEM;

	sort(qda, num_qd, sizeof(struct gfs2_quota_data *), sort_qd, NULL);
	for (qx = 0; qx < num_qd; qx++) {
		error = gfs2_glock_nq_init(qda[qx]->qd_gl,
					   LM_ST_EXCLUSIVE,
					   GL_NOCACHE, &ghs[qx]);
		if (error)
			goto out;
	}

	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &i_gh);
	if (error)
		goto out;

	for (x = 0; x < num_qd; x++) {
		int alloc_required;

		offset = qd2offset(qda[x]);
		error = gfs2_write_alloc_required(ip, offset,
						  sizeof(struct gfs2_quota),
						  &alloc_required);
		if (error)
			goto out_gunlock;
		if (alloc_required)
			nalloc++;
	}

	al = gfs2_alloc_get(ip);
	if (!al) {
		error = -ENOMEM;
		goto out_gunlock;
	}
	/*
	 * 1 blk for unstuffing inode if stuffed. We add this extra
	 * block to the reservation unconditionally. If the inode
	 * doesn't need unstuffing, the block will be released to the
	 * rgrp since it won't be allocated during the transaction
	 */
	al->al_requested = 1;
	/* +1 at the end for the block requested above for unstuffing */
	blocks = num_qd * data_blocks + RES_DINODE + num_qd + 1;

	if (nalloc)
		al->al_requested += nalloc * (data_blocks + ind_blocks);
	error = gfs2_inplace_reserve(ip);
	if (error)
		goto out_alloc;

	if (nalloc)
		blocks += al->al_rgd->rd_length + nalloc * ind_blocks + RES_STATFS;

	error = gfs2_trans_begin(sdp, blocks, 0);
	if (error)
		goto out_ipres;

	for (x = 0; x < num_qd; x++) {
		qd = qda[x];
		offset = qd2offset(qd);
		error = gfs2_adjust_quota(ip, offset, qd->qd_change_sync, qd);
		if (error)
			goto out_end_trans;

		do_qc(qd, -qd->qd_change_sync);
	}

	error = 0;

out_end_trans:
	gfs2_trans_end(sdp);
out_ipres:
	gfs2_inplace_release(ip);
out_alloc:
	gfs2_alloc_put(ip);
out_gunlock:
	gfs2_glock_dq_uninit(&i_gh);
out:
	while (qx--)
		gfs2_glock_dq_uninit(&ghs[qx]);
	kfree(ghs);
	gfs2_log_flush(ip->i_gl->gl_sbd, ip->i_gl);
	return error;
}

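/*
 * do_glock - acquire a qd's glock and ensure its LVB copy is current
 *
 * Normally a shared lock suffices, since the quota values are cached in
 * the lock value block.  On a forced refresh, or when the LVB magic is
 * unset, the quota file is read under an exclusive lock to repopulate
 * the LVB before restarting in shared mode.
 */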
static int do_glock(struct gfs2_quota_data *qd, int force_refresh,
		    struct gfs2_holder *q_gh)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
	struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
	struct gfs2_holder i_gh;
	struct gfs2_quota_host q;
	char buf[sizeof(struct gfs2_quota)];
	int error;
	struct gfs2_quota_lvb *qlvb;

restart:
	error = gfs2_glock_nq_init(qd->qd_gl, LM_ST_SHARED, 0, q_gh);
	if (error)
		return error;

	qd->qd_qb = *(struct gfs2_quota_lvb *)qd->qd_gl->gl_lvb;

	if (force_refresh || qd->qd_qb.qb_magic != cpu_to_be32(GFS2_MAGIC)) {
		loff_t pos;
		gfs2_glock_dq_uninit(q_gh);
		error = gfs2_glock_nq_init(qd->qd_gl, LM_ST_EXCLUSIVE,
					   GL_NOCACHE, q_gh);
		if (error)
			return error;

		error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, 0, &i_gh);
		if (error)
			goto fail;

		memset(buf, 0, sizeof(struct gfs2_quota));
		pos = qd2offset(qd);
		error = gfs2_internal_read(ip, NULL, buf, &pos,
					   sizeof(struct gfs2_quota));
		if (error < 0)
			goto fail_gunlock;

		gfs2_glock_dq_uninit(&i_gh);

		gfs2_quota_in(&q, buf);
		qlvb = (struct gfs2_quota_lvb *)qd->qd_gl->gl_lvb;
		qlvb->qb_magic = cpu_to_be32(GFS2_MAGIC);
		qlvb->__pad = 0;
		qlvb->qb_limit = cpu_to_be64(q.qu_limit);
		qlvb->qb_warn = cpu_to_be64(q.qu_warn);
		qlvb->qb_value = cpu_to_be64(q.qu_value);
		qd->qd_qb = *qlvb;

		gfs2_glock_dq_uninit(q_gh);
		force_refresh = 0;
		goto restart;
	}

	return 0;

fail_gunlock:
	gfs2_glock_dq_uninit(&i_gh);
fail:
	gfs2_glock_dq_uninit(q_gh);
	return error;
}

int gfs2_quota_lock(struct gfs2_inode *ip, u32 uid, u32 gid)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_alloc *al = ip->i_alloc;
	unsigned int x;
	int error = 0;

	gfs2_quota_hold(ip, uid, gid);

	if (capable(CAP_SYS_RESOURCE) ||
	    sdp->sd_args.ar_quota != GFS2_QUOTA_ON)
		return 0;

	sort(al->al_qd, al->al_qd_num, sizeof(struct gfs2_quota_data *),
	     sort_qd, NULL);

	for (x = 0; x < al->al_qd_num; x++) {
		error = do_glock(al->al_qd[x], NO_FORCE, &al->al_qd_ghs[x]);
		if (error)
			break;
	}

	if (!error)
		set_bit(GIF_QD_LOCKED, &ip->i_flags);
	else {
		while (x--)
			gfs2_glock_dq_uninit(&al->al_qd_ghs[x]);
		gfs2_quota_unhold(ip);
	}

	return error;
}

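/*
 * need_sync - decide whether a local change warrants an early sync
 *
 * Implements the quota_scale heuristic described at the top of this
 * file: the closer the scaled projection gets to the limit, the sooner
 * the change is pushed to the quota file.
 */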
static int need_sync(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
	struct gfs2_tune *gt = &sdp->sd_tune;
	s64 value;
	unsigned int num, den;
	int do_sync = 1;

	if (!qd->qd_qb.qb_limit)
		return 0;

	spin_lock(&qd_lru_lock);
	value = qd->qd_change;
	spin_unlock(&qd_lru_lock);

	spin_lock(&gt->gt_spin);
	num = gt->gt_quota_scale_num;
	den = gt->gt_quota_scale_den;
	spin_unlock(&gt->gt_spin);

	if (value < 0)
		do_sync = 0;
	else if ((s64)be64_to_cpu(qd->qd_qb.qb_value) >=
		 (s64)be64_to_cpu(qd->qd_qb.qb_limit))
		do_sync = 0;
	else {
		value *= gfs2_jindex_size(sdp) * num;
		value = div_s64(value, den);
		value += (s64)be64_to_cpu(qd->qd_qb.qb_value);
		if (value < (s64)be64_to_cpu(qd->qd_qb.qb_limit))
			do_sync = 0;
	}

	return do_sync;
}

void gfs2_quota_unlock(struct gfs2_inode *ip)
{
	struct gfs2_alloc *al = ip->i_alloc;
	struct gfs2_quota_data *qda[4];
	unsigned int count = 0;
	unsigned int x;

	if (!test_and_clear_bit(GIF_QD_LOCKED, &ip->i_flags))
		goto out;

	for (x = 0; x < al->al_qd_num; x++) {
		struct gfs2_quota_data *qd;
		int sync;

		qd = al->al_qd[x];
		sync = need_sync(qd);

		gfs2_glock_dq_uninit(&al->al_qd_ghs[x]);

		if (sync && qd_trylock(qd))
			qda[count++] = qd;
	}

	if (count) {
		do_sync(count, qda);
		for (x = 0; x < count; x++)
			qd_unlock(qda[x]);
	}

out:
	gfs2_quota_unhold(ip);
}

#define MAX_LINE 256

static int print_message(struct gfs2_quota_data *qd, char *type)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;

	printk(KERN_INFO "GFS2: fsid=%s: quota %s for %s %u\n",
	       sdp->sd_fsname, type,
	       (test_bit(QDF_USER, &qd->qd_flags)) ? "user" : "group",
	       qd->qd_id);

	return 0;
}

int gfs2_quota_check(struct gfs2_inode *ip, u32 uid, u32 gid)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_alloc *al = ip->i_alloc;
	struct gfs2_quota_data *qd;
	s64 value;
	unsigned int x;
	int error = 0;

	if (!test_bit(GIF_QD_LOCKED, &ip->i_flags))
		return 0;

	if (sdp->sd_args.ar_quota != GFS2_QUOTA_ON)
		return 0;

	for (x = 0; x < al->al_qd_num; x++) {
		qd = al->al_qd[x];

		if (!((qd->qd_id == uid && test_bit(QDF_USER, &qd->qd_flags)) ||
		      (qd->qd_id == gid && !test_bit(QDF_USER, &qd->qd_flags))))
			continue;

		value = (s64)be64_to_cpu(qd->qd_qb.qb_value);
		spin_lock(&qd_lru_lock);
		value += qd->qd_change;
		spin_unlock(&qd_lru_lock);

		if (be64_to_cpu(qd->qd_qb.qb_limit) && (s64)be64_to_cpu(qd->qd_qb.qb_limit) < value) {
			print_message(qd, "exceeded");
			error = -EDQUOT;
			break;
		} else if (be64_to_cpu(qd->qd_qb.qb_warn) &&
			   (s64)be64_to_cpu(qd->qd_qb.qb_warn) < value &&
			   time_after_eq(jiffies, qd->qd_last_warn +
					 gfs2_tune_get(sdp,
						gt_quota_warn_period) * HZ)) {
			error = print_message(qd, "warning");
			qd->qd_last_warn = jiffies;
		}
	}

	return error;
}

void gfs2_quota_change(struct gfs2_inode *ip, s64 change,
		       u32 uid, u32 gid)
{
	struct gfs2_alloc *al = ip->i_alloc;
	struct gfs2_quota_data *qd;
	unsigned int x;

	if (gfs2_assert_warn(GFS2_SB(&ip->i_inode), change))
		return;
	if (ip->i_diskflags & GFS2_DIF_SYSTEM)
		return;

	for (x = 0; x < al->al_qd_num; x++) {
		qd = al->al_qd[x];

		if ((qd->qd_id == uid && test_bit(QDF_USER, &qd->qd_flags)) ||
		    (qd->qd_id == gid && !test_bit(QDF_USER, &qd->qd_flags))) {
			do_qc(qd, change);
		}
	}
}

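/*
 * gfs2_quota_sync - sync all pending quota changes for this filesystem
 *
 * Bumps the sync generation, then repeatedly fishes out dirty quota
 * data items and writes them back in batches of up to
 * gt_quota_simul_sync at a time.
 */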
int gfs2_quota_sync(struct super_block *sb, int type)
{
	struct gfs2_sbd *sdp = sb->s_fs_info;
	struct gfs2_quota_data **qda;
	unsigned int max_qd = gfs2_tune_get(sdp, gt_quota_simul_sync);
	unsigned int num_qd;
	unsigned int x;
	int error = 0;

	sdp->sd_quota_sync_gen++;

	qda = kcalloc(max_qd, sizeof(struct gfs2_quota_data *), GFP_KERNEL);
	if (!qda)
		return -ENOMEM;

	do {
		num_qd = 0;

		for (;;) {
			error = qd_fish(sdp, qda + num_qd);
			if (error || !qda[num_qd])
				break;
			if (++num_qd == max_qd)
				break;
		}

		if (num_qd) {
			if (!error)
				error = do_sync(num_qd, qda);
			if (!error)
				for (x = 0; x < num_qd; x++)
					qda[x]->qd_sync_gen =
						sdp->sd_quota_sync_gen;

			for (x = 0; x < num_qd; x++)
				qd_unlock(qda[x]);
		}
	} while (!error && num_qd == max_qd);

	kfree(qda);

	return error;
}

int gfs2_quota_refresh(struct gfs2_sbd *sdp, int user, u32 id)
{
	struct gfs2_quota_data *qd;
	struct gfs2_holder q_gh;
	int error;

	error = qd_get(sdp, user, id, &qd);
	if (error)
		return error;

	error = do_glock(qd, FORCE, &q_gh);
	if (!error)
		gfs2_glock_dq_uninit(&q_gh);

	qd_put(qd);

	return error;
}

static void gfs2_quota_change_in(struct gfs2_quota_change_host *qc, const void *buf)
{
	const struct gfs2_quota_change *str = buf;

	qc->qc_change = be64_to_cpu(str->qc_change);
	qc->qc_flags = be32_to_cpu(str->qc_flags);
	qc->qc_id = be32_to_cpu(str->qc_id);
}

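/*
 * gfs2_quota_init - rebuild in-core quota state at mount time
 *
 * Walks the quota change file; every slot holding a non-zero change
 * gets a qd item marked QDF_CHANGE with its bitmap slot claimed, so
 * unsynced changes from before the last unmount or crash survive.
 */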
int gfs2_quota_init(struct gfs2_sbd *sdp)
{
	struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode);
	unsigned int blocks = ip->i_disksize >> sdp->sd_sb.sb_bsize_shift;
	unsigned int x, slot = 0;
	unsigned int found = 0;
	u64 dblock;
	u32 extlen = 0;
	int error;

	if (!ip->i_disksize || ip->i_disksize > (64 << 20) ||
	    ip->i_disksize & (sdp->sd_sb.sb_bsize - 1)) {
		gfs2_consist_inode(ip);
		return -EIO;
	}
	sdp->sd_quota_slots = blocks * sdp->sd_qc_per_block;
	sdp->sd_quota_chunks = DIV_ROUND_UP(sdp->sd_quota_slots, 8 * PAGE_SIZE);

	error = -ENOMEM;

	sdp->sd_quota_bitmap = kcalloc(sdp->sd_quota_chunks,
				       sizeof(unsigned char *), GFP_NOFS);
	if (!sdp->sd_quota_bitmap)
		return error;

	for (x = 0; x < sdp->sd_quota_chunks; x++) {
		sdp->sd_quota_bitmap[x] = kzalloc(PAGE_SIZE, GFP_NOFS);
		if (!sdp->sd_quota_bitmap[x])
			goto fail;
	}

	for (x = 0; x < blocks; x++) {
		struct buffer_head *bh;
		unsigned int y;

		if (!extlen) {
			int new = 0;
			error = gfs2_extent_map(&ip->i_inode, x, &new, &dblock, &extlen);
			if (error)
				goto fail;
		}
		error = -EIO;
		bh = gfs2_meta_ra(ip->i_gl, dblock, extlen);
		if (!bh)
			goto fail;
		if (gfs2_metatype_check(sdp, bh, GFS2_METATYPE_QC)) {
			brelse(bh);
			goto fail;
		}

		for (y = 0; y < sdp->sd_qc_per_block && slot < sdp->sd_quota_slots;
		     y++, slot++) {
			struct gfs2_quota_change_host qc;
			struct gfs2_quota_data *qd;

			gfs2_quota_change_in(&qc, bh->b_data +
					  sizeof(struct gfs2_meta_header) +
					  y * sizeof(struct gfs2_quota_change));
			if (!qc.qc_change)
				continue;

			error = qd_alloc(sdp, (qc.qc_flags & GFS2_QCF_USER),
					 qc.qc_id, &qd);
			if (error) {
				brelse(bh);
				goto fail;
			}

			set_bit(QDF_CHANGE, &qd->qd_flags);
			qd->qd_change = qc.qc_change;
			qd->qd_slot = slot;
			qd->qd_slot_count = 1;

			spin_lock(&qd_lru_lock);
			gfs2_icbit_munge(sdp, sdp->sd_quota_bitmap, slot, 1);
			list_add(&qd->qd_list, &sdp->sd_quota_list);
			atomic_inc(&sdp->sd_quota_count);
			spin_unlock(&qd_lru_lock);

			found++;
		}

		brelse(bh);
		dblock++;
		extlen--;
	}

	if (found)
		fs_info(sdp, "found %u quota changes\n", found);

	return 0;

fail:
	gfs2_quota_cleanup(sdp);
	return error;
}

void gfs2_quota_cleanup(struct gfs2_sbd *sdp)
{
	struct list_head *head = &sdp->sd_quota_list;
	struct gfs2_quota_data *qd;
	unsigned int x;

	spin_lock(&qd_lru_lock);
	while (!list_empty(head)) {
		qd = list_entry(head->prev, struct gfs2_quota_data, qd_list);

		if (atomic_read(&qd->qd_count) > 1 ||
		    (atomic_read(&qd->qd_count) &&
		     !test_bit(QDF_CHANGE, &qd->qd_flags))) {
			list_move(&qd->qd_list, head);
			spin_unlock(&qd_lru_lock);
			schedule();
			spin_lock(&qd_lru_lock);
			continue;
		}

		list_del(&qd->qd_list);
		/* Also remove if this qd exists in the reclaim list */
		if (!list_empty(&qd->qd_reclaim)) {
			list_del_init(&qd->qd_reclaim);
			atomic_dec(&qd_lru_count);
		}
		atomic_dec(&sdp->sd_quota_count);
		spin_unlock(&qd_lru_lock);

		if (!atomic_read(&qd->qd_count)) {
			gfs2_assert_warn(sdp, !qd->qd_change);
			gfs2_assert_warn(sdp, !qd->qd_slot_count);
		} else
			gfs2_assert_warn(sdp, qd->qd_slot_count == 1);
		gfs2_assert_warn(sdp, !qd->qd_bh_count);

		gfs2_glock_put(qd->qd_gl);
		kmem_cache_free(gfs2_quotad_cachep, qd);

		spin_lock(&qd_lru_lock);
	}
	spin_unlock(&qd_lru_lock);

	gfs2_assert_warn(sdp, !atomic_read(&sdp->sd_quota_count));

	if (sdp->sd_quota_bitmap) {
		for (x = 0; x < sdp->sd_quota_chunks; x++)
			kfree(sdp->sd_quota_bitmap[x]);
		kfree(sdp->sd_quota_bitmap);
	}
}

static void quotad_error(struct gfs2_sbd *sdp, const char *msg, int error)
{
	if (error == 0 || error == -EROFS)
		return;
	if (!test_bit(SDF_SHUTDOWN, &sdp->sd_flags))
		fs_err(sdp, "gfs2_quotad: %s error %d\n", msg, error);
}

static void quotad_check_timeo(struct gfs2_sbd *sdp, const char *msg,
			       int (*fxn)(struct super_block *sb, int type),
			       unsigned long t, unsigned long *timeo,
			       unsigned int *new_timeo)
{
	if (t >= *timeo) {
		int error = fxn(sdp->sd_vfs, 0);
		quotad_error(sdp, msg, error);
		*timeo = gfs2_tune_get_i(&sdp->sd_tune, new_timeo) * HZ;
	} else {
		*timeo -= t;
	}
}

static void quotad_check_trunc_list(struct gfs2_sbd *sdp)
{
	struct gfs2_inode *ip;

	while(1) {
		ip = NULL;
		spin_lock(&sdp->sd_trunc_lock);
		if (!list_empty(&sdp->sd_trunc_list)) {
			ip = list_entry(sdp->sd_trunc_list.next,
					struct gfs2_inode, i_trunc_list);
			list_del_init(&ip->i_trunc_list);
		}
		spin_unlock(&sdp->sd_trunc_lock);
		if (ip == NULL)
			return;
		gfs2_glock_finish_truncate(ip);
	}
}

/**
 * gfs2_quotad - Write cached quota changes into the quota file
 * @sdp: Pointer to GFS2 superblock
 *
 */

int gfs2_quotad(void *data)
{
	struct gfs2_sbd *sdp = data;
	struct gfs2_tune *tune = &sdp->sd_tune;
	unsigned long statfs_timeo = 0;
	unsigned long quotad_timeo = 0;
	unsigned long t = 0;
	DEFINE_WAIT(wait);
	int empty;

	while (!kthread_should_stop()) {

		/* Update the master statfs file */
		quotad_check_timeo(sdp, "statfs", gfs2_statfs_sync, t,
				   &statfs_timeo, &tune->gt_statfs_quantum);

		/* Update quota file */
		quotad_check_timeo(sdp, "sync", gfs2_quota_sync, t,
				   &quotad_timeo, &tune->gt_quota_quantum);

		/* Check for & recover partially truncated inodes */
		quotad_check_trunc_list(sdp);

		if (freezing(current))
			refrigerator();
		t = min(quotad_timeo, statfs_timeo);

		prepare_to_wait(&sdp->sd_quota_wait, &wait, TASK_INTERRUPTIBLE);
		spin_lock(&sdp->sd_trunc_lock);
		empty = list_empty(&sdp->sd_trunc_list);
		spin_unlock(&sdp->sd_trunc_lock);
		if (empty)
			t -= schedule_timeout(t);
		else
			t = 0;
		finish_wait(&sdp->sd_quota_wait, &wait);
	}

	return 0;
}

static int gfs2_quota_get_xstate(struct super_block *sb,
				 struct fs_quota_stat *fqs)
{
	struct gfs2_sbd *sdp = sb->s_fs_info;

	memset(fqs, 0, sizeof(struct fs_quota_stat));
	fqs->qs_version = FS_QSTAT_VERSION;
	if (sdp->sd_args.ar_quota == GFS2_QUOTA_ON)
		fqs->qs_flags = (XFS_QUOTA_UDQ_ENFD | XFS_QUOTA_GDQ_ENFD);
	else if (sdp->sd_args.ar_quota == GFS2_QUOTA_ACCOUNT)
		fqs->qs_flags = (XFS_QUOTA_UDQ_ACCT | XFS_QUOTA_GDQ_ACCT);
	if (sdp->sd_quota_inode) {
		fqs->qs_uquota.qfs_ino = GFS2_I(sdp->sd_quota_inode)->i_no_addr;
		fqs->qs_uquota.qfs_nblks = sdp->sd_quota_inode->i_blocks;
	}
	fqs->qs_uquota.qfs_nextents = 1; /* unsupported */
	fqs->qs_gquota = fqs->qs_uquota; /* it's the same inode in both cases */
	fqs->qs_incoredqs = atomic_read(&qd_lru_count);
	return 0;
}

const struct quotactl_ops gfs2_quotactl_ops = {
	.quota_sync     = gfs2_quota_sync,
	.get_xstate     = gfs2_quota_get_xstate,
};