/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2007 Red Hat, Inc.  All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

/*
 * Quota change tags are associated with each transaction that allocates or
 * deallocates space.  Those changes are accumulated locally to each node (in a
 * per-node file) and then are periodically synced to the quota file.  This
 * avoids the bottleneck of constantly touching the quota file, but introduces
 * fuzziness in the current usage value of IDs that are being used on different
 * nodes in the cluster simultaneously.  So, it is possible for a user on
 * multiple nodes to overrun their quota, but that overrun is controllable.
 * Since quota tags are part of transactions, there is no need for a quota check
 * program to be run on node crashes or anything like that.
 *
 * There are a couple of knobs that let the administrator manage the quota
 * fuzziness.  "quota_quantum" sets the maximum time a quota change can be
 * sitting on one node before being synced to the quota file.  (The default is
 * 60 seconds.)  Another knob, "quota_scale" controls how quickly the frequency
 * of quota file syncs increases as the user moves closer to their limit.  The
 * more frequent the syncs, the more accurate the quota enforcement, but that
 * means that there is more contention between the nodes for the quota file.
 * The default value is one.  This sets the maximum theoretical quota overrun
 * (with an infinite number of nodes and infinite bandwidth) to twice the user's limit.  (In
 * practice, the maximum overrun you see should be much less.)  A "quota_scale"
 * number greater than one makes quota syncs more frequent and reduces the
 * maximum overrun.  Numbers less than one (but greater than zero) make quota
 * syncs less frequent.
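 *
 * For example (illustrative numbers only): with four journals (nodes), a
 * "quota_scale" of one, a 1000-block limit and 900 blocks already recorded
 * in the quota file, a local unsynced change of 25 blocks gives
 * 25 * 4 * 1 + 900 = 1000 >= 1000, so that node syncs its change to the
 * quota file (see need_sync() below).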
 *
 * GFS quotas also use per-ID Lock Value Blocks (LVBs) to cache the contents of
 * the quota file, so it is not being constantly read.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/sort.h>
#include <linux/fs.h>
#include <linux/bio.h>
#include <linux/gfs2_ondisk.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/quota.h>
#include <linux/dqblk_xfs.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "glock.h"
#include "glops.h"
#include "log.h"
#include "meta_io.h"
#include "quota.h"
#include "rgrp.h"
#include "super.h"
#include "trans.h"
#include "inode.h"
#include "util.h"

#define QUOTA_USER 1
#define QUOTA_GROUP 0

struct gfs2_quota_change_host {
	u64 qc_change;
	u32 qc_flags; /* GFS2_QCF_... */
	u32 qc_id;
};

static LIST_HEAD(qd_lru_list);
static atomic_t qd_lru_count = ATOMIC_INIT(0);
static DEFINE_SPINLOCK(qd_lru_lock);

int gfs2_shrink_qd_memory(int nr, gfp_t gfp_mask)
{
	struct gfs2_quota_data *qd;
	struct gfs2_sbd *sdp;

	if (nr == 0)
		goto out;

	if (!(gfp_mask & __GFP_FS))
		return -1;

	spin_lock(&qd_lru_lock);
	while (nr && !list_empty(&qd_lru_list)) {
		qd = list_entry(qd_lru_list.next,
				struct gfs2_quota_data, qd_reclaim);
		sdp = qd->qd_gl->gl_sbd;

		/* Free from the filesystem-specific list */
		list_del(&qd->qd_list);

		gfs2_assert_warn(sdp, !qd->qd_change);
		gfs2_assert_warn(sdp, !qd->qd_slot_count);
		gfs2_assert_warn(sdp, !qd->qd_bh_count);

		gfs2_glock_put(qd->qd_gl);
		atomic_dec(&sdp->sd_quota_count);

		/* Delete it from the common reclaim list */
		list_del_init(&qd->qd_reclaim);
		atomic_dec(&qd_lru_count);
		spin_unlock(&qd_lru_lock);
		kmem_cache_free(gfs2_quotad_cachep, qd);
		spin_lock(&qd_lru_lock);
		nr--;
	}
	spin_unlock(&qd_lru_lock);

out:
	return (atomic_read(&qd_lru_count) * sysctl_vfs_cache_pressure) / 100;
}

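/*
 * Each ID owns one struct gfs2_quota in the quota file; the user and group
 * entries for a given ID are adjacent, with the user entry first.
 */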
static u64 qd2offset(struct gfs2_quota_data *qd)
{
	u64 offset;

	offset = 2 * (u64)qd->qd_id + !test_bit(QDF_USER, &qd->qd_flags);
	offset *= sizeof(struct gfs2_quota);

	return offset;
}

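/*
 * Allocate an in-core quota structure for the given ID with one reference
 * held, and attach the glock that protects its on-disk entry.
 */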
static int qd_alloc(struct gfs2_sbd *sdp, int user, u32 id,
		    struct gfs2_quota_data **qdp)
{
	struct gfs2_quota_data *qd;
	int error;

	qd = kmem_cache_zalloc(gfs2_quotad_cachep, GFP_NOFS);
	if (!qd)
		return -ENOMEM;

	atomic_set(&qd->qd_count, 1);
	qd->qd_id = id;
	if (user)
		set_bit(QDF_USER, &qd->qd_flags);
	qd->qd_slot = -1;
	INIT_LIST_HEAD(&qd->qd_reclaim);

	error = gfs2_glock_get(sdp, 2 * (u64)id + !user,
			      &gfs2_quota_glops, CREATE, &qd->qd_gl);
	if (error)
		goto fail;

	*qdp = qd;

	return 0;

fail:
	kmem_cache_free(gfs2_quotad_cachep, qd);
	return error;
}

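/*
 * Find the in-core quota structure for (user, id) on this filesystem's list,
 * taking a reference and pulling it off the reclaim list if necessary.  If it
 * does not exist yet, allocate one and retry the search so that racing
 * lookups end up sharing a single structure.
 */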
static int qd_get(struct gfs2_sbd *sdp, int user, u32 id,
		  struct gfs2_quota_data **qdp)
{
	struct gfs2_quota_data *qd = NULL, *new_qd = NULL;
	int error, found;

	*qdp = NULL;

	for (;;) {
		found = 0;
		spin_lock(&qd_lru_lock);
		list_for_each_entry(qd, &sdp->sd_quota_list, qd_list) {
			if (qd->qd_id == id &&
			    !test_bit(QDF_USER, &qd->qd_flags) == !user) {
				if (!atomic_read(&qd->qd_count) &&
				    !list_empty(&qd->qd_reclaim)) {
					/* Remove it from reclaim list */
					list_del_init(&qd->qd_reclaim);
					atomic_dec(&qd_lru_count);
				}
				atomic_inc(&qd->qd_count);
				found = 1;
				break;
			}
		}

		if (!found)
			qd = NULL;

		if (!qd && new_qd) {
			qd = new_qd;
			list_add(&qd->qd_list, &sdp->sd_quota_list);
			atomic_inc(&sdp->sd_quota_count);
			new_qd = NULL;
		}

		spin_unlock(&qd_lru_lock);

		if (qd) {
			if (new_qd) {
				gfs2_glock_put(new_qd->qd_gl);
				kmem_cache_free(gfs2_quotad_cachep, new_qd);
			}
			*qdp = qd;
			return 0;
		}

		error = qd_alloc(sdp, user, id, &new_qd);
		if (error)
			return error;
	}
}

static void qd_hold(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
	gfs2_assert(sdp, atomic_read(&qd->qd_count));
	atomic_inc(&qd->qd_count);
}

static void qd_put(struct gfs2_quota_data *qd)
{
	if (atomic_dec_and_lock(&qd->qd_count, &qd_lru_lock)) {
		/* Add to the reclaim list */
		list_add_tail(&qd->qd_reclaim, &qd_lru_list);
		atomic_inc(&qd_lru_count);
		spin_unlock(&qd_lru_lock);
	}
}

static int slot_get(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
	unsigned int c, o = 0, b;
	unsigned char byte = 0;

	spin_lock(&qd_lru_lock);

	if (qd->qd_slot_count++) {
		spin_unlock(&qd_lru_lock);
		return 0;
	}

	for (c = 0; c < sdp->sd_quota_chunks; c++)
		for (o = 0; o < PAGE_SIZE; o++) {
			byte = sdp->sd_quota_bitmap[c][o];
			if (byte != 0xFF)
				goto found;
		}

	goto fail;

found:
	for (b = 0; b < 8; b++)
		if (!(byte & (1 << b)))
			break;
	qd->qd_slot = c * (8 * PAGE_SIZE) + o * 8 + b;

	if (qd->qd_slot >= sdp->sd_quota_slots)
		goto fail;

	sdp->sd_quota_bitmap[c][o] |= 1 << b;

	spin_unlock(&qd_lru_lock);

	return 0;

fail:
	qd->qd_slot_count--;
	spin_unlock(&qd_lru_lock);
	return -ENOSPC;
}

static void slot_hold(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;

	spin_lock(&qd_lru_lock);
	gfs2_assert(sdp, qd->qd_slot_count);
	qd->qd_slot_count++;
	spin_unlock(&qd_lru_lock);
}

static void slot_put(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;

	spin_lock(&qd_lru_lock);
	gfs2_assert(sdp, qd->qd_slot_count);
	if (!--qd->qd_slot_count) {
		gfs2_icbit_munge(sdp, sdp->sd_quota_bitmap, qd->qd_slot, 0);
		qd->qd_slot = -1;
	}
	spin_unlock(&qd_lru_lock);
}

static int bh_get(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
	struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode);
	unsigned int block, offset;
	struct buffer_head *bh;
	int error;
	struct buffer_head bh_map = { .b_state = 0, .b_blocknr = 0 };

	mutex_lock(&sdp->sd_quota_mutex);

	if (qd->qd_bh_count++) {
		mutex_unlock(&sdp->sd_quota_mutex);
		return 0;
	}

	block = qd->qd_slot / sdp->sd_qc_per_block;
	offset = qd->qd_slot % sdp->sd_qc_per_block;

	bh_map.b_size = 1 << ip->i_inode.i_blkbits;
	error = gfs2_block_map(&ip->i_inode, block, &bh_map, 0);
	if (error)
		goto fail;
	error = gfs2_meta_read(ip->i_gl, bh_map.b_blocknr, DIO_WAIT, &bh);
	if (error)
		goto fail;
	error = -EIO;
	if (gfs2_metatype_check(sdp, bh, GFS2_METATYPE_QC))
		goto fail_brelse;

	qd->qd_bh = bh;
	qd->qd_bh_qc = (struct gfs2_quota_change *)
		(bh->b_data + sizeof(struct gfs2_meta_header) +
		 offset * sizeof(struct gfs2_quota_change));

	mutex_unlock(&sdp->sd_quota_mutex);

	return 0;

fail_brelse:
	brelse(bh);
fail:
	qd->qd_bh_count--;
	mutex_unlock(&sdp->sd_quota_mutex);
	return error;
}

static void bh_put(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;

	mutex_lock(&sdp->sd_quota_mutex);
	gfs2_assert(sdp, qd->qd_bh_count);
	if (!--qd->qd_bh_count) {
		brelse(qd->qd_bh);
		qd->qd_bh = NULL;
		qd->qd_bh_qc = NULL;
	}
	mutex_unlock(&sdp->sd_quota_mutex);
}

static int qd_fish(struct gfs2_sbd *sdp, struct gfs2_quota_data **qdp)
{
	struct gfs2_quota_data *qd = NULL;
	int error;
	int found = 0;

	*qdp = NULL;

	if (sdp->sd_vfs->s_flags & MS_RDONLY)
		return 0;

	spin_lock(&qd_lru_lock);

	list_for_each_entry(qd, &sdp->sd_quota_list, qd_list) {
		if (test_bit(QDF_LOCKED, &qd->qd_flags) ||
		    !test_bit(QDF_CHANGE, &qd->qd_flags) ||
		    qd->qd_sync_gen >= sdp->sd_quota_sync_gen)
			continue;

		list_move_tail(&qd->qd_list, &sdp->sd_quota_list);

		set_bit(QDF_LOCKED, &qd->qd_flags);
		gfs2_assert_warn(sdp, atomic_read(&qd->qd_count));
		atomic_inc(&qd->qd_count);
		qd->qd_change_sync = qd->qd_change;
		gfs2_assert_warn(sdp, qd->qd_slot_count);
		qd->qd_slot_count++;
		found = 1;

		break;
	}

	if (!found)
		qd = NULL;

	spin_unlock(&qd_lru_lock);

	if (qd) {
		gfs2_assert_warn(sdp, qd->qd_change_sync);
		error = bh_get(qd);
		if (error) {
			clear_bit(QDF_LOCKED, &qd->qd_flags);
			slot_put(qd);
			qd_put(qd);
			return error;
		}
	}

	*qdp = qd;

	return 0;
}

static int qd_trylock(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;

	if (sdp->sd_vfs->s_flags & MS_RDONLY)
		return 0;

	spin_lock(&qd_lru_lock);

	if (test_bit(QDF_LOCKED, &qd->qd_flags) ||
	    !test_bit(QDF_CHANGE, &qd->qd_flags)) {
		spin_unlock(&qd_lru_lock);
		return 0;
	}

	list_move_tail(&qd->qd_list, &sdp->sd_quota_list);

	set_bit(QDF_LOCKED, &qd->qd_flags);
	gfs2_assert_warn(sdp, atomic_read(&qd->qd_count));
	atomic_inc(&qd->qd_count);
	qd->qd_change_sync = qd->qd_change;
	gfs2_assert_warn(sdp, qd->qd_slot_count);
	qd->qd_slot_count++;

	spin_unlock(&qd_lru_lock);

	gfs2_assert_warn(sdp, qd->qd_change_sync);
	if (bh_get(qd)) {
		clear_bit(QDF_LOCKED, &qd->qd_flags);
		slot_put(qd);
		qd_put(qd);
		return 0;
	}

	return 1;
}

static void qd_unlock(struct gfs2_quota_data *qd)
{
	gfs2_assert_warn(qd->qd_gl->gl_sbd,
			 test_bit(QDF_LOCKED, &qd->qd_flags));
	clear_bit(QDF_LOCKED, &qd->qd_flags);
	bh_put(qd);
	slot_put(qd);
	qd_put(qd);
}

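/*
 * Reference a quota structure together with its quota change file slot and
 * buffer, as needed before recording changes against it.
 */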
static int qdsb_get(struct gfs2_sbd *sdp, int user, u32 id,
		    struct gfs2_quota_data **qdp)
{
	int error;

	error = qd_get(sdp, user, id, qdp);
	if (error)
		return error;

	error = slot_get(*qdp);
	if (error)
		goto fail;

	error = bh_get(*qdp);
	if (error)
		goto fail_slot;

	return 0;

fail_slot:
	slot_put(*qdp);
fail:
	qd_put(*qdp);
	return error;
}

static void qdsb_put(struct gfs2_quota_data *qd)
{
	bh_put(qd);
	slot_put(qd);
	qd_put(qd);
}

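/**
 * gfs2_quota_hold - Hold the quota structures an operation may charge
 * @ip: The inode
 * @uid: A user ID to hold in addition to the inode's owner, or NO_QUOTA_CHANGE
 * @gid: A group ID to hold in addition to the inode's group, or NO_QUOTA_CHANGE
 *
 * Returns: errno
 */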
int gfs2_quota_hold(struct gfs2_inode *ip, u32 uid, u32 gid)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_alloc *al = ip->i_alloc;
	struct gfs2_quota_data **qd = al->al_qd;
	int error;

	if (gfs2_assert_warn(sdp, !al->al_qd_num) ||
	    gfs2_assert_warn(sdp, !test_bit(GIF_QD_LOCKED, &ip->i_flags)))
		return -EIO;

	if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
		return 0;

	error = qdsb_get(sdp, QUOTA_USER, ip->i_inode.i_uid, qd);
	if (error)
		goto out;
	al->al_qd_num++;
	qd++;

	error = qdsb_get(sdp, QUOTA_GROUP, ip->i_inode.i_gid, qd);
	if (error)
		goto out;
	al->al_qd_num++;
	qd++;

	if (uid != NO_QUOTA_CHANGE && uid != ip->i_inode.i_uid) {
		error = qdsb_get(sdp, QUOTA_USER, uid, qd);
		if (error)
			goto out;
		al->al_qd_num++;
		qd++;
	}

	if (gid != NO_QUOTA_CHANGE && gid != ip->i_inode.i_gid) {
		error = qdsb_get(sdp, QUOTA_GROUP, gid, qd);
		if (error)
			goto out;
		al->al_qd_num++;
		qd++;
	}

out:
	if (error)
		gfs2_quota_unhold(ip);
	return error;
}

void gfs2_quota_unhold(struct gfs2_inode *ip)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_alloc *al = ip->i_alloc;
	unsigned int x;

	gfs2_assert_warn(sdp, !test_bit(GIF_QD_LOCKED, &ip->i_flags));

	for (x = 0; x < al->al_qd_num; x++) {
		qdsb_put(al->al_qd[x]);
		al->al_qd[x] = NULL;
	}
	al->al_qd_num = 0;
}

static int sort_qd(const void *a, const void *b)
{
	const struct gfs2_quota_data *qd_a = *(const struct gfs2_quota_data **)a;
	const struct gfs2_quota_data *qd_b = *(const struct gfs2_quota_data **)b;

	if (!test_bit(QDF_USER, &qd_a->qd_flags) !=
	    !test_bit(QDF_USER, &qd_b->qd_flags)) {
		if (test_bit(QDF_USER, &qd_a->qd_flags))
			return -1;
		else
			return 1;
	}
	if (qd_a->qd_id < qd_b->qd_id)
		return -1;
	if (qd_a->qd_id > qd_b->qd_id)
		return 1;

	return 0;
}

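/*
 * Record a local quota change in this node's quota change file and in the
 * in-core counter.  The change is later folded into the global quota file
 * by do_sync().
 */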
static void do_qc(struct gfs2_quota_data *qd, s64 change)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
	struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode);
	struct gfs2_quota_change *qc = qd->qd_bh_qc;
	s64 x;

	mutex_lock(&sdp->sd_quota_mutex);
	gfs2_trans_add_bh(ip->i_gl, qd->qd_bh, 1);

	if (!test_bit(QDF_CHANGE, &qd->qd_flags)) {
		qc->qc_change = 0;
		qc->qc_flags = 0;
		if (test_bit(QDF_USER, &qd->qd_flags))
			qc->qc_flags = cpu_to_be32(GFS2_QCF_USER);
		qc->qc_id = cpu_to_be32(qd->qd_id);
	}

	x = be64_to_cpu(qc->qc_change) + change;
	qc->qc_change = cpu_to_be64(x);

	spin_lock(&qd_lru_lock);
	qd->qd_change = x;
	spin_unlock(&qd_lru_lock);

	if (!x) {
		gfs2_assert_warn(sdp, test_bit(QDF_CHANGE, &qd->qd_flags));
		clear_bit(QDF_CHANGE, &qd->qd_flags);
		qc->qc_flags = 0;
		qc->qc_id = 0;
		slot_put(qd);
		qd_put(qd);
	} else if (!test_and_set_bit(QDF_CHANGE, &qd->qd_flags)) {
		qd_hold(qd);
		slot_hold(qd);
	}

	mutex_unlock(&sdp->sd_quota_mutex);
}

/**
 * gfs2_adjust_quota - adjust record of current block usage
 * @ip: The quota inode
 * @loc: Offset of the entry in the quota file
 * @change: The amount of usage change to record
 * @qd: The quota data
 * @fdq: The updated limits to record
 *
 * This function was mostly borrowed from gfs2_block_truncate_page which was
 * in turn mostly borrowed from ext3
 *
 * Returns: 0 or -ve on error
 */

static int gfs2_adjust_quota(struct gfs2_inode *ip, loff_t loc,
			     s64 change, struct gfs2_quota_data *qd,
			     struct fs_disk_quota *fdq)
{
	struct inode *inode = &ip->i_inode;
	struct address_space *mapping = inode->i_mapping;
	unsigned long index = loc >> PAGE_CACHE_SHIFT;
	unsigned offset = loc & (PAGE_CACHE_SIZE - 1);
	unsigned blocksize, iblock, pos;
	struct buffer_head *bh, *dibh;
	struct page *page;
	void *kaddr, *ptr;
	struct gfs2_quota q, *qp;
	int err, nbytes;
	u64 size;

	if (gfs2_is_stuffed(ip))
		gfs2_unstuff_dinode(ip, NULL);

	memset(&q, 0, sizeof(struct gfs2_quota));
	err = gfs2_internal_read(ip, NULL, (char *)&q, &loc, sizeof(q));
	if (err < 0)
		return err;

	err = -EIO;
	qp = &q;
	qp->qu_value = be64_to_cpu(qp->qu_value);
	qp->qu_value += change;
	qp->qu_value = cpu_to_be64(qp->qu_value);
	qd->qd_qb.qb_value = qp->qu_value;
	if (fdq) {
		if (fdq->d_fieldmask & FS_DQ_BSOFT) {
			qp->qu_warn = cpu_to_be64(fdq->d_blk_softlimit);
			qd->qd_qb.qb_warn = qp->qu_warn;
		}
		if (fdq->d_fieldmask & FS_DQ_BHARD) {
			qp->qu_limit = cpu_to_be64(fdq->d_blk_hardlimit);
			qd->qd_qb.qb_limit = qp->qu_limit;
		}
	}

	/* Write the quota into the quota file on disk */
	ptr = qp;
	nbytes = sizeof(struct gfs2_quota);
get_a_page:
	page = grab_cache_page(mapping, index);
	if (!page)
		return -ENOMEM;

	blocksize = inode->i_sb->s_blocksize;
	iblock = index << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits);

	if (!page_has_buffers(page))
		create_empty_buffers(page, blocksize, 0);

	bh = page_buffers(page);
	pos = blocksize;
	while (offset >= pos) {
		bh = bh->b_this_page;
		iblock++;
		pos += blocksize;
	}

	if (!buffer_mapped(bh)) {
		gfs2_block_map(inode, iblock, bh, 1);
		if (!buffer_mapped(bh))
			goto unlock_out;
		/* If it's a newly allocated disk block for quota, zero it */
		if (buffer_new(bh)) {
			memset(bh->b_data, 0, bh->b_size);
			set_buffer_uptodate(bh);
		}
	}

	if (PageUptodate(page))
		set_buffer_uptodate(bh);

	if (!buffer_uptodate(bh)) {
		ll_rw_block(READ_META, 1, &bh);
		wait_on_buffer(bh);
		if (!buffer_uptodate(bh))
			goto unlock_out;
	}

	gfs2_trans_add_bh(ip->i_gl, bh, 0);

	kaddr = kmap_atomic(page, KM_USER0);
	if (offset + sizeof(struct gfs2_quota) > PAGE_CACHE_SIZE)
		nbytes = PAGE_CACHE_SIZE - offset;
	memcpy(kaddr + offset, ptr, nbytes);
	flush_dcache_page(page);
	kunmap_atomic(kaddr, KM_USER0);
	unlock_page(page);
	page_cache_release(page);

	/* If quota straddles page boundary, we need to update the rest of the
	 * quota at the beginning of the next page */
	if (offset != 0) { /* first page, offset is closer to PAGE_CACHE_SIZE */
		ptr = ptr + nbytes;
		nbytes = sizeof(struct gfs2_quota) - nbytes;
		offset = 0;
		index++;
		goto get_a_page;
	}

	/* Update the disk inode timestamp and size (if extended) */
	err = gfs2_meta_inode_buffer(ip, &dibh);
	if (err)
		goto out;

	size = loc + sizeof(struct gfs2_quota);
	if (size > inode->i_size) {
		ip->i_disksize = size;
		i_size_write(inode, size);
	}
	inode->i_mtime = inode->i_atime = CURRENT_TIME;
	gfs2_trans_add_bh(ip->i_gl, dibh, 1);
	gfs2_dinode_out(ip, dibh->b_data);
	brelse(dibh);
	mark_inode_dirty(inode);

out:
	return err;
unlock_out:
	unlock_page(page);
	page_cache_release(page);
	return err;
}

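/*
 * Write a batch of locally accumulated quota changes into the global quota
 * file, then back the same amounts out of this node's quota change file.
 */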
static int do_sync(unsigned int num_qd, struct gfs2_quota_data **qda)
{
	struct gfs2_sbd *sdp = (*qda)->qd_gl->gl_sbd;
	struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
	unsigned int data_blocks, ind_blocks;
	struct gfs2_holder *ghs, i_gh;
	unsigned int qx, x;
	struct gfs2_quota_data *qd;
	loff_t offset;
	unsigned int nalloc = 0, blocks;
	struct gfs2_alloc *al = NULL;
	int error;

	gfs2_write_calc_reserv(ip, sizeof(struct gfs2_quota),
			      &data_blocks, &ind_blocks);

	ghs = kcalloc(num_qd, sizeof(struct gfs2_holder), GFP_NOFS);
	if (!ghs)
		return -ENOMEM;

	sort(qda, num_qd, sizeof(struct gfs2_quota_data *), sort_qd, NULL);
	mutex_lock_nested(&ip->i_inode.i_mutex, I_MUTEX_QUOTA);
	for (qx = 0; qx < num_qd; qx++) {
		error = gfs2_glock_nq_init(qda[qx]->qd_gl, LM_ST_EXCLUSIVE,
					   GL_NOCACHE, &ghs[qx]);
		if (error)
			goto out;
	}

	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &i_gh);
	if (error)
		goto out;

	for (x = 0; x < num_qd; x++) {
		int alloc_required;

		offset = qd2offset(qda[x]);
		error = gfs2_write_alloc_required(ip, offset,
						  sizeof(struct gfs2_quota),
						  &alloc_required);
		if (error)
			goto out_gunlock;
		if (alloc_required)
			nalloc++;
	}

	al = gfs2_alloc_get(ip);
	if (!al) {
		error = -ENOMEM;
		goto out_gunlock;
	}
	/* 
	 * 1 blk for unstuffing inode if stuffed. We add this extra
	 * block to the reservation unconditionally. If the inode
	 * doesn't need unstuffing, the block will be released to the 
	 * rgrp since it won't be allocated during the transaction
	 */
	al->al_requested = 1;
	/* +3 in the end for unstuffing block, inode size update block
	 * and another block in case quota straddles page boundary and 
	 * two blocks need to be updated instead of 1 */
	blocks = num_qd * data_blocks + RES_DINODE + num_qd + 3;

	if (nalloc)
		al->al_requested += nalloc * (data_blocks + ind_blocks);		
	error = gfs2_inplace_reserve(ip);
	if (error)
		goto out_alloc;

	if (nalloc)
		blocks += al->al_rgd->rd_length + nalloc * ind_blocks + RES_STATFS;

	error = gfs2_trans_begin(sdp, blocks, 0);
	if (error)
		goto out_ipres;

	for (x = 0; x < num_qd; x++) {
		qd = qda[x];
		offset = qd2offset(qd);
		error = gfs2_adjust_quota(ip, offset, qd->qd_change_sync, qd, NULL);
		if (error)
			goto out_end_trans;

		do_qc(qd, -qd->qd_change_sync);
	}

	error = 0;

out_end_trans:
	gfs2_trans_end(sdp);
out_ipres:
	gfs2_inplace_release(ip);
out_alloc:
	gfs2_alloc_put(ip);
out_gunlock:
	gfs2_glock_dq_uninit(&i_gh);
out:
	while (qx--)
		gfs2_glock_dq_uninit(&ghs[qx]);
	mutex_unlock(&ip->i_inode.i_mutex);
	kfree(ghs);
	gfs2_log_flush(ip->i_gl->gl_sbd, ip->i_gl);
	return error;
}

static int update_qd(struct gfs2_sbd *sdp, struct gfs2_quota_data *qd)
{
	struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
	struct gfs2_quota q;
	struct gfs2_quota_lvb *qlvb;
	loff_t pos;
	int error;

	memset(&q, 0, sizeof(struct gfs2_quota));
	pos = qd2offset(qd);
	error = gfs2_internal_read(ip, NULL, (char *)&q, &pos, sizeof(q));
	if (error < 0)
		return error;

	qlvb = (struct gfs2_quota_lvb *)qd->qd_gl->gl_lvb;
	qlvb->qb_magic = cpu_to_be32(GFS2_MAGIC);
	qlvb->__pad = 0;
	qlvb->qb_limit = q.qu_limit;
	qlvb->qb_warn = q.qu_warn;
	qlvb->qb_value = q.qu_value;
	qd->qd_qb = *qlvb;

	return 0;
}

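/*
 * Acquire the quota glock in shared mode, first refreshing the LVB copy of
 * the on-disk quota if it is stale or if a refresh is being forced.
 */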
static int do_glock(struct gfs2_quota_data *qd, int force_refresh,
		    struct gfs2_holder *q_gh)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
	struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
	struct gfs2_holder i_gh;
	int error;

restart:
	error = gfs2_glock_nq_init(qd->qd_gl, LM_ST_SHARED, 0, q_gh);
	if (error)
		return error;

	qd->qd_qb = *(struct gfs2_quota_lvb *)qd->qd_gl->gl_lvb;

	if (force_refresh || qd->qd_qb.qb_magic != cpu_to_be32(GFS2_MAGIC)) {
		gfs2_glock_dq_uninit(q_gh);
		error = gfs2_glock_nq_init(qd->qd_gl, LM_ST_EXCLUSIVE,
					   GL_NOCACHE, q_gh);
		if (error)
			return error;

		error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, 0, &i_gh);
		if (error)
			goto fail;

		error = update_qd(sdp, qd);
		if (error)
			goto fail_gunlock;

		gfs2_glock_dq_uninit(&i_gh);
		gfs2_glock_dq_uninit(q_gh);
		force_refresh = 0;
		goto restart;
	}

	return 0;

fail_gunlock:
	gfs2_glock_dq_uninit(&i_gh);
fail:
	gfs2_glock_dq_uninit(q_gh);
	return error;
}

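/**
 * gfs2_quota_lock - Hold and lock the quotas an operation may charge
 * @ip: The inode
 * @uid: A user ID to lock in addition to the inode's owner, or NO_QUOTA_CHANGE
 * @gid: A group ID to lock in addition to the inode's group, or NO_QUOTA_CHANGE
 *
 * Returns: errno
 */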
int gfs2_quota_lock(struct gfs2_inode *ip, u32 uid, u32 gid)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_alloc *al = ip->i_alloc;
	unsigned int x;
	int error = 0;

	gfs2_quota_hold(ip, uid, gid);

	if (capable(CAP_SYS_RESOURCE) ||
	    sdp->sd_args.ar_quota != GFS2_QUOTA_ON)
		return 0;

	sort(al->al_qd, al->al_qd_num, sizeof(struct gfs2_quota_data *),
	     sort_qd, NULL);

	for (x = 0; x < al->al_qd_num; x++) {
		error = do_glock(al->al_qd[x], NO_FORCE, &al->al_qd_ghs[x]);
		if (error)
			break;
	}

	if (!error)
		set_bit(GIF_QD_LOCKED, &ip->i_flags);
	else {
		while (x--)
			gfs2_glock_dq_uninit(&al->al_qd_ghs[x]);
		gfs2_quota_unhold(ip);
	}

	return error;
}

static int need_sync(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
	struct gfs2_tune *gt = &sdp->sd_tune;
	s64 value;
	unsigned int num, den;
	int do_sync = 1;

	if (!qd->qd_qb.qb_limit)
		return 0;

	spin_lock(&qd_lru_lock);
	value = qd->qd_change;
	spin_unlock(&qd_lru_lock);

	spin_lock(&gt->gt_spin);
	num = gt->gt_quota_scale_num;
	den = gt->gt_quota_scale_den;
	spin_unlock(&gt->gt_spin);

	if (value < 0)
		do_sync = 0;
	else if ((s64)be64_to_cpu(qd->qd_qb.qb_value) >=
		 (s64)be64_to_cpu(qd->qd_qb.qb_limit))
		do_sync = 0;
	else {
		value *= gfs2_jindex_size(sdp) * num;
		value = div_s64(value, den);
		value += (s64)be64_to_cpu(qd->qd_qb.qb_value);
		if (value < (s64)be64_to_cpu(qd->qd_qb.qb_limit))
			do_sync = 0;
	}

	return do_sync;
}

void gfs2_quota_unlock(struct gfs2_inode *ip)
{
	struct gfs2_alloc *al = ip->i_alloc;
	struct gfs2_quota_data *qda[4];
	unsigned int count = 0;
	unsigned int x;

	if (!test_and_clear_bit(GIF_QD_LOCKED, &ip->i_flags))
		goto out;

	for (x = 0; x < al->al_qd_num; x++) {
		struct gfs2_quota_data *qd;
		int sync;

		qd = al->al_qd[x];
		sync = need_sync(qd);

		gfs2_glock_dq_uninit(&al->al_qd_ghs[x]);

		if (sync && qd_trylock(qd))
			qda[count++] = qd;
	}

	if (count) {
		do_sync(count, qda);
		for (x = 0; x < count; x++)
			qd_unlock(qda[x]);
	}

out:
	gfs2_quota_unhold(ip);
}

#define MAX_LINE 256

static int print_message(struct gfs2_quota_data *qd, char *type)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;

	printk(KERN_INFO "GFS2: fsid=%s: quota %s for %s %u\n",
	       sdp->sd_fsname, type,
	       (test_bit(QDF_USER, &qd->qd_flags)) ? "user" : "group",
	       qd->qd_id);

	return 0;
}

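/**
 * gfs2_quota_check - Check whether the given IDs are over their limits
 * @ip: The inode (its quotas must already be held via gfs2_quota_lock())
 * @uid: The user ID to check
 * @gid: The group ID to check
 *
 * Returns: 0 if under quota, or -EDQUOT if a hard limit has been exceeded
 */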
int gfs2_quota_check(struct gfs2_inode *ip, u32 uid, u32 gid)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_alloc *al = ip->i_alloc;
	struct gfs2_quota_data *qd;
	s64 value;
	unsigned int x;
	int error = 0;

	if (!test_bit(GIF_QD_LOCKED, &ip->i_flags))
		return 0;

        if (sdp->sd_args.ar_quota != GFS2_QUOTA_ON)
                return 0;

	for (x = 0; x < al->al_qd_num; x++) {
		qd = al->al_qd[x];

		if (!((qd->qd_id == uid && test_bit(QDF_USER, &qd->qd_flags)) ||
		      (qd->qd_id == gid && !test_bit(QDF_USER, &qd->qd_flags))))
			continue;

		value = (s64)be64_to_cpu(qd->qd_qb.qb_value);
		spin_lock(&qd_lru_lock);
		value += qd->qd_change;
		spin_unlock(&qd_lru_lock);

		if (be64_to_cpu(qd->qd_qb.qb_limit) && (s64)be64_to_cpu(qd->qd_qb.qb_limit) < value) {
			print_message(qd, "exceeded");
			quota_send_warning(test_bit(QDF_USER, &qd->qd_flags) ?
					   USRQUOTA : GRPQUOTA, qd->qd_id,
					   sdp->sd_vfs->s_dev, QUOTA_NL_BHARDWARN);

			error = -EDQUOT;
			break;
		} else if (be64_to_cpu(qd->qd_qb.qb_warn) &&
			   (s64)be64_to_cpu(qd->qd_qb.qb_warn) < value &&
			   time_after_eq(jiffies, qd->qd_last_warn +
					 gfs2_tune_get(sdp,
						gt_quota_warn_period) * HZ)) {
			quota_send_warning(test_bit(QDF_USER, &qd->qd_flags) ?
					   USRQUOTA : GRPQUOTA, qd->qd_id,
					   sdp->sd_vfs->s_dev, QUOTA_NL_BSOFTWARN);
			error = print_message(qd, "warning");
			qd->qd_last_warn = jiffies;
		}
	}

	return error;
}

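/**
 * gfs2_quota_change - Record a block allocation change against held quotas
 * @ip: The inode
 * @change: The number of blocks allocated (positive) or freed (negative)
 * @uid: The user ID the change is charged to
 * @gid: The group ID the change is charged to
 */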
void gfs2_quota_change(struct gfs2_inode *ip, s64 change,
		       u32 uid, u32 gid)
{
	struct gfs2_alloc *al = ip->i_alloc;
	struct gfs2_quota_data *qd;
	unsigned int x;

	if (gfs2_assert_warn(GFS2_SB(&ip->i_inode), change))
		return;
	if (ip->i_diskflags & GFS2_DIF_SYSTEM)
		return;

	for (x = 0; x < al->al_qd_num; x++) {
		qd = al->al_qd[x];

		if ((qd->qd_id == uid && test_bit(QDF_USER, &qd->qd_flags)) ||
		    (qd->qd_id == gid && !test_bit(QDF_USER, &qd->qd_flags))) {
			do_qc(qd, change);
		}
	}
}

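/*
 * Sync this node's pending quota changes to the global quota file, in
 * batches of at most "quota_simul_sync" entries at a time.
 */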
int gfs2_quota_sync(struct super_block *sb, int type, int wait)
{
	struct gfs2_sbd *sdp = sb->s_fs_info;
	struct gfs2_quota_data **qda;
	unsigned int max_qd = gfs2_tune_get(sdp, gt_quota_simul_sync);
	unsigned int num_qd;
	unsigned int x;
	int error = 0;

	sdp->sd_quota_sync_gen++;

	qda = kcalloc(max_qd, sizeof(struct gfs2_quota_data *), GFP_KERNEL);
	if (!qda)
		return -ENOMEM;

	do {
		num_qd = 0;

		for (;;) {
			error = qd_fish(sdp, qda + num_qd);
			if (error || !qda[num_qd])
				break;
			if (++num_qd == max_qd)
				break;
		}

		if (num_qd) {
			if (!error)
				error = do_sync(num_qd, qda);
			if (!error)
				for (x = 0; x < num_qd; x++)
					qda[x]->qd_sync_gen =
						sdp->sd_quota_sync_gen;

			for (x = 0; x < num_qd; x++)
				qd_unlock(qda[x]);
		}
	} while (!error && num_qd == max_qd);

	kfree(qda);

	return error;
}

static int gfs2_quota_sync_timeo(struct super_block *sb, int type)
{
	return gfs2_quota_sync(sb, type, 0);
}

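/* Force the cached LVB copy of one quota to be re-read from the quota file. */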
int gfs2_quota_refresh(struct gfs2_sbd *sdp, int user, u32 id)
{
	struct gfs2_quota_data *qd;
	struct gfs2_holder q_gh;
	int error;

	error = qd_get(sdp, user, id, &qd);
	if (error)
		return error;

	error = do_glock(qd, FORCE, &q_gh);
	if (!error)
		gfs2_glock_dq_uninit(&q_gh);

	qd_put(qd);
	return error;
}

static void gfs2_quota_change_in(struct gfs2_quota_change_host *qc, const void *buf)
{
	const struct gfs2_quota_change *str = buf;

	qc->qc_change = be64_to_cpu(str->qc_change);
	qc->qc_flags = be32_to_cpu(str->qc_flags);
	qc->qc_id = be32_to_cpu(str->qc_id);
}

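/*
 * Called at mount time: scan this node's quota change file and build in-core
 * quota structures for every ID that still has an unsynced change.
 */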
int gfs2_quota_init(struct gfs2_sbd *sdp)
{
	struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode);
	unsigned int blocks = ip->i_disksize >> sdp->sd_sb.sb_bsize_shift;
	unsigned int x, slot = 0;
	unsigned int found = 0;
	u64 dblock;
	u32 extlen = 0;
	int error;

	if (!ip->i_disksize || ip->i_disksize > (64 << 20) ||
	    ip->i_disksize & (sdp->sd_sb.sb_bsize - 1)) {
		gfs2_consist_inode(ip);
		return -EIO;
	}
	sdp->sd_quota_slots = blocks * sdp->sd_qc_per_block;
	sdp->sd_quota_chunks = DIV_ROUND_UP(sdp->sd_quota_slots, 8 * PAGE_SIZE);

	error = -ENOMEM;

	sdp->sd_quota_bitmap = kcalloc(sdp->sd_quota_chunks,
				       sizeof(unsigned char *), GFP_NOFS);
	if (!sdp->sd_quota_bitmap)
		return error;

	for (x = 0; x < sdp->sd_quota_chunks; x++) {
		sdp->sd_quota_bitmap[x] = kzalloc(PAGE_SIZE, GFP_NOFS);
		if (!sdp->sd_quota_bitmap[x])
			goto fail;
	}

	for (x = 0; x < blocks; x++) {
		struct buffer_head *bh;
		unsigned int y;

		if (!extlen) {
			int new = 0;
			error = gfs2_extent_map(&ip->i_inode, x, &new, &dblock, &extlen);
			if (error)
				goto fail;
		}
		error = -EIO;
		bh = gfs2_meta_ra(ip->i_gl, dblock, extlen);
		if (!bh)
			goto fail;
		if (gfs2_metatype_check(sdp, bh, GFS2_METATYPE_QC)) {
			brelse(bh);
			goto fail;
		}

		for (y = 0; y < sdp->sd_qc_per_block && slot < sdp->sd_quota_slots;
		     y++, slot++) {
			struct gfs2_quota_change_host qc;
			struct gfs2_quota_data *qd;

			gfs2_quota_change_in(&qc, bh->b_data +
					  sizeof(struct gfs2_meta_header) +
					  y * sizeof(struct gfs2_quota_change));
			if (!qc.qc_change)
				continue;

			error = qd_alloc(sdp, (qc.qc_flags & GFS2_QCF_USER),
					 qc.qc_id, &qd);
			if (error) {
				brelse(bh);
				goto fail;
			}

			set_bit(QDF_CHANGE, &qd->qd_flags);
			qd->qd_change = qc.qc_change;
			qd->qd_slot = slot;
			qd->qd_slot_count = 1;

			spin_lock(&qd_lru_lock);
			gfs2_icbit_munge(sdp, sdp->sd_quota_bitmap, slot, 1);
			list_add(&qd->qd_list, &sdp->sd_quota_list);
			atomic_inc(&sdp->sd_quota_count);
			spin_unlock(&qd_lru_lock);

			found++;
		}

		brelse(bh);
		dblock++;
		extlen--;
	}

	if (found)
		fs_info(sdp, "found %u quota changes\n", found);

	return 0;

fail:
	gfs2_quota_cleanup(sdp);
	return error;
}

void gfs2_quota_cleanup(struct gfs2_sbd *sdp)
{
	struct list_head *head = &sdp->sd_quota_list;
	struct gfs2_quota_data *qd;
	unsigned int x;

	spin_lock(&qd_lru_lock);
	while (!list_empty(head)) {
		qd = list_entry(head->prev, struct gfs2_quota_data, qd_list);

		if (atomic_read(&qd->qd_count) > 1 ||
		    (atomic_read(&qd->qd_count) &&
		     !test_bit(QDF_CHANGE, &qd->qd_flags))) {
			list_move(&qd->qd_list, head);
			spin_unlock(&qd_lru_lock);
			schedule();
			spin_lock(&qd_lru_lock);
			continue;
		}

		list_del(&qd->qd_list);
		/* Also remove if this qd exists in the reclaim list */
		if (!list_empty(&qd->qd_reclaim)) {
			list_del_init(&qd->qd_reclaim);
			atomic_dec(&qd_lru_count);
		}
		atomic_dec(&sdp->sd_quota_count);
		spin_unlock(&qd_lru_lock);

		if (!atomic_read(&qd->qd_count)) {
			gfs2_assert_warn(sdp, !qd->qd_change);
			gfs2_assert_warn(sdp, !qd->qd_slot_count);
		} else
			gfs2_assert_warn(sdp, qd->qd_slot_count == 1);
		gfs2_assert_warn(sdp, !qd->qd_bh_count);

		gfs2_glock_put(qd->qd_gl);
		kmem_cache_free(gfs2_quotad_cachep, qd);

		spin_lock(&qd_lru_lock);
	}
	spin_unlock(&qd_lru_lock);

	gfs2_assert_warn(sdp, !atomic_read(&sdp->sd_quota_count));

	if (sdp->sd_quota_bitmap) {
		for (x = 0; x < sdp->sd_quota_chunks; x++)
			kfree(sdp->sd_quota_bitmap[x]);
		kfree(sdp->sd_quota_bitmap);
	}
}

static void quotad_error(struct gfs2_sbd *sdp, const char *msg, int error)
{
	if (error == 0 || error == -EROFS)
		return;
	if (!test_bit(SDF_SHUTDOWN, &sdp->sd_flags))
		fs_err(sdp, "gfs2_quotad: %s error %d\n", msg, error);
}

static void quotad_check_timeo(struct gfs2_sbd *sdp, const char *msg,
			       int (*fxn)(struct super_block *sb, int type),
			       unsigned long t, unsigned long *timeo,
			       unsigned int *new_timeo)
{
	if (t >= *timeo) {
		int error = fxn(sdp->sd_vfs, 0);
		quotad_error(sdp, msg, error);
		*timeo = gfs2_tune_get_i(&sdp->sd_tune, new_timeo) * HZ;
	} else {
		*timeo -= t;
	}
}

static void quotad_check_trunc_list(struct gfs2_sbd *sdp)
{
	struct gfs2_inode *ip;

	while(1) {
		ip = NULL;
		spin_lock(&sdp->sd_trunc_lock);
		if (!list_empty(&sdp->sd_trunc_list)) {
			ip = list_entry(sdp->sd_trunc_list.next,
					struct gfs2_inode, i_trunc_list);
			list_del_init(&ip->i_trunc_list);
		}
		spin_unlock(&sdp->sd_trunc_lock);
		if (ip == NULL)
			return;
		gfs2_glock_finish_truncate(ip);
	}
}

void gfs2_wake_up_statfs(struct gfs2_sbd *sdp) {
	if (!sdp->sd_statfs_force_sync) {
		sdp->sd_statfs_force_sync = 1;
		wake_up(&sdp->sd_quota_wait);
	}
}


/**
 * gfs2_quotad - Write cached quota changes into the quota file
 * @sdp: Pointer to GFS2 superblock
 *
 */

int gfs2_quotad(void *data)
{
	struct gfs2_sbd *sdp = data;
	struct gfs2_tune *tune = &sdp->sd_tune;
	unsigned long statfs_timeo = 0;
	unsigned long quotad_timeo = 0;
	unsigned long t = 0;
	DEFINE_WAIT(wait);
	int empty;

	while (!kthread_should_stop()) {

		/* Update the master statfs file */
		if (sdp->sd_statfs_force_sync) {
			int error = gfs2_statfs_sync(sdp->sd_vfs, 0);
			quotad_error(sdp, "statfs", error);
			statfs_timeo = gfs2_tune_get(sdp, gt_statfs_quantum) * HZ;
		}
		else
			quotad_check_timeo(sdp, "statfs", gfs2_statfs_sync, t,
				   	   &statfs_timeo,
					   &tune->gt_statfs_quantum);

		/* Update quota file */
		quotad_check_timeo(sdp, "sync", gfs2_quota_sync_timeo, t,
				   &quotad_timeo, &tune->gt_quota_quantum);

		/* Check for & recover partially truncated inodes */
		quotad_check_trunc_list(sdp);

		if (freezing(current))
			refrigerator();
		t = min(quotad_timeo, statfs_timeo);

		prepare_to_wait(&sdp->sd_quota_wait, &wait, TASK_INTERRUPTIBLE);
		spin_lock(&sdp->sd_trunc_lock);
		empty = list_empty(&sdp->sd_trunc_list);
		spin_unlock(&sdp->sd_trunc_lock);
		if (empty && !sdp->sd_statfs_force_sync)
			t -= schedule_timeout(t);
		else
			t = 0;
		finish_wait(&sdp->sd_quota_wait, &wait);
	}

	return 0;
}

static int gfs2_quota_get_xstate(struct super_block *sb,
				 struct fs_quota_stat *fqs)
{
	struct gfs2_sbd *sdp = sb->s_fs_info;

	memset(fqs, 0, sizeof(struct fs_quota_stat));
	fqs->qs_version = FS_QSTAT_VERSION;

	switch (sdp->sd_args.ar_quota) {
	case GFS2_QUOTA_ON:
		fqs->qs_flags |= (XFS_QUOTA_UDQ_ENFD | XFS_QUOTA_GDQ_ENFD);
		/*FALLTHRU*/
	case GFS2_QUOTA_ACCOUNT:
		fqs->qs_flags |= (XFS_QUOTA_UDQ_ACCT | XFS_QUOTA_GDQ_ACCT);
		break;
	case GFS2_QUOTA_OFF:
		break;
	}

	if (sdp->sd_quota_inode) {
		fqs->qs_uquota.qfs_ino = GFS2_I(sdp->sd_quota_inode)->i_no_addr;
		fqs->qs_uquota.qfs_nblks = sdp->sd_quota_inode->i_blocks;
	}
	fqs->qs_uquota.qfs_nextents = 1; /* unsupported */
	fqs->qs_gquota = fqs->qs_uquota; /* it's the same inode in both cases */
	fqs->qs_incoredqs = atomic_read(&qd_lru_count);
	return 0;
}

static int gfs2_get_dqblk(struct super_block *sb, int type, qid_t id,
			  struct fs_disk_quota *fdq)
{
	struct gfs2_sbd *sdp = sb->s_fs_info;
	struct gfs2_quota_lvb *qlvb;
	struct gfs2_quota_data *qd;
	struct gfs2_holder q_gh;
	int error;

	memset(fdq, 0, sizeof(struct fs_disk_quota));

	if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
		return -ESRCH; /* Crazy XFS error code */

	if (type == USRQUOTA)
		type = QUOTA_USER;
	else if (type == GRPQUOTA)
		type = QUOTA_GROUP;
	else
		return -EINVAL;

	error = qd_get(sdp, type, id, &qd);
	if (error)
		return error;
	error = do_glock(qd, FORCE, &q_gh);
	if (error)
		goto out;

	qlvb = (struct gfs2_quota_lvb *)qd->qd_gl->gl_lvb;
	fdq->d_version = FS_DQUOT_VERSION;
	fdq->d_flags = (type == QUOTA_USER) ? XFS_USER_QUOTA : XFS_GROUP_QUOTA;
	fdq->d_id = id;
	fdq->d_blk_hardlimit = be64_to_cpu(qlvb->qb_limit);
	fdq->d_blk_softlimit = be64_to_cpu(qlvb->qb_warn);
	fdq->d_bcount = be64_to_cpu(qlvb->qb_value);

	gfs2_glock_dq_uninit(&q_gh);
out:
	qd_put(qd);
	return error;
}

/* GFS2 only supports a subset of the XFS fields */
#define GFS2_FIELDMASK (FS_DQ_BSOFT|FS_DQ_BHARD)

static int gfs2_set_dqblk(struct super_block *sb, int type, qid_t id,
			  struct fs_disk_quota *fdq)
{
	struct gfs2_sbd *sdp = sb->s_fs_info;
	struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
	struct gfs2_quota_data *qd;
	struct gfs2_holder q_gh, i_gh;
	unsigned int data_blocks, ind_blocks;
	unsigned int blocks = 0;
	int alloc_required;
	struct gfs2_alloc *al;
	loff_t offset;
	int error;

	if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
		return -ESRCH; /* Crazy XFS error code */

	switch(type) {
	case USRQUOTA:
		type = QUOTA_USER;
		if (fdq->d_flags != XFS_USER_QUOTA)
			return -EINVAL;
		break;
	case GRPQUOTA:
		type = QUOTA_GROUP;
		if (fdq->d_flags != XFS_GROUP_QUOTA)
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}

	if (fdq->d_fieldmask & ~GFS2_FIELDMASK)
		return -EINVAL;
	if (fdq->d_id != id)
		return -EINVAL;

	error = qd_get(sdp, type, id, &qd);
	if (error)
		return error;

	mutex_lock(&ip->i_inode.i_mutex);
	error = gfs2_glock_nq_init(qd->qd_gl, LM_ST_EXCLUSIVE, 0, &q_gh);
	if (error)
		goto out_put;
	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &i_gh);
	if (error)
		goto out_q;

	/* Check for existing entry, if none then alloc new blocks */
	error = update_qd(sdp, qd);
	if (error)
		goto out_i;

	/* If nothing has changed, this is a no-op */
	if ((fdq->d_fieldmask & FS_DQ_BSOFT) &&
	    (fdq->d_blk_softlimit == be64_to_cpu(qd->qd_qb.qb_warn)))
		fdq->d_fieldmask ^= FS_DQ_BSOFT;
	if ((fdq->d_fieldmask & FS_DQ_BHARD) &&
	    (fdq->d_blk_hardlimit == be64_to_cpu(qd->qd_qb.qb_limit)))
		fdq->d_fieldmask ^= FS_DQ_BHARD;
	if (fdq->d_fieldmask == 0)
		goto out_i;

	offset = qd2offset(qd);
	error = gfs2_write_alloc_required(ip, offset, sizeof(struct gfs2_quota),
					  &alloc_required);
	if (error)
		goto out_i;
	if (alloc_required) {
		al = gfs2_alloc_get(ip);
		if (al == NULL)
			goto out_i;
		gfs2_write_calc_reserv(ip, sizeof(struct gfs2_quota),
				       &data_blocks, &ind_blocks);
		blocks = al->al_requested = 1 + data_blocks + ind_blocks;
		error = gfs2_inplace_reserve(ip);
		if (error)
			goto out_alloc;
	}

	error = gfs2_trans_begin(sdp, blocks + RES_DINODE + 1, 0);
	if (error)
		goto out_release;

	/* Apply changes */
	error = gfs2_adjust_quota(ip, offset, 0, qd, fdq);

	gfs2_trans_end(sdp);
out_release:
	if (alloc_required) {
		gfs2_inplace_release(ip);
out_alloc:
		gfs2_alloc_put(ip);
	}
out_i:
	gfs2_glock_dq_uninit(&i_gh);
out_q:
	gfs2_glock_dq_uninit(&q_gh);
out_put:
	mutex_unlock(&ip->i_inode.i_mutex);
	qd_put(qd);
	return error;
}

const struct quotactl_ops gfs2_quotactl_ops = {
	.quota_sync     = gfs2_quota_sync,
	.get_xstate     = gfs2_quota_get_xstate,
	.get_dqblk	= gfs2_get_dqblk,
	.set_dqblk	= gfs2_set_dqblk,
};