// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2011 STRATO.  All rights reserved.
 */

#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/btrfs.h>
#include <linux/sched/mm.h>

#include "ctree.h"
#include "transaction.h"
#include "disk-io.h"
#include "locking.h"
#include "ulist.h"
#include "backref.h"
#include "extent_io.h"
#include "qgroup.h"
#include "block-group.h"
#include "sysfs.h"
#include "tree-mod-log.h"

/* TODO XXX FIXME
 *  - subvol delete -> delete when ref goes to 0? delete limits also?
 *  - reorganize keys
 *  - compressed
 *  - sync
 *  - copy also limits on subvol creation
 *  - limit
 *  - caches for ulists
 *  - performance benchmarks
 *  - check all ioctl parameters
 */

/*
 * Helpers to access qgroup reservation
 *
 * Callers should ensure the lock context and type are valid
 */

static u64 qgroup_rsv_total(const struct btrfs_qgroup *qgroup)
{
	u64 ret = 0;
	int i;

	for (i = 0; i < BTRFS_QGROUP_RSV_LAST; i++)
		ret += qgroup->rsv.values[i];

	return ret;
}

#ifdef CONFIG_BTRFS_DEBUG
static const char *qgroup_rsv_type_str(enum btrfs_qgroup_rsv_type type)
{
	if (type == BTRFS_QGROUP_RSV_DATA)
		return "data";
	if (type == BTRFS_QGROUP_RSV_META_PERTRANS)
		return "meta_pertrans";
	if (type == BTRFS_QGROUP_RSV_META_PREALLOC)
		return "meta_prealloc";
	return NULL;
}
#endif

static void qgroup_rsv_add(struct btrfs_fs_info *fs_info,
			   struct btrfs_qgroup *qgroup, u64 num_bytes,
			   enum btrfs_qgroup_rsv_type type)
{
	trace_qgroup_update_reserve(fs_info, qgroup, num_bytes, type);
	qgroup->rsv.values[type] += num_bytes;
}

static void qgroup_rsv_release(struct btrfs_fs_info *fs_info,
			       struct btrfs_qgroup *qgroup, u64 num_bytes,
			       enum btrfs_qgroup_rsv_type type)
{
	trace_qgroup_update_reserve(fs_info, qgroup, -(s64)num_bytes, type);
	if (qgroup->rsv.values[type] >= num_bytes) {
		qgroup->rsv.values[type] -= num_bytes;
		return;
	}
#ifdef CONFIG_BTRFS_DEBUG
	WARN_RATELIMIT(1,
		"qgroup %llu %s reserved space underflow, have %llu to free %llu",
		qgroup->qgroupid, qgroup_rsv_type_str(type),
		qgroup->rsv.values[type], num_bytes);
#endif
	qgroup->rsv.values[type] = 0;
}
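
/*
 * Added commentary (not in the original file): the add/release helpers are
 * expected to be paired per reservation type under fs_info->qgroup_lock,
 * e.g.:
 *
 *	qgroup_rsv_add(fs_info, qgroup, num_bytes, BTRFS_QGROUP_RSV_DATA);
 *	...
 *	qgroup_rsv_release(fs_info, qgroup, num_bytes, BTRFS_QGROUP_RSV_DATA);
 *
 * A release larger than the tracked value clamps the counter to 0 (and warns
 * under CONFIG_BTRFS_DEBUG) instead of letting the u64 wrap around.
 */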

static void qgroup_rsv_add_by_qgroup(struct btrfs_fs_info *fs_info,
				     struct btrfs_qgroup *dest,
				     struct btrfs_qgroup *src)
{
	int i;

	for (i = 0; i < BTRFS_QGROUP_RSV_LAST; i++)
		qgroup_rsv_add(fs_info, dest, src->rsv.values[i], i);
}

static void qgroup_rsv_release_by_qgroup(struct btrfs_fs_info *fs_info,
					 struct btrfs_qgroup *dest,
					 struct btrfs_qgroup *src)
{
	int i;

	for (i = 0; i < BTRFS_QGROUP_RSV_LAST; i++)
		qgroup_rsv_release(fs_info, dest, src->rsv.values[i], i);
}

static void btrfs_qgroup_update_old_refcnt(struct btrfs_qgroup *qg, u64 seq,
					   int mod)
{
	if (qg->old_refcnt < seq)
		qg->old_refcnt = seq;
	qg->old_refcnt += mod;
}

static void btrfs_qgroup_update_new_refcnt(struct btrfs_qgroup *qg, u64 seq,
					   int mod)
{
	if (qg->new_refcnt < seq)
		qg->new_refcnt = seq;
	qg->new_refcnt += mod;
}

static inline u64 btrfs_qgroup_get_old_refcnt(struct btrfs_qgroup *qg, u64 seq)
{
	if (qg->old_refcnt < seq)
		return 0;
	return qg->old_refcnt - seq;
}

static inline u64 btrfs_qgroup_get_new_refcnt(struct btrfs_qgroup *qg, u64 seq)
{
	if (qg->new_refcnt < seq)
		return 0;
	return qg->new_refcnt - seq;
}
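
/*
 * Added commentary (not in the original file): the @seq baseline lets the
 * old/new refcnt fields be reused across accounting rounds without a reset
 * pass.  For example, with seq = 100 and old_refcnt still holding 37 from an
 * earlier round, btrfs_qgroup_get_old_refcnt() reads 0; the first
 * btrfs_qgroup_update_old_refcnt(qg, 100, 1) snaps the field to 100 and then
 * adds 1, so the getter returns 1.
 */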

/*
 * glue structure to represent the relations between qgroups.
 */
struct btrfs_qgroup_list {
	struct list_head next_group;
	struct list_head next_member;
	struct btrfs_qgroup *group;
	struct btrfs_qgroup *member;
};

static inline u64 qgroup_to_aux(struct btrfs_qgroup *qg)
{
	return (u64)(uintptr_t)qg;
}

static inline struct btrfs_qgroup* unode_aux_to_qgroup(struct ulist_node *n)
{
	return (struct btrfs_qgroup *)(uintptr_t)n->aux;
}
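
/*
 * Added commentary (not in the original file): a typical round trip through
 * the two helpers above, as used by the accounting code later in this file:
 *
 *	ret = ulist_add(tmp, qg->qgroupid, qgroup_to_aux(qg), GFP_ATOMIC);
 *	...
 *	ULIST_ITER_INIT(&uiter);
 *	while ((unode = ulist_next(tmp, &uiter))) {
 *		struct btrfs_qgroup *cur = unode_aux_to_qgroup(unode);
 *		...
 *	}
 *
 * The u64 aux field is just wide enough to carry the qgroup pointer through
 * the ulist.
 */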

static int
qgroup_rescan_init(struct btrfs_fs_info *fs_info, u64 progress_objectid,
		   int init_flags);
static void qgroup_rescan_zero_tracking(struct btrfs_fs_info *fs_info);

/* must be called with qgroup_ioctl_lock held */
static struct btrfs_qgroup *find_qgroup_rb(struct btrfs_fs_info *fs_info,
					   u64 qgroupid)
{
	struct rb_node *n = fs_info->qgroup_tree.rb_node;
	struct btrfs_qgroup *qgroup;

	while (n) {
		qgroup = rb_entry(n, struct btrfs_qgroup, node);
		if (qgroup->qgroupid < qgroupid)
			n = n->rb_left;
		else if (qgroup->qgroupid > qgroupid)
			n = n->rb_right;
		else
			return qgroup;
	}
	return NULL;
}

/* must be called with qgroup_lock held */
static struct btrfs_qgroup *add_qgroup_rb(struct btrfs_fs_info *fs_info,
					  u64 qgroupid)
{
	struct rb_node **p = &fs_info->qgroup_tree.rb_node;
	struct rb_node *parent = NULL;
	struct btrfs_qgroup *qgroup;

	while (*p) {
		parent = *p;
		qgroup = rb_entry(parent, struct btrfs_qgroup, node);

		if (qgroup->qgroupid < qgroupid)
			p = &(*p)->rb_left;
		else if (qgroup->qgroupid > qgroupid)
			p = &(*p)->rb_right;
		else
			return qgroup;
	}

	qgroup = kzalloc(sizeof(*qgroup), GFP_ATOMIC);
	if (!qgroup)
		return ERR_PTR(-ENOMEM);

	qgroup->qgroupid = qgroupid;
	INIT_LIST_HEAD(&qgroup->groups);
	INIT_LIST_HEAD(&qgroup->members);
	INIT_LIST_HEAD(&qgroup->dirty);

	rb_link_node(&qgroup->node, parent, p);
	rb_insert_color(&qgroup->node, &fs_info->qgroup_tree);

	return qgroup;
}
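
/*
 * Added commentary (not in the original file): note that both lookup and
 * insert above branch *left* when the node's id is smaller than the key,
 * which is the mirror image of the usual rbtree idiom.  The comparison is
 * consistent between find_qgroup_rb() and add_qgroup_rb(), so the tree
 * still orders and finds qgroups correctly.
 */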

static void __del_qgroup_rb(struct btrfs_fs_info *fs_info,
			    struct btrfs_qgroup *qgroup)
{
	struct btrfs_qgroup_list *list;

	list_del(&qgroup->dirty);
	while (!list_empty(&qgroup->groups)) {
		list = list_first_entry(&qgroup->groups,
					struct btrfs_qgroup_list, next_group);
		list_del(&list->next_group);
		list_del(&list->next_member);
		kfree(list);
	}

	while (!list_empty(&qgroup->members)) {
		list = list_first_entry(&qgroup->members,
					struct btrfs_qgroup_list, next_member);
		list_del(&list->next_group);
		list_del(&list->next_member);
		kfree(list);
	}
}

/* must be called with qgroup_lock held */
static int del_qgroup_rb(struct btrfs_fs_info *fs_info, u64 qgroupid)
{
	struct btrfs_qgroup *qgroup = find_qgroup_rb(fs_info, qgroupid);

	if (!qgroup)
		return -ENOENT;

	rb_erase(&qgroup->node, &fs_info->qgroup_tree);
	__del_qgroup_rb(fs_info, qgroup);
	return 0;
}

/* must be called with qgroup_lock held */
static int add_relation_rb(struct btrfs_fs_info *fs_info,
			   u64 memberid, u64 parentid)
{
	struct btrfs_qgroup *member;
	struct btrfs_qgroup *parent;
	struct btrfs_qgroup_list *list;

	member = find_qgroup_rb(fs_info, memberid);
	parent = find_qgroup_rb(fs_info, parentid);
	if (!member || !parent)
		return -ENOENT;

	list = kzalloc(sizeof(*list), GFP_ATOMIC);
	if (!list)
		return -ENOMEM;

	list->group = parent;
	list->member = member;
	list_add_tail(&list->next_group, &member->groups);
	list_add_tail(&list->next_member, &parent->members);

	return 0;
}
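
/*
 * Added commentary (not in the original file): a single btrfs_qgroup_list
 * acts as one edge of the relation graph and sits on two lists at once:
 * member->groups (via next_group) and parent->members (via next_member).
 * Walking either list and following list->group / list->member yields the
 * other end of the edge.
 */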

/* must be called with qgroup_lock held */
static int del_relation_rb(struct btrfs_fs_info *fs_info,
			   u64 memberid, u64 parentid)
{
	struct btrfs_qgroup *member;
	struct btrfs_qgroup *parent;
	struct btrfs_qgroup_list *list;

	member = find_qgroup_rb(fs_info, memberid);
	parent = find_qgroup_rb(fs_info, parentid);
	if (!member || !parent)
		return -ENOENT;

	list_for_each_entry(list, &member->groups, next_group) {
		if (list->group == parent) {
			list_del(&list->next_group);
			list_del(&list->next_member);
			kfree(list);
			return 0;
		}
	}
	return -ENOENT;
}

#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
int btrfs_verify_qgroup_counts(struct btrfs_fs_info *fs_info, u64 qgroupid,
			       u64 rfer, u64 excl)
{
	struct btrfs_qgroup *qgroup;

	qgroup = find_qgroup_rb(fs_info, qgroupid);
	if (!qgroup)
		return -EINVAL;
	if (qgroup->rfer != rfer || qgroup->excl != excl)
		return -EINVAL;
	return 0;
}
#endif

/*
 * The full config is read in one go, only called from open_ctree()
 * It doesn't use any locking, as at this point we're still single-threaded
 */
int btrfs_read_qgroup_config(struct btrfs_fs_info *fs_info)
{
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_root *quota_root = fs_info->quota_root;
	struct btrfs_path *path = NULL;
	struct extent_buffer *l;
	int slot;
	int ret = 0;
	u64 flags = 0;
	u64 rescan_progress = 0;

	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
		return 0;

	fs_info->qgroup_ulist = ulist_alloc(GFP_KERNEL);
	if (!fs_info->qgroup_ulist) {
		ret = -ENOMEM;
		goto out;
	}

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}

	ret = btrfs_sysfs_add_qgroups(fs_info);
	if (ret < 0)
		goto out;
	/* default this to quota off, in case no status key is found */
	fs_info->qgroup_flags = 0;

	/*
	 * pass 1: read status, all qgroup infos and limits
	 */
	key.objectid = 0;
	key.type = 0;
	key.offset = 0;
	ret = btrfs_search_slot_for_read(quota_root, &key, path, 1, 1);
	if (ret)
		goto out;

	while (1) {
		struct btrfs_qgroup *qgroup;

		slot = path->slots[0];
		l = path->nodes[0];
		btrfs_item_key_to_cpu(l, &found_key, slot);

		if (found_key.type == BTRFS_QGROUP_STATUS_KEY) {
			struct btrfs_qgroup_status_item *ptr;

			ptr = btrfs_item_ptr(l, slot,
					     struct btrfs_qgroup_status_item);

			if (btrfs_qgroup_status_version(l, ptr) !=
			    BTRFS_QGROUP_STATUS_VERSION) {
				btrfs_err(fs_info,
				 "old qgroup version, quota disabled");
				goto out;
			}
			if (btrfs_qgroup_status_generation(l, ptr) !=
			    fs_info->generation) {
				flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
				btrfs_err(fs_info,
					"qgroup generation mismatch, marked as inconsistent");
			}
			fs_info->qgroup_flags = btrfs_qgroup_status_flags(l,
									  ptr);
			rescan_progress = btrfs_qgroup_status_rescan(l, ptr);
			goto next1;
		}

		if (found_key.type != BTRFS_QGROUP_INFO_KEY &&
		    found_key.type != BTRFS_QGROUP_LIMIT_KEY)
			goto next1;

		qgroup = find_qgroup_rb(fs_info, found_key.offset);
		if ((qgroup && found_key.type == BTRFS_QGROUP_INFO_KEY) ||
		    (!qgroup && found_key.type == BTRFS_QGROUP_LIMIT_KEY)) {
			btrfs_err(fs_info, "inconsistent qgroup config");
			flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
		}
		if (!qgroup) {
			qgroup = add_qgroup_rb(fs_info, found_key.offset);
			if (IS_ERR(qgroup)) {
				ret = PTR_ERR(qgroup);
				goto out;
			}
		}
		ret = btrfs_sysfs_add_one_qgroup(fs_info, qgroup);
		if (ret < 0)
			goto out;

		switch (found_key.type) {
		case BTRFS_QGROUP_INFO_KEY: {
			struct btrfs_qgroup_info_item *ptr;

			ptr = btrfs_item_ptr(l, slot,
					     struct btrfs_qgroup_info_item);
			qgroup->rfer = btrfs_qgroup_info_rfer(l, ptr);
			qgroup->rfer_cmpr = btrfs_qgroup_info_rfer_cmpr(l, ptr);
			qgroup->excl = btrfs_qgroup_info_excl(l, ptr);
			qgroup->excl_cmpr = btrfs_qgroup_info_excl_cmpr(l, ptr);
			/* generation currently unused */
			break;
		}
		case BTRFS_QGROUP_LIMIT_KEY: {
			struct btrfs_qgroup_limit_item *ptr;

			ptr = btrfs_item_ptr(l, slot,
					     struct btrfs_qgroup_limit_item);
			qgroup->lim_flags = btrfs_qgroup_limit_flags(l, ptr);
			qgroup->max_rfer = btrfs_qgroup_limit_max_rfer(l, ptr);
			qgroup->max_excl = btrfs_qgroup_limit_max_excl(l, ptr);
			qgroup->rsv_rfer = btrfs_qgroup_limit_rsv_rfer(l, ptr);
			qgroup->rsv_excl = btrfs_qgroup_limit_rsv_excl(l, ptr);
			break;
		}
		}
next1:
		ret = btrfs_next_item(quota_root, path);
		if (ret < 0)
			goto out;
		if (ret)
			break;
	}
	btrfs_release_path(path);

	/*
	 * pass 2: read all qgroup relations
	 */
	key.objectid = 0;
	key.type = BTRFS_QGROUP_RELATION_KEY;
	key.offset = 0;
	ret = btrfs_search_slot_for_read(quota_root, &key, path, 1, 0);
	if (ret)
		goto out;
	while (1) {
		slot = path->slots[0];
		l = path->nodes[0];
		btrfs_item_key_to_cpu(l, &found_key, slot);

		if (found_key.type != BTRFS_QGROUP_RELATION_KEY)
			goto next2;

		if (found_key.objectid > found_key.offset) {
			/* parent <- member, not needed to build config */
			/* FIXME should we omit the key completely? */
			goto next2;
		}

		ret = add_relation_rb(fs_info, found_key.objectid,
				      found_key.offset);
		if (ret == -ENOENT) {
			btrfs_warn(fs_info,
				"orphan qgroup relation 0x%llx->0x%llx",
				found_key.objectid, found_key.offset);
			ret = 0;	/* ignore the error */
		}
		if (ret)
			goto out;
next2:
		ret = btrfs_next_item(quota_root, path);
		if (ret < 0)
			goto out;
		if (ret)
			break;
	}
out:
	btrfs_free_path(path);
	fs_info->qgroup_flags |= flags;
	if (!(fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_ON))
		clear_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
	else if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN &&
		 ret >= 0)
		ret = qgroup_rescan_init(fs_info, rescan_progress, 0);

	if (ret < 0) {
		ulist_free(fs_info->qgroup_ulist);
		fs_info->qgroup_ulist = NULL;
		fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;
		btrfs_sysfs_del_qgroups(fs_info);
	}

	return ret < 0 ? ret : 0;
}

/*
 * Called in close_ctree() when quota is still enabled.  This verifies we don't
 * leak some reserved space.
 *
 * Return false if no reserved space is left.
 * Return true if some reserved space is leaked.
 */
bool btrfs_check_quota_leak(struct btrfs_fs_info *fs_info)
{
	struct rb_node *node;
	bool ret = false;

	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
		return ret;
	/*
	 * Since we're unmounting, there is no race and no need to grab qgroup
	 * lock.  And here we don't go post-order to provide a more user
	 * friendly sorted result.
	 */
	for (node = rb_first(&fs_info->qgroup_tree); node; node = rb_next(node)) {
		struct btrfs_qgroup *qgroup;
		int i;

		qgroup = rb_entry(node, struct btrfs_qgroup, node);
		for (i = 0; i < BTRFS_QGROUP_RSV_LAST; i++) {
			if (qgroup->rsv.values[i]) {
				ret = true;
				btrfs_warn(fs_info,
		"qgroup %hu/%llu has unreleased space, type %d rsv %llu",
				   btrfs_qgroup_level(qgroup->qgroupid),
				   btrfs_qgroup_subvolid(qgroup->qgroupid),
				   i, qgroup->rsv.values[i]);
			}
		}
	}
	return ret;
}

/*
 * This is called from close_ctree() or open_ctree() or btrfs_quota_disable(),
 * the first two of which are single-threaded paths. For the third, quota_root
 * has already been set to NULL with qgroup_lock held, so it is safe to clean
 * up the in-memory structures without taking qgroup_lock.
 */
void btrfs_free_qgroup_config(struct btrfs_fs_info *fs_info)
{
	struct rb_node *n;
	struct btrfs_qgroup *qgroup;

	while ((n = rb_first(&fs_info->qgroup_tree))) {
		qgroup = rb_entry(n, struct btrfs_qgroup, node);
		rb_erase(n, &fs_info->qgroup_tree);
		__del_qgroup_rb(fs_info, qgroup);
		btrfs_sysfs_del_one_qgroup(fs_info, qgroup);
		kfree(qgroup);
	}
	/*
	 * btrfs_free_qgroup_config() is called when unmounting the
	 * filesystem and when disabling quota, so set qgroup_ulist to
	 * NULL here to avoid a double free.
	 */
	ulist_free(fs_info->qgroup_ulist);
	fs_info->qgroup_ulist = NULL;
	btrfs_sysfs_del_qgroups(fs_info);
}

static int add_qgroup_relation_item(struct btrfs_trans_handle *trans, u64 src,
				    u64 dst)
{
	int ret;
	struct btrfs_root *quota_root = trans->fs_info->quota_root;
	struct btrfs_path *path;
	struct btrfs_key key;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = src;
	key.type = BTRFS_QGROUP_RELATION_KEY;
	key.offset = dst;

	ret = btrfs_insert_empty_item(trans, quota_root, path, &key, 0);

	btrfs_mark_buffer_dirty(path->nodes[0]);

	btrfs_free_path(path);
	return ret;
}

static int del_qgroup_relation_item(struct btrfs_trans_handle *trans, u64 src,
				    u64 dst)
{
	int ret;
	struct btrfs_root *quota_root = trans->fs_info->quota_root;
	struct btrfs_path *path;
	struct btrfs_key key;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = src;
	key.type = BTRFS_QGROUP_RELATION_KEY;
	key.offset = dst;

	ret = btrfs_search_slot(trans, quota_root, &key, path, -1, 1);
	if (ret < 0)
		goto out;

	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	ret = btrfs_del_item(trans, quota_root, path);
out:
	btrfs_free_path(path);
	return ret;
}

static int add_qgroup_item(struct btrfs_trans_handle *trans,
			   struct btrfs_root *quota_root, u64 qgroupid)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_qgroup_info_item *qgroup_info;
	struct btrfs_qgroup_limit_item *qgroup_limit;
	struct extent_buffer *leaf;
	struct btrfs_key key;

	if (btrfs_is_testing(quota_root->fs_info))
		return 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = 0;
	key.type = BTRFS_QGROUP_INFO_KEY;
	key.offset = qgroupid;

	/*
	 * Avoid a transaction abort by catching -EEXIST here. In that
	 * case, we proceed by re-initializing the existing structure
	 * on disk.
	 */

	ret = btrfs_insert_empty_item(trans, quota_root, path, &key,
				      sizeof(*qgroup_info));
	if (ret && ret != -EEXIST)
		goto out;

	leaf = path->nodes[0];
	qgroup_info = btrfs_item_ptr(leaf, path->slots[0],
				 struct btrfs_qgroup_info_item);
	btrfs_set_qgroup_info_generation(leaf, qgroup_info, trans->transid);
	btrfs_set_qgroup_info_rfer(leaf, qgroup_info, 0);
	btrfs_set_qgroup_info_rfer_cmpr(leaf, qgroup_info, 0);
	btrfs_set_qgroup_info_excl(leaf, qgroup_info, 0);
	btrfs_set_qgroup_info_excl_cmpr(leaf, qgroup_info, 0);

	btrfs_mark_buffer_dirty(leaf);

	btrfs_release_path(path);

	key.type = BTRFS_QGROUP_LIMIT_KEY;
	ret = btrfs_insert_empty_item(trans, quota_root, path, &key,
				      sizeof(*qgroup_limit));
	if (ret && ret != -EEXIST)
		goto out;

	leaf = path->nodes[0];
	qgroup_limit = btrfs_item_ptr(leaf, path->slots[0],
				  struct btrfs_qgroup_limit_item);
	btrfs_set_qgroup_limit_flags(leaf, qgroup_limit, 0);
	btrfs_set_qgroup_limit_max_rfer(leaf, qgroup_limit, 0);
	btrfs_set_qgroup_limit_max_excl(leaf, qgroup_limit, 0);
	btrfs_set_qgroup_limit_rsv_rfer(leaf, qgroup_limit, 0);
	btrfs_set_qgroup_limit_rsv_excl(leaf, qgroup_limit, 0);

	btrfs_mark_buffer_dirty(leaf);

	ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}

static int del_qgroup_item(struct btrfs_trans_handle *trans, u64 qgroupid)
{
	int ret;
	struct btrfs_root *quota_root = trans->fs_info->quota_root;
	struct btrfs_path *path;
	struct btrfs_key key;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = 0;
	key.type = BTRFS_QGROUP_INFO_KEY;
	key.offset = qgroupid;
	ret = btrfs_search_slot(trans, quota_root, &key, path, -1, 1);
	if (ret < 0)
		goto out;

	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	ret = btrfs_del_item(trans, quota_root, path);
	if (ret)
		goto out;

	btrfs_release_path(path);

	key.type = BTRFS_QGROUP_LIMIT_KEY;
	ret = btrfs_search_slot(trans, quota_root, &key, path, -1, 1);
	if (ret < 0)
		goto out;

	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	ret = btrfs_del_item(trans, quota_root, path);

out:
	btrfs_free_path(path);
	return ret;
}

static int update_qgroup_limit_item(struct btrfs_trans_handle *trans,
				    struct btrfs_qgroup *qgroup)
{
	struct btrfs_root *quota_root = trans->fs_info->quota_root;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct extent_buffer *l;
	struct btrfs_qgroup_limit_item *qgroup_limit;
	int ret;
	int slot;

	key.objectid = 0;
	key.type = BTRFS_QGROUP_LIMIT_KEY;
	key.offset = qgroup->qgroupid;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_search_slot(trans, quota_root, &key, path, 0, 1);
	if (ret > 0)
		ret = -ENOENT;

	if (ret)
		goto out;

	l = path->nodes[0];
	slot = path->slots[0];
	qgroup_limit = btrfs_item_ptr(l, slot, struct btrfs_qgroup_limit_item);
	btrfs_set_qgroup_limit_flags(l, qgroup_limit, qgroup->lim_flags);
	btrfs_set_qgroup_limit_max_rfer(l, qgroup_limit, qgroup->max_rfer);
	btrfs_set_qgroup_limit_max_excl(l, qgroup_limit, qgroup->max_excl);
	btrfs_set_qgroup_limit_rsv_rfer(l, qgroup_limit, qgroup->rsv_rfer);
	btrfs_set_qgroup_limit_rsv_excl(l, qgroup_limit, qgroup->rsv_excl);

	btrfs_mark_buffer_dirty(l);

out:
	btrfs_free_path(path);
	return ret;
}

static int update_qgroup_info_item(struct btrfs_trans_handle *trans,
				   struct btrfs_qgroup *qgroup)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_root *quota_root = fs_info->quota_root;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct extent_buffer *l;
	struct btrfs_qgroup_info_item *qgroup_info;
	int ret;
	int slot;

	if (btrfs_is_testing(fs_info))
		return 0;

	key.objectid = 0;
	key.type = BTRFS_QGROUP_INFO_KEY;
	key.offset = qgroup->qgroupid;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_search_slot(trans, quota_root, &key, path, 0, 1);
	if (ret > 0)
		ret = -ENOENT;

	if (ret)
		goto out;

	l = path->nodes[0];
	slot = path->slots[0];
	qgroup_info = btrfs_item_ptr(l, slot, struct btrfs_qgroup_info_item);
	btrfs_set_qgroup_info_generation(l, qgroup_info, trans->transid);
	btrfs_set_qgroup_info_rfer(l, qgroup_info, qgroup->rfer);
	btrfs_set_qgroup_info_rfer_cmpr(l, qgroup_info, qgroup->rfer_cmpr);
	btrfs_set_qgroup_info_excl(l, qgroup_info, qgroup->excl);
	btrfs_set_qgroup_info_excl_cmpr(l, qgroup_info, qgroup->excl_cmpr);

	btrfs_mark_buffer_dirty(l);

out:
	btrfs_free_path(path);
	return ret;
}

static int update_qgroup_status_item(struct btrfs_trans_handle *trans)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_root *quota_root = fs_info->quota_root;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct extent_buffer *l;
	struct btrfs_qgroup_status_item *ptr;
	int ret;
	int slot;

	key.objectid = 0;
	key.type = BTRFS_QGROUP_STATUS_KEY;
	key.offset = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_search_slot(trans, quota_root, &key, path, 0, 1);
	if (ret > 0)
		ret = -ENOENT;

	if (ret)
		goto out;

	l = path->nodes[0];
	slot = path->slots[0];
	ptr = btrfs_item_ptr(l, slot, struct btrfs_qgroup_status_item);
	btrfs_set_qgroup_status_flags(l, ptr, fs_info->qgroup_flags);
	btrfs_set_qgroup_status_generation(l, ptr, trans->transid);
	btrfs_set_qgroup_status_rescan(l, ptr,
				fs_info->qgroup_rescan_progress.objectid);

	btrfs_mark_buffer_dirty(l);

out:
	btrfs_free_path(path);
	return ret;
}

/*
 * called with qgroup_lock held
 */
static int btrfs_clean_quota_tree(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root)
{
	struct btrfs_path *path;
	struct btrfs_key key;
	struct extent_buffer *leaf = NULL;
	int ret;
	int nr = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = 0;
	key.offset = 0;
	key.type = 0;

	while (1) {
		ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
		if (ret < 0)
			goto out;
		leaf = path->nodes[0];
		nr = btrfs_header_nritems(leaf);
		if (!nr)
			break;
		/*
		 * Delete the leaf items one by one, since the whole tree
		 * is going to be deleted anyway.
		 */
		path->slots[0] = 0;
		ret = btrfs_del_items(trans, root, path, 0, nr);
		if (ret)
			goto out;

		btrfs_release_path(path);
	}
	ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}
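
/*
 * Added commentary (not in the original file): each iteration above deletes
 * one whole leaf's worth of items, so that leaf becomes empty and is freed,
 * and the next search from key (0, 0, 0) lands on the new first leaf until
 * the tree is empty.  This keeps every btrfs_del_items() call bounded by a
 * single leaf.
 */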

int btrfs_quota_enable(struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *quota_root;
	struct btrfs_root *tree_root = fs_info->tree_root;
	struct btrfs_path *path = NULL;
	struct btrfs_qgroup_status_item *ptr;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_qgroup *qgroup = NULL;
	struct btrfs_trans_handle *trans = NULL;
	struct ulist *ulist = NULL;
	int ret = 0;
	int slot;

	mutex_lock(&fs_info->qgroup_ioctl_lock);
	if (fs_info->quota_root)
		goto out;

	ulist = ulist_alloc(GFP_KERNEL);
	if (!ulist) {
		ret = -ENOMEM;
		goto out;
	}

	ret = btrfs_sysfs_add_qgroups(fs_info);
	if (ret < 0)
		goto out;

	/*
	 * Unlock qgroup_ioctl_lock before starting the transaction. This is to
	 * avoid lock acquisition inversion problems (reported by lockdep) between
	 * qgroup_ioctl_lock and the vfs freeze semaphores, acquired when we
	 * start a transaction.
	 * After we started the transaction lock qgroup_ioctl_lock again and
	 * check if someone else created the quota root in the meanwhile. If so,
	 * just return success and release the transaction handle.
	 *
	 * Also we don't need to worry about someone else calling
	 * btrfs_sysfs_add_qgroups() after we unlock and getting an error because
	 * that function returns 0 (success) when the sysfs entries already exist.
	 */
	mutex_unlock(&fs_info->qgroup_ioctl_lock);

	/*
	 * 1 for quota root item
	 * 1 for BTRFS_QGROUP_STATUS item
	 *
	 * Yet we also need 2*n items for a QGROUP_INFO/QGROUP_LIMIT items
	 * per subvolume. However those are not currently reserved since it
	 * would be a lot of overkill.
	 */
	trans = btrfs_start_transaction(tree_root, 2);

	mutex_lock(&fs_info->qgroup_ioctl_lock);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		trans = NULL;
		goto out;
	}

	if (fs_info->quota_root)
		goto out;

	fs_info->qgroup_ulist = ulist;
	ulist = NULL;

	/*
	 * initially create the quota tree
	 */
	quota_root = btrfs_create_tree(trans, BTRFS_QUOTA_TREE_OBJECTID);
	if (IS_ERR(quota_root)) {
		ret = PTR_ERR(quota_root);
		btrfs_abort_transaction(trans, ret);
		goto out;
	}

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		btrfs_abort_transaction(trans, ret);
		goto out_free_root;
	}

	key.objectid = 0;
	key.type = BTRFS_QGROUP_STATUS_KEY;
	key.offset = 0;

	ret = btrfs_insert_empty_item(trans, quota_root, path, &key,
				      sizeof(*ptr));
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto out_free_path;
	}

	leaf = path->nodes[0];
	ptr = btrfs_item_ptr(leaf, path->slots[0],
				 struct btrfs_qgroup_status_item);
	btrfs_set_qgroup_status_generation(leaf, ptr, trans->transid);
	btrfs_set_qgroup_status_version(leaf, ptr, BTRFS_QGROUP_STATUS_VERSION);
	fs_info->qgroup_flags = BTRFS_QGROUP_STATUS_FLAG_ON |
				BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
	btrfs_set_qgroup_status_flags(leaf, ptr, fs_info->qgroup_flags);
	btrfs_set_qgroup_status_rescan(leaf, ptr, 0);

	btrfs_mark_buffer_dirty(leaf);

	key.objectid = 0;
	key.type = BTRFS_ROOT_REF_KEY;
	key.offset = 0;

	btrfs_release_path(path);
	ret = btrfs_search_slot_for_read(tree_root, &key, path, 1, 0);
	if (ret > 0)
		goto out_add_root;
	if (ret < 0) {
		btrfs_abort_transaction(trans, ret);
		goto out_free_path;
	}

	while (1) {
		slot = path->slots[0];
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &found_key, slot);

		if (found_key.type == BTRFS_ROOT_REF_KEY) {

			/* Release locks on tree_root before we access quota_root */
			btrfs_release_path(path);

			ret = add_qgroup_item(trans, quota_root,
					      found_key.offset);
			if (ret) {
				btrfs_abort_transaction(trans, ret);
				goto out_free_path;
			}

			qgroup = add_qgroup_rb(fs_info, found_key.offset);
			if (IS_ERR(qgroup)) {
				ret = PTR_ERR(qgroup);
				btrfs_abort_transaction(trans, ret);
				goto out_free_path;
			}
			ret = btrfs_sysfs_add_one_qgroup(fs_info, qgroup);
			if (ret < 0) {
				btrfs_abort_transaction(trans, ret);
				goto out_free_path;
			}
			ret = btrfs_search_slot_for_read(tree_root, &found_key,
							 path, 1, 0);
			if (ret < 0) {
				btrfs_abort_transaction(trans, ret);
				goto out_free_path;
			}
			if (ret > 0) {
				/*
				 * Shouldn't happen, but in case it does we
				 * don't need to do the btrfs_next_item, just
				 * continue.
				 */
				continue;
			}
		}
		ret = btrfs_next_item(tree_root, path);
		if (ret < 0) {
			btrfs_abort_transaction(trans, ret);
			goto out_free_path;
		}
		if (ret)
			break;
	}

out_add_root:
	btrfs_release_path(path);
	ret = add_qgroup_item(trans, quota_root, BTRFS_FS_TREE_OBJECTID);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto out_free_path;
	}

	qgroup = add_qgroup_rb(fs_info, BTRFS_FS_TREE_OBJECTID);
	if (IS_ERR(qgroup)) {
		ret = PTR_ERR(qgroup);
		btrfs_abort_transaction(trans, ret);
		goto out_free_path;
	}
	ret = btrfs_sysfs_add_one_qgroup(fs_info, qgroup);
	if (ret < 0) {
		btrfs_abort_transaction(trans, ret);
		goto out_free_path;
	}

	ret = btrfs_commit_transaction(trans);
	trans = NULL;
	if (ret)
		goto out_free_path;

	/*
	 * Set quota enabled flag after committing the transaction, to avoid
	 * deadlocks on fs_info->qgroup_ioctl_lock with concurrent snapshot
	 * creation.
	 */
	spin_lock(&fs_info->qgroup_lock);
	fs_info->quota_root = quota_root;
	set_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
	spin_unlock(&fs_info->qgroup_lock);

	ret = qgroup_rescan_init(fs_info, 0, 1);
	if (!ret) {
		qgroup_rescan_zero_tracking(fs_info);
		fs_info->qgroup_rescan_running = true;
		btrfs_queue_work(fs_info->qgroup_rescan_workers,
				 &fs_info->qgroup_rescan_work);
	}

out_free_path:
	btrfs_free_path(path);
out_free_root:
	if (ret)
		btrfs_put_root(quota_root);
out:
	if (ret) {
		ulist_free(fs_info->qgroup_ulist);
		fs_info->qgroup_ulist = NULL;
		btrfs_sysfs_del_qgroups(fs_info);
	}
	mutex_unlock(&fs_info->qgroup_ioctl_lock);
	if (ret && trans)
		btrfs_end_transaction(trans);
	else if (trans)
		ret = btrfs_end_transaction(trans);
	ulist_free(ulist);
	return ret;
}

int btrfs_quota_disable(struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *quota_root;
	struct btrfs_trans_handle *trans = NULL;
	int ret = 0;

	mutex_lock(&fs_info->qgroup_ioctl_lock);
	if (!fs_info->quota_root)
		goto out;
	mutex_unlock(&fs_info->qgroup_ioctl_lock);

	/*
	 * 1 For the root item
	 *
	 * We should also reserve enough items for the quota tree deletion in
	 * btrfs_clean_quota_tree but this is not done.
	 *
	 * Also, we must always start a transaction without holding the mutex
	 * qgroup_ioctl_lock, see btrfs_quota_enable().
	 */
	trans = btrfs_start_transaction(fs_info->tree_root, 1);

	mutex_lock(&fs_info->qgroup_ioctl_lock);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		trans = NULL;
		goto out;
	}

	if (!fs_info->quota_root)
		goto out;

	clear_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
	btrfs_qgroup_wait_for_completion(fs_info, false);
	spin_lock(&fs_info->qgroup_lock);
	quota_root = fs_info->quota_root;
	fs_info->quota_root = NULL;
	fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_ON;
	spin_unlock(&fs_info->qgroup_lock);

	btrfs_free_qgroup_config(fs_info);

	ret = btrfs_clean_quota_tree(trans, quota_root);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto out;
	}

	ret = btrfs_del_root(trans, &quota_root->root_key);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto out;
	}

	list_del(&quota_root->dirty_list);

	btrfs_tree_lock(quota_root->node);
	btrfs_clean_tree_block(quota_root->node);
	btrfs_tree_unlock(quota_root->node);
	btrfs_free_tree_block(trans, quota_root, quota_root->node, 0, 1);

	btrfs_put_root(quota_root);

out:
	mutex_unlock(&fs_info->qgroup_ioctl_lock);
	if (ret && trans)
		btrfs_end_transaction(trans);
	else if (trans)
		ret = btrfs_end_transaction(trans);

	return ret;
}

static void qgroup_dirty(struct btrfs_fs_info *fs_info,
			 struct btrfs_qgroup *qgroup)
{
	if (list_empty(&qgroup->dirty))
		list_add(&qgroup->dirty, &fs_info->dirty_qgroups);
}

/*
 * The easy accounting case: updating a qgroup relationship where the child
 * qgroup only has exclusive extents.
 *
 * In this case, all of the child's exclusive extents are also exclusive for
 * the parent, so excl/rfer just get added/removed.
 *
 * The same holds for qgroup reservation space, which must also be added
 * to/removed from the parent. Otherwise, when the child later releases
 * reservation space, the parent would underflow its reservation (in the
 * relationship-add case).
 *
 * Caller should hold fs_info->qgroup_lock.
 */
static int __qgroup_excl_accounting(struct btrfs_fs_info *fs_info,
				    struct ulist *tmp, u64 ref_root,
				    struct btrfs_qgroup *src, int sign)
{
	struct btrfs_qgroup *qgroup;
	struct btrfs_qgroup_list *glist;
	struct ulist_node *unode;
	struct ulist_iterator uiter;
	u64 num_bytes = src->excl;
	int ret = 0;

	qgroup = find_qgroup_rb(fs_info, ref_root);
	if (!qgroup)
		goto out;

	qgroup->rfer += sign * num_bytes;
	qgroup->rfer_cmpr += sign * num_bytes;

	WARN_ON(sign < 0 && qgroup->excl < num_bytes);
	qgroup->excl += sign * num_bytes;
	qgroup->excl_cmpr += sign * num_bytes;

	if (sign > 0)
		qgroup_rsv_add_by_qgroup(fs_info, qgroup, src);
	else
		qgroup_rsv_release_by_qgroup(fs_info, qgroup, src);

	qgroup_dirty(fs_info, qgroup);

	/* Get all of the parent groups that contain this qgroup */
	list_for_each_entry(glist, &qgroup->groups, next_group) {
		ret = ulist_add(tmp, glist->group->qgroupid,
				qgroup_to_aux(glist->group), GFP_ATOMIC);
		if (ret < 0)
			goto out;
	}

	/* Iterate all of the parents and adjust their reference counts */
	ULIST_ITER_INIT(&uiter);
	while ((unode = ulist_next(tmp, &uiter))) {
		qgroup = unode_aux_to_qgroup(unode);
		qgroup->rfer += sign * num_bytes;
		qgroup->rfer_cmpr += sign * num_bytes;
		WARN_ON(sign < 0 && qgroup->excl < num_bytes);
		qgroup->excl += sign * num_bytes;
		if (sign > 0)
			qgroup_rsv_add_by_qgroup(fs_info, qgroup, src);
		else
			qgroup_rsv_release_by_qgroup(fs_info, qgroup, src);
		qgroup->excl_cmpr += sign * num_bytes;
		qgroup_dirty(fs_info, qgroup);

		/* Add any parents of the parents */
		list_for_each_entry(glist, &qgroup->groups, next_group) {
			ret = ulist_add(tmp, glist->group->qgroupid,
					qgroup_to_aux(glist->group), GFP_ATOMIC);
			if (ret < 0)
				goto out;
		}
	}
	ret = 0;
out:
	return ret;
}
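
/*
 * Added commentary (not in the original file): a worked example of the
 * helper above.  If a child with rfer == excl == 1GiB is attached to a
 * parent (sign = +1), the parent's rfer/excl (and those of all of the
 * parent's ancestors) each grow by 1GiB, and the child's per-type
 * reservations are copied up via qgroup_rsv_add_by_qgroup(); detaching
 * (sign = -1) undoes both.
 */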


/*
 * Quick path for updating qgroup with only excl refs.
 *
 * In that case, just updating all parents will be enough.
 * Otherwise we need to do a full rescan.
 * Caller should also hold fs_info->qgroup_lock.
 *
 * Return 0 for a quick update, >0 if a full rescan is needed (in which
 * case the INCONSISTENT flag is set).
 * Return < 0 for other error.
 */
static int quick_update_accounting(struct btrfs_fs_info *fs_info,
				   struct ulist *tmp, u64 src, u64 dst,
				   int sign)
{
	struct btrfs_qgroup *qgroup;
	int ret = 1;
	int err = 0;

	qgroup = find_qgroup_rb(fs_info, src);
	if (!qgroup)
		goto out;
	if (qgroup->excl == qgroup->rfer) {
		ret = 0;
		err = __qgroup_excl_accounting(fs_info, tmp, dst,
					       qgroup, sign);
		if (err < 0) {
			ret = err;
			goto out;
		}
	}
out:
	if (ret)
		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
	return ret;
}

int btrfs_add_qgroup_relation(struct btrfs_trans_handle *trans, u64 src,
			      u64 dst)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_qgroup *parent;
	struct btrfs_qgroup *member;
	struct btrfs_qgroup_list *list;
	struct ulist *tmp;
	unsigned int nofs_flag;
	int ret = 0;

	/* Check the level of src and dst first */
	if (btrfs_qgroup_level(src) >= btrfs_qgroup_level(dst))
		return -EINVAL;

	/* We hold a transaction handle open, must do a NOFS allocation. */
	nofs_flag = memalloc_nofs_save();
	tmp = ulist_alloc(GFP_KERNEL);
	memalloc_nofs_restore(nofs_flag);
	if (!tmp)
		return -ENOMEM;

	mutex_lock(&fs_info->qgroup_ioctl_lock);
	if (!fs_info->quota_root) {
		ret = -ENOTCONN;
		goto out;
	}
	member = find_qgroup_rb(fs_info, src);
	parent = find_qgroup_rb(fs_info, dst);
	if (!member || !parent) {
		ret = -EINVAL;
		goto out;
	}

	/* Check if such a qgroup relation exists first */
	list_for_each_entry(list, &member->groups, next_group) {
		if (list->group == parent) {
			ret = -EEXIST;
			goto out;
		}
	}

	ret = add_qgroup_relation_item(trans, src, dst);
	if (ret)
		goto out;

	ret = add_qgroup_relation_item(trans, dst, src);
	if (ret) {
		del_qgroup_relation_item(trans, src, dst);
		goto out;
	}

	spin_lock(&fs_info->qgroup_lock);
	ret = add_relation_rb(fs_info, src, dst);
	if (ret < 0) {
		spin_unlock(&fs_info->qgroup_lock);
		goto out;
	}
	ret = quick_update_accounting(fs_info, tmp, src, dst, 1);
	spin_unlock(&fs_info->qgroup_lock);
out:
	mutex_unlock(&fs_info->qgroup_ioctl_lock);
	ulist_free(tmp);
	return ret;
}

static int __del_qgroup_relation(struct btrfs_trans_handle *trans, u64 src,
				 u64 dst)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_qgroup *parent;
	struct btrfs_qgroup *member;
	struct btrfs_qgroup_list *list;
	struct ulist *tmp;
	bool found = false;
	unsigned int nofs_flag;
	int ret = 0;
	int ret2;

	/* We hold a transaction handle open, must do a NOFS allocation. */
	nofs_flag = memalloc_nofs_save();
	tmp = ulist_alloc(GFP_KERNEL);
	memalloc_nofs_restore(nofs_flag);
	if (!tmp)
		return -ENOMEM;

	if (!fs_info->quota_root) {
		ret = -ENOTCONN;
		goto out;
	}

	member = find_qgroup_rb(fs_info, src);
	parent = find_qgroup_rb(fs_info, dst);
	/*
	 * If the parent/member pair doesn't exist, just try to delete the
	 * dead relation items.
	 */
	if (!member || !parent)
		goto delete_item;

	/* Check if such a qgroup relation exists first */
	list_for_each_entry(list, &member->groups, next_group) {
		if (list->group == parent) {
			found = true;
			break;
		}
	}

delete_item:
	ret = del_qgroup_relation_item(trans, src, dst);
	if (ret < 0 && ret != -ENOENT)
		goto out;
	ret2 = del_qgroup_relation_item(trans, dst, src);
	if (ret2 < 0 && ret2 != -ENOENT)
		goto out;

	/* At least one deletion succeeded, return 0 */
	if (!ret || !ret2)
		ret = 0;

	if (found) {
		spin_lock(&fs_info->qgroup_lock);
		del_relation_rb(fs_info, src, dst);
		ret = quick_update_accounting(fs_info, tmp, src, dst, -1);
		spin_unlock(&fs_info->qgroup_lock);
	}
out:
	ulist_free(tmp);
	return ret;
}

int btrfs_del_qgroup_relation(struct btrfs_trans_handle *trans, u64 src,
			      u64 dst)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	int ret = 0;

	mutex_lock(&fs_info->qgroup_ioctl_lock);
	ret = __del_qgroup_relation(trans, src, dst);
	mutex_unlock(&fs_info->qgroup_ioctl_lock);

	return ret;
}

int btrfs_create_qgroup(struct btrfs_trans_handle *trans, u64 qgroupid)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_root *quota_root;
	struct btrfs_qgroup *qgroup;
	int ret = 0;

	mutex_lock(&fs_info->qgroup_ioctl_lock);
	if (!fs_info->quota_root) {
		ret = -ENOTCONN;
		goto out;
	}
	quota_root = fs_info->quota_root;
	qgroup = find_qgroup_rb(fs_info, qgroupid);
	if (qgroup) {
		ret = -EEXIST;
		goto out;
	}

	ret = add_qgroup_item(trans, quota_root, qgroupid);
	if (ret)
		goto out;

	spin_lock(&fs_info->qgroup_lock);
	qgroup = add_qgroup_rb(fs_info, qgroupid);
	spin_unlock(&fs_info->qgroup_lock);

	if (IS_ERR(qgroup)) {
		ret = PTR_ERR(qgroup);
		goto out;
	}
	ret = btrfs_sysfs_add_one_qgroup(fs_info, qgroup);
out:
	mutex_unlock(&fs_info->qgroup_ioctl_lock);
	return ret;
}

int btrfs_remove_qgroup(struct btrfs_trans_handle *trans, u64 qgroupid)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_qgroup *qgroup;
	struct btrfs_qgroup_list *list;
	int ret = 0;

	mutex_lock(&fs_info->qgroup_ioctl_lock);
	if (!fs_info->quota_root) {
		ret = -ENOTCONN;
		goto out;
	}

	qgroup = find_qgroup_rb(fs_info, qgroupid);
	if (!qgroup) {
		ret = -ENOENT;
		goto out;
	}

	/* Check if there are no children of this qgroup */
	if (!list_empty(&qgroup->members)) {
		ret = -EBUSY;
		goto out;
	}

	ret = del_qgroup_item(trans, qgroupid);
	if (ret && ret != -ENOENT)
		goto out;

	while (!list_empty(&qgroup->groups)) {
		list = list_first_entry(&qgroup->groups,
					struct btrfs_qgroup_list, next_group);
		ret = __del_qgroup_relation(trans, qgroupid,
					    list->group->qgroupid);
		if (ret)
			goto out;
	}

	spin_lock(&fs_info->qgroup_lock);
	del_qgroup_rb(fs_info, qgroupid);
	spin_unlock(&fs_info->qgroup_lock);

	/*
	 * Remove the qgroup from sysfs now without holding the qgroup_lock
	 * spinlock, since the sysfs_remove_group() function needs to take
	 * the mutex kernfs_mutex through kernfs_remove_by_name_ns().
	 */
	btrfs_sysfs_del_one_qgroup(fs_info, qgroup);
	kfree(qgroup);
out:
	mutex_unlock(&fs_info->qgroup_ioctl_lock);
	return ret;
}

int btrfs_limit_qgroup(struct btrfs_trans_handle *trans, u64 qgroupid,
		       struct btrfs_qgroup_limit *limit)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_qgroup *qgroup;
	int ret = 0;
	/*
	 * Sometimes we would want to clear the limit on this qgroup.
	 * To meet this requirement, we treat -1 as a special value
	 * which tells the kernel to clear the limit on this qgroup.
	 */
	const u64 CLEAR_VALUE = -1;

	mutex_lock(&fs_info->qgroup_ioctl_lock);
	if (!fs_info->quota_root) {
		ret = -ENOTCONN;
		goto out;
	}

	qgroup = find_qgroup_rb(fs_info, qgroupid);
	if (!qgroup) {
		ret = -ENOENT;
		goto out;
	}

	spin_lock(&fs_info->qgroup_lock);
	if (limit->flags & BTRFS_QGROUP_LIMIT_MAX_RFER) {
		if (limit->max_rfer == CLEAR_VALUE) {
			qgroup->lim_flags &= ~BTRFS_QGROUP_LIMIT_MAX_RFER;
			limit->flags &= ~BTRFS_QGROUP_LIMIT_MAX_RFER;
			qgroup->max_rfer = 0;
		} else {
			qgroup->max_rfer = limit->max_rfer;
		}
	}
	if (limit->flags & BTRFS_QGROUP_LIMIT_MAX_EXCL) {
		if (limit->max_excl == CLEAR_VALUE) {
			qgroup->lim_flags &= ~BTRFS_QGROUP_LIMIT_MAX_EXCL;
			limit->flags &= ~BTRFS_QGROUP_LIMIT_MAX_EXCL;
			qgroup->max_excl = 0;
		} else {
			qgroup->max_excl = limit->max_excl;
		}
	}
	if (limit->flags & BTRFS_QGROUP_LIMIT_RSV_RFER) {
		if (limit->rsv_rfer == CLEAR_VALUE) {
			qgroup->lim_flags &= ~BTRFS_QGROUP_LIMIT_RSV_RFER;
			limit->flags &= ~BTRFS_QGROUP_LIMIT_RSV_RFER;
			qgroup->rsv_rfer = 0;
		} else {
			qgroup->rsv_rfer = limit->rsv_rfer;
		}
	}
	if (limit->flags & BTRFS_QGROUP_LIMIT_RSV_EXCL) {
		if (limit->rsv_excl == CLEAR_VALUE) {
			qgroup->lim_flags &= ~BTRFS_QGROUP_LIMIT_RSV_EXCL;
			limit->flags &= ~BTRFS_QGROUP_LIMIT_RSV_EXCL;
			qgroup->rsv_excl = 0;
		} else {
			qgroup->rsv_excl = limit->rsv_excl;
		}
	}
	qgroup->lim_flags |= limit->flags;

	spin_unlock(&fs_info->qgroup_lock);

	ret = update_qgroup_limit_item(trans, qgroup);
	if (ret) {
		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
		btrfs_info(fs_info, "unable to update quota limit for %llu",
		       qgroupid);
	}

out:
	mutex_unlock(&fs_info->qgroup_ioctl_lock);
	return ret;
}
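
/*
 * Added commentary (not in the original file): as an example of CLEAR_VALUE,
 * passing max_rfer == (u64)-1 with BTRFS_QGROUP_LIMIT_MAX_RFER set clears the
 * referenced-size limit rather than setting a ~16 EiB one.  The flag is
 * stripped from both the in-memory qgroup and the caller's copy so that the
 * on-disk limit item is written without it.
 */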

int btrfs_qgroup_trace_extent_nolock(struct btrfs_fs_info *fs_info,
				struct btrfs_delayed_ref_root *delayed_refs,
				struct btrfs_qgroup_extent_record *record)
{
	struct rb_node **p = &delayed_refs->dirty_extent_root.rb_node;
	struct rb_node *parent_node = NULL;
	struct btrfs_qgroup_extent_record *entry;
	u64 bytenr = record->bytenr;

	lockdep_assert_held(&delayed_refs->lock);
	trace_btrfs_qgroup_trace_extent(fs_info, record);

	while (*p) {
		parent_node = *p;
		entry = rb_entry(parent_node, struct btrfs_qgroup_extent_record,
				 node);
		if (bytenr < entry->bytenr) {
			p = &(*p)->rb_left;
		} else if (bytenr > entry->bytenr) {
			p = &(*p)->rb_right;
		} else {
			if (record->data_rsv && !entry->data_rsv) {
				entry->data_rsv = record->data_rsv;
				entry->data_rsv_refroot =
					record->data_rsv_refroot;
			}
			return 1;
		}
	}

	rb_link_node(&record->node, parent_node, p);
	rb_insert_color(&record->node, &delayed_refs->dirty_extent_root);
	return 0;
}
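
/*
 * Added commentary (not in the original file): callers allocate the record,
 * insert it under delayed_refs->lock, and free it themselves when the insert
 * reports a duplicate (return value 1), as btrfs_qgroup_trace_extent() below
 * does:
 *
 *	spin_lock(&delayed_refs->lock);
 *	ret = btrfs_qgroup_trace_extent_nolock(fs_info, delayed_refs, record);
 *	spin_unlock(&delayed_refs->lock);
 *	if (ret > 0)
 *		kfree(record);
 */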

int btrfs_qgroup_trace_extent_post(struct btrfs_fs_info *fs_info,
				   struct btrfs_qgroup_extent_record *qrecord)
{
	struct ulist *old_root;
	u64 bytenr = qrecord->bytenr;
	int ret;

	ret = btrfs_find_all_roots(NULL, fs_info, bytenr, 0, &old_root, false);
	if (ret < 0) {
		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
		btrfs_warn(fs_info,
"error accounting new delayed refs extent (err code: %d), quota inconsistent",
			ret);
		return 0;
	}

	/*
	 * We don't need to take trans->transaction->delayed_refs' lock here:
	 * the inserted qrecord won't be deleted, only qrecord->node may be
	 * modified (by a new qrecord insert).
	 *
	 * So modifying qrecord->old_roots is safe here.
	 */
	qrecord->old_roots = old_root;
	return 0;
}
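
/*
 * Added commentary (not in the original file): passing a NULL transaction
 * handle to btrfs_find_all_roots() above makes it walk the commit roots, so
 * old_roots captures the set of roots referencing the extent as of the last
 * committed transaction, i.e. before the current change is applied.
 */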

int btrfs_qgroup_trace_extent(struct btrfs_trans_handle *trans, u64 bytenr,
			      u64 num_bytes, gfp_t gfp_flag)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_qgroup_extent_record *record;
	struct btrfs_delayed_ref_root *delayed_refs;
	int ret;

	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags)
	    || bytenr == 0 || num_bytes == 0)
		return 0;
	record = kzalloc(sizeof(*record), gfp_flag);
	if (!record)
		return -ENOMEM;

	delayed_refs = &trans->transaction->delayed_refs;
	record->bytenr = bytenr;
	record->num_bytes = num_bytes;
	record->old_roots = NULL;

	spin_lock(&delayed_refs->lock);
	ret = btrfs_qgroup_trace_extent_nolock(fs_info, delayed_refs, record);
	spin_unlock(&delayed_refs->lock);
	if (ret > 0) {
		kfree(record);
		return 0;
	}
	return btrfs_qgroup_trace_extent_post(fs_info, record);
}

int btrfs_qgroup_trace_leaf_items(struct btrfs_trans_handle *trans,
				  struct extent_buffer *eb)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	int nr = btrfs_header_nritems(eb);
	int i, extent_type, ret;
	struct btrfs_key key;
	struct btrfs_file_extent_item *fi;
	u64 bytenr, num_bytes;

	/* We can be called directly from walk_up_proc() */
	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
		return 0;

	for (i = 0; i < nr; i++) {
		btrfs_item_key_to_cpu(eb, &key, i);

		if (key.type != BTRFS_EXTENT_DATA_KEY)
			continue;

		fi = btrfs_item_ptr(eb, i, struct btrfs_file_extent_item);
		/* filter out non qgroup-accountable extents */
		extent_type = btrfs_file_extent_type(eb, fi);

		if (extent_type == BTRFS_FILE_EXTENT_INLINE)
			continue;

		bytenr = btrfs_file_extent_disk_bytenr(eb, fi);
		if (!bytenr)
			continue;

		num_bytes = btrfs_file_extent_disk_num_bytes(eb, fi);

		ret = btrfs_qgroup_trace_extent(trans, bytenr, num_bytes,
						GFP_NOFS);
		if (ret)
			return ret;
	}
	cond_resched();
	return 0;
}

/*
 * Walk up the tree from the bottom, freeing leaves and any interior
 * nodes which have had all slots visited. If a node (leaf or
 * interior) is freed, the node above it will have its slot
 * incremented. The root node will never be freed.
 *
 * At the end of this function, we should have a path which has all
 * slots incremented to the next position for a search. If we need to
 * read a new node it will be NULL and the node above it will have the
 * correct slot selected for a later read.
 *
 * If we increment the root nodes slot counter past the number of
 * elements, 1 is returned to signal completion of the search.
 */
static int adjust_slots_upwards(struct btrfs_path *path, int root_level)
{
	int level = 0;
	int nr, slot;
	struct extent_buffer *eb;

	if (root_level == 0)
		return 1;

	while (level <= root_level) {
		eb = path->nodes[level];
		nr = btrfs_header_nritems(eb);
		path->slots[level]++;
		slot = path->slots[level];
		if (slot >= nr || level == 0) {
			/*
			 * Don't free the root -  we will detect this
			 * condition after our loop and return a
			 * positive value for caller to stop walking the tree.
			 */
			if (level != root_level) {
				btrfs_tree_unlock_rw(eb, path->locks[level]);
				path->locks[level] = 0;

				free_extent_buffer(eb);
				path->nodes[level] = NULL;
				path->slots[level] = 0;
			}
		} else {
			/*
			 * We have a valid slot to walk back down
			 * from. Stop here so caller can process these
			 * new nodes.
			 */
			break;
		}

		level++;
	}

	eb = path->nodes[root_level];
	if (path->slots[root_level] >= btrfs_header_nritems(eb))
		return 1;

	return 0;
}

1867 1868 1869 1870 1871 1872 1873 1874 1875 1876 1877 1878 1879 1880 1881 1882 1883 1884 1885 1886 1887 1888 1889 1890 1891 1892 1893 1894 1895 1896 1897 1898 1899 1900 1901 1902
/*
 * Helper function to trace a subtree tree block swap.
 *
 * The swap will happen in highest tree block, but there may be a lot of
 * tree blocks involved.
 *
 * For example:
 *  OO = Old tree blocks
 *  NN = New tree blocks allocated during balance
 *
 *           File tree (257)                  Reloc tree for 257
 * L2              OO                                NN
 *               /    \                            /    \
 * L1          OO      OO (a)                    OO      NN (a)
 *            / \     / \                       / \     / \
 * L0       OO   OO OO   OO                   OO   OO NN   NN
 *                  (b)  (c)                          (b)  (c)
 *
 * When calling qgroup_trace_extent_swap(), we will pass:
 * @src_eb = OO(a)
 * @dst_path = [ nodes[1] = NN(a), nodes[0] = NN(c) ]
 * @dst_level = 0
 * @root_level = 1
 *
 * In that case, qgroup_trace_extent_swap() will search from OO(a) to
 * reach OO(c), then mark both OO(c) and NN(c) as qgroup dirty.
 *
 * The main work of qgroup_trace_extent_swap() can be split into 3 parts:
 *
 * 1) Tree search from @src_eb
 *    It acts as a simplified btrfs_search_slot().
 *    The key for the search can be extracted from @dst_path->nodes[dst_level]
 *    (first key).
 *
 * 2) Mark the final tree blocks in @src_path and @dst_path qgroup dirty
 *    NOTE: In the above case, OO(a) and NN(a) won't be marked qgroup dirty.
 *    They should have been marked during the previous (@dst_level = 1)
 *    iteration.
 *
 * 3) Mark file extents in leaves dirty
 *    We don't have a good way to pick out new file extents only.
 *    So we still follow the old method by scanning all file extents in
 *    the leaves.
 *
 * This function frees us from keeping two paths, thus later we only need
 * to care about how to iterate all new tree blocks in the reloc tree.
 */
static int qgroup_trace_extent_swap(struct btrfs_trans_handle* trans,
				    struct extent_buffer *src_eb,
				    struct btrfs_path *dst_path,
				    int dst_level, int root_level,
				    bool trace_leaf)
{
	struct btrfs_key key;
	struct btrfs_path *src_path;
	struct btrfs_fs_info *fs_info = trans->fs_info;
	u32 nodesize = fs_info->nodesize;
	int cur_level = root_level;
	int ret;

	BUG_ON(dst_level > root_level);
	/* Level mismatch */
	if (btrfs_header_level(src_eb) != root_level)
		return -EINVAL;

	src_path = btrfs_alloc_path();
	if (!src_path) {
		ret = -ENOMEM;
		goto out;
	}

	if (dst_level)
		btrfs_node_key_to_cpu(dst_path->nodes[dst_level], &key, 0);
	else
		btrfs_item_key_to_cpu(dst_path->nodes[dst_level], &key, 0);

	/* For src_path */
	atomic_inc(&src_eb->refs);
	src_path->nodes[root_level] = src_eb;
	src_path->slots[root_level] = dst_path->slots[root_level];
	src_path->locks[root_level] = 0;

	/* A simplified version of btrfs_search_slot() */
	while (cur_level >= dst_level) {
		struct btrfs_key src_key;
		struct btrfs_key dst_key;

		if (src_path->nodes[cur_level] == NULL) {
			struct extent_buffer *eb;
			int parent_slot;

			eb = src_path->nodes[cur_level + 1];
			parent_slot = src_path->slots[cur_level + 1];

			eb = btrfs_read_node_slot(eb, parent_slot);
			if (IS_ERR(eb)) {
				ret = PTR_ERR(eb);
				goto out;
			}

			src_path->nodes[cur_level] = eb;

			btrfs_tree_read_lock(eb);
			src_path->locks[cur_level] = BTRFS_READ_LOCK;
		}

		src_path->slots[cur_level] = dst_path->slots[cur_level];
		if (cur_level) {
			btrfs_node_key_to_cpu(dst_path->nodes[cur_level],
					&dst_key, dst_path->slots[cur_level]);
			btrfs_node_key_to_cpu(src_path->nodes[cur_level],
					&src_key, src_path->slots[cur_level]);
		} else {
			btrfs_item_key_to_cpu(dst_path->nodes[cur_level],
					&dst_key, dst_path->slots[cur_level]);
			btrfs_item_key_to_cpu(src_path->nodes[cur_level],
					&src_key, src_path->slots[cur_level]);
		}
		/* Content mismatch, something went wrong */
		if (btrfs_comp_cpu_keys(&dst_key, &src_key)) {
			ret = -ENOENT;
			goto out;
		}
		cur_level--;
	}

	/*
	 * Now both @dst_path and @src_path have been populated, record the tree
	 * blocks for qgroup accounting.
	 */
	ret = btrfs_qgroup_trace_extent(trans, src_path->nodes[dst_level]->start,
			nodesize, GFP_NOFS);
	if (ret < 0)
		goto out;
	ret = btrfs_qgroup_trace_extent(trans,
			dst_path->nodes[dst_level]->start,
			nodesize, GFP_NOFS);
	if (ret < 0)
		goto out;

	/* Record leaf file extents */
	if (dst_level == 0 && trace_leaf) {
		ret = btrfs_qgroup_trace_leaf_items(trans, src_path->nodes[0]);
		if (ret < 0)
			goto out;
		ret = btrfs_qgroup_trace_leaf_items(trans, dst_path->nodes[0]);
	}
out:
	btrfs_free_path(src_path);
	return ret;
}

/*
 * Helper function to do recursive generation-aware depth-first search, to
 * locate all new tree blocks in a subtree of reloc tree.
 *
 * E.g. (OO = Old tree blocks, NN = New tree blocks, whose gen == last_snapshot)
 *         reloc tree
 * L2         NN (a)
 *          /    \
 * L1    OO        NN (b)
 *      /  \      /  \
 * L0  OO  OO    OO  NN
 *               (c) (d)
 * If we pass:
 * @dst_path = [ nodes[1] = NN(b), nodes[0] = NULL ],
 * @cur_level = 1
 * @root_level = 1
 *
 * We will iterate through tree blocks NN(b), NN(d) and inform qgroup to trace
 * the above tree blocks along with their counterparts in the file tree.
 * During the search, the old tree block OO(c) will be skipped, as the tree
 * block swap won't affect OO(c).
 */
static int qgroup_trace_new_subtree_blocks(struct btrfs_trans_handle* trans,
					   struct extent_buffer *src_eb,
					   struct btrfs_path *dst_path,
					   int cur_level, int root_level,
					   u64 last_snapshot, bool trace_leaf)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct extent_buffer *eb;
	bool need_cleanup = false;
	int ret = 0;
	int i;

	/* Level sanity check */
	if (cur_level < 0 || cur_level >= BTRFS_MAX_LEVEL - 1 ||
	    root_level < 0 || root_level >= BTRFS_MAX_LEVEL - 1 ||
	    root_level < cur_level) {
		btrfs_err_rl(fs_info,
			"%s: bad levels, cur_level=%d root_level=%d",
			__func__, cur_level, root_level);
		return -EUCLEAN;
	}

	/* Read the tree block if needed */
	if (dst_path->nodes[cur_level] == NULL) {
		int parent_slot;
		u64 child_gen;

		/*
		 * dst_path->nodes[root_level] must be initialized before
		 * calling this function.
		 */
		if (cur_level == root_level) {
			btrfs_err_rl(fs_info,
	"%s: dst_path->nodes[%d] not initialized, root_level=%d cur_level=%d",
				__func__, root_level, root_level, cur_level);
			return -EUCLEAN;
		}

		/*
		 * We need to get child blockptr/gen from parent before we can
		 * read it.
		 */
		eb = dst_path->nodes[cur_level + 1];
		parent_slot = dst_path->slots[cur_level + 1];
		child_gen = btrfs_node_ptr_generation(eb, parent_slot);

		/* This node is old, no need to trace */
		if (child_gen < last_snapshot)
			goto out;

		eb = btrfs_read_node_slot(eb, parent_slot);
		if (IS_ERR(eb)) {
			ret = PTR_ERR(eb);
			goto out;
		}

		dst_path->nodes[cur_level] = eb;
		dst_path->slots[cur_level] = 0;

		btrfs_tree_read_lock(eb);
		dst_path->locks[cur_level] = BTRFS_READ_LOCK;
		need_cleanup = true;
	}

	/* Now record this tree block and its counter part for qgroups */
	ret = qgroup_trace_extent_swap(trans, src_eb, dst_path, cur_level,
				       root_level, trace_leaf);
	if (ret < 0)
		goto cleanup;

	eb = dst_path->nodes[cur_level];

	if (cur_level > 0) {
		/* Iterate all child tree blocks */
		for (i = 0; i < btrfs_header_nritems(eb); i++) {
			/* Skip old tree blocks as they won't be swapped */
			if (btrfs_node_ptr_generation(eb, i) < last_snapshot)
				continue;
			dst_path->slots[cur_level] = i;

			/* Recursive call (at most 7 times) */
			ret = qgroup_trace_new_subtree_blocks(trans, src_eb,
					dst_path, cur_level - 1, root_level,
					last_snapshot, trace_leaf);
			if (ret < 0)
				goto cleanup;
		}
	}

cleanup:
	if (need_cleanup) {
		/* Clean up */
		btrfs_tree_unlock_rw(dst_path->nodes[cur_level],
				     dst_path->locks[cur_level]);
		free_extent_buffer(dst_path->nodes[cur_level]);
		dst_path->nodes[cur_level] = NULL;
		dst_path->slots[cur_level] = 0;
		dst_path->locks[cur_level] = 0;
	}
out:
	return ret;
}

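/*
 * Inform qgroup to trace a subtree swap, as used during balance when a file
 * tree subtree is exchanged with its reloc tree counterpart.  @src_eb must
 * come from the older tree; the generation check below enforces the
 * parameter order.
 */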
static int qgroup_trace_subtree_swap(struct btrfs_trans_handle *trans,
				struct extent_buffer *src_eb,
				struct extent_buffer *dst_eb,
				u64 last_snapshot, bool trace_leaf)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_path *dst_path = NULL;
	int level;
	int ret;

	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
		return 0;

	/* Wrong parameter order */
	if (btrfs_header_generation(src_eb) > btrfs_header_generation(dst_eb)) {
		btrfs_err_rl(fs_info,
		"%s: bad parameter order, src_gen=%llu dst_gen=%llu", __func__,
			     btrfs_header_generation(src_eb),
			     btrfs_header_generation(dst_eb));
		return -EUCLEAN;
	}

	if (!extent_buffer_uptodate(src_eb) || !extent_buffer_uptodate(dst_eb)) {
		ret = -EIO;
		goto out;
	}

	level = btrfs_header_level(dst_eb);
	dst_path = btrfs_alloc_path();
	if (!dst_path) {
		ret = -ENOMEM;
		goto out;
	}
	/* For dst_path */
	atomic_inc(&dst_eb->refs);
	dst_path->nodes[level] = dst_eb;
	dst_path->slots[level] = 0;
	dst_path->locks[level] = 0;

	/* Do the generation-aware depth-first search */
	ret = qgroup_trace_new_subtree_blocks(trans, src_eb, dst_path, level,
					      level, last_snapshot, trace_leaf);
	if (ret < 0)
		goto out;
	ret = 0;

out:
	btrfs_free_path(dst_path);
	if (ret < 0)
		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
	return ret;
}

int btrfs_qgroup_trace_subtree(struct btrfs_trans_handle *trans,
			       struct extent_buffer *root_eb,
			       u64 root_gen, int root_level)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	int ret = 0;
	int level;
	struct extent_buffer *eb = root_eb;
	struct btrfs_path *path = NULL;

	BUG_ON(root_level < 0 || root_level >= BTRFS_MAX_LEVEL);
	BUG_ON(root_eb == NULL);

	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
		return 0;

	if (!extent_buffer_uptodate(root_eb)) {
		ret = btrfs_read_buffer(root_eb, root_gen, root_level, NULL);
		if (ret)
			goto out;
	}

	if (root_level == 0) {
		ret = btrfs_qgroup_trace_leaf_items(trans, root_eb);
		goto out;
	}

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/*
	 * Walk down the tree.  Missing extent blocks are filled in as
	 * we go. Metadata is accounted every time we read a new
	 * extent block.
	 *
	 * When we reach a leaf, we account for file extent items in it,
	 * walk back up the tree (adjusting slot pointers as we go)
	 * and restart the search process.
	 */
	atomic_inc(&root_eb->refs);	/* For path */
	path->nodes[root_level] = root_eb;
	path->slots[root_level] = 0;
	path->locks[root_level] = 0; /* so release_path doesn't try to unlock */
walk_down:
	level = root_level;
	while (level >= 0) {
		if (path->nodes[level] == NULL) {
			int parent_slot;
			u64 child_bytenr;

			/*
			 * We need to get child blockptr from parent before we
			 * can read it.
			 */
			eb = path->nodes[level + 1];
			parent_slot = path->slots[level + 1];
			child_bytenr = btrfs_node_blockptr(eb, parent_slot);

			eb = btrfs_read_node_slot(eb, parent_slot);
			if (IS_ERR(eb)) {
				ret = PTR_ERR(eb);
				goto out;
			}

			path->nodes[level] = eb;
			path->slots[level] = 0;

			btrfs_tree_read_lock(eb);
			path->locks[level] = BTRFS_READ_LOCK;

			ret = btrfs_qgroup_trace_extent(trans, child_bytenr,
							fs_info->nodesize,
							GFP_NOFS);
			if (ret)
				goto out;
		}

		if (level == 0) {
			ret = btrfs_qgroup_trace_leaf_items(trans,
							    path->nodes[level]);
			if (ret)
				goto out;

			/* Nonzero return here means we completed our search */
			ret = adjust_slots_upwards(path, root_level);
			if (ret)
				break;

			/* Restart search with new slots */
			goto walk_down;
		}

		level--;
	}

	ret = 0;
out:
	btrfs_free_path(path);

	return ret;
}

#define UPDATE_NEW	0
#define UPDATE_OLD	1
/*
 * Walk all of the roots that point to the bytenr and adjust their refcnts.
 */
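/*
 * Example: if @roots holds fs root 257 and qgroup 0/257 is a member of
 * 1/100, the walk below bumps the old (UPDATE_OLD) or new (UPDATE_NEW)
 * refcnt of both 0/257 and 1/100 by one for @seq, and collects both
 * qgroups into @qgroups for qgroup_update_counters() to process.
 */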
static int qgroup_update_refcnt(struct btrfs_fs_info *fs_info,
				struct ulist *roots, struct ulist *tmp,
				struct ulist *qgroups, u64 seq, int update_old)
{
	struct ulist_node *unode;
	struct ulist_iterator uiter;
	struct ulist_node *tmp_unode;
	struct ulist_iterator tmp_uiter;
	struct btrfs_qgroup *qg;
	int ret = 0;

	if (!roots)
		return 0;
	ULIST_ITER_INIT(&uiter);
	while ((unode = ulist_next(roots, &uiter))) {
		qg = find_qgroup_rb(fs_info, unode->val);
		if (!qg)
			continue;

		ulist_reinit(tmp);
		ret = ulist_add(qgroups, qg->qgroupid, qgroup_to_aux(qg),
				GFP_ATOMIC);
		if (ret < 0)
			return ret;
		ret = ulist_add(tmp, qg->qgroupid, qgroup_to_aux(qg), GFP_ATOMIC);
		if (ret < 0)
			return ret;
		ULIST_ITER_INIT(&tmp_uiter);
		while ((tmp_unode = ulist_next(tmp, &tmp_uiter))) {
			struct btrfs_qgroup_list *glist;

			qg = unode_aux_to_qgroup(tmp_unode);
			if (update_old)
				btrfs_qgroup_update_old_refcnt(qg, seq, 1);
			else
				btrfs_qgroup_update_new_refcnt(qg, seq, 1);
			list_for_each_entry(glist, &qg->groups, next_group) {
				ret = ulist_add(qgroups, glist->group->qgroupid,
						qgroup_to_aux(glist->group),
						GFP_ATOMIC);
				if (ret < 0)
					return ret;
				ret = ulist_add(tmp, glist->group->qgroupid,
						qgroup_to_aux(glist->group),
						GFP_ATOMIC);
				if (ret < 0)
					return ret;
			}
		}
	}
	return 0;
}

/*
 * Update qgroup rfer/excl counters.
 * Rfer update is easy, the code explains itself.
 *
 * Excl update is tricky, the update is split into 2 parts.
 * Part 1: Possible exclusive <-> sharing detect:
 *	|	A	|	!A	|
 *  -------------------------------------
 *  B	|	*	|	-	|
 *  -------------------------------------
 *  !B	|	+	|	**	|
 *  -------------------------------------
 *
 * Conditions:
 * A:	cur_old_roots < nr_old_roots	(not exclusive before)
 * !A:	cur_old_roots == nr_old_roots	(possible exclusive before)
 * B:	cur_new_roots < nr_new_roots	(not exclusive now)
 * !B:	cur_new_roots == nr_new_roots	(possible exclusive now)
 *
 * Results:
 * +: Possible sharing -> exclusive	-: Possible exclusive -> sharing
 * *: Definitely not changed.		**: Possible unchanged.
 *
 * For !A and !B condition, the exception is cur_old/new_roots == 0 case.
 *
 * To make the logic clear, we first use condition A and B to split
 * combination into 4 results.
 *
 * Then, for results "+" and "-", check the old/new_roots == 0 case, as
 * there only one variant may be 0.
 *
 * Lastly, check result **: since there are 2 variants that may be 0, split
 * them again (2x2).
 * But this time we don't need to consider other things; the code and logic
 * are easy to understand now.
 */
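/*
 * Example: an extent shared by subvolumes 257 and 258 (nr_old_roots = 2)
 * that ends up referenced only by 257 (nr_new_roots = 1): for 0/257,
 * cur_old_count = 1 < 2 and cur_new_count = 1 == 1, so the "+" case hits
 * and excl grows by num_bytes; for 0/258, cur_new_count = 0, so its rfer
 * shrinks by num_bytes while excl is untouched (the extent was never
 * exclusive to it).
 */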
static int qgroup_update_counters(struct btrfs_fs_info *fs_info,
				  struct ulist *qgroups,
				  u64 nr_old_roots,
				  u64 nr_new_roots,
				  u64 num_bytes, u64 seq)
{
	struct ulist_node *unode;
	struct ulist_iterator uiter;
	struct btrfs_qgroup *qg;
	u64 cur_new_count, cur_old_count;

	ULIST_ITER_INIT(&uiter);
	while ((unode = ulist_next(qgroups, &uiter))) {
		bool dirty = false;

		qg = unode_aux_to_qgroup(unode);
		cur_old_count = btrfs_qgroup_get_old_refcnt(qg, seq);
		cur_new_count = btrfs_qgroup_get_new_refcnt(qg, seq);

		trace_qgroup_update_counters(fs_info, qg, cur_old_count,
					     cur_new_count);

		/* Rfer update part */
		if (cur_old_count == 0 && cur_new_count > 0) {
			qg->rfer += num_bytes;
			qg->rfer_cmpr += num_bytes;
			dirty = true;
		}
		if (cur_old_count > 0 && cur_new_count == 0) {
			qg->rfer -= num_bytes;
			qg->rfer_cmpr -= num_bytes;
			dirty = true;
		}

		/* Excl update part */
		/* Exclusive/none -> shared case */
		if (cur_old_count == nr_old_roots &&
		    cur_new_count < nr_new_roots) {
			/* Exclusive -> shared */
			if (cur_old_count != 0) {
				qg->excl -= num_bytes;
				qg->excl_cmpr -= num_bytes;
				dirty = true;
			}
		}

		/* Shared -> exclusive/none case */
		if (cur_old_count < nr_old_roots &&
		    cur_new_count == nr_new_roots) {
			/* Shared->exclusive */
			if (cur_new_count != 0) {
				qg->excl += num_bytes;
				qg->excl_cmpr += num_bytes;
				dirty = true;
			}
		}

		/* Exclusive/none -> exclusive/none case */
		if (cur_old_count == nr_old_roots &&
		    cur_new_count == nr_new_roots) {
			if (cur_old_count == 0) {
				/* None -> exclusive/none */

				if (cur_new_count != 0) {
					/* None -> exclusive */
					qg->excl += num_bytes;
					qg->excl_cmpr += num_bytes;
					dirty = true;
				}
				/* None -> none, nothing changed */
			} else {
				/* Exclusive -> exclusive/none */

				if (cur_new_count == 0) {
					/* Exclusive -> none */
					qg->excl -= num_bytes;
					qg->excl_cmpr -= num_bytes;
					dirty = true;
				}
				/* Exclusive -> exclusive, nothing changed */
			}
		}

		if (dirty)
			qgroup_dirty(fs_info, qg);
	}
	return 0;
}

/*
 * Check if the @roots potentially is a list of fs tree roots
 *
 * Return 0 for definitely not a fs/subvol tree roots ulist
 * Return 1 for possible fs/subvol tree roots in the list (considering an empty
 *          one as well)
 */
static int maybe_fs_roots(struct ulist *roots)
{
	struct ulist_node *unode;
	struct ulist_iterator uiter;

	/* Empty one, still possible for fs roots */
	if (!roots || roots->nnodes == 0)
		return 1;

	ULIST_ITER_INIT(&uiter);
	unode = ulist_next(roots, &uiter);
	if (!unode)
		return 1;

	/*
	 * If it contains fs tree roots, then it must belong to fs/subvol
	 * trees.
	 * If it contains a non-fs tree, it won't be shared with fs/subvol trees.
	 */
	return is_fstree(unode->val);
}

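/*
 * Account one extent's qgroup counter updates, given the set of roots that
 * referenced it before (@old_roots) and after (@new_roots) the change.
 * Both ulists are always freed before returning.
 */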
int btrfs_qgroup_account_extent(struct btrfs_trans_handle *trans, u64 bytenr,
				u64 num_bytes, struct ulist *old_roots,
				struct ulist *new_roots)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct ulist *qgroups = NULL;
	struct ulist *tmp = NULL;
	u64 seq;
	u64 nr_new_roots = 0;
	u64 nr_old_roots = 0;
	int ret = 0;

	/*
	 * If quotas get disabled meanwhile, the resources need to be freed and
	 * we can't just exit here.
	 */
	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
		goto out_free;

	if (new_roots) {
		if (!maybe_fs_roots(new_roots))
			goto out_free;
		nr_new_roots = new_roots->nnodes;
	}
	if (old_roots) {
		if (!maybe_fs_roots(old_roots))
			goto out_free;
		nr_old_roots = old_roots->nnodes;
	}

	/* Quick exit, either not fs tree roots, or won't affect any qgroup */
	if (nr_old_roots == 0 && nr_new_roots == 0)
		goto out_free;

	BUG_ON(!fs_info->quota_root);

	trace_btrfs_qgroup_account_extent(fs_info, trans->transid, bytenr,
					num_bytes, nr_old_roots, nr_new_roots);

	qgroups = ulist_alloc(GFP_NOFS);
	if (!qgroups) {
		ret = -ENOMEM;
		goto out_free;
	}
	tmp = ulist_alloc(GFP_NOFS);
	if (!tmp) {
		ret = -ENOMEM;
		goto out_free;
	}

	mutex_lock(&fs_info->qgroup_rescan_lock);
	if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN) {
		if (fs_info->qgroup_rescan_progress.objectid <= bytenr) {
			mutex_unlock(&fs_info->qgroup_rescan_lock);
			ret = 0;
			goto out_free;
		}
	}
	mutex_unlock(&fs_info->qgroup_rescan_lock);

	spin_lock(&fs_info->qgroup_lock);
	seq = fs_info->qgroup_seq;

	/* Update old refcnts using old_roots */
	ret = qgroup_update_refcnt(fs_info, old_roots, tmp, qgroups, seq,
				   UPDATE_OLD);
	if (ret < 0)
		goto out;

	/* Update new refcnts using new_roots */
	ret = qgroup_update_refcnt(fs_info, new_roots, tmp, qgroups, seq,
				   UPDATE_NEW);
	if (ret < 0)
		goto out;

	qgroup_update_counters(fs_info, qgroups, nr_old_roots, nr_new_roots,
			       num_bytes, seq);

	/*
	 * Bump qgroup_seq to avoid seq overlap
	 */
	fs_info->qgroup_seq += max(nr_old_roots, nr_new_roots) + 1;
out:
	spin_unlock(&fs_info->qgroup_lock);
out_free:
	ulist_free(tmp);
	ulist_free(qgroups);
	ulist_free(old_roots);
	ulist_free(new_roots);
	return ret;
}

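/*
 * Account all qgroup extent records queued in this transaction's dirty
 * extent tree: resolve the roots before and after each change, hand them
 * to btrfs_qgroup_account_extent() and free the records as we go.
 */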
int btrfs_qgroup_account_extents(struct btrfs_trans_handle *trans)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_qgroup_extent_record *record;
	struct btrfs_delayed_ref_root *delayed_refs;
	struct ulist *new_roots = NULL;
	struct rb_node *node;
	u64 num_dirty_extents = 0;
	u64 qgroup_to_skip;
	int ret = 0;

	delayed_refs = &trans->transaction->delayed_refs;
	qgroup_to_skip = delayed_refs->qgroup_to_skip;
	while ((node = rb_first(&delayed_refs->dirty_extent_root))) {
		record = rb_entry(node, struct btrfs_qgroup_extent_record,
				  node);

		num_dirty_extents++;
		trace_btrfs_qgroup_account_extents(fs_info, record);

		if (!ret) {
			/*
			 * Old roots should be searched when inserting qgroup
			 * extent record
			 */
			if (WARN_ON(!record->old_roots)) {
				/* Search commit root to find old_roots */
				ret = btrfs_find_all_roots(NULL, fs_info,
						record->bytenr, 0,
						&record->old_roots, false);
				if (ret < 0)
					goto cleanup;
			}

			/* Free the reserved data space */
			btrfs_qgroup_free_refroot(fs_info,
					record->data_rsv_refroot,
					record->data_rsv,
					BTRFS_QGROUP_RSV_DATA);
			/*
			 * Use BTRFS_SEQ_LAST as time_seq to do a special
			 * search, which doesn't lock the tree or delayed_refs
			 * and searches the current root. It's safe inside
			 * commit_transaction().
			 */
			ret = btrfs_find_all_roots(trans, fs_info,
				record->bytenr, BTRFS_SEQ_LAST, &new_roots, false);
			if (ret < 0)
				goto cleanup;
			if (qgroup_to_skip) {
				ulist_del(new_roots, qgroup_to_skip, 0);
				ulist_del(record->old_roots, qgroup_to_skip,
					  0);
			}
			ret = btrfs_qgroup_account_extent(trans, record->bytenr,
							  record->num_bytes,
							  record->old_roots,
							  new_roots);
			record->old_roots = NULL;
			new_roots = NULL;
		}
cleanup:
		ulist_free(record->old_roots);
		ulist_free(new_roots);
		new_roots = NULL;
		rb_erase(node, &delayed_refs->dirty_extent_root);
		kfree(record);

	}
	trace_qgroup_num_dirty_extents(fs_info, trans->transid,
				       num_dirty_extents);
	return ret;
}

/*
 * called from commit_transaction. Writes all changed qgroups to disk.
 */
int btrfs_run_qgroups(struct btrfs_trans_handle *trans)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	int ret = 0;

	if (!fs_info->quota_root)
		return ret;

	spin_lock(&fs_info->qgroup_lock);
	while (!list_empty(&fs_info->dirty_qgroups)) {
		struct btrfs_qgroup *qgroup;
		qgroup = list_first_entry(&fs_info->dirty_qgroups,
					  struct btrfs_qgroup, dirty);
		list_del_init(&qgroup->dirty);
		spin_unlock(&fs_info->qgroup_lock);
		ret = update_qgroup_info_item(trans, qgroup);
		if (ret)
			fs_info->qgroup_flags |=
					BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
		ret = update_qgroup_limit_item(trans, qgroup);
		if (ret)
			fs_info->qgroup_flags |=
					BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
		spin_lock(&fs_info->qgroup_lock);
	}
	if (test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_ON;
	else
		fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_ON;
	spin_unlock(&fs_info->qgroup_lock);

	ret = update_qgroup_status_item(trans);
	if (ret)
		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;

	return ret;
}

/*
 * Copy the accounting information between qgroups. This is necessary
 * when a snapshot or a subvolume is created. Throwing an error will
 * cause a transaction abort so we take extra care here to only error
 * when a readonly fs is a reasonable outcome.
 */
int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans, u64 srcid,
			 u64 objectid, struct btrfs_qgroup_inherit *inherit)
{
	int ret = 0;
	int i;
	u64 *i_qgroups;
	bool committing = false;
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_root *quota_root;
	struct btrfs_qgroup *srcgroup;
	struct btrfs_qgroup *dstgroup;
	bool need_rescan = false;
	u32 level_size = 0;
	u64 nums;

	/*
	 * There are only two callers of this function.
	 *
	 * One in create_subvol() in the ioctl context, which needs to hold
	 * the qgroup_ioctl_lock.
	 *
	 * The other one in create_pending_snapshot() where no other qgroup
	 * code can modify the fs as they all need to either start a new trans
	 * or hold a trans handler, thus we don't need to hold
	 * qgroup_ioctl_lock.
	 * This would avoid long and complex lock chain and make lockdep happy.
	 */
	spin_lock(&fs_info->trans_lock);
	if (trans->transaction->state == TRANS_STATE_COMMIT_DOING)
		committing = true;
	spin_unlock(&fs_info->trans_lock);

	if (!committing)
		mutex_lock(&fs_info->qgroup_ioctl_lock);
	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
		goto out;

	quota_root = fs_info->quota_root;
	if (!quota_root) {
		ret = -EINVAL;
		goto out;
	}

	if (inherit) {
		i_qgroups = (u64 *)(inherit + 1);
		nums = inherit->num_qgroups + 2 * inherit->num_ref_copies +
		       2 * inherit->num_excl_copies;
		for (i = 0; i < nums; ++i) {
			srcgroup = find_qgroup_rb(fs_info, *i_qgroups);

			/*
			 * Zero out invalid groups so we can ignore
			 * them later.
			 */
			if (!srcgroup ||
			    ((srcgroup->qgroupid >> 48) <= (objectid >> 48)))
				*i_qgroups = 0ULL;

			++i_qgroups;
		}
	}

	/*
	 * create a tracking group for the subvol itself
	 */
	ret = add_qgroup_item(trans, quota_root, objectid);
	if (ret)
		goto out;

	/*
	 * add qgroup to all inherited groups
	 */
	if (inherit) {
		i_qgroups = (u64 *)(inherit + 1);
		for (i = 0; i < inherit->num_qgroups; ++i, ++i_qgroups) {
			if (*i_qgroups == 0)
				continue;
			ret = add_qgroup_relation_item(trans, objectid,
						       *i_qgroups);
			if (ret && ret != -EEXIST)
				goto out;
			ret = add_qgroup_relation_item(trans, *i_qgroups,
						       objectid);
			if (ret && ret != -EEXIST)
				goto out;
		}
		ret = 0;
	}


	spin_lock(&fs_info->qgroup_lock);

	dstgroup = add_qgroup_rb(fs_info, objectid);
	if (IS_ERR(dstgroup)) {
		ret = PTR_ERR(dstgroup);
		goto unlock;
	}

	if (inherit && inherit->flags & BTRFS_QGROUP_INHERIT_SET_LIMITS) {
		dstgroup->lim_flags = inherit->lim.flags;
		dstgroup->max_rfer = inherit->lim.max_rfer;
		dstgroup->max_excl = inherit->lim.max_excl;
		dstgroup->rsv_rfer = inherit->lim.rsv_rfer;
		dstgroup->rsv_excl = inherit->lim.rsv_excl;

		ret = update_qgroup_limit_item(trans, dstgroup);
		if (ret) {
			fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
			btrfs_info(fs_info,
				   "unable to update quota limit for %llu",
				   dstgroup->qgroupid);
			goto unlock;
		}
	}

	if (srcid) {
		srcgroup = find_qgroup_rb(fs_info, srcid);
		if (!srcgroup)
			goto unlock;

		/*
		 * We call inherit after we clone the root in order to make sure
		 * our counts don't go crazy, so at this point the only
		 * difference between the two roots should be the root node.
		 */
		level_size = fs_info->nodesize;
		dstgroup->rfer = srcgroup->rfer;
		dstgroup->rfer_cmpr = srcgroup->rfer_cmpr;
		dstgroup->excl = level_size;
		dstgroup->excl_cmpr = level_size;
		srcgroup->excl = level_size;
		srcgroup->excl_cmpr = level_size;

		/* inherit the limit info */
		dstgroup->lim_flags = srcgroup->lim_flags;
		dstgroup->max_rfer = srcgroup->max_rfer;
		dstgroup->max_excl = srcgroup->max_excl;
		dstgroup->rsv_rfer = srcgroup->rsv_rfer;
		dstgroup->rsv_excl = srcgroup->rsv_excl;

		qgroup_dirty(fs_info, dstgroup);
		qgroup_dirty(fs_info, srcgroup);
	}

	if (!inherit)
		goto unlock;

	i_qgroups = (u64 *)(inherit + 1);
	for (i = 0; i < inherit->num_qgroups; ++i) {
		if (*i_qgroups) {
			ret = add_relation_rb(fs_info, objectid, *i_qgroups);
			if (ret)
				goto unlock;
		}
		++i_qgroups;

		/*
		 * If we're doing a snapshot, and adding the snapshot to a new
		 * qgroup, the numbers are guaranteed to be incorrect.
		 */
		if (srcid)
			need_rescan = true;
	}

	for (i = 0; i <  inherit->num_ref_copies; ++i, i_qgroups += 2) {
		struct btrfs_qgroup *src;
		struct btrfs_qgroup *dst;

		if (!i_qgroups[0] || !i_qgroups[1])
			continue;

		src = find_qgroup_rb(fs_info, i_qgroups[0]);
		dst = find_qgroup_rb(fs_info, i_qgroups[1]);

		if (!src || !dst) {
			ret = -EINVAL;
			goto unlock;
		}

		dst->rfer = src->rfer - level_size;
		dst->rfer_cmpr = src->rfer_cmpr - level_size;

		/* Manually tweaking numbers certainly needs a rescan */
		need_rescan = true;
	}
	for (i = 0; i <  inherit->num_excl_copies; ++i, i_qgroups += 2) {
		struct btrfs_qgroup *src;
		struct btrfs_qgroup *dst;

		if (!i_qgroups[0] || !i_qgroups[1])
			continue;

		src = find_qgroup_rb(fs_info, i_qgroups[0]);
		dst = find_qgroup_rb(fs_info, i_qgroups[1]);

		if (!src || !dst) {
			ret = -EINVAL;
			goto unlock;
		}

		dst->excl = src->excl + level_size;
		dst->excl_cmpr = src->excl_cmpr + level_size;
		need_rescan = true;
	}

unlock:
	spin_unlock(&fs_info->qgroup_lock);
	if (!ret)
		ret = btrfs_sysfs_add_one_qgroup(fs_info, dstgroup);
out:
	if (!committing)
		mutex_unlock(&fs_info->qgroup_ioctl_lock);
	if (need_rescan)
		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
	return ret;
}

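/*
 * Example: with max_rfer = 1 GiB, rfer = 900 MiB and 50 MiB already
 * reserved, a further 100 MiB reservation is rejected (900 + 50 + 100 >
 * 1024 MiB) while 50 MiB would still fit, assuming no exclusive limit is
 * set.
 */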
static bool qgroup_check_limits(const struct btrfs_qgroup *qg, u64 num_bytes)
{
	if ((qg->lim_flags & BTRFS_QGROUP_LIMIT_MAX_RFER) &&
	    qgroup_rsv_total(qg) + (s64)qg->rfer + num_bytes > qg->max_rfer)
		return false;

	if ((qg->lim_flags & BTRFS_QGROUP_LIMIT_MAX_EXCL) &&
	    qgroup_rsv_total(qg) + (s64)qg->excl + num_bytes > qg->max_excl)
		return false;

	return true;
}

static int qgroup_reserve(struct btrfs_root *root, u64 num_bytes, bool enforce,
			  enum btrfs_qgroup_rsv_type type)
{
	struct btrfs_qgroup *qgroup;
	struct btrfs_fs_info *fs_info = root->fs_info;
	u64 ref_root = root->root_key.objectid;
	int ret = 0;
	struct ulist_node *unode;
	struct ulist_iterator uiter;

	if (!is_fstree(ref_root))
		return 0;

	if (num_bytes == 0)
		return 0;

	if (test_bit(BTRFS_FS_QUOTA_OVERRIDE, &fs_info->flags) &&
	    capable(CAP_SYS_RESOURCE))
		enforce = false;

	spin_lock(&fs_info->qgroup_lock);
	if (!fs_info->quota_root)
		goto out;

	qgroup = find_qgroup_rb(fs_info, ref_root);
	if (!qgroup)
		goto out;

	/*
	 * in a first step, we check all affected qgroups if any limits would
	 * be exceeded
	 */
	ulist_reinit(fs_info->qgroup_ulist);
	ret = ulist_add(fs_info->qgroup_ulist, qgroup->qgroupid,
			qgroup_to_aux(qgroup), GFP_ATOMIC);
	if (ret < 0)
		goto out;
	ULIST_ITER_INIT(&uiter);
	while ((unode = ulist_next(fs_info->qgroup_ulist, &uiter))) {
		struct btrfs_qgroup *qg;
		struct btrfs_qgroup_list *glist;

		qg = unode_aux_to_qgroup(unode);

		if (enforce && !qgroup_check_limits(qg, num_bytes)) {
			ret = -EDQUOT;
			goto out;
		}

		list_for_each_entry(glist, &qg->groups, next_group) {
			ret = ulist_add(fs_info->qgroup_ulist,
					glist->group->qgroupid,
					qgroup_to_aux(glist->group), GFP_ATOMIC);
			if (ret < 0)
				goto out;
		}
	}
	ret = 0;
	/*
	 * no limits exceeded, now record the reservation into all qgroups
	 */
	ULIST_ITER_INIT(&uiter);
	while ((unode = ulist_next(fs_info->qgroup_ulist, &uiter))) {
		struct btrfs_qgroup *qg;

		qg = unode_aux_to_qgroup(unode);

		qgroup_rsv_add(fs_info, qg, num_bytes, type);
	}

out:
	spin_unlock(&fs_info->qgroup_lock);
	return ret;
}

/*
 * Free @num_bytes of reserved space with @type for qgroup.  (Normally level 0
 * qgroup).
 *
 * Will handle all higher level qgroup too.
 *
 * NOTE: If @num_bytes is (u64)-1, this means to free all bytes of this qgroup.
 * This special case is only used for META_PERTRANS type.
 */
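/*
 * Example: passing num_bytes == (u64)-1 with BTRFS_QGROUP_RSV_META_PERTRANS
 * (e.g. when all pertrans reservations are dropped at transaction commit)
 * resolves the actual amount from the level 0 qgroup's rsv.values[] below.
 */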
void btrfs_qgroup_free_refroot(struct btrfs_fs_info *fs_info,
			       u64 ref_root, u64 num_bytes,
			       enum btrfs_qgroup_rsv_type type)
{
	struct btrfs_qgroup *qgroup;
	struct ulist_node *unode;
	struct ulist_iterator uiter;
	int ret = 0;

	if (!is_fstree(ref_root))
		return;

	if (num_bytes == 0)
		return;

	if (num_bytes == (u64)-1 && type != BTRFS_QGROUP_RSV_META_PERTRANS) {
		WARN(1, "%s: Invalid type to free", __func__);
		return;
	}
	spin_lock(&fs_info->qgroup_lock);

	if (!fs_info->quota_root)
		goto out;

	qgroup = find_qgroup_rb(fs_info, ref_root);
	if (!qgroup)
		goto out;

	if (num_bytes == (u64)-1)
		/*
		 * We're freeing all pertrans rsv, get reserved value from
		 * level 0 qgroup as real num_bytes to free.
		 */
		num_bytes = qgroup->rsv.values[type];

	ulist_reinit(fs_info->qgroup_ulist);
	ret = ulist_add(fs_info->qgroup_ulist, qgroup->qgroupid,
			qgroup_to_aux(qgroup), GFP_ATOMIC);
	if (ret < 0)
		goto out;
	ULIST_ITER_INIT(&uiter);
	while ((unode = ulist_next(fs_info->qgroup_ulist, &uiter))) {
		struct btrfs_qgroup *qg;
		struct btrfs_qgroup_list *glist;

		qg = unode_aux_to_qgroup(unode);

		qgroup_rsv_release(fs_info, qg, num_bytes, type);

		list_for_each_entry(glist, &qg->groups, next_group) {
			ret = ulist_add(fs_info->qgroup_ulist,
					glist->group->qgroupid,
					qgroup_to_aux(glist->group), GFP_ATOMIC);
			if (ret < 0)
				goto out;
		}
	}

out:
	spin_unlock(&fs_info->qgroup_lock);
}

/*
 * Check if the leaf is the last leaf. Which means all node pointers
 * are at their last position.
 */
static bool is_last_leaf(struct btrfs_path *path)
{
	int i;

	for (i = 1; i < BTRFS_MAX_LEVEL && path->nodes[i]; i++) {
		if (path->slots[i] != btrfs_header_nritems(path->nodes[i]) - 1)
			return false;
	}
	return true;
}

/*
 * returns < 0 on error, 0 when more leaves are to be scanned.
 * returns 1 when done.
 */
static int qgroup_rescan_leaf(struct btrfs_trans_handle *trans,
			      struct btrfs_path *path)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_key found;
	struct extent_buffer *scratch_leaf = NULL;
	struct ulist *roots = NULL;
	u64 num_bytes;
	bool done;
	int slot;
	int ret;

	mutex_lock(&fs_info->qgroup_rescan_lock);
	ret = btrfs_search_slot_for_read(fs_info->extent_root,
					 &fs_info->qgroup_rescan_progress,
					 path, 1, 0);

	btrfs_debug(fs_info,
		"current progress key (%llu %u %llu), search_slot ret %d",
		fs_info->qgroup_rescan_progress.objectid,
		fs_info->qgroup_rescan_progress.type,
		fs_info->qgroup_rescan_progress.offset, ret);

	if (ret) {
		/*
		 * The rescan is about to end, we will not be scanning any
		 * further blocks. We cannot unset the RESCAN flag here, because
		 * we want to commit the transaction if everything went well.
		 * To make the live accounting work in this phase, we set our
		 * scan progress pointer such that every real extent objectid
		 * will be smaller.
		 */
		fs_info->qgroup_rescan_progress.objectid = (u64)-1;
		btrfs_release_path(path);
		mutex_unlock(&fs_info->qgroup_rescan_lock);
		return ret;
	}
	done = is_last_leaf(path);

	btrfs_item_key_to_cpu(path->nodes[0], &found,
			      btrfs_header_nritems(path->nodes[0]) - 1);
	fs_info->qgroup_rescan_progress.objectid = found.objectid + 1;

	scratch_leaf = btrfs_clone_extent_buffer(path->nodes[0]);
	if (!scratch_leaf) {
		ret = -ENOMEM;
		mutex_unlock(&fs_info->qgroup_rescan_lock);
		goto out;
	}
	slot = path->slots[0];
	btrfs_release_path(path);
	mutex_unlock(&fs_info->qgroup_rescan_lock);

	for (; slot < btrfs_header_nritems(scratch_leaf); ++slot) {
		btrfs_item_key_to_cpu(scratch_leaf, &found, slot);
		if (found.type != BTRFS_EXTENT_ITEM_KEY &&
		    found.type != BTRFS_METADATA_ITEM_KEY)
			continue;
		if (found.type == BTRFS_METADATA_ITEM_KEY)
			num_bytes = fs_info->nodesize;
		else
			num_bytes = found.offset;

		ret = btrfs_find_all_roots(NULL, fs_info, found.objectid, 0,
					   &roots, false);
		if (ret < 0)
			goto out;
		/* For rescan, just pass old_roots as NULL */
		ret = btrfs_qgroup_account_extent(trans, found.objectid,
						  num_bytes, NULL, roots);
		if (ret < 0)
			goto out;
	}
out:
	if (scratch_leaf)
		free_extent_buffer(scratch_leaf);

	if (done && !ret) {
		ret = 1;
		fs_info->qgroup_rescan_progress.objectid = (u64)-1;
	}
	return ret;
}

static bool rescan_should_stop(struct btrfs_fs_info *fs_info)
{
	return btrfs_fs_closing(fs_info) ||
		test_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state);
}

static void btrfs_qgroup_rescan_worker(struct btrfs_work *work)
{
	struct btrfs_fs_info *fs_info = container_of(work, struct btrfs_fs_info,
						     qgroup_rescan_work);
	struct btrfs_path *path;
	struct btrfs_trans_handle *trans = NULL;
	int err = -ENOMEM;
	int ret = 0;
	bool stopped = false;

	path = btrfs_alloc_path();
	if (!path)
		goto out;
	/*
	 * Rescan should only search the commit root, and any later difference
	 * should be recorded by qgroup
	 */
	path->search_commit_root = 1;
	path->skip_locking = 1;

	err = 0;
	while (!err && !(stopped = rescan_should_stop(fs_info))) {
		trans = btrfs_start_transaction(fs_info->fs_root, 0);
		if (IS_ERR(trans)) {
			err = PTR_ERR(trans);
			break;
		}
		if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags)) {
			err = -EINTR;
		} else {
			err = qgroup_rescan_leaf(trans, path);
		}
		if (err > 0)
			btrfs_commit_transaction(trans);
		else
			btrfs_end_transaction(trans);
	}

out:
	btrfs_free_path(path);

	mutex_lock(&fs_info->qgroup_rescan_lock);
	if (err > 0 &&
	    fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT) {
		fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
	} else if (err < 0) {
		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
	}
	mutex_unlock(&fs_info->qgroup_rescan_lock);

	/*
	 * only update status, since the previous part has already updated the
	 * qgroup info.
	 */
	trans = btrfs_start_transaction(fs_info->quota_root, 1);
	if (IS_ERR(trans)) {
		err = PTR_ERR(trans);
		trans = NULL;
		btrfs_err(fs_info,
			  "fail to start transaction for status update: %d",
			  err);
	}

	mutex_lock(&fs_info->qgroup_rescan_lock);
	if (!stopped)
		fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;
	if (trans) {
		ret = update_qgroup_status_item(trans);
		if (ret < 0) {
			err = ret;
			btrfs_err(fs_info, "fail to update qgroup status: %d",
				  err);
		}
	}
	fs_info->qgroup_rescan_running = false;
	complete_all(&fs_info->qgroup_rescan_completion);
	mutex_unlock(&fs_info->qgroup_rescan_lock);

	if (!trans)
		return;

	btrfs_end_transaction(trans);

	if (stopped) {
		btrfs_info(fs_info, "qgroup scan paused");
	} else if (err >= 0) {
		btrfs_info(fs_info, "qgroup scan completed%s",
			err > 0 ? " (inconsistency flag cleared)" : "");
	} else {
		btrfs_err(fs_info, "qgroup scan failed with %d", err);
	}
}

/*
 * Checks that (a) no rescan is running and (b) quota is enabled. Allocates all
 * memory required for the rescan context.
 */
static int
qgroup_rescan_init(struct btrfs_fs_info *fs_info, u64 progress_objectid,
		   int init_flags)
{
	int ret = 0;

	if (!init_flags) {
		/* we're resuming qgroup rescan at mount time */
		if (!(fs_info->qgroup_flags &
		      BTRFS_QGROUP_STATUS_FLAG_RESCAN)) {
			btrfs_warn(fs_info,
			"qgroup rescan init failed, qgroup rescan is not queued");
			ret = -EINVAL;
		} else if (!(fs_info->qgroup_flags &
			     BTRFS_QGROUP_STATUS_FLAG_ON)) {
			btrfs_warn(fs_info,
			"qgroup rescan init failed, qgroup is not enabled");
			ret = -EINVAL;
		}

		if (ret)
			return ret;
	}

	mutex_lock(&fs_info->qgroup_rescan_lock);

	if (init_flags) {
		if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN) {
			btrfs_warn(fs_info,
				   "qgroup rescan is already in progress");
			ret = -EINPROGRESS;
		} else if (!(fs_info->qgroup_flags &
			     BTRFS_QGROUP_STATUS_FLAG_ON)) {
			btrfs_warn(fs_info,
			"qgroup rescan init failed, qgroup is not enabled");
			ret = -EINVAL;
		}

		if (ret) {
			mutex_unlock(&fs_info->qgroup_rescan_lock);
			return ret;
		}
		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_RESCAN;
	}

	memset(&fs_info->qgroup_rescan_progress, 0,
		sizeof(fs_info->qgroup_rescan_progress));
	fs_info->qgroup_rescan_progress.objectid = progress_objectid;
	init_completion(&fs_info->qgroup_rescan_completion);
	mutex_unlock(&fs_info->qgroup_rescan_lock);

	btrfs_init_work(&fs_info->qgroup_rescan_work,
			btrfs_qgroup_rescan_worker, NULL, NULL);
	return 0;
}

static void
qgroup_rescan_zero_tracking(struct btrfs_fs_info *fs_info)
{
	struct rb_node *n;
	struct btrfs_qgroup *qgroup;

	spin_lock(&fs_info->qgroup_lock);
	/* clear all current qgroup tracking information */
	for (n = rb_first(&fs_info->qgroup_tree); n; n = rb_next(n)) {
		qgroup = rb_entry(n, struct btrfs_qgroup, node);
		qgroup->rfer = 0;
		qgroup->rfer_cmpr = 0;
		qgroup->excl = 0;
		qgroup->excl_cmpr = 0;
		qgroup_dirty(fs_info, qgroup);
	}
	spin_unlock(&fs_info->qgroup_lock);
}

int
btrfs_qgroup_rescan(struct btrfs_fs_info *fs_info)
{
	int ret = 0;
	struct btrfs_trans_handle *trans;

	ret = qgroup_rescan_init(fs_info, 0, 1);
	if (ret)
		return ret;

	/*
	 * We have set the rescan_progress to 0, which means no more
	 * delayed refs will be accounted by btrfs_qgroup_account_ref.
	 * However, btrfs_qgroup_account_ref may be right after its call
	 * to btrfs_find_all_roots, in which case it would still do the
	 * accounting.
	 * To solve this, we're committing the transaction, which will
	 * ensure we run all delayed refs and only after that, we are
	 * going to clear all tracking information for a clean start.
	 */

	trans = btrfs_join_transaction(fs_info->fs_root);
	if (IS_ERR(trans)) {
		fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;
		return PTR_ERR(trans);
	}
	ret = btrfs_commit_transaction(trans);
	if (ret) {
		fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;
		return ret;
	}

	qgroup_rescan_zero_tracking(fs_info);

	mutex_lock(&fs_info->qgroup_rescan_lock);
	fs_info->qgroup_rescan_running = true;
	btrfs_queue_work(fs_info->qgroup_rescan_workers,
			 &fs_info->qgroup_rescan_work);
	mutex_unlock(&fs_info->qgroup_rescan_lock);

	return 0;
}

int btrfs_qgroup_wait_for_completion(struct btrfs_fs_info *fs_info,
				     bool interruptible)
{
	int running;
	int ret = 0;

	mutex_lock(&fs_info->qgroup_rescan_lock);
	running = fs_info->qgroup_rescan_running;
	mutex_unlock(&fs_info->qgroup_rescan_lock);

	if (!running)
		return 0;

	if (interruptible)
		ret = wait_for_completion_interruptible(
					&fs_info->qgroup_rescan_completion);
	else
		wait_for_completion(&fs_info->qgroup_rescan_completion);

	return ret;
}

/*
 * this is only called from open_ctree where we're still single threaded, thus
 * locking is omitted here.
 */
void
btrfs_qgroup_rescan_resume(struct btrfs_fs_info *fs_info)
{
	if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN) {
		mutex_lock(&fs_info->qgroup_rescan_lock);
		fs_info->qgroup_rescan_running = true;
		btrfs_queue_work(fs_info->qgroup_rescan_workers,
				 &fs_info->qgroup_rescan_work);
		mutex_unlock(&fs_info->qgroup_rescan_lock);
	}
}

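/*
 * Iterate an rbtree from @start, caching the next node before the body
 * runs, so the current node may be safely removed during iteration (as
 * qgroup_unreserve_range() below does via ulist_del()).
 */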
#define rbtree_iterate_from_safe(node, next, start)				\
       for (node = start; node && ({ next = rb_next(node); 1;}); node = next)

static int qgroup_unreserve_range(struct btrfs_inode *inode,
				  struct extent_changeset *reserved, u64 start,
				  u64 len)
{
	struct rb_node *node;
	struct rb_node *next;
	struct ulist_node *entry;
	int ret = 0;

	node = reserved->range_changed.root.rb_node;
	if (!node)
		return 0;
	while (node) {
		entry = rb_entry(node, struct ulist_node, rb_node);
		if (entry->val < start)
			node = node->rb_right;
		else
			node = node->rb_left;
	}

	if (entry->val > start && rb_prev(&entry->rb_node))
		entry = rb_entry(rb_prev(&entry->rb_node), struct ulist_node,
				 rb_node);

	rbtree_iterate_from_safe(node, next, &entry->rb_node) {
		u64 entry_start;
		u64 entry_end;
		u64 entry_len;
		int clear_ret;

		entry = rb_entry(node, struct ulist_node, rb_node);
		entry_start = entry->val;
		entry_end = entry->aux;
		entry_len = entry_end - entry_start + 1;

		if (entry_start >= start + len)
			break;
		if (entry_start + entry_len <= start)
			continue;
		/*
		 * Now the entry is in [start, start + len), revert the
		 * EXTENT_QGROUP_RESERVED bit.
		 */
		clear_ret = clear_extent_bits(&inode->io_tree, entry_start,
					      entry_end, EXTENT_QGROUP_RESERVED);
		if (!ret && clear_ret < 0)
			ret = clear_ret;

		ulist_del(&reserved->range_changed, entry->val, entry->aux);
		if (likely(reserved->bytes_changed >= entry_len)) {
			reserved->bytes_changed -= entry_len;
		} else {
			WARN_ON(1);
			reserved->bytes_changed = 0;
		}
	}

	return ret;
}

/*
 * Try to free some space for qgroup.
 *
 * For qgroup, there are only 3 ways to free qgroup space:
 * - Flush nodatacow write
 *   Any nodatacow write will free its reserved data space at run_delalloc_range().
 *   In theory, we should only flush nodatacow inodes, but it's not yet
 *   possible, so we need to flush the whole root.
 *
 * - Wait for ordered extents
 *   When ordered extents are finished, their reserved metadata is finally
 *   converted to per_trans status, which can be freed by later commit
 *   transaction.
 *
 * - Commit transaction
 *   This would free the meta_per_trans space.
 *   In theory this shouldn't provide much space, but any extra qgroup
 *   space helps.
 */
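/*
 * Note: the flush is serialized by the BTRFS_ROOT_QGROUP_FLUSHING bit;
 * concurrent callers wait on qgroup_flush_wait for the running flush to
 * finish instead of starting another one.
 */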
static int try_flush_qgroup(struct btrfs_root *root)
{
	struct btrfs_trans_handle *trans;
	int ret;

	/* Can't hold an open transaction or we run the risk of deadlocking. */
	ASSERT(current->journal_info == NULL);
	if (WARN_ON(current->journal_info))
		return 0;

	/*
	 * We don't want to run flush again and again, so if there is a running
	 * one, we won't try to start a new flush, but exit directly.
	 */
	if (test_and_set_bit(BTRFS_ROOT_QGROUP_FLUSHING, &root->state)) {
		wait_event(root->qgroup_flush_wait,
			!test_bit(BTRFS_ROOT_QGROUP_FLUSHING, &root->state));
		return 0;
	}

	ret = btrfs_start_delalloc_snapshot(root, true);
	if (ret < 0)
		goto out;
	btrfs_wait_ordered_extents(root, U64_MAX, 0, (u64)-1);

	trans = btrfs_join_transaction(root);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto out;
	}

	ret = btrfs_commit_transaction(trans);
out:
	clear_bit(BTRFS_ROOT_QGROUP_FLUSHING, &root->state);
	wake_up(&root->qgroup_flush_wait);
	return ret;
}

static int qgroup_reserve_data(struct btrfs_inode *inode,
			struct extent_changeset **reserved_ret, u64 start,
			u64 len)
{
	struct btrfs_root *root = inode->root;
	struct extent_changeset *reserved;
	bool new_reserved = false;
	u64 orig_reserved;
	u64 to_reserve;
	int ret;

	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &root->fs_info->flags) ||
	    !is_fstree(root->root_key.objectid) || len == 0)
		return 0;

	/* @reserved parameter is mandatory for qgroup */
	if (WARN_ON(!reserved_ret))
		return -EINVAL;
	if (!*reserved_ret) {
		new_reserved = true;
		*reserved_ret = extent_changeset_alloc();
		if (!*reserved_ret)
			return -ENOMEM;
	}
	reserved = *reserved_ret;
	/* Record already reserved space */
	orig_reserved = reserved->bytes_changed;
	ret = set_record_extent_bits(&inode->io_tree, start,
			start + len -1, EXTENT_QGROUP_RESERVED, reserved);

	/* Newly reserved space */
	to_reserve = reserved->bytes_changed - orig_reserved;
	trace_btrfs_qgroup_reserve_data(&inode->vfs_inode, start, len,
					to_reserve, QGROUP_RESERVE);
	if (ret < 0)
		goto out;
	ret = qgroup_reserve(root, to_reserve, true, BTRFS_QGROUP_RSV_DATA);
	if (ret < 0)
		goto cleanup;

	return ret;

cleanup:
	qgroup_unreserve_range(inode, reserved, start, len);
out:
	if (new_reserved) {
		extent_changeset_free(reserved);
		*reserved_ret = NULL;
	}
	return ret;
}

/*
 * Reserve qgroup space for range [start, start + len).
 *
 * This function will either reserve space from related qgroups or do nothing
 * if the range is already reserved.
 *
 * Return 0 for successful reservation
 * Return <0 for error (including -EDQUOT)
 *
 * NOTE: This function may sleep for memory allocation, dirty page flushing and
 *	 commit transaction. So caller should not hold any dirty page locked.
 */
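/*
 * Usage sketch (illustrative): data write paths call this before dirtying
 * pages; a first -EDQUOT triggers try_flush_qgroup() (flush delalloc, wait
 * for ordered extents, commit) and the reservation is retried exactly once.
 */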
int btrfs_qgroup_reserve_data(struct btrfs_inode *inode,
			struct extent_changeset **reserved_ret, u64 start,
			u64 len)
{
	int ret;

	ret = qgroup_reserve_data(inode, reserved_ret, start, len);
	if (ret <= 0 && ret != -EDQUOT)
		return ret;

	ret = try_flush_qgroup(inode->root);
	if (ret < 0)
		return ret;
	return qgroup_reserve_data(inode, reserved_ret, start, len);
}

/* Free ranges specified by @reserved, normally in error path */
static int qgroup_free_reserved_data(struct btrfs_inode *inode,
			struct extent_changeset *reserved, u64 start, u64 len)
{
	struct btrfs_root *root = inode->root;
	struct ulist_node *unode;
	struct ulist_iterator uiter;
	struct extent_changeset changeset;
	int freed = 0;
	int ret;

	extent_changeset_init(&changeset);
	len = round_up(start + len, root->fs_info->sectorsize);
	start = round_down(start, root->fs_info->sectorsize);

	ULIST_ITER_INIT(&uiter);
	while ((unode = ulist_next(&reserved->range_changed, &uiter))) {
		u64 range_start = unode->val;
		/* unode->aux is the inclusive end */
		u64 range_len = unode->aux - range_start + 1;
		u64 free_start;
		u64 free_len;

		extent_changeset_release(&changeset);

		/* Only free range in range [start, start + len) */
		if (range_start >= start + len ||
		    range_start + range_len <= start)
			continue;
		free_start = max(range_start, start);
		free_len = min(start + len, range_start + range_len) -
			   free_start;
		/*
		 * TODO: To also modify reserved->ranges_reserved to reflect
		 * the modification.
		 *
		 * However as long as we free qgroup reserved according to
		 * EXTENT_QGROUP_RESERVED, we won't double free.
		 * So not need to rush.
		 */
		ret = clear_record_extent_bits(&inode->io_tree, free_start,
				free_start + free_len - 1,
				EXTENT_QGROUP_RESERVED, &changeset);
		if (ret < 0)
			goto out;
		freed += changeset.bytes_changed;
	}
	btrfs_qgroup_free_refroot(root->fs_info, root->root_key.objectid, freed,
				  BTRFS_QGROUP_RSV_DATA);
	ret = freed;
out:
	extent_changeset_release(&changeset);
	return ret;
}

static int __btrfs_qgroup_release_data(struct btrfs_inode *inode,
			struct extent_changeset *reserved, u64 start, u64 len,
			int free)
{
	struct extent_changeset changeset;
	int trace_op = QGROUP_RELEASE;
	int ret;

	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &inode->root->fs_info->flags))
		return 0;

	/* In release case, we shouldn't have @reserved */
	WARN_ON(!free && reserved);
	if (free && reserved)
		return qgroup_free_reserved_data(inode, reserved, start, len);
	extent_changeset_init(&changeset);
	ret = clear_record_extent_bits(&inode->io_tree, start, start + len - 1,
				       EXTENT_QGROUP_RESERVED, &changeset);
	if (ret < 0)
		goto out;

	if (free)
		trace_op = QGROUP_FREE;
	trace_btrfs_qgroup_release_data(&inode->vfs_inode, start, len,
					changeset.bytes_changed, trace_op);
	if (free)
		btrfs_qgroup_free_refroot(inode->root->fs_info,
				inode->root->root_key.objectid,
				changeset.bytes_changed, BTRFS_QGROUP_RSV_DATA);
	ret = changeset.bytes_changed;
out:
	extent_changeset_release(&changeset);
	return ret;
}

/*
 * Free a reserved space range from io_tree and related qgroups
 *
 * Should be called when a range of pages gets invalidated before reaching
 * disk, or for the error cleanup case.
 *
 * If @reserved is given, only the reserved range in [@start, @start + @len)
 * will be freed.
 *
 * For data written to disk, use btrfs_qgroup_release_data().
 *
 * NOTE: This function may sleep for memory allocation.
 */
int btrfs_qgroup_free_data(struct btrfs_inode *inode,
			struct extent_changeset *reserved, u64 start, u64 len)
{
	return __btrfs_qgroup_release_data(inode, reserved, start, len, 1);
}

/*
 * Release a reserved space range from io_tree only.
 *
 * Should be called when a range of pages gets written to disk and the
 * corresponding FILE_EXTENT item is inserted into the corresponding root.
 *
 * Since the new qgroup accounting framework only updates qgroup numbers at
 * commit_transaction() time, the reserved space shouldn't be freed from the
 * related qgroups.
 *
 * But we should release the range from the io_tree, to allow further writes
 * to be COWed.
 *
 * NOTE: This function may sleep for memory allocation.
 */
int btrfs_qgroup_release_data(struct btrfs_inode *inode, u64 start, u64 len)
{
	return __btrfs_qgroup_release_data(inode, NULL, start, len, 0);
}
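
/*
 * Putting release and free together, an illustrative write lifecycle
 * (sketch only, assuming the whole range was reserved):
 *
 *	btrfs_qgroup_reserve_data(inode, &reserved, start, len);
 *	...
 *	data reached disk and the file extent item got inserted:
 *		btrfs_qgroup_release_data(inode, start, len);
 *		(space stays accounted until the extent is accounted at
 *		 commit time)
 *	write failed or the pages got invalidated:
 *		btrfs_qgroup_free_data(inode, reserved, start, len);
 *		(space is returned to the qgroups immediately)
 */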

static void add_root_meta_rsv(struct btrfs_root *root, int num_bytes,
			      enum btrfs_qgroup_rsv_type type)
{
	if (type != BTRFS_QGROUP_RSV_META_PREALLOC &&
	    type != BTRFS_QGROUP_RSV_META_PERTRANS)
		return;
	if (num_bytes == 0)
		return;

	spin_lock(&root->qgroup_meta_rsv_lock);
	if (type == BTRFS_QGROUP_RSV_META_PREALLOC)
		root->qgroup_meta_rsv_prealloc += num_bytes;
	else
		root->qgroup_meta_rsv_pertrans += num_bytes;
	spin_unlock(&root->qgroup_meta_rsv_lock);
}

static int sub_root_meta_rsv(struct btrfs_root *root, int num_bytes,
			     enum btrfs_qgroup_rsv_type type)
{
	if (type != BTRFS_QGROUP_RSV_META_PREALLOC &&
	    type != BTRFS_QGROUP_RSV_META_PERTRANS)
		return 0;
	if (num_bytes == 0)
		return 0;

	spin_lock(&root->qgroup_meta_rsv_lock);
	if (type == BTRFS_QGROUP_RSV_META_PREALLOC) {
		num_bytes = min_t(u64, root->qgroup_meta_rsv_prealloc,
				  num_bytes);
		root->qgroup_meta_rsv_prealloc -= num_bytes;
	} else {
		num_bytes = min_t(u64, root->qgroup_meta_rsv_pertrans,
				  num_bytes);
		root->qgroup_meta_rsv_pertrans -= num_bytes;
	}
	spin_unlock(&root->qgroup_meta_rsv_lock);
	return num_bytes;
}
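
/*
 * Example of why the clamp above matters: with quota disabled a caller
 * "reserves" 16K of META_PREALLOC (nothing is recorded), quota then gets
 * enabled, and the caller frees the 16K.  sub_root_meta_rsv() finds 0
 * recorded bytes and returns 0, so the qgroup counters are left alone and
 * nothing underflows.
 */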

int btrfs_qgroup_reserve_meta(struct btrfs_root *root, int num_bytes,
			      enum btrfs_qgroup_rsv_type type, bool enforce)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	int ret;

	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) ||
	    !is_fstree(root->root_key.objectid) || num_bytes == 0)
		return 0;

	BUG_ON(num_bytes != round_down(num_bytes, fs_info->nodesize));
	trace_qgroup_meta_reserve(root, (s64)num_bytes, type);
	ret = qgroup_reserve(root, num_bytes, enforce, type);
	if (ret < 0)
		return ret;
	/*
	 * Record what we have reserved into root.
	 *
	 * This avoids an underflow across quota disable->enable: in that
	 * case we may try to free space we haven't reserved (since quota
	 * was disabled), so record what we reserved into the root and make
	 * sure a later release won't underflow this number.
	 */
	add_root_meta_rsv(root, num_bytes, type);
	return ret;
}

int __btrfs_qgroup_reserve_meta(struct btrfs_root *root, int num_bytes,
				enum btrfs_qgroup_rsv_type type, bool enforce)
{
	int ret;

	ret = btrfs_qgroup_reserve_meta(root, num_bytes, type, enforce);
	if (ret <= 0 && ret != -EDQUOT)
		return ret;

	ret = try_flush_qgroup(root);
	if (ret < 0)
		return ret;
	return btrfs_qgroup_reserve_meta(root, num_bytes, type, enforce);
}
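
/*
 * Example usage (an illustrative sketch; @nr_items is hypothetical, and
 * real callers normally use the btrfs_qgroup_reserve_meta_prealloc() or
 * btrfs_qgroup_reserve_meta_pertrans() wrappers from qgroup.h):
 *
 *	int num_bytes = nr_items * fs_info->nodesize;	(nodesize aligned)
 *	int ret;
 *
 *	ret = __btrfs_qgroup_reserve_meta(root, num_bytes,
 *					  BTRFS_QGROUP_RSV_META_PREALLOC,
 *					  true);
 *	if (ret < 0)
 *		return ret;	(still -EDQUOT even after flushing)
 */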

void btrfs_qgroup_free_meta_all_pertrans(struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;

	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) ||
	    !is_fstree(root->root_key.objectid))
		return;

	/* TODO: Update trace point to handle such free */
	trace_qgroup_meta_free_all_pertrans(root);
	/* Special value -1 means to free all reserved space */
	btrfs_qgroup_free_refroot(fs_info, root->root_key.objectid, (u64)-1,
				  BTRFS_QGROUP_RSV_META_PERTRANS);
}

void __btrfs_qgroup_free_meta(struct btrfs_root *root, int num_bytes,
			      enum btrfs_qgroup_rsv_type type)
{
	struct btrfs_fs_info *fs_info = root->fs_info;

	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) ||
	    !is_fstree(root->root_key.objectid))
		return;

	/*
	 * Reservation for META_PREALLOC can happen before quota is enabled,
	 * which can lead to underflow.
	 * Here we make sure we only free what we really have reserved.
	 */
	num_bytes = sub_root_meta_rsv(root, num_bytes, type);
	BUG_ON(num_bytes != round_down(num_bytes, fs_info->nodesize));
	trace_qgroup_meta_reserve(root, -(s64)num_bytes, type);
	btrfs_qgroup_free_refroot(fs_info, root->root_key.objectid,
				  num_bytes, type);
}
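
/*
 * Callers are expected to go through the qgroup.h wrappers that pass the
 * matching rsv type, e.g. (sketch):
 *
 *	btrfs_qgroup_free_meta_prealloc(root, fs_info->nodesize);
 *	btrfs_qgroup_free_meta_pertrans(root, fs_info->nodesize);
 */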

static void qgroup_convert_meta(struct btrfs_fs_info *fs_info, u64 ref_root,
				int num_bytes)
{
	struct btrfs_qgroup *qgroup;
	struct ulist_node *unode;
	struct ulist_iterator uiter;
	int ret = 0;

	if (num_bytes == 0)
		return;
	if (!fs_info->quota_root)
		return;

	spin_lock(&fs_info->qgroup_lock);
	qgroup = find_qgroup_rb(fs_info, ref_root);
	if (!qgroup)
		goto out;
	ulist_reinit(fs_info->qgroup_ulist);
	ret = ulist_add(fs_info->qgroup_ulist, qgroup->qgroupid,
		       qgroup_to_aux(qgroup), GFP_ATOMIC);
	if (ret < 0)
		goto out;
	ULIST_ITER_INIT(&uiter);
	while ((unode = ulist_next(fs_info->qgroup_ulist, &uiter))) {
		struct btrfs_qgroup *qg;
		struct btrfs_qgroup_list *glist;

		qg = unode_aux_to_qgroup(unode);

		qgroup_rsv_release(fs_info, qg, num_bytes,
				BTRFS_QGROUP_RSV_META_PREALLOC);
		qgroup_rsv_add(fs_info, qg, num_bytes,
				BTRFS_QGROUP_RSV_META_PERTRANS);
		list_for_each_entry(glist, &qg->groups, next_group) {
			ret = ulist_add(fs_info->qgroup_ulist,
					glist->group->qgroupid,
					qgroup_to_aux(glist->group), GFP_ATOMIC);
			if (ret < 0)
				goto out;
		}
	}
out:
	spin_unlock(&fs_info->qgroup_lock);
}

void btrfs_qgroup_convert_reserved_meta(struct btrfs_root *root, int num_bytes)
{
	struct btrfs_fs_info *fs_info = root->fs_info;

	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) ||
	    !is_fstree(root->root_key.objectid))
		return;
	/* Same as btrfs_qgroup_free_meta_prealloc() */
	num_bytes = sub_root_meta_rsv(root, num_bytes,
				      BTRFS_QGROUP_RSV_META_PREALLOC);
	trace_qgroup_meta_convert(root, num_bytes);
	qgroup_convert_meta(fs_info, root->root_key.objectid, num_bytes);
}
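
/*
 * Typical metadata reservation lifecycle (an illustrative sketch using the
 * qgroup.h wrappers):
 *
 *	1) Reserve PREALLOC space before starting the operation:
 *		btrfs_qgroup_reserve_meta_prealloc(root, num_bytes, true);
 *	2) Once the reservation is really used, convert it to PERTRANS:
 *		btrfs_qgroup_convert_reserved_meta(root, num_bytes);
 *	3) All PERTRANS space is then freed in one go at transaction commit
 *	   via btrfs_qgroup_free_meta_all_pertrans().
 */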

/*
 * Check for leaked qgroup reserved space, normally at inode destruction
 * time.
 */
void btrfs_qgroup_check_reserved_leak(struct btrfs_inode *inode)
{
	struct extent_changeset changeset;
	struct ulist_node *unode;
	struct ulist_iterator iter;
	int ret;

	extent_changeset_init(&changeset);
	ret = clear_record_extent_bits(&inode->io_tree, 0, (u64)-1,
			EXTENT_QGROUP_RESERVED, &changeset);

	WARN_ON(ret < 0);
	if (WARN_ON(changeset.bytes_changed)) {
		ULIST_ITER_INIT(&iter);
		while ((unode = ulist_next(&changeset.range_changed, &iter))) {
			btrfs_warn(inode->root->fs_info,
		"leaking qgroup reserved space, ino: %llu, start: %llu, end: %llu",
				btrfs_ino(inode), unode->val, unode->aux);
		}
		btrfs_qgroup_free_refroot(inode->root->fs_info,
				inode->root->root_key.objectid,
				changeset.bytes_changed, BTRFS_QGROUP_RSV_DATA);
	}
	extent_changeset_release(&changeset);
}

void btrfs_qgroup_init_swapped_blocks(
	struct btrfs_qgroup_swapped_blocks *swapped_blocks)
{
	int i;

	spin_lock_init(&swapped_blocks->lock);
	for (i = 0; i < BTRFS_MAX_LEVEL; i++)
		swapped_blocks->blocks[i] = RB_ROOT;
	swapped_blocks->swapped = false;
}

/*
 * Delete all swapped block records of @root.
 * Every record here means we skipped a full subtree scan for qgroup.
 *
 * Gets called when committing one transaction.
 */
void btrfs_qgroup_clean_swapped_blocks(struct btrfs_root *root)
{
	struct btrfs_qgroup_swapped_blocks *swapped_blocks;
	int i;

	swapped_blocks = &root->swapped_blocks;

	spin_lock(&swapped_blocks->lock);
	if (!swapped_blocks->swapped)
		goto out;
	for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
		struct rb_root *cur_root = &swapped_blocks->blocks[i];
		struct btrfs_qgroup_swapped_block *entry;
		struct btrfs_qgroup_swapped_block *next;

		rbtree_postorder_for_each_entry_safe(entry, next, cur_root,
						     node)
			kfree(entry);
		swapped_blocks->blocks[i] = RB_ROOT;
	}
	swapped_blocks->swapped = false;
out:
	spin_unlock(&swapped_blocks->lock);
}

/*
 * Add subtree roots record into @subvol_root.
 *
 * @subvol_root:	tree root of the subvolume tree which got swapped
 * @bg:			block group under balance
 * @subvol_parent/slot:	pointer to the subtree root in subvolume tree
 * @reloc_parent/slot:	pointer to the subtree root in reloc tree
 *			BOTH POINTERS ARE BEFORE TREE SWAP
 * @last_snapshot:	last snapshot generation of the subvolume tree
 */
int btrfs_qgroup_add_swapped_blocks(struct btrfs_trans_handle *trans,
		struct btrfs_root *subvol_root,
		struct btrfs_block_group *bg,
		struct extent_buffer *subvol_parent, int subvol_slot,
		struct extent_buffer *reloc_parent, int reloc_slot,
		u64 last_snapshot)
{
	struct btrfs_fs_info *fs_info = subvol_root->fs_info;
	struct btrfs_qgroup_swapped_blocks *blocks = &subvol_root->swapped_blocks;
	struct btrfs_qgroup_swapped_block *block;
	struct rb_node **cur;
	struct rb_node *parent = NULL;
	int level = btrfs_header_level(subvol_parent) - 1;
	int ret = 0;

	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
		return 0;

	if (btrfs_node_ptr_generation(subvol_parent, subvol_slot) >
	    btrfs_node_ptr_generation(reloc_parent, reloc_slot)) {
		btrfs_err_rl(fs_info,
		"%s: bad parameter order, subvol_gen=%llu reloc_gen=%llu",
			__func__,
			btrfs_node_ptr_generation(subvol_parent, subvol_slot),
			btrfs_node_ptr_generation(reloc_parent, reloc_slot));
		return -EUCLEAN;
	}

	block = kmalloc(sizeof(*block), GFP_NOFS);
	if (!block) {
		ret = -ENOMEM;
		goto out;
	}

	/*
	 * @reloc_parent/slot is still before swap, while @block is going to
	 * record the bytenr after swap, so we do the swap here.
	 */
	block->subvol_bytenr = btrfs_node_blockptr(reloc_parent, reloc_slot);
	block->subvol_generation = btrfs_node_ptr_generation(reloc_parent,
							     reloc_slot);
	block->reloc_bytenr = btrfs_node_blockptr(subvol_parent, subvol_slot);
	block->reloc_generation = btrfs_node_ptr_generation(subvol_parent,
							    subvol_slot);
	block->last_snapshot = last_snapshot;
	block->level = level;

	/*
	 * If we have bg == NULL, we're called from btrfs_recover_relocation(),
	 * no one else can modify tree blocks, thus the qgroup numbers will
	 * not change no matter the value of trace_leaf.
	 */
	if (bg && bg->flags & BTRFS_BLOCK_GROUP_DATA)
		block->trace_leaf = true;
	else
		block->trace_leaf = false;
	btrfs_node_key_to_cpu(reloc_parent, &block->first_key, reloc_slot);

	/* Insert @block into @blocks */
	spin_lock(&blocks->lock);
	cur = &blocks->blocks[level].rb_node;
	while (*cur) {
		struct btrfs_qgroup_swapped_block *entry;

		parent = *cur;
		entry = rb_entry(parent, struct btrfs_qgroup_swapped_block,
				 node);

		if (entry->subvol_bytenr < block->subvol_bytenr) {
			cur = &(*cur)->rb_left;
		} else if (entry->subvol_bytenr > block->subvol_bytenr) {
			cur = &(*cur)->rb_right;
		} else {
			if (entry->subvol_generation !=
					block->subvol_generation ||
			    entry->reloc_bytenr != block->reloc_bytenr ||
			    entry->reloc_generation !=
					block->reloc_generation) {
				/*
				 * Duplicate but mismatched entry found.
				 * Shouldn't happen.
				 *
				 * Marking qgroup inconsistent should be enough
				 * for end users.
				 */
				WARN_ON(IS_ENABLED(CONFIG_BTRFS_DEBUG));
				ret = -EEXIST;
			}
			kfree(block);
			goto out_unlock;
		}
	}
	rb_link_node(&block->node, parent, cur);
	rb_insert_color(&block->node, &blocks->blocks[level]);
	blocks->swapped = true;
out_unlock:
	spin_unlock(&blocks->lock);
out:
	if (ret < 0)
		fs_info->qgroup_flags |=
			BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
	return ret;
}
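
/*
 * An illustrative example of what gets recorded (values made up): before
 * the swap the subvolume tree points to node A (bytenr X) and the reloc
 * tree to node B (bytenr Y); after the swap the subvolume tree points to Y.
 * The record therefore stores subvol_bytenr = Y and reloc_bytenr = X, so a
 * later COW of bytenr Y can find it in
 * btrfs_qgroup_trace_subtree_after_cow().
 */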

/*
 * Check if the tree block is a subtree root, and if so do the needed
 * delayed subtree trace for qgroup.
 *
 * This is called during btrfs_cow_block().
 */
int btrfs_qgroup_trace_subtree_after_cow(struct btrfs_trans_handle *trans,
					 struct btrfs_root *root,
					 struct extent_buffer *subvol_eb)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_qgroup_swapped_blocks *blocks = &root->swapped_blocks;
	struct btrfs_qgroup_swapped_block *block;
	struct extent_buffer *reloc_eb = NULL;
	struct rb_node *node;
	bool found = false;
	bool swapped = false;
	int level = btrfs_header_level(subvol_eb);
	int ret = 0;
	int i;

	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
		return 0;
	if (!is_fstree(root->root_key.objectid) || !root->reloc_root)
		return 0;

	spin_lock(&blocks->lock);
	if (!blocks->swapped) {
		spin_unlock(&blocks->lock);
		return 0;
	}
	node = blocks->blocks[level].rb_node;

	while (node) {
		block = rb_entry(node, struct btrfs_qgroup_swapped_block, node);
		if (block->subvol_bytenr < subvol_eb->start) {
			node = node->rb_left;
		} else if (block->subvol_bytenr > subvol_eb->start) {
			node = node->rb_right;
		} else {
			found = true;
			break;
		}
	}
	if (!found) {
		spin_unlock(&blocks->lock);
		goto out;
	}
	/* Found one, remove it from @blocks first and update blocks->swapped */
	rb_erase(&block->node, &blocks->blocks[level]);
	for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
		/* Any level still having entries means more swapped blocks */
		if (!RB_EMPTY_ROOT(&blocks->blocks[i])) {
			swapped = true;
			break;
		}
	}
	blocks->swapped = swapped;
	spin_unlock(&blocks->lock);

	/* Read out reloc subtree root */
	reloc_eb = read_tree_block(fs_info, block->reloc_bytenr, 0,
				   block->reloc_generation, block->level,
				   &block->first_key);
	if (IS_ERR(reloc_eb)) {
		ret = PTR_ERR(reloc_eb);
		reloc_eb = NULL;
		goto free_out;
	}
	if (!extent_buffer_uptodate(reloc_eb)) {
		ret = -EIO;
		goto free_out;
	}

	ret = qgroup_trace_subtree_swap(trans, reloc_eb, subvol_eb,
			block->last_snapshot, block->trace_leaf);
free_out:
	kfree(block);
	free_extent_buffer(reloc_eb);
out:
	if (ret < 0) {
		btrfs_err_rl(fs_info,
			     "failed to account subtree at bytenr %llu: %d",
			     subvol_eb->start, ret);
		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
	}
	return ret;
}

void btrfs_qgroup_destroy_extent_records(struct btrfs_transaction *trans)
{
	struct btrfs_qgroup_extent_record *entry;
	struct btrfs_qgroup_extent_record *next;
	struct rb_root *root;

	root = &trans->delayed_refs.dirty_extent_root;
	rbtree_postorder_for_each_entry_safe(entry, next, root, node) {
		ulist_free(entry->old_roots);
		kfree(entry);
	}
}