ioctl.c 122.8 KB
Newer Older
1
// SPDX-License-Identifier: GPL-2.0
C
Christoph Hellwig 已提交
2 3 4 5 6 7 8 9
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 */

#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/file.h>
#include <linux/fs.h>
10
#include <linux/fsnotify.h>
C
Christoph Hellwig 已提交
11 12 13 14 15
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/time.h>
#include <linux/string.h>
#include <linux/backing-dev.h>
16 17
#include <linux/mount.h>
#include <linux/namei.h>
C
Christoph Hellwig 已提交
18 19
#include <linux/writeback.h>
#include <linux/compat.h>
20
#include <linux/security.h>
C
Christoph Hellwig 已提交
21
#include <linux/xattr.h>
22
#include <linux/mm.h>
23
#include <linux/slab.h>
24
#include <linux/blkdev.h>
25
#include <linux/uuid.h>
26
#include <linux/btrfs.h>
M
Mark Fasheh 已提交
27
#include <linux/uaccess.h>
28
#include <linux/iversion.h>
M
Miklos Szeredi 已提交
29
#include <linux/fileattr.h>
B
Boris Burkov 已提交
30
#include <linux/fsverity.h>
C
Christoph Hellwig 已提交
31 32
#include "ctree.h"
#include "disk-io.h"
33
#include "export.h"
C
Christoph Hellwig 已提交
34 35 36 37
#include "transaction.h"
#include "btrfs_inode.h"
#include "print-tree.h"
#include "volumes.h"
38
#include "locking.h"
39
#include "backref.h"
40
#include "rcu-string.h"
41
#include "send.h"
42
#include "dev-replace.h"
43
#include "props.h"
44
#include "sysfs.h"
J
Josef Bacik 已提交
45
#include "qgroup.h"
46
#include "tree-log.h"
47
#include "compression.h"
48
#include "space-info.h"
49
#include "delalloc-space.h"
50
#include "block-group.h"
51
#include "subpage.h"
C
Christoph Hellwig 已提交
52

53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77
#ifdef CONFIG_64BIT
/*
 * If we have a 32-bit userspace and 64-bit kernel, then the UAPI
 * structures are incorrect, as the timespec structure from userspace
 * is 4 bytes too small. We define these alternatives here to teach
 * the kernel about the 32-bit struct packing.
 */
struct btrfs_ioctl_timespec_32 {
	__u64 sec;
	__u32 nsec;
} __attribute__ ((__packed__));

/* 32-bit layout of btrfs_ioctl_received_subvol_args, using the packed timespec above. */
struct btrfs_ioctl_received_subvol_args_32 {
	char	uuid[BTRFS_UUID_SIZE];	/* in */
	__u64	stransid;		/* in */
	__u64	rtransid;		/* out */
	struct btrfs_ioctl_timespec_32 stime; /* in */
	struct btrfs_ioctl_timespec_32 rtime; /* out */
	__u64	flags;			/* in */
	__u64	reserved[16];		/* in */
} __attribute__ ((__packed__));

/* Compat ioctl number: same magic/nr (37) as BTRFS_IOC_SET_RECEIVED_SUBVOL but 32-bit size. */
#define BTRFS_IOC_SET_RECEIVED_SUBVOL_32 _IOWR(BTRFS_IOCTL_MAGIC, 37, \
				struct btrfs_ioctl_received_subvol_args_32)
#endif

78 79 80 81 82 83 84 85 86 87 88 89 90
#if defined(CONFIG_64BIT) && defined(CONFIG_COMPAT)
/*
 * 32-bit compat layout of btrfs_ioctl_send_args: clone_sources is a 32-bit
 * user pointer (compat_uptr_t) and the struct is packed, so the 64-bit
 * kernel must use this layout when servicing 32-bit callers.
 */
struct btrfs_ioctl_send_args_32 {
	__s64 send_fd;			/* in */
	__u64 clone_sources_count;	/* in */
	compat_uptr_t clone_sources;	/* in */
	__u64 parent_root;		/* in */
	__u64 flags;			/* in */
	__u64 reserved[4];		/* in */
} __attribute__ ((__packed__));

/* Compat ioctl number: same magic/nr (38) as BTRFS_IOC_SEND but 32-bit size. */
#define BTRFS_IOC_SEND_32 _IOW(BTRFS_IOCTL_MAGIC, 38, \
			       struct btrfs_ioctl_send_args_32)
#endif
91

92
/* Mask out flags that are inappropriate for the given type of inode. */
93 94
static unsigned int btrfs_mask_fsflags_for_type(struct inode *inode,
		unsigned int flags)
95
{
96
	if (S_ISDIR(inode->i_mode))
97
		return flags;
98
	else if (S_ISREG(inode->i_mode))
99 100 101 102 103 104
		return flags & ~FS_DIRSYNC_FL;
	else
		return flags & (FS_NODUMP_FL | FS_NOATIME_FL);
}

/*
105 106
 * Export internal inode flags to the format expected by the FS_IOC_GETFLAGS
 * ioctl.
107
 */
108
static unsigned int btrfs_inode_flags_to_fsflags(struct btrfs_inode *binode)
109 110
{
	unsigned int iflags = 0;
111
	u32 flags = binode->flags;
B
Boris Burkov 已提交
112
	u32 ro_flags = binode->ro_flags;
113 114 115 116 117 118 119 120 121 122 123 124 125

	if (flags & BTRFS_INODE_SYNC)
		iflags |= FS_SYNC_FL;
	if (flags & BTRFS_INODE_IMMUTABLE)
		iflags |= FS_IMMUTABLE_FL;
	if (flags & BTRFS_INODE_APPEND)
		iflags |= FS_APPEND_FL;
	if (flags & BTRFS_INODE_NODUMP)
		iflags |= FS_NODUMP_FL;
	if (flags & BTRFS_INODE_NOATIME)
		iflags |= FS_NOATIME_FL;
	if (flags & BTRFS_INODE_DIRSYNC)
		iflags |= FS_DIRSYNC_FL;
L
Li Zefan 已提交
126 127
	if (flags & BTRFS_INODE_NODATACOW)
		iflags |= FS_NOCOW_FL;
B
Boris Burkov 已提交
128 129
	if (ro_flags & BTRFS_INODE_RO_VERITY)
		iflags |= FS_VERITY_FL;
L
Li Zefan 已提交
130

131
	if (flags & BTRFS_INODE_NOCOMPRESS)
L
Li Zefan 已提交
132
		iflags |= FS_NOCOMP_FL;
133 134
	else if (flags & BTRFS_INODE_COMPRESS)
		iflags |= FS_COMPR_FL;
135 136 137 138 139 140 141

	return iflags;
}

/*
 * Update inode->i_flags based on the btrfs internal flags.
 */
142
void btrfs_sync_inode_flags_to_i_flags(struct inode *inode)
143
{
144
	struct btrfs_inode *binode = BTRFS_I(inode);
145
	unsigned int new_fl = 0;
146

147
	if (binode->flags & BTRFS_INODE_SYNC)
148
		new_fl |= S_SYNC;
149
	if (binode->flags & BTRFS_INODE_IMMUTABLE)
150
		new_fl |= S_IMMUTABLE;
151
	if (binode->flags & BTRFS_INODE_APPEND)
152
		new_fl |= S_APPEND;
153
	if (binode->flags & BTRFS_INODE_NOATIME)
154
		new_fl |= S_NOATIME;
155
	if (binode->flags & BTRFS_INODE_DIRSYNC)
156
		new_fl |= S_DIRSYNC;
B
Boris Burkov 已提交
157 158
	if (binode->ro_flags & BTRFS_INODE_RO_VERITY)
		new_fl |= S_VERITY;
159 160

	set_mask_bits(&inode->i_flags,
B
Boris Burkov 已提交
161 162
		      S_SYNC | S_APPEND | S_IMMUTABLE | S_NOATIME | S_DIRSYNC |
		      S_VERITY, new_fl);
163 164
}

165 166 167 168 169
/*
 * Check if @flags are a supported and valid set of FS_*_FL flags and that
 * the old and new flags are not conflicting.
 *
 * Returns 0 on success, -EOPNOTSUPP for flags btrfs does not support, or
 * -EINVAL for conflicting combinations (compression vs NOCOW).
 */
static int check_fsflags(unsigned int old_flags, unsigned int flags)
{
	if (flags & ~(FS_IMMUTABLE_FL | FS_APPEND_FL |
		      FS_NOATIME_FL | FS_NODUMP_FL |
		      FS_SYNC_FL | FS_DIRSYNC_FL |
		      FS_NOCOMP_FL | FS_COMPR_FL |
		      FS_NOCOW_FL))
		return -EOPNOTSUPP;

	/* COMPR and NOCOMP on new/old are valid */
	if ((flags & FS_NOCOMP_FL) && (flags & FS_COMPR_FL))
		return -EINVAL;

	if ((flags & FS_COMPR_FL) && (flags & FS_NOCOW_FL))
		return -EINVAL;

	/* NOCOW and compression options are mutually exclusive */
	if ((old_flags & FS_NOCOW_FL) && (flags & (FS_COMPR_FL | FS_NOCOMP_FL)))
		return -EINVAL;
	if ((flags & FS_NOCOW_FL) && (old_flags & (FS_COMPR_FL | FS_NOCOMP_FL)))
		return -EINVAL;

	return 0;
}

194 195 196 197 198 199 200 201 202
/*
 * Check that the requested FS_*_FL flags are compatible with this
 * filesystem's configuration; NOCOW is rejected on zoned filesystems.
 */
static int check_fsflags_compatible(struct btrfs_fs_info *fs_info,
				    unsigned int flags)
{
	/* NOCOW is not supported on a zoned filesystem */
	if (btrfs_is_zoned(fs_info) && (flags & FS_NOCOW_FL))
		return -EPERM;

	return 0;
}

M
Miklos Szeredi 已提交
203 204 205 206 207
/*
 * Set flags/xflags from the internal inode flags. The remaining items of
 * fsxattr are zeroed.
 *
 * VFS ->fileattr_get hook: fills @fa with the FS_*_FL representation of
 * this inode's btrfs flags. Always returns 0.
 */
int btrfs_fileattr_get(struct dentry *dentry, struct fileattr *fa)
{
	struct btrfs_inode *binode = BTRFS_I(d_inode(dentry));

	fileattr_fill_flags(fa, btrfs_inode_flags_to_fsflags(binode));
	return 0;
}

int btrfs_fileattr_set(struct user_namespace *mnt_userns,
		       struct dentry *dentry, struct fileattr *fa)
{
	struct inode *inode = d_inode(dentry);
219
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
220 221
	struct btrfs_inode *binode = BTRFS_I(inode);
	struct btrfs_root *root = binode->root;
222
	struct btrfs_trans_handle *trans;
223
	unsigned int fsflags, old_fsflags;
224
	int ret;
225
	const char *comp = NULL;
226
	u32 binode_flags;
227

L
Li Zefan 已提交
228 229 230
	if (btrfs_root_readonly(root))
		return -EROFS;

M
Miklos Szeredi 已提交
231 232
	if (fileattr_has_fsx(fa))
		return -EOPNOTSUPP;
233

M
Miklos Szeredi 已提交
234
	fsflags = btrfs_mask_fsflags_for_type(inode, fa->flags);
235
	old_fsflags = btrfs_inode_flags_to_fsflags(binode);
236 237
	ret = check_fsflags(old_fsflags, fsflags);
	if (ret)
M
Miklos Szeredi 已提交
238
		return ret;
239

240 241
	ret = check_fsflags_compatible(fs_info, fsflags);
	if (ret)
M
Miklos Szeredi 已提交
242
		return ret;
243

244
	binode_flags = binode->flags;
245
	if (fsflags & FS_SYNC_FL)
246
		binode_flags |= BTRFS_INODE_SYNC;
247
	else
248
		binode_flags &= ~BTRFS_INODE_SYNC;
249
	if (fsflags & FS_IMMUTABLE_FL)
250
		binode_flags |= BTRFS_INODE_IMMUTABLE;
251
	else
252
		binode_flags &= ~BTRFS_INODE_IMMUTABLE;
253
	if (fsflags & FS_APPEND_FL)
254
		binode_flags |= BTRFS_INODE_APPEND;
255
	else
256
		binode_flags &= ~BTRFS_INODE_APPEND;
257
	if (fsflags & FS_NODUMP_FL)
258
		binode_flags |= BTRFS_INODE_NODUMP;
259
	else
260
		binode_flags &= ~BTRFS_INODE_NODUMP;
261
	if (fsflags & FS_NOATIME_FL)
262
		binode_flags |= BTRFS_INODE_NOATIME;
263
	else
264
		binode_flags &= ~BTRFS_INODE_NOATIME;
M
Miklos Szeredi 已提交
265 266 267 268 269

	/* If coming from FS_IOC_FSSETXATTR then skip unconverted flags */
	if (!fa->flags_valid) {
		/* 1 item for the inode */
		trans = btrfs_start_transaction(root, 1);
270 271
		if (IS_ERR(trans))
			return PTR_ERR(trans);
M
Miklos Szeredi 已提交
272 273 274
		goto update_flags;
	}

275
	if (fsflags & FS_DIRSYNC_FL)
276
		binode_flags |= BTRFS_INODE_DIRSYNC;
277
	else
278
		binode_flags &= ~BTRFS_INODE_DIRSYNC;
279
	if (fsflags & FS_NOCOW_FL) {
280
		if (S_ISREG(inode->i_mode)) {
281 282 283 284 285 286
			/*
			 * It's safe to turn csums off here, no extents exist.
			 * Otherwise we want the flag to reflect the real COW
			 * status of the file and will not set it.
			 */
			if (inode->i_size == 0)
287 288
				binode_flags |= BTRFS_INODE_NODATACOW |
						BTRFS_INODE_NODATASUM;
289
		} else {
290
			binode_flags |= BTRFS_INODE_NODATACOW;
291 292 293
		}
	} else {
		/*
294
		 * Revert back under same assumptions as above
295
		 */
296
		if (S_ISREG(inode->i_mode)) {
297
			if (inode->i_size == 0)
298 299
				binode_flags &= ~(BTRFS_INODE_NODATACOW |
						  BTRFS_INODE_NODATASUM);
300
		} else {
301
			binode_flags &= ~BTRFS_INODE_NODATACOW;
302 303
		}
	}
304

305 306 307 308 309
	/*
	 * The COMPRESS flag can only be changed by users, while the NOCOMPRESS
	 * flag may be changed automatically if compression code won't make
	 * things smaller.
	 */
310
	if (fsflags & FS_NOCOMP_FL) {
311 312
		binode_flags &= ~BTRFS_INODE_COMPRESS;
		binode_flags |= BTRFS_INODE_NOCOMPRESS;
313
	} else if (fsflags & FS_COMPR_FL) {
314

M
Miklos Szeredi 已提交
315 316
		if (IS_SWAPFILE(inode))
			return -ETXTBSY;
317

318 319
		binode_flags |= BTRFS_INODE_COMPRESS;
		binode_flags &= ~BTRFS_INODE_NOCOMPRESS;
320

321 322 323
		comp = btrfs_compress_type2str(fs_info->compress_type);
		if (!comp || comp[0] == 0)
			comp = btrfs_compress_type2str(BTRFS_COMPRESS_ZLIB);
L
Li Zefan 已提交
324
	} else {
325
		binode_flags &= ~(BTRFS_INODE_COMPRESS | BTRFS_INODE_NOCOMPRESS);
326
	}
327

328 329 330 331 332
	/*
	 * 1 for inode item
	 * 2 for properties
	 */
	trans = btrfs_start_transaction(root, 3);
M
Miklos Szeredi 已提交
333 334
	if (IS_ERR(trans))
		return PTR_ERR(trans);
335

336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351
	if (comp) {
		ret = btrfs_set_prop(trans, inode, "btrfs.compression", comp,
				     strlen(comp), 0);
		if (ret) {
			btrfs_abort_transaction(trans, ret);
			goto out_end_trans;
		}
	} else {
		ret = btrfs_set_prop(trans, inode, "btrfs.compression", NULL,
				     0, 0);
		if (ret && ret != -ENODATA) {
			btrfs_abort_transaction(trans, ret);
			goto out_end_trans;
		}
	}

M
Miklos Szeredi 已提交
352
update_flags:
353
	binode->flags = binode_flags;
354
	btrfs_sync_inode_flags_to_i_flags(inode);
355
	inode_inc_iversion(inode);
356
	inode->i_ctime = current_time(inode);
357
	ret = btrfs_update_inode(trans, root, BTRFS_I(inode));
358

359
 out_end_trans:
360
	btrfs_end_transaction(trans);
361
	return ret;
362 363
}

364 365 366
/*
 * Start exclusive operation @type, return true on success
 */
367 368 369
bool btrfs_exclop_start(struct btrfs_fs_info *fs_info,
			enum btrfs_exclusive_operation type)
{
370 371 372 373 374 375 376 377 378 379
	bool ret = false;

	spin_lock(&fs_info->super_lock);
	if (fs_info->exclusive_operation == BTRFS_EXCLOP_NONE) {
		fs_info->exclusive_operation = type;
		ret = true;
	}
	spin_unlock(&fs_info->super_lock);

	return ret;
380 381
}

382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407
/*
 * Conditionally allow to enter the exclusive operation in case it's compatible
 * with the running one.  This must be paired with btrfs_exclop_start_unlock and
 * btrfs_exclop_finish.
 *
 * Compatibility:
 * - the same type is already running
 * - not BTRFS_EXCLOP_NONE - this is intentionally incompatible and the caller
 *   must check the condition first that would allow none -> @type
 */
bool btrfs_exclop_start_try_lock(struct btrfs_fs_info *fs_info,
				 enum btrfs_exclusive_operation type)
{
	spin_lock(&fs_info->super_lock);
	if (fs_info->exclusive_operation != type) {
		spin_unlock(&fs_info->super_lock);
		return false;
	}
	/* Returning with super_lock held; btrfs_exclop_start_unlock drops it. */
	return true;
}

/* Drop the super_lock taken by a successful btrfs_exclop_start_try_lock(). */
void btrfs_exclop_start_unlock(struct btrfs_fs_info *fs_info)
{
	spin_unlock(&fs_info->super_lock);
}

408 409
void btrfs_exclop_finish(struct btrfs_fs_info *fs_info)
{
410
	spin_lock(&fs_info->super_lock);
411
	WRITE_ONCE(fs_info->exclusive_operation, BTRFS_EXCLOP_NONE);
412
	spin_unlock(&fs_info->super_lock);
413
	sysfs_notify(&fs_info->fs_devices->fsid_kobj, NULL, "exclusive_operation");
414 415
}

416 417
/*
 * FS_IOC_GETVERSION: copy the inode's generation number to userspace.
 * Returns 0 on success or -EFAULT from put_user().
 */
static int btrfs_ioctl_getversion(struct file *file, int __user *arg)
{
	struct inode *inode = file_inode(file);

	return put_user(inode->i_generation, arg);
}
C
Christoph Hellwig 已提交
422

423 424
/*
 * FITRIM ioctl: discard free space on all devices of the filesystem.
 *
 * Requires CAP_SYS_ADMIN. Rejected on zoned filesystems and when mounted
 * with nologreplay. The minimum trim length reported back to userspace is
 * clamped to the largest discard granularity among the devices.
 *
 * Returns 0 on success or a negative errno (-EPERM, -EOPNOTSUPP, -EROFS,
 * -EFAULT, -EINVAL, or an error from btrfs_trim_fs()).
 */
static noinline int btrfs_ioctl_fitrim(struct btrfs_fs_info *fs_info,
					void __user *arg)
{
	struct btrfs_device *device;
	struct request_queue *q;
	struct fstrim_range range;
	u64 minlen = ULLONG_MAX;
	u64 num_devices = 0;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	/*
	 * btrfs_trim_block_group() depends on space cache, which is not
	 * available in zoned filesystem. So, disallow fitrim on a zoned
	 * filesystem for now.
	 */
	if (btrfs_is_zoned(fs_info))
		return -EOPNOTSUPP;

	/*
	 * If the fs is mounted with nologreplay, which requires it to be
	 * mounted in RO mode as well, we can not allow discard on free space
	 * inside block groups, because log trees refer to extents that are not
	 * pinned in a block group's free space cache (pinning the extents is
	 * precisely the first phase of replaying a log tree).
	 */
	if (btrfs_test_opt(fs_info, NOLOGREPLAY))
		return -EROFS;

	rcu_read_lock();
	list_for_each_entry_rcu(device, &fs_info->fs_devices->devices,
				dev_list) {
		if (!device->bdev)
			continue;
		q = bdev_get_queue(device->bdev);
		if (blk_queue_discard(q)) {
			num_devices++;
			minlen = min_t(u64, q->limits.discard_granularity,
				     minlen);
		}
	}
	rcu_read_unlock();

	if (!num_devices)
		return -EOPNOTSUPP;
	if (copy_from_user(&range, arg, sizeof(range)))
		return -EFAULT;

	/*
	 * NOTE: Don't truncate the range using super->total_bytes.  Bytenr of
	 * block group is in the logical address space, which can be any
	 * sectorsize aligned bytenr in  the range [0, U64_MAX].
	 */
	if (range.len < fs_info->sb->s_blocksize)
		return -EINVAL;

	range.minlen = max(range.minlen, minlen);
	ret = btrfs_trim_fs(fs_info, &range);
	if (ret < 0)
		return ret;

	if (copy_to_user(arg, &range, sizeof(range)))
		return -EFAULT;

	return 0;
}

492
/*
 * Return 1 if the BTRFS_UUID_SIZE-byte buffer @uuid is all zeroes,
 * 0 otherwise.
 */
int __pure btrfs_is_empty_uuid(u8 *uuid)
{
	int i;

	for (i = 0; i < BTRFS_UUID_SIZE; i++) {
		if (uuid[i])
			return 0;
	}
	return 1;
}

503 504
static noinline int create_subvol(struct user_namespace *mnt_userns,
				  struct inode *dir, struct dentry *dentry,
505
				  const char *name, int namelen,
506
				  struct btrfs_qgroup_inherit *inherit)
C
Christoph Hellwig 已提交
507
{
508
	struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb);
C
Christoph Hellwig 已提交
509 510
	struct btrfs_trans_handle *trans;
	struct btrfs_key key;
511
	struct btrfs_root_item *root_item;
C
Christoph Hellwig 已提交
512 513
	struct btrfs_inode_item *inode_item;
	struct extent_buffer *leaf;
514
	struct btrfs_root *root = BTRFS_I(dir)->root;
515
	struct btrfs_root *new_root;
516
	struct btrfs_block_rsv block_rsv;
517
	struct timespec64 cur_time = current_time(dir);
518
	struct inode *inode;
C
Christoph Hellwig 已提交
519 520
	int ret;
	int err;
521
	dev_t anon_dev = 0;
C
Christoph Hellwig 已提交
522
	u64 objectid;
523
	u64 index = 0;
C
Christoph Hellwig 已提交
524

525 526 527 528
	root_item = kzalloc(sizeof(*root_item), GFP_KERNEL);
	if (!root_item)
		return -ENOMEM;

529
	ret = btrfs_get_free_objectid(fs_info->tree_root, &objectid);
530
	if (ret)
531
		goto fail_free;
532

533 534 535 536
	ret = get_anon_bdev(&anon_dev);
	if (ret < 0)
		goto fail_free;

537 538
	/*
	 * Don't create subvolume whose level is not zero. Or qgroup will be
539
	 * screwed up since it assumes subvolume qgroup's level to be 0.
540
	 */
541 542 543 544
	if (btrfs_qgroup_level(objectid)) {
		ret = -ENOSPC;
		goto fail_free;
	}
545

546
	btrfs_init_block_rsv(&block_rsv, BTRFS_BLOCK_RSV_TEMP);
J
Josef Bacik 已提交
547
	/*
548 549
	 * The same as the snapshot creation, please see the comment
	 * of create_snapshot().
J
Josef Bacik 已提交
550
	 */
551
	ret = btrfs_subvolume_reserve_metadata(root, &block_rsv, 8, false);
552
	if (ret)
553
		goto fail_free;
554 555 556 557

	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
558
		btrfs_subvolume_release_metadata(root, &block_rsv);
559
		goto fail_free;
560 561 562
	}
	trans->block_rsv = &block_rsv;
	trans->bytes_reserved = block_rsv.size;
C
Christoph Hellwig 已提交
563

564
	ret = btrfs_qgroup_inherit(trans, 0, objectid, inherit);
A
Arne Jansen 已提交
565 566 567
	if (ret)
		goto fail;

568 569
	leaf = btrfs_alloc_tree_block(trans, root, 0, objectid, NULL, 0, 0, 0,
				      BTRFS_NESTING_NORMAL);
570 571 572 573
	if (IS_ERR(leaf)) {
		ret = PTR_ERR(leaf);
		goto fail;
	}
C
Christoph Hellwig 已提交
574 575 576

	btrfs_mark_buffer_dirty(leaf);

577
	inode_item = &root_item->inode;
578 579 580
	btrfs_set_stack_inode_generation(inode_item, 1);
	btrfs_set_stack_inode_size(inode_item, 3);
	btrfs_set_stack_inode_nlink(inode_item, 1);
581
	btrfs_set_stack_inode_nbytes(inode_item,
582
				     fs_info->nodesize);
583
	btrfs_set_stack_inode_mode(inode_item, S_IFDIR | 0755);
C
Christoph Hellwig 已提交
584

585 586
	btrfs_set_root_flags(root_item, 0);
	btrfs_set_root_limit(root_item, 0);
587
	btrfs_set_stack_inode_flags(inode_item, BTRFS_INODE_ROOT_ITEM_INIT);
588

589 590 591 592 593 594
	btrfs_set_root_bytenr(root_item, leaf->start);
	btrfs_set_root_generation(root_item, trans->transid);
	btrfs_set_root_level(root_item, 0);
	btrfs_set_root_refs(root_item, 1);
	btrfs_set_root_used(root_item, leaf->len);
	btrfs_set_root_last_snapshot(root_item, 0);
C
Christoph Hellwig 已提交
595

596 597
	btrfs_set_root_generation_v2(root_item,
			btrfs_root_generation(root_item));
598
	generate_random_guid(root_item->uuid);
599 600 601 602 603
	btrfs_set_stack_timespec_sec(&root_item->otime, cur_time.tv_sec);
	btrfs_set_stack_timespec_nsec(&root_item->otime, cur_time.tv_nsec);
	root_item->ctime = root_item->otime;
	btrfs_set_root_ctransid(root_item, trans->transid);
	btrfs_set_root_otransid(root_item, trans->transid);
C
Christoph Hellwig 已提交
604

605
	btrfs_tree_unlock(leaf);
C
Christoph Hellwig 已提交
606

607
	btrfs_set_root_dirid(root_item, BTRFS_FIRST_FREE_OBJECTID);
C
Christoph Hellwig 已提交
608 609

	key.objectid = objectid;
610
	key.offset = 0;
611
	key.type = BTRFS_ROOT_ITEM_KEY;
612
	ret = btrfs_insert_root(trans, fs_info->tree_root, &key,
613
				root_item);
614 615 616 617 618 619 620 621 622 623 624
	if (ret) {
		/*
		 * Since we don't abort the transaction in this case, free the
		 * tree block so that we don't leak space and leave the
		 * filesystem in an inconsistent state (an extent item in the
		 * extent tree without backreferences). Also no need to have
		 * the tree block locked since it is not in any tree at this
		 * point, so no other task can find it and use it.
		 */
		btrfs_free_tree_block(trans, root, leaf, 0, 1);
		free_extent_buffer(leaf);
C
Christoph Hellwig 已提交
625
		goto fail;
626 627 628 629
	}

	free_extent_buffer(leaf);
	leaf = NULL;
C
Christoph Hellwig 已提交
630

631
	key.offset = (u64)-1;
632
	new_root = btrfs_get_new_fs_root(fs_info, objectid, anon_dev);
633
	if (IS_ERR(new_root)) {
634
		free_anon_bdev(anon_dev);
635
		ret = PTR_ERR(new_root);
636
		btrfs_abort_transaction(trans, ret);
637 638
		goto fail;
	}
639 640
	/* Freeing will be done in btrfs_put_root() of new_root */
	anon_dev = 0;
641

642 643 644 645 646 647
	ret = btrfs_record_root_in_trans(trans, new_root);
	if (ret) {
		btrfs_put_root(new_root);
		btrfs_abort_transaction(trans, ret);
		goto fail;
	}
648

649
	ret = btrfs_create_subvol_root(trans, new_root, root, mnt_userns);
650
	btrfs_put_root(new_root);
651 652
	if (ret) {
		/* We potentially lose an unused inode item here */
653
		btrfs_abort_transaction(trans, ret);
654 655 656
		goto fail;
	}

C
Christoph Hellwig 已提交
657 658 659
	/*
	 * insert the directory item
	 */
660
	ret = btrfs_set_inode_index(BTRFS_I(dir), &index);
661
	if (ret) {
662
		btrfs_abort_transaction(trans, ret);
663 664
		goto fail;
	}
665

666
	ret = btrfs_insert_dir_item(trans, name, namelen, BTRFS_I(dir), &key,
667
				    BTRFS_FT_DIR, index);
668
	if (ret) {
669
		btrfs_abort_transaction(trans, ret);
C
Christoph Hellwig 已提交
670
		goto fail;
671
	}
672

673
	btrfs_i_size_write(BTRFS_I(dir), dir->i_size + namelen * 2);
674
	ret = btrfs_update_inode(trans, root, BTRFS_I(dir));
675 676 677 678
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto fail;
	}
679

680
	ret = btrfs_add_root_ref(trans, objectid, root->root_key.objectid,
681
				 btrfs_ino(BTRFS_I(dir)), index, name, namelen);
682 683 684 685
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto fail;
	}
C
Christoph Hellwig 已提交
686

687
	ret = btrfs_uuid_tree_add(trans, root_item->uuid,
688
				  BTRFS_UUID_KEY_SUBVOL, objectid);
689
	if (ret)
690
		btrfs_abort_transaction(trans, ret);
691

C
Christoph Hellwig 已提交
692
fail:
693
	kfree(root_item);
694 695
	trans->block_rsv = NULL;
	trans->bytes_reserved = 0;
696
	btrfs_subvolume_release_metadata(root, &block_rsv);
697

698
	err = btrfs_commit_transaction(trans);
C
Christoph Hellwig 已提交
699 700
	if (err && !ret)
		ret = err;
701

702 703
	if (!ret) {
		inode = btrfs_lookup_dentry(dir, dentry);
704 705
		if (IS_ERR(inode))
			return PTR_ERR(inode);
706 707
		d_instantiate(dentry, inode);
	}
C
Christoph Hellwig 已提交
708
	return ret;
709 710

fail_free:
711 712
	if (anon_dev)
		free_anon_bdev(anon_dev);
713 714
	kfree(root_item);
	return ret;
C
Christoph Hellwig 已提交
715 716
}

717
/*
 * Create a snapshot of subvolume @root, instantiated at @dentry under @dir.
 *
 * Builds a btrfs_pending_snapshot, reserves metadata for all items the
 * snapshot needs (see the itemized comment below), queues it on the current
 * transaction's pending_snapshots list and commits; the actual snapshot is
 * taken during the commit. Refuses roots that are not shareable or that
 * have an active swapfile.
 */
static int create_snapshot(struct btrfs_root *root, struct inode *dir,
			   struct dentry *dentry, bool readonly,
			   struct btrfs_qgroup_inherit *inherit)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb);
	struct inode *inode;
	struct btrfs_pending_snapshot *pending_snapshot;
	struct btrfs_trans_handle *trans;
	int ret;

	if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state))
		return -EINVAL;

	if (atomic_read(&root->nr_swapfiles)) {
		btrfs_warn(fs_info,
			   "cannot snapshot subvolume with active swapfile");
		return -ETXTBSY;
	}

	pending_snapshot = kzalloc(sizeof(*pending_snapshot), GFP_KERNEL);
	if (!pending_snapshot)
		return -ENOMEM;

	ret = get_anon_bdev(&pending_snapshot->anon_dev);
	if (ret < 0)
		goto free_pending;
	pending_snapshot->root_item = kzalloc(sizeof(struct btrfs_root_item),
			GFP_KERNEL);
	pending_snapshot->path = btrfs_alloc_path();
	if (!pending_snapshot->root_item || !pending_snapshot->path) {
		ret = -ENOMEM;
		goto free_pending;
	}

	btrfs_init_block_rsv(&pending_snapshot->block_rsv,
			     BTRFS_BLOCK_RSV_TEMP);
	/*
	 * 1 - parent dir inode
	 * 2 - dir entries
	 * 1 - root item
	 * 2 - root ref/backref
	 * 1 - root of snapshot
	 * 1 - UUID item
	 */
	ret = btrfs_subvolume_reserve_metadata(BTRFS_I(dir)->root,
					&pending_snapshot->block_rsv, 8,
					false);
	if (ret)
		goto free_pending;

	pending_snapshot->dentry = dentry;
	pending_snapshot->root = root;
	pending_snapshot->readonly = readonly;
	pending_snapshot->dir = dir;
	pending_snapshot->inherit = inherit;

	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto fail;
	}

	spin_lock(&fs_info->trans_lock);
	list_add(&pending_snapshot->list,
		 &trans->transaction->pending_snapshots);
	spin_unlock(&fs_info->trans_lock);

	ret = btrfs_commit_transaction(trans);
	if (ret)
		goto fail;

	ret = pending_snapshot->error;
	if (ret)
		goto fail;

	ret = btrfs_orphan_cleanup(pending_snapshot->snap);
	if (ret)
		goto fail;

	inode = btrfs_lookup_dentry(d_inode(dentry->d_parent), dentry);
	if (IS_ERR(inode)) {
		ret = PTR_ERR(inode);
		goto fail;
	}

	d_instantiate(dentry, inode);
	ret = 0;
	/* On success the anon_dev now belongs to the snapshot root. */
	pending_snapshot->anon_dev = 0;
fail:
	/* Prevent double freeing of anon_dev */
	if (ret && pending_snapshot->snap)
		pending_snapshot->snap->anon_dev = 0;
	btrfs_put_root(pending_snapshot->snap);
	btrfs_subvolume_release_metadata(root, &pending_snapshot->block_rsv);
free_pending:
	if (pending_snapshot->anon_dev)
		free_anon_bdev(pending_snapshot->anon_dev);
	kfree(pending_snapshot->root_item);
	btrfs_free_path(pending_snapshot->path);
	kfree(pending_snapshot);

	return ret;
}

821 822 823 824 825 826 827 828 829 830 831
/*  copy of may_delete in fs/namei.c()
 *	Check whether we can remove a link victim from directory dir, check
 *  whether the type of victim is right.
 *  1. We can't do it if dir is read-only (done in permission())
 *  2. We should have write and exec permissions on dir
 *  3. We can't remove anything from append-only dir
 *  4. We can't do anything with immutable dir (done in permission())
 *  5. If the sticky bit on dir is set we should either
 *	a. be owner of dir, or
 *	b. be owner of victim, or
 *	c. have CAP_FOWNER capability
832
 *  6. If the victim is append-only or immutable we can't do anything with
833 834 835 836 837 838 839 840
 *     links pointing to it.
 *  7. If we were asked to remove a directory and victim isn't one - ENOTDIR.
 *  8. If we were asked to remove a non-directory and victim isn't one - EISDIR.
 *  9. We can't remove a root or mountpoint.
 * 10. We don't allow removal of NFS sillyrenamed files; it's handled by
 *     nfs_async_unlink().
 */

841 842
static int btrfs_may_delete(struct user_namespace *mnt_userns,
			    struct inode *dir, struct dentry *victim, int isdir)
843 844 845
{
	int error;

846
	if (d_really_is_negative(victim))
847 848
		return -ENOENT;

849
	BUG_ON(d_inode(victim->d_parent) != dir);
850
	audit_inode_child(dir, victim, AUDIT_TYPE_CHILD_DELETE);
851

852
	error = inode_permission(mnt_userns, dir, MAY_WRITE | MAY_EXEC);
853 854 855 856
	if (error)
		return error;
	if (IS_APPEND(dir))
		return -EPERM;
857
	if (check_sticky(mnt_userns, dir, d_inode(victim)) ||
858 859
	    IS_APPEND(d_inode(victim)) || IS_IMMUTABLE(d_inode(victim)) ||
	    IS_SWAPFILE(d_inode(victim)))
860 861
		return -EPERM;
	if (isdir) {
862
		if (!d_is_dir(victim))
863 864 865
			return -ENOTDIR;
		if (IS_ROOT(victim))
			return -EBUSY;
866
	} else if (d_is_dir(victim))
867 868 869 870 871 872 873 874
		return -EISDIR;
	if (IS_DEADDIR(dir))
		return -ENOENT;
	if (victim->d_flags & DCACHE_NFSFS_RENAMED)
		return -EBUSY;
	return 0;
}

875
/* copy of may_create in fs/namei.c() */
876 877
static inline int btrfs_may_create(struct user_namespace *mnt_userns,
				   struct inode *dir, struct dentry *child)
878
{
879
	if (d_really_is_positive(child))
880 881 882
		return -EEXIST;
	if (IS_DEADDIR(dir))
		return -ENOENT;
883
	if (!fsuidgid_has_mapping(dir->i_sb, mnt_userns))
884
		return -EOVERFLOW;
885
	return inode_permission(mnt_userns, dir, MAY_WRITE | MAY_EXEC);
886 887 888 889 890 891 892
}

/*
 * Create a new subvolume below @parent.  This is largely modeled after
 * sys_mkdirat and vfs_mkdir, but we only do a single component lookup
 * inside this filesystem so it's quite a bit simpler.
 */
A
Al Viro 已提交
893
static noinline int btrfs_mksubvol(const struct path *parent,
894
				   struct user_namespace *mnt_userns,
895
				   const char *name, int namelen,
S
Sage Weil 已提交
896
				   struct btrfs_root *snap_src,
897
				   bool readonly,
898
				   struct btrfs_qgroup_inherit *inherit)
899
{
900 901
	struct inode *dir = d_inode(parent->dentry);
	struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb);
902 903 904
	struct dentry *dentry;
	int error;

905 906 907
	error = down_write_killable_nested(&dir->i_rwsem, I_MUTEX_PARENT);
	if (error == -EINTR)
		return error;
908

909
	dentry = lookup_one(mnt_userns, name, parent->dentry, namelen);
910 911 912 913
	error = PTR_ERR(dentry);
	if (IS_ERR(dentry))
		goto out_unlock;

914
	error = btrfs_may_create(mnt_userns, dir, dentry);
915
	if (error)
916
		goto out_dput;
917

C
Chris Mason 已提交
918 919 920 921 922 923 924 925 926 927
	/*
	 * even if this name doesn't exist, we may get hash collisions.
	 * check for them now when we can safely fail
	 */
	error = btrfs_check_dir_item_collision(BTRFS_I(dir)->root,
					       dir->i_ino, name,
					       namelen);
	if (error)
		goto out_dput;

928
	down_read(&fs_info->subvol_sem);
929 930 931 932

	if (btrfs_root_refs(&BTRFS_I(dir)->root->root_item) == 0)
		goto out_up_read;

933 934 935
	if (snap_src)
		error = create_snapshot(snap_src, dir, dentry, readonly, inherit);
	else
936
		error = create_subvol(mnt_userns, dir, dentry, name, namelen, inherit);
937

938 939 940
	if (!error)
		fsnotify_mkdir(dir, dentry);
out_up_read:
941
	up_read(&fs_info->subvol_sem);
942 943 944
out_dput:
	dput(dentry);
out_unlock:
945
	btrfs_inode_unlock(dir, 0);
946 947 948
	return error;
}

949
/*
 * Snapshot wrapper around btrfs_mksubvol(): flush delalloc and wait for
 * ordered extents so the snapshot is consistent, while forcing new writes
 * into COW mode for the duration of the snapshot.
 */
static noinline int btrfs_mksnapshot(const struct path *parent,
				   struct user_namespace *mnt_userns,
				   const char *name, int namelen,
				   struct btrfs_root *root,
				   bool readonly,
				   struct btrfs_qgroup_inherit *inherit)
{
	int ret;
	bool snapshot_force_cow = false;

	/*
	 * Force new buffered writes to reserve space even when NOCOW is
	 * possible. This is to avoid later writeback (running dealloc) to
	 * fallback to COW mode and unexpectedly fail with ENOSPC.
	 */
	btrfs_drew_read_lock(&root->snapshot_lock);

	ret = btrfs_start_delalloc_snapshot(root, false);
	if (ret)
		goto out;

	/*
	 * All previous writes have started writeback in NOCOW mode, so now
	 * we force future writes to fallback to COW mode during snapshot
	 * creation.
	 */
	atomic_inc(&root->snapshot_force_cow);
	snapshot_force_cow = true;

	btrfs_wait_ordered_extents(root, U64_MAX, 0, (u64)-1);

	ret = btrfs_mksubvol(parent, mnt_userns, name, namelen,
			     root, readonly, inherit);
out:
	if (snapshot_force_cow)
		atomic_dec(&root->snapshot_force_cow);
	btrfs_drew_read_unlock(&root->snapshot_lock);
	return ret;
}

989 990
static struct extent_map *defrag_lookup_extent(struct inode *inode, u64 start,
					       bool locked)
991 992
{
	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
L
Li Zefan 已提交
993 994
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	struct extent_map *em;
995
	const u32 sectorsize = BTRFS_I(inode)->root->fs_info->sectorsize;
996

L
Li Zefan 已提交
997 998 999 1000
	/*
	 * hopefully we have this extent in the tree already, try without
	 * the full extent lock
	 */
1001
	read_lock(&em_tree->lock);
1002
	em = lookup_extent_mapping(em_tree, start, sectorsize);
1003 1004
	read_unlock(&em_tree->lock);

L
Li Zefan 已提交
1005
	if (!em) {
1006
		struct extent_state *cached = NULL;
1007
		u64 end = start + sectorsize - 1;
1008

L
Li Zefan 已提交
1009
		/* get the big lock and read metadata off disk */
1010 1011
		if (!locked)
			lock_extent_bits(io_tree, start, end, &cached);
1012
		em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, start, sectorsize);
1013 1014
		if (!locked)
			unlock_extent_cached(io_tree, start, end, &cached);
L
Li Zefan 已提交
1015 1016 1017 1018 1019 1020 1021

		if (IS_ERR(em))
			return NULL;
	}

	return em;
}
1022

1023 1024
static bool defrag_check_next_extent(struct inode *inode, struct extent_map *em,
				     bool locked)
L
Li Zefan 已提交
1025 1026 1027 1028 1029 1030 1031 1032
{
	struct extent_map *next;
	bool ret = true;

	/* this is the last extent */
	if (em->start + em->len >= i_size_read(inode))
		return false;

1033
	next = defrag_lookup_extent(inode, em->start + em->len, locked);
1034 1035 1036
	if (!next || next->block_start >= EXTENT_MAP_LAST_BYTE)
		ret = false;
	else if ((em->block_start + em->block_len == next->block_start) &&
1037
		 (em->block_len > SZ_128K && next->block_len > SZ_128K))
L
Li Zefan 已提交
1038 1039 1040
		ret = false;

	free_extent_map(next);
1041 1042 1043
	return ret;
}

/*
 * Prepare one page to be defragged.
 *
 * This will ensure:
 *
 * - Returned page is locked and has been set up properly.
 * - No ordered extent exists in the page.
 * - The page is uptodate.
 *
 * NOTE: Caller should also wait for page writeback after the cluster is
 * prepared, here we don't do writeback wait for each page.
 */
static struct page *defrag_prepare_one_page(struct btrfs_inode *inode,
					    pgoff_t index)
{
	struct address_space *mapping = inode->vfs_inode.i_mapping;
	gfp_t mask = btrfs_alloc_write_mask(mapping);
	u64 page_start = (u64)index << PAGE_SHIFT;
	u64 page_end = page_start + PAGE_SIZE - 1;
	struct extent_state *cached_state = NULL;
	struct page *page;
	int ret;

again:
	page = find_or_create_page(mapping, index, mask);
	if (!page)
		return ERR_PTR(-ENOMEM);

	/* Attach btrfs private data to the page before doing any defrag work */
	ret = set_page_extent_mapped(page);
	if (ret < 0) {
		unlock_page(page);
		put_page(page);
		return ERR_PTR(ret);
	}

	/* Wait for any existing ordered extent in the range */
	while (1) {
		struct btrfs_ordered_extent *ordered;

		lock_extent_bits(&inode->io_tree, page_start, page_end, &cached_state);
		ordered = btrfs_lookup_ordered_range(inode, page_start, PAGE_SIZE);
		unlock_extent_cached(&inode->io_tree, page_start, page_end,
				     &cached_state);
		if (!ordered)
			break;

		/* Drop the page lock while waiting so the ordered IO can finish */
		unlock_page(page);
		btrfs_start_ordered_extent(ordered, 1);
		btrfs_put_ordered_extent(ordered);
		lock_page(page);
		/*
		 * We unlocked the page above, so we need check if it was
		 * released or not.
		 */
		if (page->mapping != mapping || !PagePrivate(page)) {
			unlock_page(page);
			put_page(page);
			goto again;
		}
	}

	/*
	 * Now the page range has no ordered extent any more.  Read the page to
	 * make it uptodate.
	 */
	if (!PageUptodate(page)) {
		btrfs_readpage(NULL, page);
		lock_page(page);
		/* The read dropped the page lock; re-validate before trusting it */
		if (page->mapping != mapping || !PagePrivate(page)) {
			unlock_page(page);
			put_page(page);
			goto again;
		}
		if (!PageUptodate(page)) {
			unlock_page(page);
			put_page(page);
			return ERR_PTR(-EIO);
		}
	}
	return page;
}

/* One contiguous file range selected as a defrag target */
struct defrag_target_range {
	struct list_head list;	/* link in the caller's target list */
	u64 start;		/* file offset of the range */
	u64 len;		/* length of the range in bytes */
};

/*
 * Collect all valid target extents.
 *
 * @start:	   file offset to lookup
 * @len:	   length to lookup
 * @extent_thresh: file extent size threshold, any extent size >= this value
 *		   will be ignored
 * @newer_than:    only defrag extents newer than this value
 * @do_compress:   whether the defrag is doing compression
 *		   if true, @extent_thresh will be ignored and all regular
 *		   file extents meeting @newer_than will be targets.
 * @locked:	   if the range has already held extent lock
 * @target_list:   list of targets file extents
 *
 * Adjacent qualifying ranges are merged into a single entry.  On error all
 * entries collected so far are freed.  Returns 0 or a negative errno.
 */
static int defrag_collect_targets(struct btrfs_inode *inode,
				  u64 start, u64 len, u32 extent_thresh,
				  u64 newer_than, bool do_compress,
				  bool locked, struct list_head *target_list)
{
	u64 cur = start;
	int ret = 0;

	while (cur < start + len) {
		struct extent_map *em;
		struct defrag_target_range *new;
		bool next_mergeable = true;
		u64 range_len;

		em = defrag_lookup_extent(&inode->vfs_inode, cur, locked);
		if (!em)
			break;

		/* Skip hole/inline/preallocated extents */
		if (em->block_start >= EXTENT_MAP_LAST_BYTE ||
		    test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
			goto next;

		/* Skip older extent */
		if (em->generation < newer_than)
			goto next;

		/*
		 * For do_compress case, we want to compress all valid file
		 * extents, thus no @extent_thresh or mergeable check.
		 */
		if (do_compress)
			goto add;

		/* Skip too large extent */
		if (em->len >= extent_thresh)
			goto next;

		next_mergeable = defrag_check_next_extent(&inode->vfs_inode, em,
							  locked);
		if (!next_mergeable) {
			struct defrag_target_range *last;

			/* Empty target list, no way to merge with last entry */
			if (list_empty(target_list))
				goto next;
			last = list_entry(target_list->prev,
					  struct defrag_target_range, list);
			/* Not mergeable with last entry */
			if (last->start + last->len != cur)
				goto next;

			/* Mergeable, fall through to add it to @target_list. */
		}

add:
		range_len = min(extent_map_end(em), start + len) - cur;
		/*
		 * This one is a good target, check if it can be merged into
		 * last range of the target list.
		 */
		if (!list_empty(target_list)) {
			struct defrag_target_range *last;

			last = list_entry(target_list->prev,
					  struct defrag_target_range, list);
			ASSERT(last->start + last->len <= cur);
			if (last->start + last->len == cur) {
				/* Mergeable, enlarge the last entry */
				last->len += range_len;
				goto next;
			}
			/* Fall through to allocate a new entry */
		}

		/* Allocate new defrag_target_range */
		new = kmalloc(sizeof(*new), GFP_NOFS);
		if (!new) {
			free_extent_map(em);
			ret = -ENOMEM;
			break;
		}
		new->start = cur;
		new->len = range_len;
		list_add_tail(&new->list, target_list);

next:
		cur = extent_map_end(em);
		free_extent_map(em);
	}
	if (ret < 0) {
		struct defrag_target_range *entry;
		struct defrag_target_range *tmp;

		/* Error path: release everything collected so far */
		list_for_each_entry_safe(entry, tmp, target_list, list) {
			list_del_init(&entry->list);
			kfree(entry);
		}
	}
	return ret;
}

/* Defrag works on clusters of this many bytes at a time (page aligned) */
#define CLUSTER_SIZE	(SZ_256K)

/*
 * Defrag one contiguous target range.
 *
 * @inode:	target inode
 * @target:	target range to defrag
 * @pages:	locked pages covering the defrag range
 * @nr_pages:	number of locked pages
 *
 * Caller should ensure:
 *
 * - Pages are prepared
 *   Pages should be locked, no ordered extent in the pages range,
 *   no writeback.
 *
 * - Extent bits are locked
 */
static int defrag_one_locked_target(struct btrfs_inode *inode,
				    struct defrag_target_range *target,
				    struct page **pages, int nr_pages,
				    struct extent_state **cached_state)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct extent_changeset *data_reserved = NULL;
	const u64 start = target->start;
	const u64 len = target->len;
	unsigned long last_index = (start + len - 1) >> PAGE_SHIFT;
	unsigned long start_index = start >> PAGE_SHIFT;
	unsigned long first_index = page_index(pages[0]);
	int ret = 0;
	int i;

	ASSERT(last_index - first_index + 1 <= nr_pages);

	/* Reserve data and metadata space for the upcoming delalloc range */
	ret = btrfs_delalloc_reserve_space(inode, &data_reserved, start, len);
	if (ret < 0)
		return ret;
	/* Drop stale bits, then mark the range for defrag writeback */
	clear_extent_bit(&inode->io_tree, start, start + len - 1,
			 EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING |
			 EXTENT_DEFRAG, 0, 0, cached_state);
	set_extent_defrag(&inode->io_tree, start, start + len - 1, cached_state);

	/* Update the page status */
	for (i = start_index - first_index; i <= last_index - first_index; i++) {
		ClearPageChecked(pages[i]);
		btrfs_page_clamp_set_dirty(fs_info, pages[i], start, len);
	}
	/* Release the per-extent reservation count taken above */
	btrfs_delalloc_release_extents(inode, len);
	extent_changeset_free(data_reserved);

	return ret;
}

/*
 * Defrag one contiguous range [@start, @start + @len).
 *
 * Prepares and locks all pages of the range, locks the extent range,
 * re-collects the defrag targets under that lock, and marks each target
 * dirty/delalloc so later writeback rewrites it contiguously.
 *
 * @start and @len must be sector aligned; the range must fit in one
 * cluster (CLUSTER_SIZE).  Returns 0 or a negative errno.
 */
static int defrag_one_range(struct btrfs_inode *inode, u64 start, u32 len,
			    u32 extent_thresh, u64 newer_than, bool do_compress)
{
	struct extent_state *cached_state = NULL;
	struct defrag_target_range *entry;
	struct defrag_target_range *tmp;
	LIST_HEAD(target_list);
	struct page **pages;
	const u32 sectorsize = inode->root->fs_info->sectorsize;
	u64 last_index = (start + len - 1) >> PAGE_SHIFT;
	u64 start_index = start >> PAGE_SHIFT;
	unsigned int nr_pages = last_index - start_index + 1;
	int ret = 0;
	int i;

	ASSERT(nr_pages <= CLUSTER_SIZE / PAGE_SIZE);
	ASSERT(IS_ALIGNED(start, sectorsize) && IS_ALIGNED(len, sectorsize));

	pages = kcalloc(nr_pages, sizeof(struct page *), GFP_NOFS);
	if (!pages)
		return -ENOMEM;

	/* Prepare all pages */
	for (i = 0; i < nr_pages; i++) {
		pages[i] = defrag_prepare_one_page(inode, start_index + i);
		if (IS_ERR(pages[i])) {
			ret = PTR_ERR(pages[i]);
			pages[i] = NULL;
			goto free_pages;
		}
	}
	for (i = 0; i < nr_pages; i++)
		wait_on_page_writeback(pages[i]);

	/* Lock the pages range */
	lock_extent_bits(&inode->io_tree, start_index << PAGE_SHIFT,
			 (last_index << PAGE_SHIFT) + PAGE_SIZE - 1,
			 &cached_state);
	/*
	 * Now we have a consistent view about the extent map, re-check
	 * which range really needs to be defragged.
	 *
	 * And this time we have extent locked already, pass @locked = true
	 * so that we won't relock the extent range and cause deadlock.
	 */
	ret = defrag_collect_targets(inode, start, len, extent_thresh,
				     newer_than, do_compress, true,
				     &target_list);
	if (ret < 0)
		goto unlock_extent;

	list_for_each_entry(entry, &target_list, list) {
		ret = defrag_one_locked_target(inode, entry, pages, nr_pages,
					       &cached_state);
		if (ret < 0)
			break;
	}

	/* Free the target list whether or not every target succeeded */
	list_for_each_entry_safe(entry, tmp, &target_list, list) {
		list_del_init(&entry->list);
		kfree(entry);
	}
unlock_extent:
	unlock_extent_cached(&inode->io_tree, start_index << PAGE_SHIFT,
			     (last_index << PAGE_SHIFT) + PAGE_SIZE - 1,
			     &cached_state);
free_pages:
	for (i = 0; i < nr_pages; i++) {
		if (pages[i]) {
			unlock_page(pages[i]);
			put_page(pages[i]);
		}
	}
	kfree(pages);
	return ret;
}

/*
 * Defrag one cluster (at most CLUSTER_SIZE bytes starting at @start).
 *
 * Collects target ranges without the extent lock, optionally kicks off
 * readahead for each target, then defrags them one by one while honoring
 * the @max_sectors limit.  @sectors_defragged is advanced by the length
 * submitted for each target.  Returns 0 or a negative errno.
 */
static int defrag_one_cluster(struct btrfs_inode *inode,
			      struct file_ra_state *ra,
			      u64 start, u32 len, u32 extent_thresh,
			      u64 newer_than, bool do_compress,
			      unsigned long *sectors_defragged,
			      unsigned long max_sectors)
{
	const u32 sectorsize = inode->root->fs_info->sectorsize;
	struct defrag_target_range *entry;
	struct defrag_target_range *tmp;
	LIST_HEAD(target_list);
	int ret;

	BUILD_BUG_ON(!IS_ALIGNED(CLUSTER_SIZE, PAGE_SIZE));
	ret = defrag_collect_targets(inode, start, len, extent_thresh,
				     newer_than, do_compress, false,
				     &target_list);
	if (ret < 0)
		goto out;

	list_for_each_entry(entry, &target_list, list) {
		u32 range_len = entry->len;

		/* Reached the limit */
		if (max_sectors && max_sectors == *sectors_defragged)
			break;

		/* Trim the target so we never exceed @max_sectors */
		if (max_sectors)
			range_len = min_t(u32, range_len,
				(max_sectors - *sectors_defragged) * sectorsize);

		if (ra)
			page_cache_sync_readahead(inode->vfs_inode.i_mapping,
				ra, NULL, entry->start >> PAGE_SHIFT,
				((entry->start + range_len - 1) >> PAGE_SHIFT) -
				(entry->start >> PAGE_SHIFT) + 1);
		/*
		 * Here we may not defrag any range if holes are punched before
		 * we locked the pages.
		 * But that's fine, it only affects the @sectors_defragged
		 * accounting.
		 */
		ret = defrag_one_range(inode, entry->start, range_len,
				       extent_thresh, newer_than, do_compress);
		if (ret < 0)
			break;
		*sectors_defragged += range_len;
	}
out:
	list_for_each_entry_safe(entry, tmp, &target_list, list) {
		list_del_init(&entry->list);
		kfree(entry);
	}
	return ret;
}

1435 1436 1437 1438 1439 1440 1441 1442 1443 1444 1445
/*
 * Entry point to file defragmentation.
 *
 * @inode:	   inode to be defragged
 * @ra:		   readahead state (can be NUL)
 * @range:	   defrag options including range and flags
 * @newer_than:	   minimum transid to defrag
 * @max_to_defrag: max number of sectors to be defragged, if 0, the whole inode
 *		   will be defragged.
 */
int btrfs_defrag_file(struct inode *inode, struct file_ra_state *ra,
C
Chris Mason 已提交
1446 1447 1448
		      struct btrfs_ioctl_defrag_range_args *range,
		      u64 newer_than, unsigned long max_to_defrag)
{
1449
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
1450
	unsigned long sectors_defragged = 0;
1451
	u64 isize = i_size_read(inode);
1452 1453
	u64 cur;
	u64 last_byte;
1454
	bool do_compress = range->flags & BTRFS_DEFRAG_RANGE_COMPRESS;
1455
	bool ra_allocated = false;
1456 1457 1458
	int compress_type = BTRFS_COMPRESS_ZLIB;
	int ret = 0;
	u32 extent_thresh = range->extent_thresh;
C
Chris Mason 已提交
1459

1460 1461 1462 1463 1464
	if (isize == 0)
		return 0;

	if (range->start >= isize)
		return -EINVAL;
1465

1466
	if (do_compress) {
1467
		if (range->compress_type >= BTRFS_NR_COMPRESS_TYPES)
1468 1469 1470 1471
			return -EINVAL;
		if (range->compress_type)
			compress_type = range->compress_type;
	}
C
Christoph Hellwig 已提交
1472

1473
	if (extent_thresh == 0)
1474
		extent_thresh = SZ_256K;
1475

1476 1477 1478 1479 1480 1481 1482 1483
	if (range->start + range->len > range->start) {
		/* Got a specific range */
		last_byte = min(isize, range->start + range->len) - 1;
	} else {
		/* Defrag until file end */
		last_byte = isize - 1;
	}

C
Chris Mason 已提交
1484
	/*
1485
	 * If we were not given a ra, allocate a readahead context. As
1486 1487
	 * readahead is just an optimization, defrag will work without it so
	 * we don't error out.
C
Chris Mason 已提交
1488
	 */
1489 1490
	if (!ra) {
		ra_allocated = true;
1491
		ra = kzalloc(sizeof(*ra), GFP_KERNEL);
1492 1493
		if (ra)
			file_ra_state_init(ra, inode->i_mapping);
C
Chris Mason 已提交
1494 1495
	}

1496 1497 1498
	/* Align the range */
	cur = round_down(range->start, fs_info->sectorsize);
	last_byte = round_up(last_byte, fs_info->sectorsize) - 1;
1499

1500 1501
	while (cur < last_byte) {
		u64 cluster_end;
1502

1503 1504
		/* The cluster size 256K should always be page aligned */
		BUILD_BUG_ON(!IS_ALIGNED(CLUSTER_SIZE, PAGE_SIZE));
1505

1506 1507 1508 1509
		/* We want the cluster end at page boundary when possible */
		cluster_end = (((cur >> PAGE_SHIFT) +
			       (SZ_256K >> PAGE_SHIFT)) << PAGE_SHIFT) - 1;
		cluster_end = min(cluster_end, last_byte);
1510

1511
		btrfs_inode_lock(inode, 0);
1512 1513
		if (IS_SWAPFILE(inode)) {
			ret = -ETXTBSY;
1514 1515
			btrfs_inode_unlock(inode, 0);
			break;
1516
		}
1517
		if (!(inode->i_sb->s_flags & SB_ACTIVE)) {
1518
			btrfs_inode_unlock(inode, 0);
1519
			break;
1520
		}
1521 1522 1523 1524 1525 1526
		if (do_compress)
			BTRFS_I(inode)->defrag_compress = compress_type;
		ret = defrag_one_cluster(BTRFS_I(inode), ra, cur,
				cluster_end + 1 - cur, extent_thresh,
				newer_than, do_compress,
				&sectors_defragged, max_to_defrag);
1527
		btrfs_inode_unlock(inode, 0);
1528 1529 1530
		if (ret < 0)
			break;
		cur = cluster_end + 1;
C
Christoph Hellwig 已提交
1531 1532
	}

1533 1534 1535 1536 1537 1538 1539 1540
	if (ra_allocated)
		kfree(ra);
	if (sectors_defragged) {
		/*
		 * We have defragged some sectors, for compression case they
		 * need to be written back immediately.
		 */
		if (range->flags & BTRFS_DEFRAG_RANGE_START_IO) {
1541
			filemap_flush(inode->i_mapping);
1542 1543 1544 1545 1546 1547 1548 1549 1550
			if (test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
				     &BTRFS_I(inode)->runtime_flags))
				filemap_flush(inode->i_mapping);
		}
		if (range->compress_type == BTRFS_COMPRESS_LZO)
			btrfs_set_fs_incompat(fs_info, COMPRESS_LZO);
		else if (range->compress_type == BTRFS_COMPRESS_ZSTD)
			btrfs_set_fs_incompat(fs_info, COMPRESS_ZSTD);
		ret = sectors_defragged;
1551
	}
1552
	if (do_compress) {
1553
		btrfs_inode_lock(inode, 0);
1554
		BTRFS_I(inode)->defrag_compress = BTRFS_COMPRESS_NONE;
1555
		btrfs_inode_unlock(inode, 0);
1556
	}
1557
	return ret;
C
Christoph Hellwig 已提交
1558 1559
}

/*
 * Try to start exclusive operation @type or cancel it if it's running.
 *
 * Return:
 *   0        - normal mode, newly claimed op started
 *  >0        - normal mode, something else is running,
 *              return BTRFS_ERROR_DEV_EXCL_RUN_IN_PROGRESS to user space
 * ECANCELED  - cancel mode, successful cancel
 * ENOTCONN   - cancel mode, operation not running anymore
 */
static int exclop_start_or_cancel_reloc(struct btrfs_fs_info *fs_info,
			enum btrfs_exclusive_operation type, bool cancel)
{
	if (!cancel) {
		/* Start normal op */
		if (!btrfs_exclop_start(fs_info, type))
			return BTRFS_ERROR_DEV_EXCL_RUN_IN_PROGRESS;
		/* Exclusive operation is now claimed */
		return 0;
	}

	/* Cancel running op */
	if (btrfs_exclop_start_try_lock(fs_info, type)) {
		/*
		 * This blocks any exclop finish from setting it to NONE, so we
		 * request cancellation. Either it runs and we will wait for it,
		 * or it has finished and no waiting will happen.
		 */
		atomic_inc(&fs_info->reloc_cancel_req);
		btrfs_exclop_start_unlock(fs_info);

		/* Sleep until the running relocation clears the flag */
		if (test_bit(BTRFS_FS_RELOC_RUNNING, &fs_info->flags))
			wait_on_bit(&fs_info->flags, BTRFS_FS_RELOC_RUNNING,
				    TASK_INTERRUPTIBLE);

		return -ECANCELED;
	}

	/* Something else is running or none */
	return -ENOTCONN;
}

1602
static noinline int btrfs_ioctl_resize(struct file *file,
1603
					void __user *arg)
C
Christoph Hellwig 已提交
1604
{
1605 1606
	struct inode *inode = file_inode(file);
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
C
Christoph Hellwig 已提交
1607 1608 1609
	u64 new_size;
	u64 old_size;
	u64 devid = 1;
1610
	struct btrfs_root *root = BTRFS_I(inode)->root;
C
Christoph Hellwig 已提交
1611 1612 1613 1614
	struct btrfs_ioctl_vol_args *vol_args;
	struct btrfs_trans_handle *trans;
	struct btrfs_device *device = NULL;
	char *sizestr;
1615
	char *retptr;
C
Christoph Hellwig 已提交
1616 1617 1618
	char *devstr = NULL;
	int ret = 0;
	int mod = 0;
D
David Sterba 已提交
1619
	bool cancel;
C
Christoph Hellwig 已提交
1620

1621 1622 1623
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

1624 1625 1626 1627
	ret = mnt_want_write_file(file);
	if (ret)
		return ret;

D
David Sterba 已提交
1628 1629 1630 1631
	/*
	 * Read the arguments before checking exclusivity to be able to
	 * distinguish regular resize and cancel
	 */
L
Li Zefan 已提交
1632
	vol_args = memdup_user(arg, sizeof(*vol_args));
1633 1634
	if (IS_ERR(vol_args)) {
		ret = PTR_ERR(vol_args);
D
David Sterba 已提交
1635
		goto out_drop;
1636
	}
1637
	vol_args->name[BTRFS_PATH_NAME_MAX] = '\0';
C
Christoph Hellwig 已提交
1638
	sizestr = vol_args->name;
D
David Sterba 已提交
1639 1640 1641 1642 1643 1644
	cancel = (strcmp("cancel", sizestr) == 0);
	ret = exclop_start_or_cancel_reloc(fs_info, BTRFS_EXCLOP_RESIZE, cancel);
	if (ret)
		goto out_free;
	/* Exclusive operation is now claimed */

C
Christoph Hellwig 已提交
1645 1646 1647 1648 1649
	devstr = strchr(sizestr, ':');
	if (devstr) {
		sizestr = devstr + 1;
		*devstr = '\0';
		devstr = vol_args->name;
1650 1651
		ret = kstrtoull(devstr, 10, &devid);
		if (ret)
D
David Sterba 已提交
1652
			goto out_finish;
1653 1654
		if (!devid) {
			ret = -EINVAL;
D
David Sterba 已提交
1655
			goto out_finish;
1656
		}
1657
		btrfs_info(fs_info, "resizing devid %llu", devid);
C
Christoph Hellwig 已提交
1658
	}
M
Miao Xie 已提交
1659

1660
	device = btrfs_find_device(fs_info->fs_devices, devid, NULL, NULL);
C
Christoph Hellwig 已提交
1661
	if (!device) {
1662 1663
		btrfs_info(fs_info, "resizer unable to find device %llu",
			   devid);
1664
		ret = -ENODEV;
D
David Sterba 已提交
1665
		goto out_finish;
C
Christoph Hellwig 已提交
1666
	}
M
Miao Xie 已提交
1667

1668
	if (!test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
1669
		btrfs_info(fs_info,
1670
			   "resizer unable to apply on readonly device %llu",
1671
		       devid);
1672
		ret = -EPERM;
D
David Sterba 已提交
1673
		goto out_finish;
L
Liu Bo 已提交
1674 1675
	}

C
Christoph Hellwig 已提交
1676 1677 1678 1679 1680 1681 1682 1683 1684 1685
	if (!strcmp(sizestr, "max"))
		new_size = device->bdev->bd_inode->i_size;
	else {
		if (sizestr[0] == '-') {
			mod = -1;
			sizestr++;
		} else if (sizestr[0] == '+') {
			mod = 1;
			sizestr++;
		}
1686 1687
		new_size = memparse(sizestr, &retptr);
		if (*retptr != '\0' || new_size == 0) {
C
Christoph Hellwig 已提交
1688
			ret = -EINVAL;
D
David Sterba 已提交
1689
			goto out_finish;
C
Christoph Hellwig 已提交
1690 1691 1692
		}
	}

1693
	if (test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) {
1694
		ret = -EPERM;
D
David Sterba 已提交
1695
		goto out_finish;
1696 1697
	}

1698
	old_size = btrfs_device_get_total_bytes(device);
C
Christoph Hellwig 已提交
1699 1700 1701 1702

	if (mod < 0) {
		if (new_size > old_size) {
			ret = -EINVAL;
D
David Sterba 已提交
1703
			goto out_finish;
C
Christoph Hellwig 已提交
1704 1705 1706
		}
		new_size = old_size - new_size;
	} else if (mod > 0) {
1707
		if (new_size > ULLONG_MAX - old_size) {
1708
			ret = -ERANGE;
D
David Sterba 已提交
1709
			goto out_finish;
1710
		}
C
Christoph Hellwig 已提交
1711 1712 1713
		new_size = old_size + new_size;
	}

1714
	if (new_size < SZ_256M) {
C
Christoph Hellwig 已提交
1715
		ret = -EINVAL;
D
David Sterba 已提交
1716
		goto out_finish;
C
Christoph Hellwig 已提交
1717 1718 1719
	}
	if (new_size > device->bdev->bd_inode->i_size) {
		ret = -EFBIG;
D
David Sterba 已提交
1720
		goto out_finish;
C
Christoph Hellwig 已提交
1721 1722
	}

1723
	new_size = round_down(new_size, fs_info->sectorsize);
C
Christoph Hellwig 已提交
1724 1725

	if (new_size > old_size) {
1726
		trans = btrfs_start_transaction(root, 0);
1727 1728
		if (IS_ERR(trans)) {
			ret = PTR_ERR(trans);
D
David Sterba 已提交
1729
			goto out_finish;
1730
		}
C
Christoph Hellwig 已提交
1731
		ret = btrfs_grow_device(trans, device, new_size);
1732
		btrfs_commit_transaction(trans);
1733
	} else if (new_size < old_size) {
C
Christoph Hellwig 已提交
1734
		ret = btrfs_shrink_device(device, new_size);
1735
	} /* equal, nothing need to do */
C
Christoph Hellwig 已提交
1736

1737 1738 1739 1740 1741
	if (ret == 0 && new_size != old_size)
		btrfs_info_in_rcu(fs_info,
			"resize device %s (devid %llu) from %llu to %llu",
			rcu_str_deref(device->name), device->devid,
			old_size, new_size);
D
David Sterba 已提交
1742 1743
out_finish:
	btrfs_exclop_finish(fs_info);
1744
out_free:
C
Christoph Hellwig 已提交
1745
	kfree(vol_args);
D
David Sterba 已提交
1746
out_drop:
1747
	mnt_drop_write_file(file);
C
Christoph Hellwig 已提交
1748 1749 1750
	return ret;
}

1751
static noinline int __btrfs_ioctl_snap_create(struct file *file,
1752
				struct user_namespace *mnt_userns,
1753
				const char *name, unsigned long fd, int subvol,
1754
				bool readonly,
1755
				struct btrfs_qgroup_inherit *inherit)
C
Christoph Hellwig 已提交
1756 1757
{
	int namelen;
1758
	int ret = 0;
C
Christoph Hellwig 已提交
1759

1760 1761 1762
	if (!S_ISDIR(file_inode(file)->i_mode))
		return -ENOTDIR;

1763 1764 1765 1766
	ret = mnt_want_write_file(file);
	if (ret)
		goto out;

S
Sage Weil 已提交
1767 1768
	namelen = strlen(name);
	if (strchr(name, '/')) {
C
Christoph Hellwig 已提交
1769
		ret = -EINVAL;
1770
		goto out_drop_write;
C
Christoph Hellwig 已提交
1771 1772
	}

1773 1774 1775
	if (name[0] == '.' &&
	   (namelen == 1 || (name[1] == '.' && namelen == 2))) {
		ret = -EEXIST;
1776
		goto out_drop_write;
1777 1778
	}

1779
	if (subvol) {
1780 1781
		ret = btrfs_mksubvol(&file->f_path, mnt_userns, name,
				     namelen, NULL, readonly, inherit);
1782
	} else {
1783
		struct fd src = fdget(fd);
1784
		struct inode *src_inode;
1785
		if (!src.file) {
1786
			ret = -EINVAL;
1787
			goto out_drop_write;
1788 1789
		}

A
Al Viro 已提交
1790 1791
		src_inode = file_inode(src.file);
		if (src_inode->i_sb != file_inode(file)->i_sb) {
J
Josef Bacik 已提交
1792
			btrfs_info(BTRFS_I(file_inode(file))->root->fs_info,
1793
				   "Snapshot src from another FS");
1794
			ret = -EXDEV;
1795
		} else if (!inode_owner_or_capable(mnt_userns, src_inode)) {
1796 1797 1798 1799 1800
			/*
			 * Subvolume creation is not restricted, but snapshots
			 * are limited to own subvolumes only
			 */
			ret = -EPERM;
1801
		} else {
1802 1803 1804 1805
			ret = btrfs_mksnapshot(&file->f_path, mnt_userns,
					       name, namelen,
					       BTRFS_I(src_inode)->root,
					       readonly, inherit);
1806
		}
1807
		fdput(src);
1808
	}
1809 1810
out_drop_write:
	mnt_drop_write_file(file);
C
Christoph Hellwig 已提交
1811
out:
S
Sage Weil 已提交
1812 1813 1814 1815
	return ret;
}

static noinline int btrfs_ioctl_snap_create(struct file *file,
1816
					    void __user *arg, int subvol)
S
Sage Weil 已提交
1817
{
1818
	struct btrfs_ioctl_vol_args *vol_args;
S
Sage Weil 已提交
1819 1820
	int ret;

1821 1822 1823
	if (!S_ISDIR(file_inode(file)->i_mode))
		return -ENOTDIR;

1824 1825 1826 1827
	vol_args = memdup_user(arg, sizeof(*vol_args));
	if (IS_ERR(vol_args))
		return PTR_ERR(vol_args);
	vol_args->name[BTRFS_PATH_NAME_MAX] = '\0';
S
Sage Weil 已提交
1828

1829 1830 1831
	ret = __btrfs_ioctl_snap_create(file, file_mnt_user_ns(file),
					vol_args->name, vol_args->fd, subvol,
					false, NULL);
1832

1833 1834 1835
	kfree(vol_args);
	return ret;
}
1836

1837 1838 1839 1840 1841
static noinline int btrfs_ioctl_snap_create_v2(struct file *file,
					       void __user *arg, int subvol)
{
	struct btrfs_ioctl_vol_args_v2 *vol_args;
	int ret;
L
Li Zefan 已提交
1842
	bool readonly = false;
A
Arne Jansen 已提交
1843
	struct btrfs_qgroup_inherit *inherit = NULL;
1844

1845 1846 1847
	if (!S_ISDIR(file_inode(file)->i_mode))
		return -ENOTDIR;

1848 1849 1850 1851
	vol_args = memdup_user(arg, sizeof(*vol_args));
	if (IS_ERR(vol_args))
		return PTR_ERR(vol_args);
	vol_args->name[BTRFS_SUBVOL_NAME_MAX] = '\0';
1852

1853
	if (vol_args->flags & ~BTRFS_SUBVOL_CREATE_ARGS_MASK) {
L
Li Zefan 已提交
1854
		ret = -EOPNOTSUPP;
D
Dan Carpenter 已提交
1855
		goto free_args;
S
Sage Weil 已提交
1856
	}
1857

L
Li Zefan 已提交
1858 1859
	if (vol_args->flags & BTRFS_SUBVOL_RDONLY)
		readonly = true;
A
Arne Jansen 已提交
1860
	if (vol_args->flags & BTRFS_SUBVOL_QGROUP_INHERIT) {
1861 1862 1863 1864
		u64 nums;

		if (vol_args->size < sizeof(*inherit) ||
		    vol_args->size > PAGE_SIZE) {
A
Arne Jansen 已提交
1865
			ret = -EINVAL;
D
Dan Carpenter 已提交
1866
			goto free_args;
A
Arne Jansen 已提交
1867 1868 1869 1870
		}
		inherit = memdup_user(vol_args->qgroup_inherit, vol_args->size);
		if (IS_ERR(inherit)) {
			ret = PTR_ERR(inherit);
D
Dan Carpenter 已提交
1871
			goto free_args;
A
Arne Jansen 已提交
1872
		}
1873 1874 1875 1876 1877 1878 1879 1880 1881 1882 1883 1884 1885 1886

		if (inherit->num_qgroups > PAGE_SIZE ||
		    inherit->num_ref_copies > PAGE_SIZE ||
		    inherit->num_excl_copies > PAGE_SIZE) {
			ret = -EINVAL;
			goto free_inherit;
		}

		nums = inherit->num_qgroups + 2 * inherit->num_ref_copies +
		       2 * inherit->num_excl_copies;
		if (vol_args->size != struct_size(inherit, qgroups, nums)) {
			ret = -EINVAL;
			goto free_inherit;
		}
A
Arne Jansen 已提交
1887
	}
1888

1889 1890 1891
	ret = __btrfs_ioctl_snap_create(file, file_mnt_user_ns(file),
					vol_args->name, vol_args->fd, subvol,
					readonly, inherit);
D
Dan Carpenter 已提交
1892 1893 1894
	if (ret)
		goto free_inherit;
free_inherit:
A
Arne Jansen 已提交
1895
	kfree(inherit);
D
Dan Carpenter 已提交
1896 1897
free_args:
	kfree(vol_args);
C
Christoph Hellwig 已提交
1898 1899 1900
	return ret;
}

1901 1902 1903
static noinline int btrfs_ioctl_subvol_getflags(struct file *file,
						void __user *arg)
{
A
Al Viro 已提交
1904
	struct inode *inode = file_inode(file);
1905
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
1906 1907 1908 1909
	struct btrfs_root *root = BTRFS_I(inode)->root;
	int ret = 0;
	u64 flags = 0;

1910
	if (btrfs_ino(BTRFS_I(inode)) != BTRFS_FIRST_FREE_OBJECTID)
1911 1912
		return -EINVAL;

1913
	down_read(&fs_info->subvol_sem);
1914 1915
	if (btrfs_root_readonly(root))
		flags |= BTRFS_SUBVOL_RDONLY;
1916
	up_read(&fs_info->subvol_sem);
1917 1918 1919 1920 1921 1922 1923 1924 1925 1926

	if (copy_to_user(arg, &flags, sizeof(flags)))
		ret = -EFAULT;

	return ret;
}

/*
 * Set or clear the read-only flag (BTRFS_SUBVOL_RDONLY) on the subvolume
 * containing the inode behind @file.  The flag change is committed through
 * a transaction; on failure the in-memory root flags are restored.
 *
 * An RO -> RW transition is refused with -EPERM while the subvolume is
 * used by a running send operation.
 */
static noinline int btrfs_ioctl_subvol_setflags(struct file *file,
					      void __user *arg)
{
	struct inode *inode = file_inode(file);
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_trans_handle *trans;
	u64 root_flags;
	u64 flags;
	int ret = 0;

	if (!inode_owner_or_capable(file_mnt_user_ns(file), inode))
		return -EPERM;

	ret = mnt_want_write_file(file);
	if (ret)
		goto out;

	/* Only a subvolume root inode may have its flags changed */
	if (btrfs_ino(BTRFS_I(inode)) != BTRFS_FIRST_FREE_OBJECTID) {
		ret = -EINVAL;
		goto out_drop_write;
	}

	if (copy_from_user(&flags, arg, sizeof(flags))) {
		ret = -EFAULT;
		goto out_drop_write;
	}

	/* Only the read-only flag is supported */
	if (flags & ~BTRFS_SUBVOL_RDONLY) {
		ret = -EOPNOTSUPP;
		goto out_drop_write;
	}

	down_write(&fs_info->subvol_sem);

	/* nothing to do */
	if (!!(flags & BTRFS_SUBVOL_RDONLY) == btrfs_root_readonly(root))
		goto out_drop_sem;

	root_flags = btrfs_root_flags(&root->root_item);
	if (flags & BTRFS_SUBVOL_RDONLY) {
		btrfs_set_root_flags(&root->root_item,
				     root_flags | BTRFS_ROOT_SUBVOL_RDONLY);
	} else {
		/*
		 * Block RO -> RW transition if this subvolume is involved in
		 * send
		 */
		spin_lock(&root->root_item_lock);
		if (root->send_in_progress == 0) {
			btrfs_set_root_flags(&root->root_item,
				     root_flags & ~BTRFS_ROOT_SUBVOL_RDONLY);
			spin_unlock(&root->root_item_lock);
		} else {
			spin_unlock(&root->root_item_lock);
			btrfs_warn(fs_info,
				   "Attempt to set subvolume %llu read-write during send",
				   root->root_key.objectid);
			ret = -EPERM;
			goto out_drop_sem;
		}
	}

	trans = btrfs_start_transaction(root, 1);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto out_reset;
	}

	ret = btrfs_update_root(trans, fs_info->tree_root,
				&root->root_key, &root->root_item);
	if (ret < 0) {
		btrfs_end_transaction(trans);
		goto out_reset;
	}

	ret = btrfs_commit_transaction(trans);

out_reset:
	/* Roll back the in-memory flags if the on-disk update failed */
	if (ret)
		btrfs_set_root_flags(&root->root_item, root_flags);
out_drop_sem:
	up_write(&fs_info->subvol_sem);
out_drop_write:
	mnt_drop_write_file(file);
out:
	return ret;
}

2013 2014 2015
static noinline int key_in_sk(struct btrfs_key *key,
			      struct btrfs_ioctl_search_key *sk)
{
2016 2017 2018 2019 2020 2021 2022 2023 2024
	struct btrfs_key test;
	int ret;

	test.objectid = sk->min_objectid;
	test.type = sk->min_type;
	test.offset = sk->min_offset;

	ret = btrfs_comp_cpu_keys(key, &test);
	if (ret < 0)
2025
		return 0;
2026 2027 2028 2029 2030 2031 2032

	test.objectid = sk->max_objectid;
	test.type = sk->max_type;
	test.offset = sk->max_offset;

	ret = btrfs_comp_cpu_keys(key, &test);
	if (ret > 0)
2033 2034 2035 2036
		return 0;
	return 1;
}

2037
static noinline int copy_to_sk(struct btrfs_path *path,
2038 2039
			       struct btrfs_key *key,
			       struct btrfs_ioctl_search_key *sk,
2040
			       size_t *buf_size,
2041
			       char __user *ubuf,
2042 2043 2044 2045 2046 2047
			       unsigned long *sk_offset,
			       int *num_found)
{
	u64 found_transid;
	struct extent_buffer *leaf;
	struct btrfs_ioctl_search_header sh;
2048
	struct btrfs_key test;
2049 2050 2051 2052 2053 2054 2055 2056 2057 2058 2059 2060 2061 2062 2063 2064 2065 2066 2067 2068 2069
	unsigned long item_off;
	unsigned long item_len;
	int nritems;
	int i;
	int slot;
	int ret = 0;

	leaf = path->nodes[0];
	slot = path->slots[0];
	nritems = btrfs_header_nritems(leaf);

	if (btrfs_header_generation(leaf) > sk->max_transid) {
		i = nritems;
		goto advance_key;
	}
	found_transid = btrfs_header_generation(leaf);

	for (i = slot; i < nritems; i++) {
		item_off = btrfs_item_ptr_offset(leaf, i);
		item_len = btrfs_item_size_nr(leaf, i);

2070 2071 2072 2073
		btrfs_item_key_to_cpu(leaf, key, i);
		if (!key_in_sk(key, sk))
			continue;

2074
		if (sizeof(sh) + item_len > *buf_size) {
2075 2076 2077 2078 2079 2080 2081 2082 2083 2084
			if (*num_found) {
				ret = 1;
				goto out;
			}

			/*
			 * return one empty item back for v1, which does not
			 * handle -EOVERFLOW
			 */

2085
			*buf_size = sizeof(sh) + item_len;
2086
			item_len = 0;
2087 2088
			ret = -EOVERFLOW;
		}
2089

2090
		if (sizeof(sh) + item_len + *sk_offset > *buf_size) {
2091
			ret = 1;
2092
			goto out;
2093 2094 2095 2096 2097 2098 2099 2100
		}

		sh.objectid = key->objectid;
		sh.offset = key->offset;
		sh.type = key->type;
		sh.len = item_len;
		sh.transid = found_transid;

2101 2102 2103 2104 2105 2106 2107 2108
		/*
		 * Copy search result header. If we fault then loop again so we
		 * can fault in the pages and -EFAULT there if there's a
		 * problem. Otherwise we'll fault and then copy the buffer in
		 * properly this next time through
		 */
		if (copy_to_user_nofault(ubuf + *sk_offset, &sh, sizeof(sh))) {
			ret = 0;
2109 2110 2111
			goto out;
		}

2112 2113 2114
		*sk_offset += sizeof(sh);

		if (item_len) {
2115
			char __user *up = ubuf + *sk_offset;
2116 2117 2118 2119 2120 2121 2122 2123
			/*
			 * Copy the item, same behavior as above, but reset the
			 * * sk_offset so we copy the full thing again.
			 */
			if (read_extent_buffer_to_user_nofault(leaf, up,
						item_off, item_len)) {
				ret = 0;
				*sk_offset -= sizeof(sh);
2124 2125 2126
				goto out;
			}

2127 2128
			*sk_offset += item_len;
		}
2129
		(*num_found)++;
2130

2131 2132 2133
		if (ret) /* -EOVERFLOW from above */
			goto out;

2134 2135 2136 2137
		if (*num_found >= sk->nr_items) {
			ret = 1;
			goto out;
		}
2138 2139
	}
advance_key:
2140
	ret = 0;
2141 2142 2143 2144 2145 2146
	test.objectid = sk->max_objectid;
	test.type = sk->max_type;
	test.offset = sk->max_offset;
	if (btrfs_comp_cpu_keys(key, &test) >= 0)
		ret = 1;
	else if (key->offset < (u64)-1)
2147
		key->offset++;
2148
	else if (key->type < (u8)-1) {
2149
		key->offset = 0;
2150
		key->type++;
2151
	} else if (key->objectid < (u64)-1) {
2152 2153
		key->offset = 0;
		key->type = 0;
2154
		key->objectid++;
2155 2156
	} else
		ret = 1;
2157
out:
2158 2159 2160 2161 2162 2163 2164 2165 2166
	/*
	 *  0: all items from this leaf copied, continue with next
	 *  1: * more items can be copied, but unused buffer is too small
	 *     * all items were found
	 *     Either way, it will stops the loop which iterates to the next
	 *     leaf
	 *  -EOVERFLOW: item was to large for buffer
	 *  -EFAULT: could not copy extent buffer back to userspace
	 */
2167 2168 2169 2170
	return ret;
}

static noinline int search_ioctl(struct inode *inode,
2171
				 struct btrfs_ioctl_search_key *sk,
2172
				 size_t *buf_size,
2173
				 char __user *ubuf)
2174
{
2175
	struct btrfs_fs_info *info = btrfs_sb(inode->i_sb);
2176 2177 2178 2179 2180 2181 2182
	struct btrfs_root *root;
	struct btrfs_key key;
	struct btrfs_path *path;
	int ret;
	int num_found = 0;
	unsigned long sk_offset = 0;

2183 2184
	if (*buf_size < sizeof(struct btrfs_ioctl_search_header)) {
		*buf_size = sizeof(struct btrfs_ioctl_search_header);
2185
		return -EOVERFLOW;
2186
	}
2187

2188 2189 2190 2191 2192 2193
	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	if (sk->tree_id == 0) {
		/* search the root of the inode that was passed */
2194
		root = btrfs_grab_root(BTRFS_I(inode)->root);
2195
	} else {
D
David Sterba 已提交
2196
		root = btrfs_get_fs_root(info, sk->tree_id, true);
2197 2198
		if (IS_ERR(root)) {
			btrfs_free_path(path);
2199
			return PTR_ERR(root);
2200 2201 2202 2203 2204 2205 2206
		}
	}

	key.objectid = sk->min_objectid;
	key.type = sk->min_type;
	key.offset = sk->min_offset;

2207
	while (1) {
2208 2209
		ret = fault_in_pages_writeable(ubuf + sk_offset,
					       *buf_size - sk_offset);
2210 2211 2212
		if (ret)
			break;

2213
		ret = btrfs_search_forward(root, &key, path, sk->min_transid);
2214 2215 2216 2217 2218
		if (ret != 0) {
			if (ret > 0)
				ret = 0;
			goto err;
		}
2219
		ret = copy_to_sk(path, &key, sk, buf_size, ubuf,
2220
				 &sk_offset, &num_found);
2221
		btrfs_release_path(path);
2222
		if (ret)
2223 2224 2225
			break;

	}
2226 2227
	if (ret > 0)
		ret = 0;
2228 2229
err:
	sk->nr_items = num_found;
2230
	btrfs_put_root(root);
2231 2232 2233 2234 2235 2236 2237
	btrfs_free_path(path);
	return ret;
}

/*
 * BTRFS_IOC_TREE_SEARCH: v1 tree search with a fixed-size result buffer
 * embedded in struct btrfs_ioctl_search_args.  Requires CAP_SYS_ADMIN.
 */
static noinline int btrfs_ioctl_tree_search(struct file *file,
					   void __user *argp)
{
	struct btrfs_ioctl_search_args __user *uargs;
	struct btrfs_ioctl_search_key sk;
	struct inode *inode;
	int ret;
	size_t buf_size;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	uargs = (struct btrfs_ioctl_search_args __user *)argp;

	if (copy_from_user(&sk, &uargs->key, sizeof(sk)))
		return -EFAULT;

	buf_size = sizeof(uargs->buf);

	inode = file_inode(file);
	ret = search_ioctl(inode, &sk, &buf_size, uargs->buf);

	/*
	 * In the origin implementation an overflow is handled by returning a
	 * search header with a len of zero, so reset ret.
	 */
	if (ret == -EOVERFLOW)
		ret = 0;

	if (ret == 0 && copy_to_user(&uargs->key, &sk, sizeof(sk)))
		ret = -EFAULT;
	return ret;
}

G
Gerhard Heift 已提交
2269 2270 2271 2272 2273 2274 2275 2276
/*
 * BTRFS_IOC_TREE_SEARCH_V2: like v1 but with a caller-supplied buffer size,
 * capped at 16MiB.  On -EOVERFLOW the required size is reported back via
 * buf_size.  Requires CAP_SYS_ADMIN.
 */
static noinline int btrfs_ioctl_tree_search_v2(struct file *file,
					       void __user *argp)
{
	struct btrfs_ioctl_search_args_v2 __user *uarg;
	struct btrfs_ioctl_search_args_v2 args;
	struct inode *inode;
	int ret;
	size_t buf_size;
	const size_t buf_limit = SZ_16M;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	/* copy search header and buffer size */
	uarg = (struct btrfs_ioctl_search_args_v2 __user *)argp;
	if (copy_from_user(&args, uarg, sizeof(args)))
		return -EFAULT;

	buf_size = args.buf_size;

	/* limit result size to 16MB */
	if (buf_size > buf_limit)
		buf_size = buf_limit;

	inode = file_inode(file);
	ret = search_ioctl(inode, &args.key, &buf_size,
			   (char __user *)(&uarg->buf[0]));
	if (ret == 0 && copy_to_user(&uarg->key, &args.key, sizeof(args.key)))
		ret = -EFAULT;
	else if (ret == -EOVERFLOW &&
		copy_to_user(&uarg->buf_size, &buf_size, sizeof(buf_size)))
		ret = -EFAULT;

	return ret;
}

2305
/*
2306 2307 2308
 * Search INODE_REFs to identify path name of 'dirid' directory
 * in a 'tree_id' tree. and sets path name to 'name'.
 */
2309 2310 2311 2312 2313
static noinline int btrfs_search_path_in_tree(struct btrfs_fs_info *info,
				u64 tree_id, u64 dirid, char *name)
{
	struct btrfs_root *root;
	struct btrfs_key key;
2314
	char *ptr;
2315 2316 2317 2318 2319 2320 2321 2322 2323 2324 2325 2326 2327 2328 2329 2330 2331
	int ret = -1;
	int slot;
	int len;
	int total_len = 0;
	struct btrfs_inode_ref *iref;
	struct extent_buffer *l;
	struct btrfs_path *path;

	if (dirid == BTRFS_FIRST_FREE_OBJECTID) {
		name[0]='\0';
		return 0;
	}

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

2332
	ptr = &name[BTRFS_INO_LOOKUP_PATH_MAX - 1];
2333

D
David Sterba 已提交
2334
	root = btrfs_get_fs_root(info, tree_id, true);
2335
	if (IS_ERR(root)) {
2336
		ret = PTR_ERR(root);
2337 2338 2339
		root = NULL;
		goto out;
	}
2340 2341 2342

	key.objectid = dirid;
	key.type = BTRFS_INODE_REF_KEY;
2343
	key.offset = (u64)-1;
2344

2345
	while (1) {
2346
		ret = btrfs_search_backwards(root, &key, path);
2347 2348
		if (ret < 0)
			goto out;
2349
		else if (ret > 0) {
2350 2351
			ret = -ENOENT;
			goto out;
2352
		}
2353 2354 2355 2356 2357 2358 2359 2360

		l = path->nodes[0];
		slot = path->slots[0];

		iref = btrfs_item_ptr(l, slot, struct btrfs_inode_ref);
		len = btrfs_inode_ref_name_len(l, iref);
		ptr -= len + 1;
		total_len += len + 1;
2361 2362
		if (ptr < name) {
			ret = -ENAMETOOLONG;
2363
			goto out;
2364
		}
2365 2366

		*(ptr + len) = '/';
2367
		read_extent_buffer(l, ptr, (unsigned long)(iref + 1), len);
2368 2369 2370 2371

		if (key.offset == BTRFS_FIRST_FREE_OBJECTID)
			break;

2372
		btrfs_release_path(path);
2373
		key.objectid = key.offset;
2374
		key.offset = (u64)-1;
2375 2376
		dirid = key.objectid;
	}
2377
	memmove(name, ptr, total_len);
2378
	name[total_len] = '\0';
2379 2380
	ret = 0;
out:
2381
	btrfs_put_root(root);
2382
	btrfs_free_path(path);
2383 2384 2385
	return ret;
}

2386 2387
static int btrfs_search_path_in_tree_user(struct user_namespace *mnt_userns,
				struct inode *inode,
2388 2389 2390 2391 2392 2393 2394 2395 2396 2397 2398
				struct btrfs_ioctl_ino_lookup_user_args *args)
{
	struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
	struct super_block *sb = inode->i_sb;
	struct btrfs_key upper_limit = BTRFS_I(inode)->location;
	u64 treeid = BTRFS_I(inode)->root->root_key.objectid;
	u64 dirid = args->dirid;
	unsigned long item_off;
	unsigned long item_len;
	struct btrfs_inode_ref *iref;
	struct btrfs_root_ref *rref;
2399
	struct btrfs_root *root = NULL;
2400 2401 2402 2403 2404 2405 2406 2407 2408 2409 2410 2411 2412 2413 2414 2415 2416 2417 2418 2419 2420
	struct btrfs_path *path;
	struct btrfs_key key, key2;
	struct extent_buffer *leaf;
	struct inode *temp_inode;
	char *ptr;
	int slot;
	int len;
	int total_len = 0;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/*
	 * If the bottom subvolume does not exist directly under upper_limit,
	 * construct the path in from the bottom up.
	 */
	if (dirid != upper_limit.objectid) {
		ptr = &args->path[BTRFS_INO_LOOKUP_USER_PATH_MAX - 1];

D
David Sterba 已提交
2421
		root = btrfs_get_fs_root(fs_info, treeid, true);
2422 2423 2424 2425 2426 2427 2428 2429 2430
		if (IS_ERR(root)) {
			ret = PTR_ERR(root);
			goto out;
		}

		key.objectid = dirid;
		key.type = BTRFS_INODE_REF_KEY;
		key.offset = (u64)-1;
		while (1) {
2431 2432 2433 2434 2435
			ret = btrfs_search_backwards(root, &key, path);
			if (ret < 0)
				goto out_put;
			else if (ret > 0) {
				ret = -ENOENT;
2436
				goto out_put;
2437 2438 2439 2440 2441 2442 2443 2444 2445 2446 2447
			}

			leaf = path->nodes[0];
			slot = path->slots[0];

			iref = btrfs_item_ptr(leaf, slot, struct btrfs_inode_ref);
			len = btrfs_inode_ref_name_len(leaf, iref);
			ptr -= len + 1;
			total_len += len + 1;
			if (ptr < args->path) {
				ret = -ENAMETOOLONG;
2448
				goto out_put;
2449 2450 2451 2452 2453 2454 2455 2456 2457 2458
			}

			*(ptr + len) = '/';
			read_extent_buffer(leaf, ptr,
					(unsigned long)(iref + 1), len);

			/* Check the read+exec permission of this directory */
			ret = btrfs_previous_item(root, path, dirid,
						  BTRFS_INODE_ITEM_KEY);
			if (ret < 0) {
2459
				goto out_put;
2460 2461
			} else if (ret > 0) {
				ret = -ENOENT;
2462
				goto out_put;
2463 2464 2465 2466 2467 2468 2469
			}

			leaf = path->nodes[0];
			slot = path->slots[0];
			btrfs_item_key_to_cpu(leaf, &key2, slot);
			if (key2.objectid != dirid) {
				ret = -ENOENT;
2470
				goto out_put;
2471 2472
			}

D
David Sterba 已提交
2473
			temp_inode = btrfs_iget(sb, key2.objectid, root);
2474 2475
			if (IS_ERR(temp_inode)) {
				ret = PTR_ERR(temp_inode);
2476
				goto out_put;
2477
			}
2478
			ret = inode_permission(mnt_userns, temp_inode,
2479
					       MAY_READ | MAY_EXEC);
2480 2481 2482
			iput(temp_inode);
			if (ret) {
				ret = -EACCES;
2483
				goto out_put;
2484 2485 2486 2487 2488 2489
			}

			if (key.offset == upper_limit.objectid)
				break;
			if (key.objectid == BTRFS_FIRST_FREE_OBJECTID) {
				ret = -EACCES;
2490
				goto out_put;
2491 2492 2493 2494 2495 2496 2497 2498 2499 2500
			}

			btrfs_release_path(path);
			key.objectid = key.offset;
			key.offset = (u64)-1;
			dirid = key.objectid;
		}

		memmove(args->path, ptr, total_len);
		args->path[total_len] = '\0';
2501
		btrfs_put_root(root);
2502
		root = NULL;
2503 2504 2505 2506 2507 2508 2509
		btrfs_release_path(path);
	}

	/* Get the bottom subvolume's name from ROOT_REF */
	key.objectid = treeid;
	key.type = BTRFS_ROOT_REF_KEY;
	key.offset = args->treeid;
2510
	ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0);
2511 2512 2513 2514 2515 2516 2517 2518 2519 2520 2521 2522 2523 2524 2525 2526 2527 2528 2529 2530 2531 2532 2533 2534 2535 2536
	if (ret < 0) {
		goto out;
	} else if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	leaf = path->nodes[0];
	slot = path->slots[0];
	btrfs_item_key_to_cpu(leaf, &key, slot);

	item_off = btrfs_item_ptr_offset(leaf, slot);
	item_len = btrfs_item_size_nr(leaf, slot);
	/* Check if dirid in ROOT_REF corresponds to passed dirid */
	rref = btrfs_item_ptr(leaf, slot, struct btrfs_root_ref);
	if (args->dirid != btrfs_root_ref_dirid(leaf, rref)) {
		ret = -EINVAL;
		goto out;
	}

	/* Copy subvolume's name */
	item_off += sizeof(struct btrfs_root_ref);
	item_len -= sizeof(struct btrfs_root_ref);
	read_extent_buffer(leaf, args->name, item_off, item_len);
	args->name[item_len] = 0;

2537
out_put:
2538
	btrfs_put_root(root);
2539 2540 2541 2542 2543
out:
	btrfs_free_path(path);
	return ret;
}

2544 2545 2546
/*
 * BTRFS_IOC_INO_LOOKUP: resolve the path of an inode inside a tree.
 * Unprivileged callers may only query the containing subvolume's root id
 * (objectid == BTRFS_FIRST_FREE_OBJECTID); full path lookup requires
 * CAP_SYS_ADMIN.
 */
static noinline int btrfs_ioctl_ino_lookup(struct file *file,
					   void __user *argp)
{
	struct btrfs_ioctl_ino_lookup_args *args;
	struct inode *inode;
	int ret = 0;

	args = memdup_user(argp, sizeof(*args));
	if (IS_ERR(args))
		return PTR_ERR(args);

	inode = file_inode(file);

	/*
	 * Unprivileged query to obtain the containing subvolume root id. The
	 * path is reset so it's consistent with btrfs_search_path_in_tree.
	 */
	if (args->treeid == 0)
		args->treeid = BTRFS_I(inode)->root->root_key.objectid;

	if (args->objectid == BTRFS_FIRST_FREE_OBJECTID) {
		args->name[0] = 0;
		goto out;
	}

	if (!capable(CAP_SYS_ADMIN)) {
		ret = -EPERM;
		goto out;
	}

	ret = btrfs_search_path_in_tree(BTRFS_I(inode)->root->fs_info,
					args->treeid, args->objectid,
					args->name);

out:
	if (ret == 0 && copy_to_user(argp, args, sizeof(*args)))
		ret = -EFAULT;

	kfree(args);
	return ret;
}

2586 2587 2588 2589 2590 2591 2592 2593 2594 2595 2596 2597 2598 2599 2600 2601 2602 2603 2604 2605 2606 2607 2608 2609 2610 2611 2612 2613 2614 2615 2616 2617 2618 2619
/*
 * Version of ino_lookup ioctl (unprivileged)
 *
 * The main differences from ino_lookup ioctl are:
 *
 *   1. Read + Exec permission will be checked using inode_permission() during
 *      path construction. -EACCES will be returned in case of failure.
 *   2. Path construction will be stopped at the inode number which corresponds
 *      to the fd with which this ioctl is called. If constructed path does not
 *      exist under fd's inode, -EACCES will be returned.
 *   3. The name of bottom subvolume is also searched and filled.
 */
static int btrfs_ioctl_ino_lookup_user(struct file *file, void __user *argp)
{
	struct btrfs_ioctl_ino_lookup_user_args *args;
	struct inode *inode;
	int ret;

	args = memdup_user(argp, sizeof(*args));
	if (IS_ERR(args))
		return PTR_ERR(args);

	inode = file_inode(file);

	if (args->dirid == BTRFS_FIRST_FREE_OBJECTID &&
	    BTRFS_I(inode)->location.objectid != BTRFS_FIRST_FREE_OBJECTID) {
		/*
		 * The subvolume does not exist under fd with which this is
		 * called
		 */
		kfree(args);
		return -EACCES;
	}

	ret = btrfs_search_path_in_tree_user(file_mnt_user_ns(file), inode, args);

	if (ret == 0 && copy_to_user(argp, args, sizeof(*args)))
		ret = -EFAULT;

	kfree(args);
	return ret;
}

2629 2630 2631 2632 2633 2634 2635 2636 2637 2638 2639 2640 2641 2642 2643 2644 2645 2646 2647 2648 2649 2650 2651 2652 2653 2654 2655 2656 2657 2658 2659 2660
/*
 * Get the subvolume information in BTRFS_ROOT_ITEM and BTRFS_ROOT_BACKREF
 * for the subvolume containing the inode behind @file, and copy it to
 * user space via @argp.
 */
static int btrfs_ioctl_get_subvol_info(struct file *file, void __user *argp)
{
	struct btrfs_ioctl_get_subvol_info_args *subvol_info;
	struct btrfs_fs_info *fs_info;
	struct btrfs_root *root;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_root_item *root_item;
	struct btrfs_root_ref *rref;
	struct extent_buffer *leaf;
	unsigned long item_off;
	unsigned long item_len;
	struct inode *inode;
	int slot;
	int ret = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	subvol_info = kzalloc(sizeof(*subvol_info), GFP_KERNEL);
	if (!subvol_info) {
		btrfs_free_path(path);
		return -ENOMEM;
	}

	inode = file_inode(file);
	fs_info = BTRFS_I(inode)->root->fs_info;

	/* Get root_item of inode's subvolume */
	key.objectid = BTRFS_I(inode)->root->root_key.objectid;
	root = btrfs_get_fs_root(fs_info, key.objectid, true);
	if (IS_ERR(root)) {
		ret = PTR_ERR(root);
		goto out_free;
	}
	root_item = &root->root_item;

	subvol_info->treeid = key.objectid;

	subvol_info->generation = btrfs_root_generation(root_item);
	subvol_info->flags = btrfs_root_flags(root_item);

	memcpy(subvol_info->uuid, root_item->uuid, BTRFS_UUID_SIZE);
	memcpy(subvol_info->parent_uuid, root_item->parent_uuid,
						    BTRFS_UUID_SIZE);
	memcpy(subvol_info->received_uuid, root_item->received_uuid,
						    BTRFS_UUID_SIZE);

	subvol_info->ctransid = btrfs_root_ctransid(root_item);
	subvol_info->ctime.sec = btrfs_stack_timespec_sec(&root_item->ctime);
	subvol_info->ctime.nsec = btrfs_stack_timespec_nsec(&root_item->ctime);

	subvol_info->otransid = btrfs_root_otransid(root_item);
	subvol_info->otime.sec = btrfs_stack_timespec_sec(&root_item->otime);
	subvol_info->otime.nsec = btrfs_stack_timespec_nsec(&root_item->otime);

	subvol_info->stransid = btrfs_root_stransid(root_item);
	subvol_info->stime.sec = btrfs_stack_timespec_sec(&root_item->stime);
	subvol_info->stime.nsec = btrfs_stack_timespec_nsec(&root_item->stime);

	subvol_info->rtransid = btrfs_root_rtransid(root_item);
	subvol_info->rtime.sec = btrfs_stack_timespec_sec(&root_item->rtime);
	subvol_info->rtime.nsec = btrfs_stack_timespec_nsec(&root_item->rtime);

	/* The top-level subvolume has no backref in the root tree */
	if (key.objectid != BTRFS_FS_TREE_OBJECTID) {
		/* Search root tree for ROOT_BACKREF of this subvolume */
		key.type = BTRFS_ROOT_BACKREF_KEY;
		key.offset = 0;
		ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0);
		if (ret < 0) {
			goto out;
		} else if (path->slots[0] >=
			   btrfs_header_nritems(path->nodes[0])) {
			ret = btrfs_next_leaf(fs_info->tree_root, path);
			if (ret < 0) {
				goto out;
			} else if (ret > 0) {
				ret = -EUCLEAN;
				goto out;
			}
		}

		leaf = path->nodes[0];
		slot = path->slots[0];
		btrfs_item_key_to_cpu(leaf, &key, slot);
		if (key.objectid == subvol_info->treeid &&
		    key.type == BTRFS_ROOT_BACKREF_KEY) {
			subvol_info->parent_id = key.offset;

			rref = btrfs_item_ptr(leaf, slot, struct btrfs_root_ref);
			subvol_info->dirid = btrfs_root_ref_dirid(leaf, rref);

			item_off = btrfs_item_ptr_offset(leaf, slot)
					+ sizeof(struct btrfs_root_ref);
			item_len = btrfs_item_size_nr(leaf, slot)
					- sizeof(struct btrfs_root_ref);
			read_extent_buffer(leaf, subvol_info->name,
					   item_off, item_len);
		} else {
			ret = -ENOENT;
			goto out;
		}
	}

	if (copy_to_user(argp, subvol_info, sizeof(*subvol_info)))
		ret = -EFAULT;

out:
	btrfs_put_root(root);
out_free:
	btrfs_free_path(path);
	kfree(subvol_info);
	return ret;
}

2746 2747 2748 2749 2750 2751 2752 2753 2754 2755 2756 2757 2758 2759 2760 2761 2762 2763 2764 2765 2766 2767 2768 2769 2770 2771 2772 2773 2774 2775 2776 2777 2778 2779 2780 2781 2782 2783 2784 2785 2786 2787 2788 2789 2790 2791 2792 2793 2794 2795 2796 2797 2798 2799 2800 2801 2802 2803 2804 2805 2806 2807 2808 2809 2810 2811 2812 2813 2814 2815 2816 2817 2818 2819 2820 2821 2822 2823 2824 2825 2826 2827 2828 2829 2830 2831 2832 2833 2834 2835 2836 2837 2838 2839 2840 2841 2842
/*
 * Return ROOT_REF information of the subvolume containing this inode
 * except the subvolume name.
 *
 * Fills up to BTRFS_MAX_ROOTREF_BUFFER_NUM entries starting from
 * rootrefs->min_treeid; on -EOVERFLOW min_treeid is advanced so the
 * caller can continue with a follow-up call.
 */
static int btrfs_ioctl_get_subvol_rootref(struct file *file, void __user *argp)
{
	struct btrfs_ioctl_get_subvol_rootref_args *rootrefs;
	struct btrfs_root_ref *rref;
	struct btrfs_root *root;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct extent_buffer *leaf;
	struct inode *inode;
	u64 objectid;
	int slot;
	int ret;
	u8 found;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	rootrefs = memdup_user(argp, sizeof(*rootrefs));
	if (IS_ERR(rootrefs)) {
		btrfs_free_path(path);
		return PTR_ERR(rootrefs);
	}

	inode = file_inode(file);
	root = BTRFS_I(inode)->root->fs_info->tree_root;
	objectid = BTRFS_I(inode)->root->root_key.objectid;

	key.objectid = objectid;
	key.type = BTRFS_ROOT_REF_KEY;
	key.offset = rootrefs->min_treeid;
	found = 0;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0) {
		goto out;
	} else if (path->slots[0] >=
		   btrfs_header_nritems(path->nodes[0])) {
		ret = btrfs_next_leaf(root, path);
		if (ret < 0) {
			goto out;
		} else if (ret > 0) {
			ret = -EUCLEAN;
			goto out;
		}
	}
	while (1) {
		leaf = path->nodes[0];
		slot = path->slots[0];

		btrfs_item_key_to_cpu(leaf, &key, slot);
		if (key.objectid != objectid || key.type != BTRFS_ROOT_REF_KEY) {
			ret = 0;
			goto out;
		}

		if (found == BTRFS_MAX_ROOTREF_BUFFER_NUM) {
			ret = -EOVERFLOW;
			goto out;
		}

		rref = btrfs_item_ptr(leaf, slot, struct btrfs_root_ref);
		rootrefs->rootref[found].treeid = key.offset;
		rootrefs->rootref[found].dirid =
				  btrfs_root_ref_dirid(leaf, rref);
		found++;

		ret = btrfs_next_item(root, path);
		if (ret < 0) {
			goto out;
		} else if (ret > 0) {
			ret = -EUCLEAN;
			goto out;
		}
	}

out:
	if (!ret || ret == -EOVERFLOW) {
		rootrefs->num_items = found;
		/* update min_treeid for next search */
		if (found)
			rootrefs->min_treeid =
				rootrefs->rootref[found - 1].treeid + 1;
		if (copy_to_user(argp, rootrefs, sizeof(*rootrefs)))
			ret = -EFAULT;
	}

	kfree(rootrefs);
	btrfs_free_path(path);

	return ret;
}

2843
static noinline int btrfs_ioctl_snap_destroy(struct file *file,
2844 2845
					     void __user *arg,
					     bool destroy_v2)
2846
{
A
Al Viro 已提交
2847
	struct dentry *parent = file->f_path.dentry;
2848
	struct btrfs_fs_info *fs_info = btrfs_sb(parent->d_sb);
2849
	struct dentry *dentry;
2850
	struct inode *dir = d_inode(parent);
2851 2852 2853
	struct inode *inode;
	struct btrfs_root *root = BTRFS_I(dir)->root;
	struct btrfs_root *dest = NULL;
2854 2855
	struct btrfs_ioctl_vol_args *vol_args = NULL;
	struct btrfs_ioctl_vol_args_v2 *vol_args2 = NULL;
2856
	struct user_namespace *mnt_userns = file_mnt_user_ns(file);
2857 2858
	char *subvol_name, *subvol_name_ptr = NULL;
	int subvol_namelen;
2859
	int err = 0;
2860
	bool destroy_parent = false;
2861

2862 2863 2864 2865
	if (destroy_v2) {
		vol_args2 = memdup_user(arg, sizeof(*vol_args2));
		if (IS_ERR(vol_args2))
			return PTR_ERR(vol_args2);
2866

2867 2868 2869 2870
		if (vol_args2->flags & ~BTRFS_SUBVOL_DELETE_ARGS_MASK) {
			err = -EOPNOTSUPP;
			goto out;
		}
2871

2872 2873 2874 2875 2876 2877 2878 2879 2880 2881 2882 2883
		/*
		 * If SPEC_BY_ID is not set, we are looking for the subvolume by
		 * name, same as v1 currently does.
		 */
		if (!(vol_args2->flags & BTRFS_SUBVOL_SPEC_BY_ID)) {
			vol_args2->name[BTRFS_SUBVOL_NAME_MAX] = 0;
			subvol_name = vol_args2->name;

			err = mnt_want_write_file(file);
			if (err)
				goto out;
		} else {
2884
			struct inode *old_dir;
2885

2886 2887 2888 2889 2890 2891 2892 2893 2894 2895 2896 2897 2898 2899 2900 2901 2902 2903 2904 2905 2906 2907 2908 2909 2910 2911 2912 2913 2914 2915 2916 2917 2918 2919 2920 2921
			if (vol_args2->subvolid < BTRFS_FIRST_FREE_OBJECTID) {
				err = -EINVAL;
				goto out;
			}

			err = mnt_want_write_file(file);
			if (err)
				goto out;

			dentry = btrfs_get_dentry(fs_info->sb,
					BTRFS_FIRST_FREE_OBJECTID,
					vol_args2->subvolid, 0, 0);
			if (IS_ERR(dentry)) {
				err = PTR_ERR(dentry);
				goto out_drop_write;
			}

			/*
			 * Change the default parent since the subvolume being
			 * deleted can be outside of the current mount point.
			 */
			parent = btrfs_get_parent(dentry);

			/*
			 * At this point dentry->d_name can point to '/' if the
			 * subvolume we want to destroy is outsite of the
			 * current mount point, so we need to release the
			 * current dentry and execute the lookup to return a new
			 * one with ->d_name pointing to the
			 * <mount point>/subvol_name.
			 */
			dput(dentry);
			if (IS_ERR(parent)) {
				err = PTR_ERR(parent);
				goto out_drop_write;
			}
2922
			old_dir = dir;
2923 2924 2925 2926 2927 2928 2929 2930 2931 2932
			dir = d_inode(parent);

			/*
			 * If v2 was used with SPEC_BY_ID, a new parent was
			 * allocated since the subvolume can be outside of the
			 * current mount point. Later on we need to release this
			 * new parent dentry.
			 */
			destroy_parent = true;

2933 2934 2935 2936 2937 2938 2939 2940 2941 2942 2943 2944 2945 2946
			/*
			 * On idmapped mounts, deletion via subvolid is
			 * restricted to subvolumes that are immediate
			 * ancestors of the inode referenced by the file
			 * descriptor in the ioctl. Otherwise the idmapping
			 * could potentially be abused to delete subvolumes
			 * anywhere in the filesystem the user wouldn't be able
			 * to delete without an idmapped mount.
			 */
			if (old_dir != dir && mnt_userns != &init_user_ns) {
				err = -EOPNOTSUPP;
				goto free_parent;
			}

2947 2948 2949 2950 2951 2952
			subvol_name_ptr = btrfs_get_subvol_name_from_objectid(
						fs_info, vol_args2->subvolid);
			if (IS_ERR(subvol_name_ptr)) {
				err = PTR_ERR(subvol_name_ptr);
				goto free_parent;
			}
D
David Sterba 已提交
2953
			/* subvol_name_ptr is already nul terminated */
2954 2955 2956 2957 2958 2959 2960 2961 2962 2963 2964 2965 2966
			subvol_name = (char *)kbasename(subvol_name_ptr);
		}
	} else {
		vol_args = memdup_user(arg, sizeof(*vol_args));
		if (IS_ERR(vol_args))
			return PTR_ERR(vol_args);

		vol_args->name[BTRFS_PATH_NAME_MAX] = 0;
		subvol_name = vol_args->name;

		err = mnt_want_write_file(file);
		if (err)
			goto out;
2967 2968
	}

2969
	subvol_namelen = strlen(subvol_name);
2970

2971 2972 2973 2974 2975 2976 2977 2978 2979 2980
	if (strchr(subvol_name, '/') ||
	    strncmp(subvol_name, "..", subvol_namelen) == 0) {
		err = -EINVAL;
		goto free_subvol_name;
	}

	if (!S_ISDIR(dir->i_mode)) {
		err = -ENOTDIR;
		goto free_subvol_name;
	}
2981

2982 2983
	err = down_write_killable_nested(&dir->i_rwsem, I_MUTEX_PARENT);
	if (err == -EINTR)
2984
		goto free_subvol_name;
2985
	dentry = lookup_one(mnt_userns, subvol_name, parent, subvol_namelen);
2986 2987 2988 2989 2990
	if (IS_ERR(dentry)) {
		err = PTR_ERR(dentry);
		goto out_unlock_dir;
	}

2991
	if (d_really_is_negative(dentry)) {
2992 2993 2994 2995
		err = -ENOENT;
		goto out_dput;
	}

2996
	inode = d_inode(dentry);
2997
	dest = BTRFS_I(inode)->root;
2998
	if (!capable(CAP_SYS_ADMIN)) {
2999 3000 3001 3002 3003 3004 3005 3006 3007 3008 3009 3010 3011 3012
		/*
		 * Regular user.  Only allow this with a special mount
		 * option, when the user has write+exec access to the
		 * subvol root, and when rmdir(2) would have been
		 * allowed.
		 *
		 * Note that this is _not_ check that the subvol is
		 * empty or doesn't contain data that we wouldn't
		 * otherwise be able to delete.
		 *
		 * Users who want to delete empty subvols should try
		 * rmdir(2).
		 */
		err = -EPERM;
3013
		if (!btrfs_test_opt(fs_info, USER_SUBVOL_RM_ALLOWED))
3014 3015 3016 3017 3018 3019 3020 3021 3022 3023 3024 3025 3026
			goto out_dput;

		/*
		 * Do not allow deletion if the parent dir is the same
		 * as the dir to be deleted.  That means the ioctl
		 * must be called on the dentry referencing the root
		 * of the subvol, not a random directory contained
		 * within it.
		 */
		err = -EINVAL;
		if (root == dest)
			goto out_dput;

3027
		err = inode_permission(mnt_userns, inode, MAY_WRITE | MAY_EXEC);
3028 3029 3030 3031
		if (err)
			goto out_dput;
	}

3032
	/* check if subvolume may be deleted by a user */
3033
	err = btrfs_may_delete(mnt_userns, dir, dentry, 1);
3034 3035 3036
	if (err)
		goto out_dput;

3037
	if (btrfs_ino(BTRFS_I(inode)) != BTRFS_FIRST_FREE_OBJECTID) {
3038 3039 3040 3041
		err = -EINVAL;
		goto out_dput;
	}

3042
	btrfs_inode_lock(inode, 0);
3043
	err = btrfs_delete_subvolume(dir, dentry);
3044
	btrfs_inode_unlock(inode, 0);
3045 3046
	if (!err) {
		fsnotify_rmdir(dir, dentry);
3047
		d_delete(dentry);
3048
	}
3049

3050 3051 3052
out_dput:
	dput(dentry);
out_unlock_dir:
3053
	btrfs_inode_unlock(dir, 0);
3054 3055 3056 3057 3058
free_subvol_name:
	kfree(subvol_name_ptr);
free_parent:
	if (destroy_parent)
		dput(parent);
3059
out_drop_write:
A
Al Viro 已提交
3060
	mnt_drop_write_file(file);
3061
out:
3062
	kfree(vol_args2);
3063 3064 3065 3066
	kfree(vol_args);
	return err;
}

C
Chris Mason 已提交
3067
/*
 * Handle BTRFS_IOC_DEFRAG[_RANGE]: defragment a file or (admin only) the
 * subvolume tree of a directory.
 *
 * @file: the open fd the ioctl was issued on
 * @argp: optional user pointer to struct btrfs_ioctl_defrag_range_args;
 *        NULL means "defrag the whole file with default settings"
 *
 * Returns 0 on success or a negative errno.
 */
static int btrfs_ioctl_defrag(struct file *file, void __user *argp)
{
	struct inode *inode = file_inode(file);
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_ioctl_defrag_range_args range = {0};
	int ret;

	/* Defrag dirties data/metadata, so take a write ref on the mount. */
	ret = mnt_want_write_file(file);
	if (ret)
		return ret;

	if (btrfs_root_readonly(root)) {
		ret = -EROFS;
		goto out;
	}

	switch (inode->i_mode & S_IFMT) {
	case S_IFDIR:
		/* Directory means "defrag the tree" -- admin only. */
		if (!capable(CAP_SYS_ADMIN)) {
			ret = -EPERM;
			goto out;
		}
		ret = btrfs_defrag_root(root);
		break;
	case S_IFREG:
		/*
		 * Note that this does not check the file descriptor for write
		 * access. This prevents defragmenting executables that are
		 * running and allows defrag on files open in read-only mode.
		 */
		if (!capable(CAP_SYS_ADMIN) &&
		    inode_permission(&init_user_ns, inode, MAY_WRITE)) {
			ret = -EPERM;
			goto out;
		}

		if (argp) {
			if (copy_from_user(&range, argp, sizeof(range))) {
				ret = -EFAULT;
				goto out;
			}
			/* compression requires us to start the IO */
			if ((range.flags & BTRFS_DEFRAG_RANGE_COMPRESS)) {
				range.flags |= BTRFS_DEFRAG_RANGE_START_IO;
				range.extent_thresh = (u32)-1;
			}
		} else {
			/* the rest are all zero from the {0} initializer */
			range.len = (u64)-1;
		}
		ret = btrfs_defrag_file(file_inode(file), &file->f_ra,
					&range, BTRFS_OLDEST_GENERATION, 0);
		/* Positive return is a progress count, not an error. */
		if (ret > 0)
			ret = 0;
		break;
	default:
		ret = -EINVAL;
	}
out:
	mnt_drop_write_file(file);
	return ret;
}

3130
/*
 * Handle BTRFS_IOC_ADD_DEV: add a new device to the filesystem.
 *
 * Takes the device-add exclusive operation for the whole duration so it
 * cannot race with balance, resize, replace or another add/remove.
 *
 * Returns 0 on success, BTRFS_ERROR_DEV_EXCL_RUN_IN_PROGRESS if another
 * exclusive operation is running, or a negative errno.
 */
static long btrfs_ioctl_add_dev(struct btrfs_fs_info *fs_info, void __user *arg)
{
	struct btrfs_ioctl_vol_args *vol_args;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (!btrfs_exclop_start(fs_info, BTRFS_EXCLOP_DEV_ADD))
		return BTRFS_ERROR_DEV_EXCL_RUN_IN_PROGRESS;

	vol_args = memdup_user(arg, sizeof(*vol_args));
	if (IS_ERR(vol_args)) {
		ret = PTR_ERR(vol_args);
		goto out;
	}

	/* Userspace may not have terminated the path; force it. */
	vol_args->name[BTRFS_PATH_NAME_MAX] = '\0';
	ret = btrfs_init_new_device(fs_info, vol_args->name);

	if (!ret)
		btrfs_info(fs_info, "disk added %s", vol_args->name);

	kfree(vol_args);
out:
	btrfs_exclop_finish(fs_info);
	return ret;
}

3159
/*
 * Handle BTRFS_IOC_RM_DEV_V2: remove a device, specified either by path
 * or (with BTRFS_DEVICE_SPEC_BY_ID) by devid.
 *
 * The special name "cancel" (only valid for the by-path spec) requests
 * cancellation of a running device removal instead of starting one.
 *
 * Returns 0 on success or a negative errno / BTRFS_ERROR_* code.
 */
static long btrfs_ioctl_rm_dev_v2(struct file *file, void __user *arg)
{
	struct inode *inode = file_inode(file);
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_ioctl_vol_args_v2 *vol_args;
	struct block_device *bdev = NULL;
	fmode_t mode;
	int ret;
	bool cancel = false;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	ret = mnt_want_write_file(file);
	if (ret)
		return ret;

	vol_args = memdup_user(arg, sizeof(*vol_args));
	if (IS_ERR(vol_args)) {
		ret = PTR_ERR(vol_args);
		goto err_drop;
	}

	/* Reject flags this kernel does not know about. */
	if (vol_args->flags & ~BTRFS_DEVICE_REMOVE_ARGS_MASK) {
		ret = -EOPNOTSUPP;
		goto out;
	}
	vol_args->name[BTRFS_SUBVOL_NAME_MAX] = '\0';
	if (!(vol_args->flags & BTRFS_DEVICE_SPEC_BY_ID) &&
	    strcmp("cancel", vol_args->name) == 0)
		cancel = true;

	ret = exclop_start_or_cancel_reloc(fs_info, BTRFS_EXCLOP_DEV_REMOVE,
					   cancel);
	if (ret)
		goto out;
	/* Exclusive operation is now claimed */

	if (vol_args->flags & BTRFS_DEVICE_SPEC_BY_ID)
		ret = btrfs_rm_device(fs_info, NULL, vol_args->devid, &bdev, &mode);
	else
		ret = btrfs_rm_device(fs_info, vol_args->name, 0, &bdev, &mode);

	btrfs_exclop_finish(fs_info);

	if (!ret) {
		if (vol_args->flags & BTRFS_DEVICE_SPEC_BY_ID)
			btrfs_info(fs_info, "device deleted: id %llu",
					vol_args->devid);
		else
			btrfs_info(fs_info, "device deleted: %s",
					vol_args->name);
	}
out:
	kfree(vol_args);
err_drop:
	mnt_drop_write_file(file);
	/*
	 * The bdev is released only after the write ref is dropped;
	 * btrfs_rm_device() hands it back via &bdev/&mode on success.
	 */
	if (bdev)
		blkdev_put(bdev, mode);
	return ret;
}

3221
/*
 * Handle BTRFS_IOC_RM_DEV: legacy (v1) device removal by path.
 *
 * The magic name "cancel" asks to cancel a running removal rather than
 * start a new one; exclop_start_or_cancel_reloc() arbitrates both cases
 * against other exclusive operations.
 *
 * Returns 0 on success or a negative errno / BTRFS_ERROR_* code.
 */
static long btrfs_ioctl_rm_dev(struct file *file, void __user *arg)
{
	struct inode *inode = file_inode(file);
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_ioctl_vol_args *vol_args;
	struct block_device *bdev = NULL;
	fmode_t mode;
	int ret;
	bool cancel;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	ret = mnt_want_write_file(file);
	if (ret)
		return ret;

	vol_args = memdup_user(arg, sizeof(*vol_args));
	if (IS_ERR(vol_args)) {
		ret = PTR_ERR(vol_args);
		goto out_drop_write;
	}
	vol_args->name[BTRFS_PATH_NAME_MAX] = '\0';
	cancel = (strcmp("cancel", vol_args->name) == 0);

	ret = exclop_start_or_cancel_reloc(fs_info, BTRFS_EXCLOP_DEV_REMOVE,
					   cancel);
	if (ret == 0) {
		ret = btrfs_rm_device(fs_info, vol_args->name, 0, &bdev, &mode);
		if (!ret)
			btrfs_info(fs_info, "disk deleted %s", vol_args->name);
		btrfs_exclop_finish(fs_info);
	}

	kfree(vol_args);
out_drop_write:
	mnt_drop_write_file(file);
	/* Release the underlying block device last, after dropping write. */
	if (bdev)
		blkdev_put(bdev, mode);
	return ret;
}

3263 3264
/*
 * Handle BTRFS_IOC_FS_INFO: report filesystem-wide information (device
 * count, max devid, fsid, node/sector sizes) plus optional extra fields
 * selected by the flags the caller passed in.
 *
 * The caller's flags are saved and the struct is zeroed before filling,
 * so only explicitly-set fields are returned.
 *
 * Returns 0 on success or a negative errno.
 */
static long btrfs_ioctl_fs_info(struct btrfs_fs_info *fs_info,
				void __user *arg)
{
	struct btrfs_ioctl_fs_info_args *fi_args;
	struct btrfs_device *device;
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	u64 flags_in;
	int ret = 0;

	fi_args = memdup_user(arg, sizeof(*fi_args));
	if (IS_ERR(fi_args))
		return PTR_ERR(fi_args);

	/* Remember which optional fields were requested, then start clean. */
	flags_in = fi_args->flags;
	memset(fi_args, 0, sizeof(*fi_args));

	/* Walk the device list under RCU; it may change concurrently. */
	rcu_read_lock();
	fi_args->num_devices = fs_devices->num_devices;

	list_for_each_entry_rcu(device, &fs_devices->devices, dev_list) {
		if (device->devid > fi_args->max_id)
			fi_args->max_id = device->devid;
	}
	rcu_read_unlock();

	memcpy(&fi_args->fsid, fs_devices->fsid, sizeof(fi_args->fsid));
	fi_args->nodesize = fs_info->nodesize;
	fi_args->sectorsize = fs_info->sectorsize;
	fi_args->clone_alignment = fs_info->sectorsize;

	if (flags_in & BTRFS_FS_INFO_FLAG_CSUM_INFO) {
		fi_args->csum_type = btrfs_super_csum_type(fs_info->super_copy);
		fi_args->csum_size = btrfs_super_csum_size(fs_info->super_copy);
		fi_args->flags |= BTRFS_FS_INFO_FLAG_CSUM_INFO;
	}

	if (flags_in & BTRFS_FS_INFO_FLAG_GENERATION) {
		fi_args->generation = fs_info->generation;
		fi_args->flags |= BTRFS_FS_INFO_FLAG_GENERATION;
	}

	if (flags_in & BTRFS_FS_INFO_FLAG_METADATA_UUID) {
		memcpy(&fi_args->metadata_uuid, fs_devices->metadata_uuid,
		       sizeof(fi_args->metadata_uuid));
		fi_args->flags |= BTRFS_FS_INFO_FLAG_METADATA_UUID;
	}

	if (copy_to_user(arg, fi_args, sizeof(*fi_args)))
		ret = -EFAULT;

	kfree(fi_args);
	return ret;
}

3317 3318
/*
 * Handle BTRFS_IOC_DEV_INFO: look up a device by devid (and optionally a
 * UUID, if the caller supplied a non-empty one) and report its usage,
 * UUID and path.
 *
 * Returns 0 on success, -ENODEV if no matching device exists, or another
 * negative errno.
 */
static long btrfs_ioctl_dev_info(struct btrfs_fs_info *fs_info,
				 void __user *arg)
{
	struct btrfs_ioctl_dev_info_args *di_args;
	struct btrfs_device *dev;
	int ret = 0;
	char *s_uuid = NULL;

	di_args = memdup_user(arg, sizeof(*di_args));
	if (IS_ERR(di_args))
		return PTR_ERR(di_args);

	/* An all-zero UUID means "match by devid only". */
	if (!btrfs_is_empty_uuid(di_args->uuid))
		s_uuid = di_args->uuid;

	/* Device list and dev->name are RCU-protected. */
	rcu_read_lock();
	dev = btrfs_find_device(fs_info->fs_devices, di_args->devid, s_uuid,
				NULL);

	if (!dev) {
		ret = -ENODEV;
		goto out;
	}

	di_args->devid = dev->devid;
	di_args->bytes_used = btrfs_device_get_bytes_used(dev);
	di_args->total_bytes = btrfs_device_get_total_bytes(dev);
	memcpy(di_args->uuid, dev->uuid, sizeof(di_args->uuid));
	if (dev->name) {
		/* strncpy may not terminate; the next line guarantees it. */
		strncpy(di_args->path, rcu_str_deref(dev->name),
				sizeof(di_args->path) - 1);
		di_args->path[sizeof(di_args->path) - 1] = 0;
	} else {
		di_args->path[0] = '\0';
	}

out:
	rcu_read_unlock();
	/* copy_to_user() must happen outside the RCU read section. */
	if (ret == 0 && copy_to_user(arg, di_args, sizeof(*di_args)))
		ret = -EFAULT;

	kfree(di_args);
	return ret;
}

3362 3363
/*
 * Handle BTRFS_IOC_DEFAULT_SUBVOL: set the subvolume that is mounted by
 * default (objectid 0 means "reset to the top-level FS tree").
 *
 * Rewrites the "default" dir item in the tree of tree roots to point at
 * the new subvolume and sets the DEFAULT_SUBVOL incompat bit.
 *
 * Returns 0 on success or a negative errno.
 */
static long btrfs_ioctl_default_subvol(struct file *file, void __user *argp)
{
	struct inode *inode = file_inode(file);
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_root *new_root;
	struct btrfs_dir_item *di;
	struct btrfs_trans_handle *trans;
	struct btrfs_path *path = NULL;
	struct btrfs_disk_key disk_key;
	u64 objectid = 0;
	u64 dir_id;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	ret = mnt_want_write_file(file);
	if (ret)
		return ret;

	if (copy_from_user(&objectid, argp, sizeof(objectid))) {
		ret = -EFAULT;
		goto out;
	}

	if (!objectid)
		objectid = BTRFS_FS_TREE_OBJECTID;

	new_root = btrfs_get_fs_root(fs_info, objectid, true);
	if (IS_ERR(new_root)) {
		ret = PTR_ERR(new_root);
		goto out;
	}
	/* Only real subvolume trees may become the default. */
	if (!is_fstree(new_root->root_key.objectid)) {
		ret = -ENOENT;
		goto out_free;
	}

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out_free;
	}

	trans = btrfs_start_transaction(root, 1);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto out_free;
	}

	dir_id = btrfs_super_root_dir(fs_info->super_copy);
	di = btrfs_lookup_dir_item(trans, fs_info->tree_root, path,
				   dir_id, "default", 7, 1);
	if (IS_ERR_OR_NULL(di)) {
		btrfs_release_path(path);
		btrfs_end_transaction(trans);
		btrfs_err(fs_info,
			  "Umm, you don't have the default diritem, this isn't going to work");
		ret = -ENOENT;
		goto out_free;
	}

	/* Point the "default" dir item at the new subvolume's root key. */
	btrfs_cpu_key_to_disk(&disk_key, &new_root->root_key);
	btrfs_set_dir_item_key(path->nodes[0], di, &disk_key);
	btrfs_mark_buffer_dirty(path->nodes[0]);
	btrfs_release_path(path);

	btrfs_set_fs_incompat(fs_info, DEFAULT_SUBVOL);
	btrfs_end_transaction(trans);
out_free:
	btrfs_put_root(new_root);
	btrfs_free_path(path);
out:
	mnt_drop_write_file(file);
	return ret;
}

3440 3441
/*
 * Sum up total and used bytes of all block groups on @groups_list into
 * @space.  The flags field ends up holding the (shared) flags of the
 * block groups on the list.
 */
static void get_block_group_info(struct list_head *groups_list,
				 struct btrfs_ioctl_space_info *space)
{
	struct btrfs_block_group *block_group;

	space->total_bytes = 0;
	space->used_bytes = 0;
	space->flags = 0;
	list_for_each_entry(block_group, groups_list, list) {
		space->flags = block_group->flags;
		space->total_bytes += block_group->length;
		space->used_bytes += block_group->used;
	}
}

3455 3456
/*
 * Handle BTRFS_IOC_SPACE_INFO: report per-type/per-RAID-level space usage.
 *
 * First pass counts how many slots would be needed; if the caller passed
 * space_slots == 0 only that count is returned.  Otherwise a second pass
 * fills up to the requested number of slots, followed by one synthetic
 * slot for the global block reserve.
 *
 * Returns 0 on success or a negative errno.
 */
static long btrfs_ioctl_space_info(struct btrfs_fs_info *fs_info,
				   void __user *arg)
{
	struct btrfs_ioctl_space_args space_args;
	struct btrfs_ioctl_space_info space;
	struct btrfs_ioctl_space_info *dest;
	struct btrfs_ioctl_space_info *dest_orig;
	struct btrfs_ioctl_space_info __user *user_dest;
	struct btrfs_space_info *info;
	static const u64 types[] = {
		BTRFS_BLOCK_GROUP_DATA,
		BTRFS_BLOCK_GROUP_SYSTEM,
		BTRFS_BLOCK_GROUP_METADATA,
		BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_METADATA
	};
	int num_types = 4;
	int alloc_size;
	int ret = 0;
	u64 slot_count = 0;
	int i, c;

	if (copy_from_user(&space_args,
			   (struct btrfs_ioctl_space_args __user *)arg,
			   sizeof(space_args)))
		return -EFAULT;

	/* First pass: count one slot per non-empty (type, raid level) pair. */
	for (i = 0; i < num_types; i++) {
		struct btrfs_space_info *tmp;

		info = NULL;
		list_for_each_entry(tmp, &fs_info->space_info, list) {
			if (tmp->flags == types[i]) {
				info = tmp;
				break;
			}
		}

		if (!info)
			continue;

		down_read(&info->groups_sem);
		for (c = 0; c < BTRFS_NR_RAID_TYPES; c++) {
			if (!list_empty(&info->block_groups[c]))
				slot_count++;
		}
		up_read(&info->groups_sem);
	}

	/*
	 * Global block reserve, exported as a space_info
	 */
	slot_count++;

	/* space_slots == 0 means they are asking for a count */
	if (space_args.space_slots == 0) {
		space_args.total_spaces = slot_count;
		goto out;
	}

	slot_count = min_t(u64, space_args.space_slots, slot_count);

	alloc_size = sizeof(*dest) * slot_count;

	/* we generally have at most 6 or so space infos, one for each raid
	 * level.  So, a whole page should be more than enough for everyone
	 */
	if (alloc_size > PAGE_SIZE)
		return -ENOMEM;

	space_args.total_spaces = 0;
	dest = kmalloc(alloc_size, GFP_KERNEL);
	if (!dest)
		return -ENOMEM;
	dest_orig = dest;

	/* now we have a buffer to copy into */
	for (i = 0; i < num_types; i++) {
		struct btrfs_space_info *tmp;

		if (!slot_count)
			break;

		info = NULL;
		list_for_each_entry(tmp, &fs_info->space_info, list) {
			if (tmp->flags == types[i]) {
				info = tmp;
				break;
			}
		}

		if (!info)
			continue;
		down_read(&info->groups_sem);
		for (c = 0; c < BTRFS_NR_RAID_TYPES; c++) {
			if (!list_empty(&info->block_groups[c])) {
				get_block_group_info(&info->block_groups[c],
						     &space);
				memcpy(dest, &space, sizeof(space));
				dest++;
				space_args.total_spaces++;
				slot_count--;
			}
			if (!slot_count)
				break;
		}
		up_read(&info->groups_sem);
	}

	/*
	 * Add global block reserve
	 */
	if (slot_count) {
		struct btrfs_block_rsv *block_rsv = &fs_info->global_block_rsv;

		spin_lock(&block_rsv->lock);
		space.total_bytes = block_rsv->size;
		space.used_bytes = block_rsv->size - block_rsv->reserved;
		spin_unlock(&block_rsv->lock);
		space.flags = BTRFS_SPACE_INFO_GLOBAL_RSV;
		memcpy(dest, &space, sizeof(space));
		space_args.total_spaces++;
	}

	/* The slot array lives directly behind the args struct in userspace. */
	user_dest = (struct btrfs_ioctl_space_info __user *)
		(arg + sizeof(struct btrfs_ioctl_space_args));

	if (copy_to_user(user_dest, dest_orig, alloc_size))
		ret = -EFAULT;

	kfree(dest_orig);
out:
	if (ret == 0 && copy_to_user(arg, &space_args, sizeof(space_args)))
		ret = -EFAULT;

	return ret;
}

3592 3593
/*
 * Handle BTRFS_IOC_START_SYNC: kick off an async commit of the current
 * transaction and return its id to userspace (via @argp, if non-NULL) so
 * it can later be waited on with BTRFS_IOC_WAIT_SYNC.
 *
 * Returns 0 on success or a negative errno.
 */
static noinline long btrfs_ioctl_start_sync(struct btrfs_root *root,
					    void __user *argp)
{
	struct btrfs_trans_handle *trans;
	u64 transid;
	int ret;

	/* Attach to a running transaction without starting a new one. */
	trans = btrfs_attach_transaction_barrier(root);
	if (IS_ERR(trans)) {
		if (PTR_ERR(trans) != -ENOENT)
			return PTR_ERR(trans);

		/* No running transaction, don't bother */
		transid = root->fs_info->last_trans_committed;
		goto out;
	}
	transid = trans->transid;
	ret = btrfs_commit_transaction_async(trans);
	if (ret) {
		btrfs_end_transaction(trans);
		return ret;
	}
out:
	if (argp)
		if (copy_to_user(argp, &transid, sizeof(transid)))
			return -EFAULT;
	return 0;
}

3621
static noinline long btrfs_ioctl_wait_sync(struct btrfs_fs_info *fs_info,
3622
					   void __user *argp)
3623 3624 3625 3626 3627 3628 3629 3630 3631
{
	u64 transid;

	if (argp) {
		if (copy_from_user(&transid, argp, sizeof(transid)))
			return -EFAULT;
	} else {
		transid = 0;  /* current trans */
	}
3632
	return btrfs_wait_for_commit(fs_info, transid);
3633 3634
}

M
Miao Xie 已提交
3635
static long btrfs_ioctl_scrub(struct file *file, void __user *arg)
J
Jan Schmidt 已提交
3636
{
3637
	struct btrfs_fs_info *fs_info = btrfs_sb(file_inode(file)->i_sb);
J
Jan Schmidt 已提交
3638
	struct btrfs_ioctl_scrub_args *sa;
M
Miao Xie 已提交
3639
	int ret;
J
Jan Schmidt 已提交
3640 3641 3642 3643 3644 3645 3646 3647

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	sa = memdup_user(arg, sizeof(*sa));
	if (IS_ERR(sa))
		return PTR_ERR(sa);

M
Miao Xie 已提交
3648 3649 3650 3651 3652 3653
	if (!(sa->flags & BTRFS_SCRUB_READONLY)) {
		ret = mnt_want_write_file(file);
		if (ret)
			goto out;
	}

3654
	ret = btrfs_scrub_dev(fs_info, sa->devid, sa->start, sa->end,
3655 3656
			      &sa->progress, sa->flags & BTRFS_SCRUB_READONLY,
			      0);
J
Jan Schmidt 已提交
3657

3658 3659 3660 3661 3662 3663 3664 3665 3666 3667 3668 3669 3670
	/*
	 * Copy scrub args to user space even if btrfs_scrub_dev() returned an
	 * error. This is important as it allows user space to know how much
	 * progress scrub has done. For example, if scrub is canceled we get
	 * -ECANCELED from btrfs_scrub_dev() and return that error back to user
	 * space. Later user space can inspect the progress from the structure
	 * btrfs_ioctl_scrub_args and resume scrub from where it left off
	 * previously (btrfs-progs does this).
	 * If we fail to copy the btrfs_ioctl_scrub_args structure to user space
	 * then return -EFAULT to signal the structure was not copied or it may
	 * be corrupt and unreliable due to a partial copy.
	 */
	if (copy_to_user(arg, sa, sizeof(*sa)))
J
Jan Schmidt 已提交
3671 3672
		ret = -EFAULT;

M
Miao Xie 已提交
3673 3674 3675
	if (!(sa->flags & BTRFS_SCRUB_READONLY))
		mnt_drop_write_file(file);
out:
J
Jan Schmidt 已提交
3676 3677 3678 3679
	kfree(sa);
	return ret;
}

3680
static long btrfs_ioctl_scrub_cancel(struct btrfs_fs_info *fs_info)
J
Jan Schmidt 已提交
3681 3682 3683 3684
{
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

3685
	return btrfs_scrub_cancel(fs_info);
J
Jan Schmidt 已提交
3686 3687
}

3688
static long btrfs_ioctl_scrub_progress(struct btrfs_fs_info *fs_info,
J
Jan Schmidt 已提交
3689 3690 3691 3692 3693 3694 3695 3696 3697 3698 3699 3700
				       void __user *arg)
{
	struct btrfs_ioctl_scrub_args *sa;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	sa = memdup_user(arg, sizeof(*sa));
	if (IS_ERR(sa))
		return PTR_ERR(sa);

3701
	ret = btrfs_scrub_progress(fs_info, sa->devid, &sa->progress);
J
Jan Schmidt 已提交
3702

3703
	if (ret == 0 && copy_to_user(arg, sa, sizeof(*sa)))
J
Jan Schmidt 已提交
3704 3705 3706 3707 3708 3709
		ret = -EFAULT;

	kfree(sa);
	return ret;
}

3710
static long btrfs_ioctl_get_dev_stats(struct btrfs_fs_info *fs_info,
3711
				      void __user *arg)
3712 3713 3714 3715 3716 3717 3718 3719
{
	struct btrfs_ioctl_get_dev_stats *sa;
	int ret;

	sa = memdup_user(arg, sizeof(*sa));
	if (IS_ERR(sa))
		return PTR_ERR(sa);

3720 3721 3722 3723 3724
	if ((sa->flags & BTRFS_DEV_STATS_RESET) && !capable(CAP_SYS_ADMIN)) {
		kfree(sa);
		return -EPERM;
	}

3725
	ret = btrfs_get_dev_stats(fs_info, sa);
3726

3727
	if (ret == 0 && copy_to_user(arg, sa, sizeof(*sa)))
3728 3729 3730 3731 3732 3733
		ret = -EFAULT;

	kfree(sa);
	return ret;
}

3734 3735
/*
 * Handle BTRFS_IOC_DEV_REPLACE: start, query or cancel a device replace
 * operation depending on p->cmd.
 *
 * Starting a replace takes the dev-replace exclusive op slot; status and
 * cancel do not need it.  Results are copied back for both success and
 * -ECANCELED so userspace can see how far the replace got.
 *
 * Returns 0, -ECANCELED, BTRFS_ERROR_DEV_EXCL_RUN_IN_PROGRESS, or a
 * negative errno.
 */
static long btrfs_ioctl_dev_replace(struct btrfs_fs_info *fs_info,
				    void __user *arg)
{
	struct btrfs_ioctl_dev_replace_args *p;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	p = memdup_user(arg, sizeof(*p));
	if (IS_ERR(p))
		return PTR_ERR(p);

	switch (p->cmd) {
	case BTRFS_IOCTL_DEV_REPLACE_CMD_START:
		if (sb_rdonly(fs_info->sb)) {
			ret = -EROFS;
			goto out;
		}
		if (!btrfs_exclop_start(fs_info, BTRFS_EXCLOP_DEV_REPLACE)) {
			ret = BTRFS_ERROR_DEV_EXCL_RUN_IN_PROGRESS;
		} else {
			ret = btrfs_dev_replace_by_ioctl(fs_info, p);
			btrfs_exclop_finish(fs_info);
		}
		break;
	case BTRFS_IOCTL_DEV_REPLACE_CMD_STATUS:
		btrfs_dev_replace_status(fs_info, p);
		ret = 0;
		break;
	case BTRFS_IOCTL_DEV_REPLACE_CMD_CANCEL:
		p->result = btrfs_dev_replace_cancel(fs_info);
		ret = 0;
		break;
	default:
		ret = -EINVAL;
		break;
	}

	/* Copy results back on success AND on cancel (progress matters). */
	if ((ret == 0 || ret == -ECANCELED) && copy_to_user(arg, p, sizeof(*p)))
		ret = -EFAULT;
out:
	kfree(p);
	return ret;
}

3780 3781 3782 3783
/*
 * Handle BTRFS_IOC_INO_PATHS: resolve an inode number to all of its
 * paths within the subvolume and copy them to the user buffer.
 *
 * The val[] entries are converted from kernel pointers into offsets
 * relative to the start of the fspath buffer before copying, so they are
 * meaningful in the userspace copy.
 *
 * Returns 0 on success or a negative errno.
 */
static long btrfs_ioctl_ino_to_path(struct btrfs_root *root, void __user *arg)
{
	int ret = 0;
	int i;
	u64 rel_ptr;
	int size;
	struct btrfs_ioctl_ino_path_args *ipa = NULL;
	struct inode_fs_paths *ipath = NULL;
	struct btrfs_path *path;

	if (!capable(CAP_DAC_READ_SEARCH))
		return -EPERM;

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}

	ipa = memdup_user(arg, sizeof(*ipa));
	if (IS_ERR(ipa)) {
		ret = PTR_ERR(ipa);
		ipa = NULL;	/* so the cleanup kfree() is a no-op */
		goto out;
	}

	/* Cap the result buffer at one page worth of path data. */
	size = min_t(u32, ipa->size, 4096);
	ipath = init_ipath(size, root, path);
	if (IS_ERR(ipath)) {
		ret = PTR_ERR(ipath);
		ipath = NULL;	/* so free_ipath() in cleanup is safe */
		goto out;
	}

	ret = paths_from_inode(ipa->inum, ipath);
	if (ret < 0)
		goto out;

	/* Turn in-kernel pointers into offsets usable by userspace. */
	for (i = 0; i < ipath->fspath->elem_cnt; ++i) {
		rel_ptr = ipath->fspath->val[i] -
			  (u64)(unsigned long)ipath->fspath->val;
		ipath->fspath->val[i] = rel_ptr;
	}

	ret = copy_to_user((void __user *)(unsigned long)ipa->fspath,
			   ipath->fspath, size);
	if (ret) {
		ret = -EFAULT;
		goto out;
	}

out:
	btrfs_free_path(path);
	free_ipath(ipath);
	kfree(ipa);

	return ret;
}

/*
 * Backref-iteration callback: append one (inum, offset, root) triple to
 * the btrfs_data_container passed as @ctx, or account it as missed when
 * the container is out of space.  Always returns 0 so iteration goes on.
 */
static int build_ino_list(u64 inum, u64 offset, u64 root, void *ctx)
{
	struct btrfs_data_container *inodes = ctx;
	const size_t needed = 3 * sizeof(u64);

	if (inodes->bytes_left < needed) {
		/* Record how much extra room the caller would have needed. */
		inodes->bytes_missing += needed - inodes->bytes_left;
		inodes->bytes_left = 0;
		inodes->elem_missed += 3;
		return 0;
	}

	inodes->bytes_left -= needed;
	inodes->val[inodes->elem_cnt++] = inum;
	inodes->val[inodes->elem_cnt++] = offset;
	inodes->val[inodes->elem_cnt++] = root;

	return 0;
}

3859
/*
 * Handle BTRFS_IOC_LOGICAL_INO (version 1) and BTRFS_IOC_LOGICAL_INO_V2:
 * map a logical byte address to all inodes referencing it.
 *
 * @version: 1 limits the result buffer to 64K and has no flags; 2 allows
 *           up to 16M and supports BTRFS_LOGICAL_INO_ARGS_IGNORE_OFFSET.
 *
 * Returns 0 on success, -ENOENT if nothing references the address, or
 * another negative errno.
 */
static long btrfs_ioctl_logical_to_ino(struct btrfs_fs_info *fs_info,
					void __user *arg, int version)
{
	int ret = 0;
	int size;
	struct btrfs_ioctl_logical_ino_args *loi;
	struct btrfs_data_container *inodes = NULL;
	struct btrfs_path *path = NULL;
	bool ignore_offset;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	loi = memdup_user(arg, sizeof(*loi));
	if (IS_ERR(loi))
		return PTR_ERR(loi);

	if (version == 1) {
		ignore_offset = false;
		size = min_t(u32, loi->size, SZ_64K);
	} else {
		/* All reserved bits must be 0 for now */
		if (memchr_inv(loi->reserved, 0, sizeof(loi->reserved))) {
			ret = -EINVAL;
			goto out_loi;
		}
		/* Only accept flags we have defined so far */
		if (loi->flags & ~(BTRFS_LOGICAL_INO_ARGS_IGNORE_OFFSET)) {
			ret = -EINVAL;
			goto out_loi;
		}
		ignore_offset = loi->flags & BTRFS_LOGICAL_INO_ARGS_IGNORE_OFFSET;
		size = min_t(u32, loi->size, SZ_16M);
	}

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}

	inodes = init_data_container(size);
	if (IS_ERR(inodes)) {
		ret = PTR_ERR(inodes);
		inodes = NULL;	/* so kvfree() in cleanup is safe */
		goto out;
	}

	ret = iterate_inodes_from_logical(loi->logical, fs_info, path,
					  build_ino_list, inodes, ignore_offset);
	/* -EINVAL from the iterator means "no extent at that address". */
	if (ret == -EINVAL)
		ret = -ENOENT;
	if (ret < 0)
		goto out;

	ret = copy_to_user((void __user *)(unsigned long)loi->inodes, inodes,
			   size);
	if (ret)
		ret = -EFAULT;

out:
	btrfs_free_path(path);
	kvfree(inodes);
out_loi:
	kfree(loi);

	return ret;
}

3928
/*
 * Fill @bargs with the current state of the running/paused balance
 * (flags, state bits, per-chunk-type filter args and statistics).
 *
 * Caller must ensure fs_info->balance_ctl is non-NULL (i.e. hold
 * balance_mutex or otherwise know a balance item exists).
 */
void btrfs_update_ioctl_balance_args(struct btrfs_fs_info *fs_info,
			       struct btrfs_ioctl_balance_args *bargs)
{
	struct btrfs_balance_control *bctl = fs_info->balance_ctl;

	bargs->flags = bctl->flags;

	if (test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags))
		bargs->state |= BTRFS_BALANCE_STATE_RUNNING;
	if (atomic_read(&fs_info->balance_pause_req))
		bargs->state |= BTRFS_BALANCE_STATE_PAUSE_REQ;
	if (atomic_read(&fs_info->balance_cancel_req))
		bargs->state |= BTRFS_BALANCE_STATE_CANCEL_REQ;

	memcpy(&bargs->data, &bctl->data, sizeof(bargs->data));
	memcpy(&bargs->meta, &bctl->meta, sizeof(bargs->meta));
	memcpy(&bargs->sys, &bctl->sys, sizeof(bargs->sys));

	/* stat is updated concurrently by the balance worker. */
	spin_lock(&fs_info->balance_lock);
	memcpy(&bargs->stat, &bctl->stat, sizeof(bargs->stat));
	spin_unlock(&fs_info->balance_lock);
}

3951
/*
 * Handle BTRFS_IOC_BALANCE_V2: start or resume a balance.
 *
 * The tricky part is arbitration with the exclusive-operation machinery:
 * if btrfs_exclop_start() fails, either another op is running (error), a
 * balance is running (-EINPROGRESS), or a balance is *paused* -- in which
 * case we may legitimately resume it without owning the exclop slot.
 *
 * On a successful handoff to btrfs_balance(), ownership of both bctl and
 * the exclusive op transfers to it; we must not release them here.
 *
 * Returns 0, -EINPROGRESS, BTRFS_ERROR_DEV_EXCL_RUN_IN_PROGRESS, or a
 * negative errno.
 */
static long btrfs_ioctl_balance(struct file *file, void __user *arg)
{
	struct btrfs_root *root = BTRFS_I(file_inode(file))->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_ioctl_balance_args *bargs;
	struct btrfs_balance_control *bctl;
	bool need_unlock; /* for mut. excl. ops lock */
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	ret = mnt_want_write_file(file);
	if (ret)
		return ret;

again:
	if (btrfs_exclop_start(fs_info, BTRFS_EXCLOP_BALANCE)) {
		mutex_lock(&fs_info->balance_mutex);
		need_unlock = true;
		goto locked;
	}

	/*
	 * mut. excl. ops lock is locked.  Three possibilities:
	 *   (1) some other op is running
	 *   (2) balance is running
	 *   (3) balance is paused -- special case (think resume)
	 */
	mutex_lock(&fs_info->balance_mutex);
	if (fs_info->balance_ctl) {
		/* this is either (2) or (3) */
		if (!test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)) {
			mutex_unlock(&fs_info->balance_mutex);
			/*
			 * Lock released to allow other waiters to continue,
			 * we'll reexamine the status again.
			 */
			mutex_lock(&fs_info->balance_mutex);

			if (fs_info->balance_ctl &&
			    !test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)) {
				/* this is (3) */
				need_unlock = false;
				goto locked;
			}

			mutex_unlock(&fs_info->balance_mutex);
			goto again;
		} else {
			/* this is (2) */
			mutex_unlock(&fs_info->balance_mutex);
			ret = -EINPROGRESS;
			goto out;
		}
	} else {
		/* this is (1) */
		mutex_unlock(&fs_info->balance_mutex);
		ret = BTRFS_ERROR_DEV_EXCL_RUN_IN_PROGRESS;
		goto out;
	}

locked:

	if (arg) {
		bargs = memdup_user(arg, sizeof(*bargs));
		if (IS_ERR(bargs)) {
			ret = PTR_ERR(bargs);
			goto out_unlock;
		}

		if (bargs->flags & BTRFS_BALANCE_RESUME) {
			if (!fs_info->balance_ctl) {
				ret = -ENOTCONN;
				goto out_bargs;
			}

			/* Resume reuses the paused balance's control struct. */
			bctl = fs_info->balance_ctl;
			spin_lock(&fs_info->balance_lock);
			bctl->flags |= BTRFS_BALANCE_RESUME;
			spin_unlock(&fs_info->balance_lock);

			goto do_balance;
		}
	} else {
		bargs = NULL;
	}

	if (fs_info->balance_ctl) {
		ret = -EINPROGRESS;
		goto out_bargs;
	}

	bctl = kzalloc(sizeof(*bctl), GFP_KERNEL);
	if (!bctl) {
		ret = -ENOMEM;
		goto out_bargs;
	}

	if (arg) {
		memcpy(&bctl->data, &bargs->data, sizeof(bctl->data));
		memcpy(&bctl->meta, &bargs->meta, sizeof(bctl->meta));
		memcpy(&bctl->sys, &bargs->sys, sizeof(bctl->sys));

		bctl->flags = bargs->flags;
	} else {
		/* balance everything - no filters */
		bctl->flags |= BTRFS_BALANCE_TYPE_MASK;
	}

	if (bctl->flags & ~(BTRFS_BALANCE_ARGS_MASK | BTRFS_BALANCE_TYPE_MASK)) {
		ret = -EINVAL;
		goto out_bctl;
	}

do_balance:
	/*
	 * Ownership of bctl and exclusive operation goes to btrfs_balance.
	 * bctl is freed in reset_balance_state, or, if restriper was paused
	 * all the way until unmount, in free_fs_info.  The flag should be
	 * cleared after reset_balance_state.
	 */
	need_unlock = false;

	ret = btrfs_balance(fs_info, bctl, bargs);
	bctl = NULL;

	if ((ret == 0 || ret == -ECANCELED) && arg) {
		if (copy_to_user(arg, bargs, sizeof(*bargs)))
			ret = -EFAULT;
	}

out_bctl:
	kfree(bctl);
out_bargs:
	kfree(bargs);
out_unlock:
	mutex_unlock(&fs_info->balance_mutex);
	if (need_unlock)
		btrfs_exclop_finish(fs_info);
out:
	mnt_drop_write_file(file);
	return ret;
}

4096
static long btrfs_ioctl_balance_ctl(struct btrfs_fs_info *fs_info, int cmd)
4097 4098 4099 4100 4101 4102
{
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	switch (cmd) {
	case BTRFS_BALANCE_CTL_PAUSE:
4103
		return btrfs_pause_balance(fs_info);
4104
	case BTRFS_BALANCE_CTL_CANCEL:
4105
		return btrfs_cancel_balance(fs_info);
4106 4107 4108 4109 4110
	}

	return -EINVAL;
}

4111
/*
 * Handle BTRFS_IOC_BALANCE_PROGRESS: snapshot the state of the current
 * balance into a btrfs_ioctl_balance_args and copy it to userspace.
 *
 * Returns 0 on success, -ENOTCONN if no balance item exists, or another
 * negative errno.
 */
static long btrfs_ioctl_balance_progress(struct btrfs_fs_info *fs_info,
					 void __user *arg)
{
	struct btrfs_ioctl_balance_args *bargs;
	int ret = 0;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	/* balance_mutex keeps balance_ctl stable while we read it. */
	mutex_lock(&fs_info->balance_mutex);
	if (!fs_info->balance_ctl) {
		ret = -ENOTCONN;
		goto out;
	}

	bargs = kzalloc(sizeof(*bargs), GFP_KERNEL);
	if (!bargs) {
		ret = -ENOMEM;
		goto out;
	}

	btrfs_update_ioctl_balance_args(fs_info, bargs);

	if (copy_to_user(arg, bargs, sizeof(*bargs)))
		ret = -EFAULT;

	kfree(bargs);
out:
	mutex_unlock(&fs_info->balance_mutex);
	return ret;
}

4143
/*
 * Enable or disable quotas on the filesystem.
 *
 * Dispatches on sa->cmd (BTRFS_QUOTA_CTL_ENABLE/DISABLE).  Takes
 * subvol_sem for write because enabling/disabling quotas changes
 * global qgroup state.  Requires CAP_SYS_ADMIN and a writable mount.
 */
static long btrfs_ioctl_quota_ctl(struct file *file, void __user *arg)
{
	struct inode *inode = file_inode(file);
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_ioctl_quota_ctl_args *sa;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	/* Pin write access to the mount for the duration of the change. */
	ret = mnt_want_write_file(file);
	if (ret)
		return ret;

	sa = memdup_user(arg, sizeof(*sa));
	if (IS_ERR(sa)) {
		ret = PTR_ERR(sa);
		goto drop_write;
	}

	down_write(&fs_info->subvol_sem);

	switch (sa->cmd) {
	case BTRFS_QUOTA_CTL_ENABLE:
		ret = btrfs_quota_enable(fs_info);
		break;
	case BTRFS_QUOTA_CTL_DISABLE:
		ret = btrfs_quota_disable(fs_info);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	kfree(sa);
	up_write(&fs_info->subvol_sem);
drop_write:
	mnt_drop_write_file(file);
	return ret;
}

4184
/*
 * Add or remove a qgroup relation (sa->src -> sa->dst).
 *
 * sa->assign selects add vs. delete.  Runs inside a joined transaction;
 * after changing the relation, btrfs_run_qgroups() updates the on-disk
 * qgroup status/info items.  A failure there is reported via
 * btrfs_handle_fs_error() but does not override the primary return code.
 */
static long btrfs_ioctl_qgroup_assign(struct file *file, void __user *arg)
{
	struct inode *inode = file_inode(file);
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_ioctl_qgroup_assign_args *sa;
	struct btrfs_trans_handle *trans;
	int ret;
	int err;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	ret = mnt_want_write_file(file);
	if (ret)
		return ret;

	sa = memdup_user(arg, sizeof(*sa));
	if (IS_ERR(sa)) {
		ret = PTR_ERR(sa);
		goto drop_write;
	}

	trans = btrfs_join_transaction(root);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto out;
	}

	if (sa->assign) {
		ret = btrfs_add_qgroup_relation(trans, sa->src, sa->dst);
	} else {
		ret = btrfs_del_qgroup_relation(trans, sa->src, sa->dst);
	}

	/* update qgroup status and info */
	err = btrfs_run_qgroups(trans);
	if (err < 0)
		btrfs_handle_fs_error(fs_info, err,
				      "failed to update qgroup status and info");
	/* End the transaction; preserve the first error seen. */
	err = btrfs_end_transaction(trans);
	if (err && !ret)
		ret = err;

out:
	kfree(sa);
drop_write:
	mnt_drop_write_file(file);
	return ret;
}

4235
/*
 * Create or remove a qgroup with the given id.
 *
 * sa->create selects create vs. remove.  A qgroupid of 0 is rejected
 * since 0 is not a valid qgroup.  Requires CAP_SYS_ADMIN and a
 * writable mount.
 */
static long btrfs_ioctl_qgroup_create(struct file *file, void __user *arg)
{
	struct inode *inode = file_inode(file);
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_ioctl_qgroup_create_args *sa;
	struct btrfs_trans_handle *trans;
	int ret;
	int err;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	ret = mnt_want_write_file(file);
	if (ret)
		return ret;

	sa = memdup_user(arg, sizeof(*sa));
	if (IS_ERR(sa)) {
		ret = PTR_ERR(sa);
		goto drop_write;
	}

	if (!sa->qgroupid) {
		ret = -EINVAL;
		goto out;
	}

	trans = btrfs_join_transaction(root);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto out;
	}

	if (sa->create) {
		ret = btrfs_create_qgroup(trans, sa->qgroupid);
	} else {
		ret = btrfs_remove_qgroup(trans, sa->qgroupid);
	}

	/* End the transaction; preserve the first error seen. */
	err = btrfs_end_transaction(trans);
	if (err && !ret)
		ret = err;

out:
	kfree(sa);
drop_write:
	mnt_drop_write_file(file);
	return ret;
}

4285
/*
 * Apply limits to a qgroup.
 *
 * If args->qgroupid is 0 the limits are applied to the qgroup of the
 * subvolume the ioctl was issued on.  Requires CAP_SYS_ADMIN and a
 * writable mount.
 */
static long btrfs_ioctl_qgroup_limit(struct file *file, void __user *arg)
{
	struct inode *inode = file_inode(file);
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_ioctl_qgroup_limit_args *args;
	struct btrfs_trans_handle *trans;
	int ret;
	int err;
	u64 qgroupid;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	ret = mnt_want_write_file(file);
	if (ret)
		return ret;

	args = memdup_user(arg, sizeof(*args));
	if (IS_ERR(args)) {
		ret = PTR_ERR(args);
		goto drop_write;
	}

	trans = btrfs_join_transaction(root);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto out;
	}

	/* Qgroup id 0 means "the current subvolume's qgroup". */
	qgroupid = args->qgroupid ? args->qgroupid : root->root_key.objectid;

	ret = btrfs_limit_qgroup(trans, qgroupid, &args->lim);

	/* End the transaction; preserve the first error seen. */
	err = btrfs_end_transaction(trans);
	if (err && !ret)
		ret = err;

out:
	kfree(args);
drop_write:
	mnt_drop_write_file(file);
	return ret;
}

J
Jan Schmidt 已提交
4333 4334
/*
 * Start a qgroup rescan of the whole filesystem.
 *
 * qsa->flags must be zero (no flags are defined for starting a rescan).
 * Requires CAP_SYS_ADMIN and a writable mount.
 */
static long btrfs_ioctl_quota_rescan(struct file *file, void __user *arg)
{
	struct inode *inode = file_inode(file);
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_ioctl_quota_rescan_args *qsa;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	ret = mnt_want_write_file(file);
	if (ret)
		return ret;

	qsa = memdup_user(arg, sizeof(*qsa));
	if (IS_ERR(qsa)) {
		ret = PTR_ERR(qsa);
		goto drop_write;
	}

	if (qsa->flags) {
		ret = -EINVAL;
		goto out;
	}

	ret = btrfs_qgroup_rescan(fs_info);

out:
	kfree(qsa);
drop_write:
	mnt_drop_write_file(file);
	return ret;
}

4367 4368
/*
 * Report whether a qgroup rescan is running and, if so, its progress.
 *
 * args.flags is 1 while a rescan is in progress (args.progress then
 * holds the objectid the rescan has reached), 0 otherwise.
 */
static long btrfs_ioctl_quota_rescan_status(struct btrfs_fs_info *fs_info,
						void __user *arg)
{
	struct btrfs_ioctl_quota_rescan_args args = {0};

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN) {
		args.flags = 1;
		args.progress = fs_info->qgroup_rescan_progress.objectid;
	}

	return copy_to_user(arg, &args, sizeof(args)) ? -EFAULT : 0;
}

4386 4387
/*
 * Block until the currently running qgroup rescan (if any) finishes.
 *
 * Thin privileged wrapper around btrfs_qgroup_wait_for_completion();
 * the 'true' argument selects interruptible waiting.
 */
static long btrfs_ioctl_quota_rescan_wait(struct btrfs_fs_info *fs_info,
						void __user *arg)
{
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	return btrfs_qgroup_wait_for_completion(fs_info, true);
}

4395
static long _btrfs_ioctl_set_received_subvol(struct file *file,
4396
					    struct user_namespace *mnt_userns,
4397
					    struct btrfs_ioctl_received_subvol_args *sa)
4398
{
A
Al Viro 已提交
4399
	struct inode *inode = file_inode(file);
4400
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
4401 4402 4403
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_root_item *root_item = &root->root_item;
	struct btrfs_trans_handle *trans;
4404
	struct timespec64 ct = current_time(inode);
4405
	int ret = 0;
4406
	int received_uuid_changed;
4407

4408
	if (!inode_owner_or_capable(mnt_userns, inode))
4409 4410
		return -EPERM;

4411 4412 4413 4414
	ret = mnt_want_write_file(file);
	if (ret < 0)
		return ret;

4415
	down_write(&fs_info->subvol_sem);
4416

4417
	if (btrfs_ino(BTRFS_I(inode)) != BTRFS_FIRST_FREE_OBJECTID) {
4418 4419 4420 4421 4422 4423 4424 4425 4426
		ret = -EINVAL;
		goto out;
	}

	if (btrfs_root_readonly(root)) {
		ret = -EROFS;
		goto out;
	}

4427 4428 4429 4430 4431
	/*
	 * 1 - root item
	 * 2 - uuid items (received uuid + subvol uuid)
	 */
	trans = btrfs_start_transaction(root, 3);
4432 4433 4434 4435 4436 4437 4438 4439 4440 4441
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		trans = NULL;
		goto out;
	}

	sa->rtransid = trans->transid;
	sa->rtime.sec = ct.tv_sec;
	sa->rtime.nsec = ct.tv_nsec;

4442 4443 4444
	received_uuid_changed = memcmp(root_item->received_uuid, sa->uuid,
				       BTRFS_UUID_SIZE);
	if (received_uuid_changed &&
4445
	    !btrfs_is_empty_uuid(root_item->received_uuid)) {
4446
		ret = btrfs_uuid_tree_remove(trans, root_item->received_uuid,
4447 4448 4449 4450 4451 4452 4453 4454
					  BTRFS_UUID_KEY_RECEIVED_SUBVOL,
					  root->root_key.objectid);
		if (ret && ret != -ENOENT) {
		        btrfs_abort_transaction(trans, ret);
		        btrfs_end_transaction(trans);
		        goto out;
		}
	}
4455 4456 4457
	memcpy(root_item->received_uuid, sa->uuid, BTRFS_UUID_SIZE);
	btrfs_set_root_stransid(root_item, sa->stransid);
	btrfs_set_root_rtransid(root_item, sa->rtransid);
4458 4459 4460 4461
	btrfs_set_stack_timespec_sec(&root_item->stime, sa->stime.sec);
	btrfs_set_stack_timespec_nsec(&root_item->stime, sa->stime.nsec);
	btrfs_set_stack_timespec_sec(&root_item->rtime, sa->rtime.sec);
	btrfs_set_stack_timespec_nsec(&root_item->rtime, sa->rtime.nsec);
4462

4463
	ret = btrfs_update_root(trans, fs_info->tree_root,
4464 4465
				&root->root_key, &root->root_item);
	if (ret < 0) {
4466
		btrfs_end_transaction(trans);
4467
		goto out;
4468 4469
	}
	if (received_uuid_changed && !btrfs_is_empty_uuid(sa->uuid)) {
4470
		ret = btrfs_uuid_tree_add(trans, sa->uuid,
4471 4472 4473
					  BTRFS_UUID_KEY_RECEIVED_SUBVOL,
					  root->root_key.objectid);
		if (ret < 0 && ret != -EEXIST) {
4474
			btrfs_abort_transaction(trans, ret);
4475
			btrfs_end_transaction(trans);
4476
			goto out;
4477 4478
		}
	}
4479
	ret = btrfs_commit_transaction(trans);
4480
out:
4481
	up_write(&fs_info->subvol_sem);
4482 4483 4484 4485 4486 4487 4488 4489 4490 4491 4492 4493 4494
	mnt_drop_write_file(file);
	return ret;
}

#ifdef CONFIG_64BIT
static long btrfs_ioctl_set_received_subvol_32(struct file *file,
						void __user *arg)
{
	struct btrfs_ioctl_received_subvol_args_32 *args32 = NULL;
	struct btrfs_ioctl_received_subvol_args *args64 = NULL;
	int ret = 0;

	args32 = memdup_user(arg, sizeof(*args32));
4495 4496
	if (IS_ERR(args32))
		return PTR_ERR(args32);
4497

4498
	args64 = kmalloc(sizeof(*args64), GFP_KERNEL);
4499 4500
	if (!args64) {
		ret = -ENOMEM;
4501 4502 4503 4504 4505 4506 4507 4508 4509 4510 4511 4512
		goto out;
	}

	memcpy(args64->uuid, args32->uuid, BTRFS_UUID_SIZE);
	args64->stransid = args32->stransid;
	args64->rtransid = args32->rtransid;
	args64->stime.sec = args32->stime.sec;
	args64->stime.nsec = args32->stime.nsec;
	args64->rtime.sec = args32->rtime.sec;
	args64->rtime.nsec = args32->rtime.nsec;
	args64->flags = args32->flags;

4513
	ret = _btrfs_ioctl_set_received_subvol(file, file_mnt_user_ns(file), args64);
4514 4515 4516 4517 4518 4519 4520 4521 4522 4523 4524 4525 4526 4527 4528 4529 4530 4531 4532 4533 4534 4535 4536 4537 4538 4539 4540 4541 4542 4543
	if (ret)
		goto out;

	memcpy(args32->uuid, args64->uuid, BTRFS_UUID_SIZE);
	args32->stransid = args64->stransid;
	args32->rtransid = args64->rtransid;
	args32->stime.sec = args64->stime.sec;
	args32->stime.nsec = args64->stime.nsec;
	args32->rtime.sec = args64->rtime.sec;
	args32->rtime.nsec = args64->rtime.nsec;
	args32->flags = args64->flags;

	ret = copy_to_user(arg, args32, sizeof(*args32));
	if (ret)
		ret = -EFAULT;

out:
	kfree(args32);
	kfree(args64);
	return ret;
}
#endif

/*
 * Native entry point for BTRFS_IOC_SET_RECEIVED_SUBVOL.
 *
 * Copies the args in, delegates to the common helper and, on success,
 * copies the (possibly updated) args back to user space.
 */
static long btrfs_ioctl_set_received_subvol(struct file *file,
					    void __user *arg)
{
	struct btrfs_ioctl_received_subvol_args *args;
	int ret;

	args = memdup_user(arg, sizeof(*args));
	if (IS_ERR(args))
		return PTR_ERR(args);

	ret = _btrfs_ioctl_set_received_subvol(file, file_mnt_user_ns(file),
					       args);
	if (!ret && copy_to_user(arg, args, sizeof(*args)))
		ret = -EFAULT;

	kfree(args);
	return ret;
}

4561 4562
/*
 * Copy the filesystem label to user space.
 *
 * The label is snapshotted under super_lock so a concurrent SETFSLABEL
 * cannot be observed half-written.  A label occupying all
 * BTRFS_LABEL_SIZE bytes has no NUL terminator; in that case only the
 * first BTRFS_LABEL_SIZE - 1 bytes are returned and a warning is
 * logged.
 *
 * Fix vs. previous revision: the truncation was done via a '--len'
 * side effect hidden inside the btrfs_warn() argument list; it is now
 * an explicit statement so the length later passed to copy_to_user()
 * is obviously the decremented one.
 */
static int btrfs_ioctl_get_fslabel(struct btrfs_fs_info *fs_info,
					void __user *arg)
{
	size_t len;
	int ret;
	char label[BTRFS_LABEL_SIZE];

	spin_lock(&fs_info->super_lock);
	memcpy(label, fs_info->super_copy->label, BTRFS_LABEL_SIZE);
	spin_unlock(&fs_info->super_lock);

	len = strnlen(label, BTRFS_LABEL_SIZE);

	if (len == BTRFS_LABEL_SIZE) {
		/* Not NUL-terminated: return one byte less. */
		len--;
		btrfs_warn(fs_info,
			   "label is too long, return the first %zu bytes",
			   len);
	}

	ret = copy_to_user(arg, label, len);

	return ret ? -EFAULT : 0;
}

4585 4586
/*
 * Set the filesystem label.
 *
 * The new label must fit in BTRFS_LABEL_SIZE - 1 bytes (it is stored
 * NUL-terminated).  The in-memory super block copy is updated under
 * super_lock and then persisted by committing a transaction.
 * Requires CAP_SYS_ADMIN and a writable mount.
 */
static int btrfs_ioctl_set_fslabel(struct file *file, void __user *arg)
{
	struct inode *inode = file_inode(file);
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_super_block *super_block = fs_info->super_copy;
	struct btrfs_trans_handle *trans;
	char label[BTRFS_LABEL_SIZE];
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (copy_from_user(label, arg, sizeof(label)))
		return -EFAULT;

	if (strnlen(label, BTRFS_LABEL_SIZE) == BTRFS_LABEL_SIZE) {
		btrfs_err(fs_info,
			  "unable to set label with more than %d bytes",
			  BTRFS_LABEL_SIZE - 1);
		return -EINVAL;
	}

	ret = mnt_want_write_file(file);
	if (ret)
		return ret;

	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto out_unlock;
	}

	/* super_lock serializes label readers (GETFSLABEL) with this write. */
	spin_lock(&fs_info->super_lock);
	strcpy(super_block->label, label);
	spin_unlock(&fs_info->super_lock);
	ret = btrfs_commit_transaction(trans);

out_unlock:
	mnt_drop_write_file(file);
	return ret;
}

4628 4629 4630 4631 4632
/* Build one btrfs_ioctl_feature_flags entry from the three
 * BTRFS_FEATURE_*_<suffix> mask families (SUPP / SAFE_SET / SAFE_CLEAR). */
#define INIT_FEATURE_FLAGS(suffix) \
	{ .compat_flags = BTRFS_FEATURE_COMPAT_##suffix, \
	  .compat_ro_flags = BTRFS_FEATURE_COMPAT_RO_##suffix, \
	  .incompat_flags = BTRFS_FEATURE_INCOMPAT_##suffix }

/*
 * Report which feature bits this kernel supports and which may be set
 * or cleared on a mounted filesystem.  The three array entries are, in
 * order: supported, safe-to-set, safe-to-clear.  Purely informational,
 * so no capability check is needed.
 */
int btrfs_ioctl_get_supported_features(void __user *arg)
{
	static const struct btrfs_ioctl_feature_flags features[3] = {
		INIT_FEATURE_FLAGS(SUPP),
		INIT_FEATURE_FLAGS(SAFE_SET),
		INIT_FEATURE_FLAGS(SAFE_CLEAR)
	};

	if (copy_to_user(arg, &features, sizeof(features)))
		return -EFAULT;

	return 0;
}

4647 4648
/*
 * Report the feature bits currently enabled on this filesystem, read
 * from the in-memory super block copy.
 */
static int btrfs_ioctl_get_features(struct btrfs_fs_info *fs_info,
					void __user *arg)
{
	struct btrfs_super_block *sb = fs_info->super_copy;
	struct btrfs_ioctl_feature_flags features = {
		.compat_flags = btrfs_super_compat_flags(sb),
		.compat_ro_flags = btrfs_super_compat_ro_flags(sb),
		.incompat_flags = btrfs_super_incompat_flags(sb),
	};

	if (copy_to_user(arg, &features, sizeof(features)))
		return -EFAULT;

	return 0;
}

4663
/*
 * Validate one feature-flag family (compat / compat_ro / incompat) of a
 * SET_FEATURES request against what this kernel supports and what is
 * safe to change on a mounted filesystem.
 *
 * @change_mask:     bits the caller wants to change
 * @flags:           desired final values of those bits
 * @supported_flags: bits this kernel knows about
 * @safe_set:        bits that may be turned on while mounted
 * @safe_clear:      bits that may be turned off while mounted
 *
 * Returns 0 if the change is acceptable, -EOPNOTSUPP for unknown bits,
 * -EPERM for bits that cannot be set/cleared on a mounted fs.  Each
 * rejection logs either the human-readable feature names (when
 * btrfs_printable_features() can allocate them) or the raw bit mask.
 *
 * Fix vs. previous revision: the three 'if { } else' constructs braced
 * only one branch; per kernel coding style all branches get braces when
 * any branch needs them.
 */
static int check_feature_bits(struct btrfs_fs_info *fs_info,
			      enum btrfs_feature_set set,
			      u64 change_mask, u64 flags, u64 supported_flags,
			      u64 safe_set, u64 safe_clear)
{
	const char *type = btrfs_feature_set_name(set);
	char *names;
	u64 disallowed, unsupported;
	u64 set_mask = flags & change_mask;
	u64 clear_mask = ~flags & change_mask;

	unsupported = set_mask & ~supported_flags;
	if (unsupported) {
		names = btrfs_printable_features(set, unsupported);
		if (names) {
			btrfs_warn(fs_info,
				   "this kernel does not support the %s feature bit%s",
				   names, strchr(names, ',') ? "s" : "");
			kfree(names);
		} else {
			btrfs_warn(fs_info,
				   "this kernel does not support %s bits 0x%llx",
				   type, unsupported);
		}
		return -EOPNOTSUPP;
	}

	disallowed = set_mask & ~safe_set;
	if (disallowed) {
		names = btrfs_printable_features(set, disallowed);
		if (names) {
			btrfs_warn(fs_info,
				   "can't set the %s feature bit%s while mounted",
				   names, strchr(names, ',') ? "s" : "");
			kfree(names);
		} else {
			btrfs_warn(fs_info,
				   "can't set %s bits 0x%llx while mounted",
				   type, disallowed);
		}
		return -EPERM;
	}

	disallowed = clear_mask & ~safe_clear;
	if (disallowed) {
		names = btrfs_printable_features(set, disallowed);
		if (names) {
			btrfs_warn(fs_info,
				   "can't clear the %s feature bit%s while mounted",
				   names, strchr(names, ',') ? "s" : "");
			kfree(names);
		} else {
			btrfs_warn(fs_info,
				   "can't clear %s bits 0x%llx while mounted",
				   type, disallowed);
		}
		return -EPERM;
	}

	return 0;
}

4722 4723
/* Expand the per-family SUPP/SAFE_SET/SAFE_CLEAR masks for
 * check_feature_bits(); mask_base is COMPAT, COMPAT_RO or INCOMPAT. */
#define check_feature(fs_info, change_mask, flags, mask_base)	\
check_feature_bits(fs_info, FEAT_##mask_base, change_mask, flags,	\
		   BTRFS_FEATURE_ ## mask_base ## _SUPP,	\
		   BTRFS_FEATURE_ ## mask_base ## _SAFE_SET,	\
		   BTRFS_FEATURE_ ## mask_base ## _SAFE_CLEAR)

/*
 * Change feature bits on a mounted filesystem.
 *
 * flags[0] is the mask of bits to change, flags[1] the desired values.
 * All three families are validated first; only then is the super block
 * copy updated (under super_lock) and persisted via a transaction
 * commit.  Requires CAP_SYS_ADMIN and a writable mount.
 */
static int btrfs_ioctl_set_features(struct file *file, void __user *arg)
{
	struct inode *inode = file_inode(file);
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_super_block *super_block = fs_info->super_copy;
	struct btrfs_ioctl_feature_flags flags[2];
	struct btrfs_trans_handle *trans;
	u64 newflags;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (copy_from_user(flags, arg, sizeof(flags)))
		return -EFAULT;

	/* Nothing to do */
	if (!flags[0].compat_flags && !flags[0].compat_ro_flags &&
	    !flags[0].incompat_flags)
		return 0;

	ret = check_feature(fs_info, flags[0].compat_flags,
			    flags[1].compat_flags, COMPAT);
	if (ret)
		return ret;

	ret = check_feature(fs_info, flags[0].compat_ro_flags,
			    flags[1].compat_ro_flags, COMPAT_RO);
	if (ret)
		return ret;

	ret = check_feature(fs_info, flags[0].incompat_flags,
			    flags[1].incompat_flags, INCOMPAT);
	if (ret)
		return ret;

	ret = mnt_want_write_file(file);
	if (ret)
		return ret;

	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto out_drop_write;
	}

	/* Apply set bits and clear cleared bits in each family. */
	spin_lock(&fs_info->super_lock);
	newflags = btrfs_super_compat_flags(super_block);
	newflags |= flags[0].compat_flags & flags[1].compat_flags;
	newflags &= ~(flags[0].compat_flags & ~flags[1].compat_flags);
	btrfs_set_super_compat_flags(super_block, newflags);

	newflags = btrfs_super_compat_ro_flags(super_block);
	newflags |= flags[0].compat_ro_flags & flags[1].compat_ro_flags;
	newflags &= ~(flags[0].compat_ro_flags & ~flags[1].compat_ro_flags);
	btrfs_set_super_compat_ro_flags(super_block, newflags);

	newflags = btrfs_super_incompat_flags(super_block);
	newflags |= flags[0].incompat_flags & flags[1].incompat_flags;
	newflags &= ~(flags[0].incompat_flags & ~flags[1].incompat_flags);
	btrfs_set_super_incompat_flags(super_block, newflags);
	spin_unlock(&fs_info->super_lock);

	ret = btrfs_commit_transaction(trans);
out_drop_write:
	mnt_drop_write_file(file);

	return ret;
}

4799 4800 4801 4802 4803 4804 4805 4806 4807 4808 4809 4810 4811 4812 4813 4814 4815 4816 4817 4818 4819 4820 4821 4822 4823 4824 4825 4826 4827 4828 4829 4830 4831 4832 4833
/*
 * Common entry for BTRFS_IOC_SEND and its 32-bit compat variant.
 *
 * When @compat is true (only possible on 64-bit kernels with
 * CONFIG_COMPAT) the 32-bit args layout is converted field-by-field
 * into the native struct; otherwise the native struct is copied
 * directly.  Ownership of the allocated args stays here; it is freed
 * after btrfs_ioctl_send() returns.
 */
static int _btrfs_ioctl_send(struct file *file, void __user *argp, bool compat)
{
	struct btrfs_ioctl_send_args *arg;
	int ret;

	if (compat) {
#if defined(CONFIG_64BIT) && defined(CONFIG_COMPAT)
		struct btrfs_ioctl_send_args_32 args32;

		ret = copy_from_user(&args32, argp, sizeof(args32));
		if (ret)
			return -EFAULT;
		arg = kzalloc(sizeof(*arg), GFP_KERNEL);
		if (!arg)
			return -ENOMEM;
		/* Widen the 32-bit layout; clone_sources is a compat pointer. */
		arg->send_fd = args32.send_fd;
		arg->clone_sources_count = args32.clone_sources_count;
		arg->clone_sources = compat_ptr(args32.clone_sources);
		arg->parent_root = args32.parent_root;
		arg->flags = args32.flags;
		memcpy(arg->reserved, args32.reserved,
		       sizeof(args32.reserved));
#else
		return -ENOTTY;
#endif
	} else {
		arg = memdup_user(argp, sizeof(*arg));
		if (IS_ERR(arg))
			return PTR_ERR(arg);
	}
	ret = btrfs_ioctl_send(file, arg);
	kfree(arg);
	return ret;
}

C
Christoph Hellwig 已提交
4834 4835 4836
/*
 * Main btrfs ioctl dispatcher.
 *
 * Decodes the command and forwards to the matching handler; permission
 * and locking checks live in the handlers themselves.  Returns -ENOTTY
 * for commands btrfs does not implement, per ioctl convention.
 */
long btrfs_ioctl(struct file *file, unsigned int
		cmd, unsigned long arg)
{
	struct inode *inode = file_inode(file);
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_root *root = BTRFS_I(inode)->root;
	void __user *argp = (void __user *)arg;

	switch (cmd) {
	/* Generic VFS ioctls implemented by btrfs */
	case FS_IOC_GETVERSION:
		return btrfs_ioctl_getversion(file, argp);
	case FS_IOC_GETFSLABEL:
		return btrfs_ioctl_get_fslabel(fs_info, argp);
	case FS_IOC_SETFSLABEL:
		return btrfs_ioctl_set_fslabel(file, argp);
	case FITRIM:
		return btrfs_ioctl_fitrim(fs_info, argp);
	/* Snapshot and subvolume management */
	case BTRFS_IOC_SNAP_CREATE:
		return btrfs_ioctl_snap_create(file, argp, 0);
	case BTRFS_IOC_SNAP_CREATE_V2:
		return btrfs_ioctl_snap_create_v2(file, argp, 0);
	case BTRFS_IOC_SUBVOL_CREATE:
		return btrfs_ioctl_snap_create(file, argp, 1);
	case BTRFS_IOC_SUBVOL_CREATE_V2:
		return btrfs_ioctl_snap_create_v2(file, argp, 1);
	case BTRFS_IOC_SNAP_DESTROY:
		return btrfs_ioctl_snap_destroy(file, argp, false);
	case BTRFS_IOC_SNAP_DESTROY_V2:
		return btrfs_ioctl_snap_destroy(file, argp, true);
	case BTRFS_IOC_SUBVOL_GETFLAGS:
		return btrfs_ioctl_subvol_getflags(file, argp);
	case BTRFS_IOC_SUBVOL_SETFLAGS:
		return btrfs_ioctl_subvol_setflags(file, argp);
	case BTRFS_IOC_DEFAULT_SUBVOL:
		return btrfs_ioctl_default_subvol(file, argp);
	/* Defrag, resize and device management */
	case BTRFS_IOC_DEFRAG:
		return btrfs_ioctl_defrag(file, NULL);
	case BTRFS_IOC_DEFRAG_RANGE:
		return btrfs_ioctl_defrag(file, argp);
	case BTRFS_IOC_RESIZE:
		return btrfs_ioctl_resize(file, argp);
	case BTRFS_IOC_ADD_DEV:
		return btrfs_ioctl_add_dev(fs_info, argp);
	case BTRFS_IOC_RM_DEV:
		return btrfs_ioctl_rm_dev(file, argp);
	case BTRFS_IOC_RM_DEV_V2:
		return btrfs_ioctl_rm_dev_v2(file, argp);
	case BTRFS_IOC_FS_INFO:
		return btrfs_ioctl_fs_info(fs_info, argp);
	case BTRFS_IOC_DEV_INFO:
		return btrfs_ioctl_dev_info(fs_info, argp);
	/* Balance */
	case BTRFS_IOC_BALANCE:
		return btrfs_ioctl_balance(file, NULL);
	/* Tree search and inode/path resolution */
	case BTRFS_IOC_TREE_SEARCH:
		return btrfs_ioctl_tree_search(file, argp);
	case BTRFS_IOC_TREE_SEARCH_V2:
		return btrfs_ioctl_tree_search_v2(file, argp);
	case BTRFS_IOC_INO_LOOKUP:
		return btrfs_ioctl_ino_lookup(file, argp);
	case BTRFS_IOC_INO_PATHS:
		return btrfs_ioctl_ino_to_path(root, argp);
	case BTRFS_IOC_LOGICAL_INO:
		return btrfs_ioctl_logical_to_ino(fs_info, argp, 1);
	case BTRFS_IOC_LOGICAL_INO_V2:
		return btrfs_ioctl_logical_to_ino(fs_info, argp, 2);
	case BTRFS_IOC_SPACE_INFO:
		return btrfs_ioctl_space_info(fs_info, argp);
	case BTRFS_IOC_SYNC: {
		int ret;

		/* Flush dirty data first so the sync covers it. */
		ret = btrfs_start_delalloc_roots(fs_info, LONG_MAX, false);
		if (ret)
			return ret;
		ret = btrfs_sync_fs(inode->i_sb, 1);
		/*
		 * The transaction thread may want to do more work,
		 * namely it pokes the cleaner kthread that will start
		 * processing uncleaned subvols.
		 */
		wake_up_process(fs_info->transaction_kthread);
		return ret;
	}
	case BTRFS_IOC_START_SYNC:
		return btrfs_ioctl_start_sync(root, argp);
	case BTRFS_IOC_WAIT_SYNC:
		return btrfs_ioctl_wait_sync(fs_info, argp);
	/* Scrub */
	case BTRFS_IOC_SCRUB:
		return btrfs_ioctl_scrub(file, argp);
	case BTRFS_IOC_SCRUB_CANCEL:
		return btrfs_ioctl_scrub_cancel(fs_info);
	case BTRFS_IOC_SCRUB_PROGRESS:
		return btrfs_ioctl_scrub_progress(fs_info, argp);
	/* Balance v2 and control */
	case BTRFS_IOC_BALANCE_V2:
		return btrfs_ioctl_balance(file, argp);
	case BTRFS_IOC_BALANCE_CTL:
		return btrfs_ioctl_balance_ctl(fs_info, arg);
	case BTRFS_IOC_BALANCE_PROGRESS:
		return btrfs_ioctl_balance_progress(fs_info, argp);
	/* Send/receive */
	case BTRFS_IOC_SET_RECEIVED_SUBVOL:
		return btrfs_ioctl_set_received_subvol(file, argp);
#ifdef CONFIG_64BIT
	case BTRFS_IOC_SET_RECEIVED_SUBVOL_32:
		return btrfs_ioctl_set_received_subvol_32(file, argp);
#endif
	case BTRFS_IOC_SEND:
		return _btrfs_ioctl_send(file, argp, false);
#if defined(CONFIG_64BIT) && defined(CONFIG_COMPAT)
	case BTRFS_IOC_SEND_32:
		return _btrfs_ioctl_send(file, argp, true);
#endif
	case BTRFS_IOC_GET_DEV_STATS:
		return btrfs_ioctl_get_dev_stats(fs_info, argp);
	/* Quota / qgroups */
	case BTRFS_IOC_QUOTA_CTL:
		return btrfs_ioctl_quota_ctl(file, argp);
	case BTRFS_IOC_QGROUP_ASSIGN:
		return btrfs_ioctl_qgroup_assign(file, argp);
	case BTRFS_IOC_QGROUP_CREATE:
		return btrfs_ioctl_qgroup_create(file, argp);
	case BTRFS_IOC_QGROUP_LIMIT:
		return btrfs_ioctl_qgroup_limit(file, argp);
	case BTRFS_IOC_QUOTA_RESCAN:
		return btrfs_ioctl_quota_rescan(file, argp);
	case BTRFS_IOC_QUOTA_RESCAN_STATUS:
		return btrfs_ioctl_quota_rescan_status(fs_info, argp);
	case BTRFS_IOC_QUOTA_RESCAN_WAIT:
		return btrfs_ioctl_quota_rescan_wait(fs_info, argp);
	case BTRFS_IOC_DEV_REPLACE:
		return btrfs_ioctl_dev_replace(fs_info, argp);
	/* Feature flags */
	case BTRFS_IOC_GET_SUPPORTED_FEATURES:
		return btrfs_ioctl_get_supported_features(argp);
	case BTRFS_IOC_GET_FEATURES:
		return btrfs_ioctl_get_features(fs_info, argp);
	case BTRFS_IOC_SET_FEATURES:
		return btrfs_ioctl_set_features(file, argp);
	case BTRFS_IOC_GET_SUBVOL_INFO:
		return btrfs_ioctl_get_subvol_info(file, argp);
	case BTRFS_IOC_GET_SUBVOL_ROOTREF:
		return btrfs_ioctl_get_subvol_rootref(file, argp);
	case BTRFS_IOC_INO_LOOKUP_USER:
		return btrfs_ioctl_ino_lookup_user(file, argp);
	/* fs-verity */
	case FS_IOC_ENABLE_VERITY:
		return fsverity_ioctl_enable(file, (const void __user *)argp);
	case FS_IOC_MEASURE_VERITY:
		return fsverity_ioctl_measure(file, argp);
	}

	return -ENOTTY;
}
4982 4983 4984 4985

#ifdef CONFIG_COMPAT
/*
 * Compat ioctl entry: translate the one command whose number differs
 * between 32- and 64-bit ABIs, then hand off to the native dispatcher.
 */
long btrfs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	/*
	 * These all access 32-bit values anyway so no further
	 * handling is necessary.
	 */
	if (cmd == FS_IOC32_GETVERSION)
		cmd = FS_IOC_GETVERSION;

	return btrfs_ioctl(file, cmd, (unsigned long) compat_ptr(arg));
}
#endif