ioctl.c 122.9 KB
Newer Older
1
// SPDX-License-Identifier: GPL-2.0
C
Christoph Hellwig 已提交
2 3 4 5 6 7 8 9
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 */

#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/file.h>
#include <linux/fs.h>
10
#include <linux/fsnotify.h>
C
Christoph Hellwig 已提交
11 12 13 14 15
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/time.h>
#include <linux/string.h>
#include <linux/backing-dev.h>
16 17
#include <linux/mount.h>
#include <linux/namei.h>
C
Christoph Hellwig 已提交
18 19
#include <linux/writeback.h>
#include <linux/compat.h>
20
#include <linux/security.h>
C
Christoph Hellwig 已提交
21
#include <linux/xattr.h>
22
#include <linux/mm.h>
23
#include <linux/slab.h>
24
#include <linux/blkdev.h>
25
#include <linux/uuid.h>
26
#include <linux/btrfs.h>
M
Mark Fasheh 已提交
27
#include <linux/uaccess.h>
28
#include <linux/iversion.h>
M
Miklos Szeredi 已提交
29
#include <linux/fileattr.h>
B
Boris Burkov 已提交
30
#include <linux/fsverity.h>
C
Christoph Hellwig 已提交
31 32
#include "ctree.h"
#include "disk-io.h"
33
#include "export.h"
C
Christoph Hellwig 已提交
34 35 36 37
#include "transaction.h"
#include "btrfs_inode.h"
#include "print-tree.h"
#include "volumes.h"
38
#include "locking.h"
39
#include "backref.h"
40
#include "rcu-string.h"
41
#include "send.h"
42
#include "dev-replace.h"
43
#include "props.h"
44
#include "sysfs.h"
J
Josef Bacik 已提交
45
#include "qgroup.h"
46
#include "tree-log.h"
47
#include "compression.h"
48
#include "space-info.h"
49
#include "delalloc-space.h"
50
#include "block-group.h"
51
#include "subpage.h"
C
Christoph Hellwig 已提交
52

#ifdef CONFIG_64BIT
/* If we have a 32-bit userspace and 64-bit kernel, then the UAPI
 * structures are incorrect, as the timespec structure from userspace
 * is 4 bytes too small. We define these alternatives here to teach
 * the kernel about the 32-bit struct packing.
 */
struct btrfs_ioctl_timespec_32 {
	__u64 sec;
	__u32 nsec;
} __attribute__ ((__packed__));

/* 32-bit layout of btrfs_ioctl_received_subvol_args; only the two
 * timespec members differ from the native 64-bit structure. */
struct btrfs_ioctl_received_subvol_args_32 {
	char	uuid[BTRFS_UUID_SIZE];	/* in */
	__u64	stransid;		/* in */
	__u64	rtransid;		/* out */
	struct btrfs_ioctl_timespec_32 stime; /* in */
	struct btrfs_ioctl_timespec_32 rtime; /* out */
	__u64	flags;			/* in */
	__u64	reserved[16];		/* in */
} __attribute__ ((__packed__));

/* Alternative ioctl number for the 32-bit argument layout above. */
#define BTRFS_IOC_SET_RECEIVED_SUBVOL_32 _IOWR(BTRFS_IOCTL_MAGIC, 37, \
				struct btrfs_ioctl_received_subvol_args_32)
#endif

#if defined(CONFIG_64BIT) && defined(CONFIG_COMPAT)
/* 32-bit compat layout of the send args: clone_sources is a
 * compat_uptr_t here, so the structure (and thus the ioctl number)
 * differs from the native one. */
struct btrfs_ioctl_send_args_32 {
	__s64 send_fd;			/* in */
	__u64 clone_sources_count;	/* in */
	compat_uptr_t clone_sources;	/* in */
	__u64 parent_root;		/* in */
	__u64 flags;			/* in */
	__u64 reserved[4];		/* in */
} __attribute__ ((__packed__));

#define BTRFS_IOC_SEND_32 _IOW(BTRFS_IOCTL_MAGIC, 38, \
			       struct btrfs_ioctl_send_args_32)
#endif
91

92
/* Mask out flags that are inappropriate for the given type of inode. */
93 94
static unsigned int btrfs_mask_fsflags_for_type(struct inode *inode,
		unsigned int flags)
95
{
96
	if (S_ISDIR(inode->i_mode))
97
		return flags;
98
	else if (S_ISREG(inode->i_mode))
99 100 101 102 103 104
		return flags & ~FS_DIRSYNC_FL;
	else
		return flags & (FS_NODUMP_FL | FS_NOATIME_FL);
}

/*
105 106
 * Export internal inode flags to the format expected by the FS_IOC_GETFLAGS
 * ioctl.
107
 */
108
static unsigned int btrfs_inode_flags_to_fsflags(struct btrfs_inode *binode)
109 110
{
	unsigned int iflags = 0;
111
	u32 flags = binode->flags;
B
Boris Burkov 已提交
112
	u32 ro_flags = binode->ro_flags;
113 114 115 116 117 118 119 120 121 122 123 124 125

	if (flags & BTRFS_INODE_SYNC)
		iflags |= FS_SYNC_FL;
	if (flags & BTRFS_INODE_IMMUTABLE)
		iflags |= FS_IMMUTABLE_FL;
	if (flags & BTRFS_INODE_APPEND)
		iflags |= FS_APPEND_FL;
	if (flags & BTRFS_INODE_NODUMP)
		iflags |= FS_NODUMP_FL;
	if (flags & BTRFS_INODE_NOATIME)
		iflags |= FS_NOATIME_FL;
	if (flags & BTRFS_INODE_DIRSYNC)
		iflags |= FS_DIRSYNC_FL;
L
Li Zefan 已提交
126 127
	if (flags & BTRFS_INODE_NODATACOW)
		iflags |= FS_NOCOW_FL;
B
Boris Burkov 已提交
128 129
	if (ro_flags & BTRFS_INODE_RO_VERITY)
		iflags |= FS_VERITY_FL;
L
Li Zefan 已提交
130

131
	if (flags & BTRFS_INODE_NOCOMPRESS)
L
Li Zefan 已提交
132
		iflags |= FS_NOCOMP_FL;
133 134
	else if (flags & BTRFS_INODE_COMPRESS)
		iflags |= FS_COMPR_FL;
135 136 137 138 139 140 141

	return iflags;
}

/*
 * Update inode->i_flags based on the btrfs internal flags.
 */
142
void btrfs_sync_inode_flags_to_i_flags(struct inode *inode)
143
{
144
	struct btrfs_inode *binode = BTRFS_I(inode);
145
	unsigned int new_fl = 0;
146

147
	if (binode->flags & BTRFS_INODE_SYNC)
148
		new_fl |= S_SYNC;
149
	if (binode->flags & BTRFS_INODE_IMMUTABLE)
150
		new_fl |= S_IMMUTABLE;
151
	if (binode->flags & BTRFS_INODE_APPEND)
152
		new_fl |= S_APPEND;
153
	if (binode->flags & BTRFS_INODE_NOATIME)
154
		new_fl |= S_NOATIME;
155
	if (binode->flags & BTRFS_INODE_DIRSYNC)
156
		new_fl |= S_DIRSYNC;
B
Boris Burkov 已提交
157 158
	if (binode->ro_flags & BTRFS_INODE_RO_VERITY)
		new_fl |= S_VERITY;
159 160

	set_mask_bits(&inode->i_flags,
B
Boris Burkov 已提交
161 162
		      S_SYNC | S_APPEND | S_IMMUTABLE | S_NOATIME | S_DIRSYNC |
		      S_VERITY, new_fl);
163 164
}

165 166 167 168 169
/*
 * Check if @flags are a supported and valid set of FS_*_FL flags and that
 * the old and new flags are not conflicting
 */
static int check_fsflags(unsigned int old_flags, unsigned int flags)
170 171 172 173
{
	if (flags & ~(FS_IMMUTABLE_FL | FS_APPEND_FL | \
		      FS_NOATIME_FL | FS_NODUMP_FL | \
		      FS_SYNC_FL | FS_DIRSYNC_FL | \
L
Li Zefan 已提交
174 175
		      FS_NOCOMP_FL | FS_COMPR_FL |
		      FS_NOCOW_FL))
176 177
		return -EOPNOTSUPP;

178
	/* COMPR and NOCOMP on new/old are valid */
179 180 181
	if ((flags & FS_NOCOMP_FL) && (flags & FS_COMPR_FL))
		return -EINVAL;

182 183 184 185 186 187 188 189 190
	if ((flags & FS_COMPR_FL) && (flags & FS_NOCOW_FL))
		return -EINVAL;

	/* NOCOW and compression options are mutually exclusive */
	if ((old_flags & FS_NOCOW_FL) && (flags & (FS_COMPR_FL | FS_NOCOMP_FL)))
		return -EINVAL;
	if ((flags & FS_NOCOW_FL) && (old_flags & (FS_COMPR_FL | FS_NOCOMP_FL)))
		return -EINVAL;

191 192 193
	return 0;
}

194 195 196 197 198 199 200 201 202
static int check_fsflags_compatible(struct btrfs_fs_info *fs_info,
				    unsigned int flags)
{
	if (btrfs_is_zoned(fs_info) && (flags & FS_NOCOW_FL))
		return -EPERM;

	return 0;
}

/*
 * Set flags/xflags from the internal inode flags. The remaining items of
 * fsxattr are zeroed.
 */
int btrfs_fileattr_get(struct dentry *dentry, struct fileattr *fa)
{
	struct btrfs_inode *inode = BTRFS_I(d_inode(dentry));
	const unsigned int fsflags = btrfs_inode_flags_to_fsflags(inode);

	fileattr_fill_flags(fa, fsflags);
	return 0;
}

/*
 * Apply flag changes from the VFS fileattr API (FS_IOC_SETFLAGS and
 * FS_IOC_FSSETXATTR paths) to a btrfs inode.
 *
 * Translates FS_*_FL bits into BTRFS_INODE_* bits, updates the
 * "btrfs.compression" property when the compression flags change, and
 * persists the inode item inside a transaction.
 *
 * Returns 0 on success or a negative errno.
 */
int btrfs_fileattr_set(struct user_namespace *mnt_userns,
		       struct dentry *dentry, struct fileattr *fa)
{
	struct inode *inode = d_inode(dentry);
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_inode *binode = BTRFS_I(inode);
	struct btrfs_root *root = binode->root;
	struct btrfs_trans_handle *trans;
	unsigned int fsflags, old_fsflags;
	int ret;
	const char *comp = NULL;
	u32 binode_flags;

	if (btrfs_root_readonly(root))
		return -EROFS;

	/* The fsx (xflags) part of fileattr is not supported here. */
	if (fileattr_has_fsx(fa))
		return -EOPNOTSUPP;

	/* Drop flags that make no sense for this inode type. */
	fsflags = btrfs_mask_fsflags_for_type(inode, fa->flags);
	old_fsflags = btrfs_inode_flags_to_fsflags(binode);
	ret = check_fsflags(old_fsflags, fsflags);
	if (ret)
		return ret;

	ret = check_fsflags_compatible(fs_info, fsflags);
	if (ret)
		return ret;

	/*
	 * Build the new flag word in a local copy; binode->flags itself is
	 * only updated after the transaction has been started.
	 */
	binode_flags = binode->flags;
	if (fsflags & FS_SYNC_FL)
		binode_flags |= BTRFS_INODE_SYNC;
	else
		binode_flags &= ~BTRFS_INODE_SYNC;
	if (fsflags & FS_IMMUTABLE_FL)
		binode_flags |= BTRFS_INODE_IMMUTABLE;
	else
		binode_flags &= ~BTRFS_INODE_IMMUTABLE;
	if (fsflags & FS_APPEND_FL)
		binode_flags |= BTRFS_INODE_APPEND;
	else
		binode_flags &= ~BTRFS_INODE_APPEND;
	if (fsflags & FS_NODUMP_FL)
		binode_flags |= BTRFS_INODE_NODUMP;
	else
		binode_flags &= ~BTRFS_INODE_NODUMP;
	if (fsflags & FS_NOATIME_FL)
		binode_flags |= BTRFS_INODE_NOATIME;
	else
		binode_flags &= ~BTRFS_INODE_NOATIME;

	/* If coming from FS_IOC_FSSETXATTR then skip unconverted flags */
	if (!fa->flags_valid) {
		/* 1 item for the inode */
		trans = btrfs_start_transaction(root, 1);
		if (IS_ERR(trans))
			return PTR_ERR(trans);
		goto update_flags;
	}

	if (fsflags & FS_DIRSYNC_FL)
		binode_flags |= BTRFS_INODE_DIRSYNC;
	else
		binode_flags &= ~BTRFS_INODE_DIRSYNC;
	if (fsflags & FS_NOCOW_FL) {
		if (S_ISREG(inode->i_mode)) {
			/*
			 * It's safe to turn csums off here, no extents exist.
			 * Otherwise we want the flag to reflect the real COW
			 * status of the file and will not set it.
			 */
			if (inode->i_size == 0)
				binode_flags |= BTRFS_INODE_NODATACOW |
						BTRFS_INODE_NODATASUM;
		} else {
			binode_flags |= BTRFS_INODE_NODATACOW;
		}
	} else {
		/*
		 * Revert back under same assumptions as above
		 */
		if (S_ISREG(inode->i_mode)) {
			if (inode->i_size == 0)
				binode_flags &= ~(BTRFS_INODE_NODATACOW |
						  BTRFS_INODE_NODATASUM);
		} else {
			binode_flags &= ~BTRFS_INODE_NODATACOW;
		}
	}

	/*
	 * The COMPRESS flag can only be changed by users, while the NOCOMPRESS
	 * flag may be changed automatically if compression code won't make
	 * things smaller.
	 */
	if (fsflags & FS_NOCOMP_FL) {
		binode_flags &= ~BTRFS_INODE_COMPRESS;
		binode_flags |= BTRFS_INODE_NOCOMPRESS;
	} else if (fsflags & FS_COMPR_FL) {

		/* Compressed extents cannot back a swapfile. */
		if (IS_SWAPFILE(inode))
			return -ETXTBSY;

		binode_flags |= BTRFS_INODE_COMPRESS;
		binode_flags &= ~BTRFS_INODE_NOCOMPRESS;

		/* Fall back to zlib when the mount's type has no name. */
		comp = btrfs_compress_type2str(fs_info->compress_type);
		if (!comp || comp[0] == 0)
			comp = btrfs_compress_type2str(BTRFS_COMPRESS_ZLIB);
	} else {
		binode_flags &= ~(BTRFS_INODE_COMPRESS | BTRFS_INODE_NOCOMPRESS);
	}

	/*
	 * 1 for inode item
	 * 2 for properties
	 */
	trans = btrfs_start_transaction(root, 3);
	if (IS_ERR(trans))
		return PTR_ERR(trans);

	if (comp) {
		/* Set the compression property to the chosen algorithm. */
		ret = btrfs_set_prop(trans, inode, "btrfs.compression", comp,
				     strlen(comp), 0);
		if (ret) {
			btrfs_abort_transaction(trans, ret);
			goto out_end_trans;
		}
	} else {
		/* Clear the property; -ENODATA (not set) is not an error. */
		ret = btrfs_set_prop(trans, inode, "btrfs.compression", NULL,
				     0, 0);
		if (ret && ret != -ENODATA) {
			btrfs_abort_transaction(trans, ret);
			goto out_end_trans;
		}
	}

update_flags:
	binode->flags = binode_flags;
	btrfs_sync_inode_flags_to_i_flags(inode);
	inode_inc_iversion(inode);
	inode->i_ctime = current_time(inode);
	ret = btrfs_update_inode(trans, root, BTRFS_I(inode));

 out_end_trans:
	btrfs_end_transaction(trans);
	return ret;
}

364 365 366
/*
 * Start exclusive operation @type, return true on success
 */
367 368 369
bool btrfs_exclop_start(struct btrfs_fs_info *fs_info,
			enum btrfs_exclusive_operation type)
{
370 371 372 373 374 375 376 377 378 379
	bool ret = false;

	spin_lock(&fs_info->super_lock);
	if (fs_info->exclusive_operation == BTRFS_EXCLOP_NONE) {
		fs_info->exclusive_operation = type;
		ret = true;
	}
	spin_unlock(&fs_info->super_lock);

	return ret;
380 381
}

/*
 * Conditionally allow to enter the exclusive operation in case it's compatible
 * with the running one.  This must be paired with btrfs_exclop_start_unlock and
 * btrfs_exclop_finish.
 *
 * Compatibility:
 * - the same type is already running
 * - not BTRFS_EXCLOP_NONE - this is intentionally incompatible and the caller
 *   must check the condition first that would allow none -> @type
 */
bool btrfs_exclop_start_try_lock(struct btrfs_fs_info *fs_info,
				 enum btrfs_exclusive_operation type)
{
	spin_lock(&fs_info->super_lock);
	/* NOTE: on success we return with super_lock still held. */
	if (fs_info->exclusive_operation == type)
		return true;

	spin_unlock(&fs_info->super_lock);
	return false;
}

/* Drop super_lock taken by a successful btrfs_exclop_start_try_lock(). */
void btrfs_exclop_start_unlock(struct btrfs_fs_info *fs_info)
{
	spin_unlock(&fs_info->super_lock);
}

/*
 * Finish the running exclusive operation: reset it to BTRFS_EXCLOP_NONE and
 * notify sysfs watchers of the "exclusive_operation" attribute.
 */
void btrfs_exclop_finish(struct btrfs_fs_info *fs_info)
{
	spin_lock(&fs_info->super_lock);
	/* WRITE_ONCE: presumably paired with lockless readers elsewhere —
	 * confirm against users of exclusive_operation. */
	WRITE_ONCE(fs_info->exclusive_operation, BTRFS_EXCLOP_NONE);
	spin_unlock(&fs_info->super_lock);
	sysfs_notify(&fs_info->fs_devices->fsid_kobj, NULL, "exclusive_operation");
}

416 417
static int btrfs_ioctl_getversion(struct file *file, int __user *arg)
{
A
Al Viro 已提交
418
	struct inode *inode = file_inode(file);
419 420 421

	return put_user(inode->i_generation, arg);
}
C
Christoph Hellwig 已提交
422

/*
 * FITRIM ioctl: discard free space across all devices that support discard.
 *
 * Reads a struct fstrim_range from @arg, clamps range.minlen to the largest
 * discard granularity among the devices, runs the trim, and copies the
 * (possibly updated) range back to userspace.  Returns 0 or negative errno.
 */
static noinline int btrfs_ioctl_fitrim(struct btrfs_fs_info *fs_info,
					void __user *arg)
{
	struct btrfs_device *device;
	struct request_queue *q;
	struct fstrim_range range;
	u64 minlen = ULLONG_MAX;
	u64 num_devices = 0;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	/*
	 * btrfs_trim_block_group() depends on space cache, which is not
	 * available in zoned filesystem. So, disallow fitrim on a zoned
	 * filesystem for now.
	 */
	if (btrfs_is_zoned(fs_info))
		return -EOPNOTSUPP;

	/*
	 * If the fs is mounted with nologreplay, which requires it to be
	 * mounted in RO mode as well, we can not allow discard on free space
	 * inside block groups, because log trees refer to extents that are not
	 * pinned in a block group's free space cache (pinning the extents is
	 * precisely the first phase of replaying a log tree).
	 */
	if (btrfs_test_opt(fs_info, NOLOGREPLAY))
		return -EROFS;

	/* Count discard-capable devices and find the largest granularity. */
	rcu_read_lock();
	list_for_each_entry_rcu(device, &fs_info->fs_devices->devices,
				dev_list) {
		if (!device->bdev)
			continue;
		q = bdev_get_queue(device->bdev);
		if (blk_queue_discard(q)) {
			num_devices++;
			minlen = min_t(u64, q->limits.discard_granularity,
				     minlen);
		}
	}
	rcu_read_unlock();

	if (!num_devices)
		return -EOPNOTSUPP;
	if (copy_from_user(&range, arg, sizeof(range)))
		return -EFAULT;

	/*
	 * NOTE: Don't truncate the range using super->total_bytes.  Bytenr of
	 * block group is in the logical address space, which can be any
	 * sectorsize aligned bytenr in  the range [0, U64_MAX].
	 */
	if (range.len < fs_info->sb->s_blocksize)
		return -EINVAL;

	range.minlen = max(range.minlen, minlen);
	ret = btrfs_trim_fs(fs_info, &range);
	if (ret < 0)
		return ret;

	/* Report back the range actually used (minlen may have grown). */
	if (copy_to_user(arg, &range, sizeof(range)))
		return -EFAULT;

	return 0;
}

492
int __pure btrfs_is_empty_uuid(u8 *uuid)
493
{
C
Chris Mason 已提交
494 495 496 497 498 499 500
	int i;

	for (i = 0; i < BTRFS_UUID_SIZE; i++) {
		if (uuid[i])
			return 0;
	}
	return 1;
501 502
}

/*
 * Create a new, empty subvolume under directory @dir with the given name.
 *
 * Allocates a fresh objectid and anonymous block device, builds the new
 * root's tree block and root item inside one transaction, links the
 * subvolume into @dir (dir item, root backref, UUID tree entry), and finally
 * instantiates @dentry with the new inode.  Returns 0 or negative errno.
 */
static noinline int create_subvol(struct user_namespace *mnt_userns,
				  struct inode *dir, struct dentry *dentry,
				  const char *name, int namelen,
				  struct btrfs_qgroup_inherit *inherit)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb);
	struct btrfs_trans_handle *trans;
	struct btrfs_key key;
	struct btrfs_root_item *root_item;
	struct btrfs_inode_item *inode_item;
	struct extent_buffer *leaf;
	struct btrfs_root *root = BTRFS_I(dir)->root;
	struct btrfs_root *new_root;
	struct btrfs_block_rsv block_rsv;
	struct timespec64 cur_time = current_time(dir);
	struct inode *inode;
	int ret;
	int err;
	dev_t anon_dev = 0;
	u64 objectid;
	u64 index = 0;

	root_item = kzalloc(sizeof(*root_item), GFP_KERNEL);
	if (!root_item)
		return -ENOMEM;

	ret = btrfs_get_free_objectid(fs_info->tree_root, &objectid);
	if (ret)
		goto fail_free;

	ret = get_anon_bdev(&anon_dev);
	if (ret < 0)
		goto fail_free;

	/*
	 * Don't create subvolume whose level is not zero. Or qgroup will be
	 * screwed up since it assumes subvolume qgroup's level to be 0.
	 */
	if (btrfs_qgroup_level(objectid)) {
		ret = -ENOSPC;
		goto fail_free;
	}

	btrfs_init_block_rsv(&block_rsv, BTRFS_BLOCK_RSV_TEMP);
	/*
	 * The same as the snapshot creation, please see the comment
	 * of create_snapshot().
	 */
	ret = btrfs_subvolume_reserve_metadata(root, &block_rsv, 8, false);
	if (ret)
		goto fail_free;

	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		btrfs_subvolume_release_metadata(root, &block_rsv);
		goto fail_free;
	}
	trans->block_rsv = &block_rsv;
	trans->bytes_reserved = block_rsv.size;

	ret = btrfs_qgroup_inherit(trans, 0, objectid, inherit);
	if (ret)
		goto fail;

	/* Allocate the root node of the new subvolume's tree. */
	leaf = btrfs_alloc_tree_block(trans, root, 0, objectid, NULL, 0, 0, 0,
				      BTRFS_NESTING_NORMAL);
	if (IS_ERR(leaf)) {
		ret = PTR_ERR(leaf);
		goto fail;
	}

	btrfs_mark_buffer_dirty(leaf);

	/* Fill in the root item's embedded inode item. */
	inode_item = &root_item->inode;
	btrfs_set_stack_inode_generation(inode_item, 1);
	btrfs_set_stack_inode_size(inode_item, 3);
	btrfs_set_stack_inode_nlink(inode_item, 1);
	btrfs_set_stack_inode_nbytes(inode_item,
				     fs_info->nodesize);
	btrfs_set_stack_inode_mode(inode_item, S_IFDIR | 0755);

	btrfs_set_root_flags(root_item, 0);
	btrfs_set_root_limit(root_item, 0);
	btrfs_set_stack_inode_flags(inode_item, BTRFS_INODE_ROOT_ITEM_INIT);

	btrfs_set_root_bytenr(root_item, leaf->start);
	btrfs_set_root_generation(root_item, trans->transid);
	btrfs_set_root_level(root_item, 0);
	btrfs_set_root_refs(root_item, 1);
	btrfs_set_root_used(root_item, leaf->len);
	btrfs_set_root_last_snapshot(root_item, 0);

	btrfs_set_root_generation_v2(root_item,
			btrfs_root_generation(root_item));
	generate_random_guid(root_item->uuid);
	btrfs_set_stack_timespec_sec(&root_item->otime, cur_time.tv_sec);
	btrfs_set_stack_timespec_nsec(&root_item->otime, cur_time.tv_nsec);
	root_item->ctime = root_item->otime;
	btrfs_set_root_ctransid(root_item, trans->transid);
	btrfs_set_root_otransid(root_item, trans->transid);

	btrfs_tree_unlock(leaf);

	btrfs_set_root_dirid(root_item, BTRFS_FIRST_FREE_OBJECTID);

	key.objectid = objectid;
	key.offset = 0;
	key.type = BTRFS_ROOT_ITEM_KEY;
	ret = btrfs_insert_root(trans, fs_info->tree_root, &key,
				root_item);
	if (ret) {
		/*
		 * Since we don't abort the transaction in this case, free the
		 * tree block so that we don't leak space and leave the
		 * filesystem in an inconsistent state (an extent item in the
		 * extent tree without backreferences). Also no need to have
		 * the tree block locked since it is not in any tree at this
		 * point, so no other task can find it and use it.
		 */
		btrfs_free_tree_block(trans, root, leaf, 0, 1);
		free_extent_buffer(leaf);
		goto fail;
	}

	free_extent_buffer(leaf);
	leaf = NULL;

	key.offset = (u64)-1;
	new_root = btrfs_get_new_fs_root(fs_info, objectid, anon_dev);
	if (IS_ERR(new_root)) {
		free_anon_bdev(anon_dev);
		ret = PTR_ERR(new_root);
		btrfs_abort_transaction(trans, ret);
		goto fail;
	}
	/* Freeing will be done in btrfs_put_root() of new_root */
	anon_dev = 0;

	ret = btrfs_record_root_in_trans(trans, new_root);
	if (ret) {
		btrfs_put_root(new_root);
		btrfs_abort_transaction(trans, ret);
		goto fail;
	}

	ret = btrfs_create_subvol_root(trans, new_root, root, mnt_userns);
	btrfs_put_root(new_root);
	if (ret) {
		/* We potentially lose an unused inode item here */
		btrfs_abort_transaction(trans, ret);
		goto fail;
	}

	/*
	 * insert the directory item
	 */
	ret = btrfs_set_inode_index(BTRFS_I(dir), &index);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto fail;
	}

	ret = btrfs_insert_dir_item(trans, name, namelen, BTRFS_I(dir), &key,
				    BTRFS_FT_DIR, index);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto fail;
	}

	btrfs_i_size_write(BTRFS_I(dir), dir->i_size + namelen * 2);
	ret = btrfs_update_inode(trans, root, BTRFS_I(dir));
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto fail;
	}

	ret = btrfs_add_root_ref(trans, objectid, root->root_key.objectid,
				 btrfs_ino(BTRFS_I(dir)), index, name, namelen);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto fail;
	}

	ret = btrfs_uuid_tree_add(trans, root_item->uuid,
				  BTRFS_UUID_KEY_SUBVOL, objectid);
	if (ret)
		btrfs_abort_transaction(trans, ret);

fail:
	/* Common exit once the transaction has been started. */
	kfree(root_item);
	trans->block_rsv = NULL;
	trans->bytes_reserved = 0;
	btrfs_subvolume_release_metadata(root, &block_rsv);

	err = btrfs_commit_transaction(trans);
	if (err && !ret)
		ret = err;

	if (!ret) {
		inode = btrfs_lookup_dentry(dir, dentry);
		if (IS_ERR(inode))
			return PTR_ERR(inode);
		d_instantiate(dentry, inode);
	}
	return ret;

fail_free:
	/* Early-failure exit: no transaction exists yet. */
	if (anon_dev)
		free_anon_bdev(anon_dev);
	kfree(root_item);
	return ret;
}

/*
 * Create a snapshot of subvolume @root as a new directory entry @dentry
 * under @dir.
 *
 * The actual snapshot work happens at transaction commit time: a
 * btrfs_pending_snapshot is queued on the transaction and processed during
 * btrfs_commit_transaction().  Returns 0 or negative errno.
 */
static int create_snapshot(struct btrfs_root *root, struct inode *dir,
			   struct dentry *dentry, bool readonly,
			   struct btrfs_qgroup_inherit *inherit)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb);
	struct inode *inode;
	struct btrfs_pending_snapshot *pending_snapshot;
	struct btrfs_trans_handle *trans;
	int ret;

	/* Only shareable (fs tree) roots can be snapshotted. */
	if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state))
		return -EINVAL;

	if (atomic_read(&root->nr_swapfiles)) {
		btrfs_warn(fs_info,
			   "cannot snapshot subvolume with active swapfile");
		return -ETXTBSY;
	}

	pending_snapshot = kzalloc(sizeof(*pending_snapshot), GFP_KERNEL);
	if (!pending_snapshot)
		return -ENOMEM;

	ret = get_anon_bdev(&pending_snapshot->anon_dev);
	if (ret < 0)
		goto free_pending;
	pending_snapshot->root_item = kzalloc(sizeof(struct btrfs_root_item),
			GFP_KERNEL);
	pending_snapshot->path = btrfs_alloc_path();
	if (!pending_snapshot->root_item || !pending_snapshot->path) {
		ret = -ENOMEM;
		goto free_pending;
	}

	btrfs_init_block_rsv(&pending_snapshot->block_rsv,
			     BTRFS_BLOCK_RSV_TEMP);
	/*
	 * 1 - parent dir inode
	 * 2 - dir entries
	 * 1 - root item
	 * 2 - root ref/backref
	 * 1 - root of snapshot
	 * 1 - UUID item
	 */
	ret = btrfs_subvolume_reserve_metadata(BTRFS_I(dir)->root,
					&pending_snapshot->block_rsv, 8,
					false);
	if (ret)
		goto free_pending;

	pending_snapshot->dentry = dentry;
	pending_snapshot->root = root;
	pending_snapshot->readonly = readonly;
	pending_snapshot->dir = dir;
	pending_snapshot->inherit = inherit;

	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto fail;
	}

	/* Queue the snapshot; the commit below performs it. */
	spin_lock(&fs_info->trans_lock);
	list_add(&pending_snapshot->list,
		 &trans->transaction->pending_snapshots);
	spin_unlock(&fs_info->trans_lock);

	ret = btrfs_commit_transaction(trans);
	if (ret)
		goto fail;

	/* Error recorded by the commit-time snapshot creation, if any. */
	ret = pending_snapshot->error;
	if (ret)
		goto fail;

	ret = btrfs_orphan_cleanup(pending_snapshot->snap);
	if (ret)
		goto fail;

	inode = btrfs_lookup_dentry(d_inode(dentry->d_parent), dentry);
	if (IS_ERR(inode)) {
		ret = PTR_ERR(inode);
		goto fail;
	}

	d_instantiate(dentry, inode);
	ret = 0;
	/* anon_dev ownership moved to the new root; don't free it below. */
	pending_snapshot->anon_dev = 0;
fail:
	/* Prevent double freeing of anon_dev */
	if (ret && pending_snapshot->snap)
		pending_snapshot->snap->anon_dev = 0;
	btrfs_put_root(pending_snapshot->snap);
	btrfs_subvolume_release_metadata(root, &pending_snapshot->block_rsv);
free_pending:
	if (pending_snapshot->anon_dev)
		free_anon_bdev(pending_snapshot->anon_dev);
	kfree(pending_snapshot->root_item);
	btrfs_free_path(pending_snapshot->path);
	kfree(pending_snapshot);

	return ret;
}

/*  copy of may_delete in fs/namei.c()
 *	Check whether we can remove a link victim from directory dir, check
 *  whether the type of victim is right.
 *  1. We can't do it if dir is read-only (done in permission())
 *  2. We should have write and exec permissions on dir
 *  3. We can't remove anything from append-only dir
 *  4. We can't do anything with immutable dir (done in permission())
 *  5. If the sticky bit on dir is set we should either
 *	a. be owner of dir, or
 *	b. be owner of victim, or
 *	c. have CAP_FOWNER capability
 *  6. If the victim is append-only or immutable we can't do anything with
 *     links pointing to it.
 *  7. If we were asked to remove a directory and victim isn't one - ENOTDIR.
 *  8. If we were asked to remove a non-directory and victim isn't one - EISDIR.
 *  9. We can't remove a root or mountpoint.
 * 10. We don't allow removal of NFS sillyrenamed files; it's handled by
 *     nfs_async_unlink().
 */

static int btrfs_may_delete(struct user_namespace *mnt_userns,
			    struct inode *dir, struct dentry *victim, int isdir)
{
	int error;

	if (d_really_is_negative(victim))
		return -ENOENT;

	/* The caller must pass victim's parent as @dir. */
	BUG_ON(d_inode(victim->d_parent) != dir);
	audit_inode_child(dir, victim, AUDIT_TYPE_CHILD_DELETE);

	error = inode_permission(mnt_userns, dir, MAY_WRITE | MAY_EXEC);
	if (error)
		return error;
	if (IS_APPEND(dir))
		return -EPERM;
	if (check_sticky(mnt_userns, dir, d_inode(victim)) ||
	    IS_APPEND(d_inode(victim)) || IS_IMMUTABLE(d_inode(victim)) ||
	    IS_SWAPFILE(d_inode(victim)))
		return -EPERM;
	if (isdir) {
		if (!d_is_dir(victim))
			return -ENOTDIR;
		if (IS_ROOT(victim))
			return -EBUSY;
	} else if (d_is_dir(victim))
		return -EISDIR;
	if (IS_DEADDIR(dir))
		return -ENOENT;
	if (victim->d_flags & DCACHE_NFSFS_RENAMED)
		return -EBUSY;
	return 0;
}

/* copy of may_create in fs/namei.c() */
/*
 * Check whether a new entry @child may be created in directory @dir:
 * the dentry must be negative, the directory alive, the caller's ids
 * mappable in @mnt_userns, and write+exec permission granted on @dir.
 */
static inline int btrfs_may_create(struct user_namespace *mnt_userns,
				   struct inode *dir, struct dentry *child)
{
	if (d_really_is_positive(child))
		return -EEXIST;
	if (IS_DEADDIR(dir))
		return -ENOENT;
	if (!fsuidgid_has_mapping(dir->i_sb, mnt_userns))
		return -EOVERFLOW;
	return inode_permission(mnt_userns, dir, MAY_WRITE | MAY_EXEC);
}

/*
 * Create a new subvolume below @parent.  This is largely modeled after
 * sys_mkdirat and vfs_mkdir, but we only do a single component lookup
 * inside this filesystem so it's quite a bit simpler.
 */
static noinline int btrfs_mksubvol(const struct path *parent,
				   struct user_namespace *mnt_userns,
				   const char *name, int namelen,
				   struct btrfs_root *snap_src,
				   bool readonly,
				   struct btrfs_qgroup_inherit *inherit)
{
	struct inode *dir = d_inode(parent->dentry);
	struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb);
	struct dentry *dentry;
	int error;

	/* Killable parent-dir lock, as in the mkdir path. */
	error = down_write_killable_nested(&dir->i_rwsem, I_MUTEX_PARENT);
	if (error == -EINTR)
		return error;

	dentry = lookup_one(mnt_userns, name, parent->dentry, namelen);
	error = PTR_ERR(dentry);
	if (IS_ERR(dentry))
		goto out_unlock;

	error = btrfs_may_create(mnt_userns, dir, dentry);
	if (error)
		goto out_dput;

	/*
	 * even if this name doesn't exist, we may get hash collisions.
	 * check for them now when we can safely fail
	 */
	error = btrfs_check_dir_item_collision(BTRFS_I(dir)->root,
					       dir->i_ino, name,
					       namelen);
	if (error)
		goto out_dput;

	down_read(&fs_info->subvol_sem);

	/* Refuse to create inside a root that is being deleted. */
	if (btrfs_root_refs(&BTRFS_I(dir)->root->root_item) == 0)
		goto out_up_read;

	if (snap_src)
		error = create_snapshot(snap_src, dir, dentry, readonly, inherit);
	else
		error = create_subvol(mnt_userns, dir, dentry, name, namelen, inherit);

	if (!error)
		fsnotify_mkdir(dir, dentry);
out_up_read:
	up_read(&fs_info->subvol_sem);
out_dput:
	dput(dentry);
out_unlock:
	btrfs_inode_unlock(dir, 0);
	return error;
}

/*
 * Snapshot wrapper around btrfs_mksubvol(): flush and wait for all dirty
 * data of @root first so the snapshot sees a consistent state, forcing new
 * writes into COW mode for the duration.
 */
static noinline int btrfs_mksnapshot(const struct path *parent,
				   struct user_namespace *mnt_userns,
				   const char *name, int namelen,
				   struct btrfs_root *root,
				   bool readonly,
				   struct btrfs_qgroup_inherit *inherit)
{
	int ret;
	bool snapshot_force_cow = false;

	/*
	 * Force new buffered writes to reserve space even when NOCOW is
	 * possible. This is to avoid later writeback (running dealloc) to
	 * fallback to COW mode and unexpectedly fail with ENOSPC.
	 */
	btrfs_drew_read_lock(&root->snapshot_lock);

	ret = btrfs_start_delalloc_snapshot(root, false);
	if (ret)
		goto out;

	/*
	 * All previous writes have started writeback in NOCOW mode, so now
	 * we force future writes to fallback to COW mode during snapshot
	 * creation.
	 */
	atomic_inc(&root->snapshot_force_cow);
	snapshot_force_cow = true;

	btrfs_wait_ordered_extents(root, U64_MAX, 0, (u64)-1);

	ret = btrfs_mksubvol(parent, mnt_userns, name, namelen,
			     root, readonly, inherit);
out:
	if (snapshot_force_cow)
		atomic_dec(&root->snapshot_force_cow);
	btrfs_drew_read_unlock(&root->snapshot_lock);
	return ret;
}

/*
 * Look up the extent map covering one sector at @start of @inode.
 *
 * Tries the in-memory extent map tree first; on a miss, takes the extent
 * range lock (unless @locked says the caller already holds it) and reads
 * the mapping via btrfs_get_extent().  Returns the extent map with a
 * reference held, or NULL on error; the caller must free_extent_map() it.
 */
static struct extent_map *defrag_lookup_extent(struct inode *inode, u64 start,
					       bool locked)
{
	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	struct extent_map *em;
	const u32 sectorsize = BTRFS_I(inode)->root->fs_info->sectorsize;

	/*
	 * hopefully we have this extent in the tree already, try without
	 * the full extent lock
	 */
	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, start, sectorsize);
	read_unlock(&em_tree->lock);

	if (!em) {
		struct extent_state *cached = NULL;
		u64 end = start + sectorsize - 1;

		/* get the big lock and read metadata off disk */
		if (!locked)
			lock_extent_bits(io_tree, start, end, &cached);
		em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, start, sectorsize);
		if (!locked)
			unlock_extent_cached(io_tree, start, end, &cached);

		if (IS_ERR(em))
			return NULL;
	}

	return em;
}
1022

1023 1024
static bool defrag_check_next_extent(struct inode *inode, struct extent_map *em,
				     bool locked)
L
Li Zefan 已提交
1025 1026 1027 1028 1029 1030 1031 1032
{
	struct extent_map *next;
	bool ret = true;

	/* this is the last extent */
	if (em->start + em->len >= i_size_read(inode))
		return false;

1033
	next = defrag_lookup_extent(inode, em->start + em->len, locked);
1034 1035 1036
	if (!next || next->block_start >= EXTENT_MAP_LAST_BYTE)
		ret = false;
	else if ((em->block_start + em->block_len == next->block_start) &&
1037
		 (em->block_len > SZ_128K && next->block_len > SZ_128K))
L
Li Zefan 已提交
1038 1039 1040
		ret = false;

	free_extent_map(next);
1041 1042 1043
	return ret;
}

1044 1045 1046 1047 1048 1049 1050 1051 1052 1053 1054 1055 1056 1057 1058 1059 1060 1061 1062 1063 1064 1065 1066 1067 1068 1069 1070 1071 1072 1073 1074 1075 1076 1077 1078 1079 1080 1081 1082 1083 1084 1085 1086 1087 1088 1089 1090 1091 1092 1093 1094 1095 1096 1097 1098 1099 1100 1101 1102 1103 1104 1105 1106 1107 1108 1109 1110 1111 1112 1113 1114 1115 1116 1117 1118 1119 1120 1121 1122 1123 1124 1125
/*
 * Prepare one page to be defragged.
 *
 * This will ensure:
 *
 * - Returned page is locked and has been set up properly.
 * - No ordered extent exists in the page.
 * - The page is uptodate.
 *
 * NOTE: Caller should also wait for page writeback after the cluster is
 * prepared, here we don't do writeback wait for each page.
 */
static struct page *defrag_prepare_one_page(struct btrfs_inode *inode,
					    pgoff_t index)
{
	struct address_space *mapping = inode->vfs_inode.i_mapping;
	gfp_t mask = btrfs_alloc_write_mask(mapping);
	u64 page_start = (u64)index << PAGE_SHIFT;
	u64 page_end = page_start + PAGE_SIZE - 1;
	struct extent_state *cached_state = NULL;
	struct page *page;
	int ret;

again:
	page = find_or_create_page(mapping, index, mask);
	if (!page)
		return ERR_PTR(-ENOMEM);

	/* Attach btrfs private data to the page before using extent bits. */
	ret = set_page_extent_mapped(page);
	if (ret < 0) {
		unlock_page(page);
		put_page(page);
		return ERR_PTR(ret);
	}

	/* Wait for any existing ordered extent in the range */
	while (1) {
		struct btrfs_ordered_extent *ordered;

		/* The lookup is done under the extent lock for a stable view. */
		lock_extent_bits(&inode->io_tree, page_start, page_end, &cached_state);
		ordered = btrfs_lookup_ordered_range(inode, page_start, PAGE_SIZE);
		unlock_extent_cached(&inode->io_tree, page_start, page_end,
				     &cached_state);
		if (!ordered)
			break;

		/* Must drop the page lock before waiting on the ordered extent. */
		unlock_page(page);
		btrfs_start_ordered_extent(ordered, 1);
		btrfs_put_ordered_extent(ordered);
		lock_page(page);
		/*
		 * We unlocked the page above, so we need check if it was
		 * released or not.
		 */
		if (page->mapping != mapping || !PagePrivate(page)) {
			unlock_page(page);
			put_page(page);
			goto again;
		}
	}

	/*
	 * Now the page range has no ordered extent any more.  Read the page to
	 * make it uptodate.
	 */
	if (!PageUptodate(page)) {
		btrfs_readpage(NULL, page);
		lock_page(page);
		/* btrfs_readpage() unlocked the page; re-check it survived. */
		if (page->mapping != mapping || !PagePrivate(page)) {
			unlock_page(page);
			put_page(page);
			goto again;
		}
		if (!PageUptodate(page)) {
			unlock_page(page);
			put_page(page);
			return ERR_PTR(-EIO);
		}
	}
	return page;
}

1126 1127 1128 1129 1130 1131 1132 1133 1134 1135 1136 1137 1138 1139 1140 1141 1142
/* One contiguous file range selected as a defrag target. */
struct defrag_target_range {
	/* Link into the caller's target list (see defrag_collect_targets()). */
	struct list_head list;
	/* File offset of the range start, in bytes. */
	u64 start;
	/* Length of the range, in bytes. */
	u64 len;
};

/*
 * Collect all valid target extents.
 *
 * @start:	   file offset to lookup
 * @len:	   length to lookup
 * @extent_thresh: file extent size threshold, any extent size >= this value
 *		   will be ignored
 * @newer_than:    only defrag extents newer than this value
 * @do_compress:   whether the defrag is doing compression
 *		   if true, @extent_thresh will be ignored and all regular
 *		   file extents meeting @newer_than will be targets.
 * @locked:	   if the range has already held extent lock
 * @target_list:   list of targets file extents
 */
static int defrag_collect_targets(struct btrfs_inode *inode,
				  u64 start, u64 len, u32 extent_thresh,
				  u64 newer_than, bool do_compress,
				  bool locked, struct list_head *target_list)
{
	u64 cur = start;
	int ret = 0;

	while (cur < start + len) {
		struct defrag_target_range *range;
		struct extent_map *em;
		bool next_mergeable = true;
		u64 range_len;

		em = defrag_lookup_extent(&inode->vfs_inode, cur, locked);
		if (!em)
			break;

		/* Holes, inline and preallocated extents are never targets. */
		if (em->block_start >= EXTENT_MAP_LAST_BYTE ||
		    test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
			goto next;

		/* Too old, not a target. */
		if (em->generation < newer_than)
			goto next;

		/*
		 * When compressing we want to rewrite every valid file
		 * extent, so neither @extent_thresh nor the mergeable check
		 * applies.
		 */
		if (do_compress)
			goto add;

		/* Already large enough, skip it. */
		if (em->len >= extent_thresh)
			goto next;

		next_mergeable = defrag_check_next_extent(&inode->vfs_inode, em,
							  locked);
		if (!next_mergeable) {
			struct defrag_target_range *last;

			/* Empty target list, no way to merge with last entry */
			if (list_empty(target_list))
				goto next;
			last = list_entry(target_list->prev,
					  struct defrag_target_range, list);
			/* Not mergeable with last entry */
			if (last->start + last->len != cur)
				goto next;

			/* Mergeable, fall through to add it to @target_list. */
		}

add:
		range_len = min(extent_map_end(em), start + len) - cur;
		/*
		 * This one is a good target, check if it can be merged into
		 * last range of the target list.
		 */
		if (!list_empty(target_list)) {
			struct defrag_target_range *last;

			last = list_entry(target_list->prev,
					  struct defrag_target_range, list);
			ASSERT(last->start + last->len <= cur);
			if (last->start + last->len == cur) {
				/* Mergeable, enlarge the last entry */
				last->len += range_len;
				goto next;
			}
			/* Fall through to allocate a new entry */
		}

		range = kmalloc(sizeof(*range), GFP_NOFS);
		if (!range) {
			free_extent_map(em);
			ret = -ENOMEM;
			break;
		}
		range->start = cur;
		range->len = range_len;
		list_add_tail(&range->list, target_list);

next:
		cur = extent_map_end(em);
		free_extent_map(em);
	}
	if (ret < 0) {
		struct defrag_target_range *entry;
		struct defrag_target_range *tmp;

		/* On error, drop everything collected so far. */
		list_for_each_entry_safe(entry, tmp, target_list, list) {
			list_del_init(&entry->list);
			kfree(entry);
		}
	}
	return ret;
}

1248 1249 1250 1251 1252 1253 1254 1255 1256 1257 1258 1259 1260 1261 1262 1263 1264 1265 1266 1267 1268 1269 1270 1271 1272 1273 1274 1275 1276 1277 1278 1279 1280 1281 1282 1283 1284 1285 1286 1287 1288 1289 1290 1291 1292 1293 1294 1295 1296 1297 1298 1299 1300 1301
/* Size of one defrag cluster, must stay page aligned (checked by callers). */
#define CLUSTER_SIZE	(SZ_256K)

/*
 * Defrag one contiguous target range.
 *
 * @inode:	target inode
 * @target:	target range to defrag
 * @pages:	locked pages covering the defrag range
 * @nr_pages:	number of locked pages
 *
 * Caller should ensure:
 *
 * - Pages are prepared
 *   Pages should be locked, no ordered extent in the pages range,
 *   no writeback.
 *
 * - Extent bits are locked
 */
static int defrag_one_locked_target(struct btrfs_inode *inode,
				    struct defrag_target_range *target,
				    struct page **pages, int nr_pages,
				    struct extent_state **cached_state)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct extent_changeset *data_reserved = NULL;
	const u64 start = target->start;
	const u64 len = target->len;
	unsigned long last_index = (start + len - 1) >> PAGE_SHIFT;
	unsigned long start_index = start >> PAGE_SHIFT;
	/* @pages may start before @target; index relative to pages[0]. */
	unsigned long first_index = page_index(pages[0]);
	int ret = 0;
	int i;

	ASSERT(last_index - first_index + 1 <= nr_pages);

	/* Reserve data and metadata space for the dirtied range. */
	ret = btrfs_delalloc_reserve_space(inode, &data_reserved, start, len);
	if (ret < 0)
		return ret;
	/* Reset old delalloc/defrag state, then mark the range for defrag. */
	clear_extent_bit(&inode->io_tree, start, start + len - 1,
			 EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING |
			 EXTENT_DEFRAG, 0, 0, cached_state);
	set_extent_defrag(&inode->io_tree, start, start + len - 1, cached_state);

	/* Update the page status */
	for (i = start_index - first_index; i <= last_index - first_index; i++) {
		/*
		 * NOTE(review): ClearPageChecked() clears the whole page flag
		 * while the dirty update below uses the range-clamped subpage
		 * helper — confirm this is intended for sectorsize < PAGE_SIZE.
		 */
		ClearPageChecked(pages[i]);
		btrfs_page_clamp_set_dirty(fs_info, pages[i], start, len);
	}
	/* Drop the extra metadata reservation taken for the extent count. */
	btrfs_delalloc_release_extents(inode, len);
	extent_changeset_free(data_reserved);

	return ret;
}

1302 1303 1304 1305 1306 1307 1308 1309 1310 1311 1312 1313 1314 1315 1316 1317 1318 1319 1320 1321 1322 1323 1324 1325 1326 1327 1328 1329 1330 1331 1332 1333 1334 1335 1336 1337 1338 1339 1340 1341 1342 1343 1344 1345 1346 1347 1348 1349 1350 1351 1352 1353 1354 1355 1356 1357 1358 1359 1360 1361 1362 1363 1364 1365 1366 1367 1368 1369 1370 1371 1372 1373 1374 1375 1376 1377 1378
/*
 * Defrag one byte range: prepare and lock its pages, lock the extent range,
 * re-collect the defrag targets under the lock, and mark each target
 * delalloc/defrag so writeback rewrites it.
 *
 * @start and @len must be sector aligned and within one cluster
 * (CLUSTER_SIZE).  Returns 0 or a negative errno.
 */
static int defrag_one_range(struct btrfs_inode *inode, u64 start, u32 len,
			    u32 extent_thresh, u64 newer_than, bool do_compress)
{
	struct extent_state *cached_state = NULL;
	struct defrag_target_range *entry;
	struct defrag_target_range *tmp;
	LIST_HEAD(target_list);
	struct page **pages;
	const u32 sectorsize = inode->root->fs_info->sectorsize;
	u64 last_index = (start + len - 1) >> PAGE_SHIFT;
	u64 start_index = start >> PAGE_SHIFT;
	unsigned int nr_pages = last_index - start_index + 1;
	int ret = 0;
	int i;

	ASSERT(nr_pages <= CLUSTER_SIZE / PAGE_SIZE);
	ASSERT(IS_ALIGNED(start, sectorsize) && IS_ALIGNED(len, sectorsize));

	pages = kcalloc(nr_pages, sizeof(struct page *), GFP_NOFS);
	if (!pages)
		return -ENOMEM;

	/* Prepare all pages */
	for (i = 0; i < nr_pages; i++) {
		pages[i] = defrag_prepare_one_page(inode, start_index + i);
		if (IS_ERR(pages[i])) {
			ret = PTR_ERR(pages[i]);
			/* NULL it so the cleanup loop below can skip it. */
			pages[i] = NULL;
			goto free_pages;
		}
	}
	/* Writeback wait is done once here, not per page (see prepare). */
	for (i = 0; i < nr_pages; i++)
		wait_on_page_writeback(pages[i]);

	/* Lock the pages range */
	lock_extent_bits(&inode->io_tree, start_index << PAGE_SHIFT,
			 (last_index << PAGE_SHIFT) + PAGE_SIZE - 1,
			 &cached_state);
	/*
	 * Now we have a consistent view about the extent map, re-check
	 * which range really needs to be defragged.
	 *
	 * And this time we have extent locked already, pass @locked = true
	 * so that we won't relock the extent range and cause deadlock.
	 */
	ret = defrag_collect_targets(inode, start, len, extent_thresh,
				     newer_than, do_compress, true,
				     &target_list);
	if (ret < 0)
		goto unlock_extent;

	list_for_each_entry(entry, &target_list, list) {
		ret = defrag_one_locked_target(inode, entry, pages, nr_pages,
					       &cached_state);
		if (ret < 0)
			break;
	}

	/* Free the target list whether the loop above succeeded or not. */
	list_for_each_entry_safe(entry, tmp, &target_list, list) {
		list_del_init(&entry->list);
		kfree(entry);
	}
unlock_extent:
	unlock_extent_cached(&inode->io_tree, start_index << PAGE_SHIFT,
			     (last_index << PAGE_SHIFT) + PAGE_SIZE - 1,
			     &cached_state);
free_pages:
	for (i = 0; i < nr_pages; i++) {
		if (pages[i]) {
			unlock_page(pages[i]);
			put_page(pages[i]);
		}
	}
	kfree(pages);
	return ret;
}

1379 1380 1381 1382 1383 1384 1385 1386 1387 1388 1389 1390 1391 1392 1393 1394 1395 1396 1397 1398 1399 1400 1401 1402 1403 1404 1405 1406 1407 1408 1409 1410 1411 1412 1413 1414 1415 1416 1417 1418 1419 1420 1421 1422 1423 1424 1425 1426 1427 1428 1429 1430 1431 1432 1433 1434
static int defrag_one_cluster(struct btrfs_inode *inode,
			      struct file_ra_state *ra,
			      u64 start, u32 len, u32 extent_thresh,
			      u64 newer_than, bool do_compress,
			      unsigned long *sectors_defragged,
			      unsigned long max_sectors)
{
	const u32 sectorsize = inode->root->fs_info->sectorsize;
	struct defrag_target_range *entry;
	struct defrag_target_range *tmp;
	LIST_HEAD(target_list);
	int ret;

	BUILD_BUG_ON(!IS_ALIGNED(CLUSTER_SIZE, PAGE_SIZE));
	ret = defrag_collect_targets(inode, start, len, extent_thresh,
				     newer_than, do_compress, false,
				     &target_list);
	if (ret < 0)
		goto out;

	list_for_each_entry(entry, &target_list, list) {
		u32 range_len = entry->len;

		/* Reached the limit */
		if (max_sectors && max_sectors == *sectors_defragged)
			break;

		if (max_sectors)
			range_len = min_t(u32, range_len,
				(max_sectors - *sectors_defragged) * sectorsize);

		if (ra)
			page_cache_sync_readahead(inode->vfs_inode.i_mapping,
				ra, NULL, entry->start >> PAGE_SHIFT,
				((entry->start + range_len - 1) >> PAGE_SHIFT) -
				(entry->start >> PAGE_SHIFT) + 1);
		/*
		 * Here we may not defrag any range if holes are punched before
		 * we locked the pages.
		 * But that's fine, it only affects the @sectors_defragged
		 * accounting.
		 */
		ret = defrag_one_range(inode, entry->start, range_len,
				       extent_thresh, newer_than, do_compress);
		if (ret < 0)
			break;
		*sectors_defragged += range_len;
	}
out:
	list_for_each_entry_safe(entry, tmp, &target_list, list) {
		list_del_init(&entry->list);
		kfree(entry);
	}
	return ret;
}

1435 1436 1437 1438 1439 1440 1441 1442 1443 1444 1445
/*
 * Entry point to file defragmentation.
 *
 * @inode:	   inode to be defragged
 * @ra:		   readahead state (can be NUL)
 * @range:	   defrag options including range and flags
 * @newer_than:	   minimum transid to defrag
 * @max_to_defrag: max number of sectors to be defragged, if 0, the whole inode
 *		   will be defragged.
 */
int btrfs_defrag_file(struct inode *inode, struct file_ra_state *ra,
C
Chris Mason 已提交
1446 1447 1448
		      struct btrfs_ioctl_defrag_range_args *range,
		      u64 newer_than, unsigned long max_to_defrag)
{
1449
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
1450
	unsigned long sectors_defragged = 0;
1451
	u64 isize = i_size_read(inode);
1452 1453
	u64 cur;
	u64 last_byte;
1454
	bool do_compress = range->flags & BTRFS_DEFRAG_RANGE_COMPRESS;
1455
	bool ra_allocated = false;
1456 1457 1458
	int compress_type = BTRFS_COMPRESS_ZLIB;
	int ret = 0;
	u32 extent_thresh = range->extent_thresh;
C
Chris Mason 已提交
1459

1460 1461 1462 1463 1464
	if (isize == 0)
		return 0;

	if (range->start >= isize)
		return -EINVAL;
1465

1466
	if (do_compress) {
1467
		if (range->compress_type >= BTRFS_NR_COMPRESS_TYPES)
1468 1469 1470 1471
			return -EINVAL;
		if (range->compress_type)
			compress_type = range->compress_type;
	}
C
Christoph Hellwig 已提交
1472

1473
	if (extent_thresh == 0)
1474
		extent_thresh = SZ_256K;
1475

1476 1477 1478 1479 1480 1481 1482 1483
	if (range->start + range->len > range->start) {
		/* Got a specific range */
		last_byte = min(isize, range->start + range->len) - 1;
	} else {
		/* Defrag until file end */
		last_byte = isize - 1;
	}

C
Chris Mason 已提交
1484
	/*
1485
	 * If we were not given a ra, allocate a readahead context. As
1486 1487
	 * readahead is just an optimization, defrag will work without it so
	 * we don't error out.
C
Chris Mason 已提交
1488
	 */
1489 1490
	if (!ra) {
		ra_allocated = true;
1491
		ra = kzalloc(sizeof(*ra), GFP_KERNEL);
1492 1493
		if (ra)
			file_ra_state_init(ra, inode->i_mapping);
C
Chris Mason 已提交
1494 1495
	}

1496 1497 1498
	/* Align the range */
	cur = round_down(range->start, fs_info->sectorsize);
	last_byte = round_up(last_byte, fs_info->sectorsize) - 1;
1499

1500 1501
	while (cur < last_byte) {
		u64 cluster_end;
1502

1503 1504
		/* The cluster size 256K should always be page aligned */
		BUILD_BUG_ON(!IS_ALIGNED(CLUSTER_SIZE, PAGE_SIZE));
1505

1506 1507 1508 1509
		/* We want the cluster end at page boundary when possible */
		cluster_end = (((cur >> PAGE_SHIFT) +
			       (SZ_256K >> PAGE_SHIFT)) << PAGE_SHIFT) - 1;
		cluster_end = min(cluster_end, last_byte);
1510

1511
		btrfs_inode_lock(inode, 0);
1512 1513
		if (IS_SWAPFILE(inode)) {
			ret = -ETXTBSY;
1514 1515
			btrfs_inode_unlock(inode, 0);
			break;
1516
		}
1517
		if (!(inode->i_sb->s_flags & SB_ACTIVE)) {
1518
			btrfs_inode_unlock(inode, 0);
1519
			break;
1520
		}
1521 1522 1523 1524 1525 1526
		if (do_compress)
			BTRFS_I(inode)->defrag_compress = compress_type;
		ret = defrag_one_cluster(BTRFS_I(inode), ra, cur,
				cluster_end + 1 - cur, extent_thresh,
				newer_than, do_compress,
				&sectors_defragged, max_to_defrag);
1527
		btrfs_inode_unlock(inode, 0);
1528 1529 1530
		if (ret < 0)
			break;
		cur = cluster_end + 1;
C
Christoph Hellwig 已提交
1531 1532
	}

1533 1534 1535 1536 1537 1538 1539 1540
	if (ra_allocated)
		kfree(ra);
	if (sectors_defragged) {
		/*
		 * We have defragged some sectors, for compression case they
		 * need to be written back immediately.
		 */
		if (range->flags & BTRFS_DEFRAG_RANGE_START_IO) {
1541
			filemap_flush(inode->i_mapping);
1542 1543 1544 1545 1546 1547 1548 1549 1550
			if (test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
				     &BTRFS_I(inode)->runtime_flags))
				filemap_flush(inode->i_mapping);
		}
		if (range->compress_type == BTRFS_COMPRESS_LZO)
			btrfs_set_fs_incompat(fs_info, COMPRESS_LZO);
		else if (range->compress_type == BTRFS_COMPRESS_ZSTD)
			btrfs_set_fs_incompat(fs_info, COMPRESS_ZSTD);
		ret = sectors_defragged;
1551
	}
1552
	if (do_compress) {
1553
		btrfs_inode_lock(inode, 0);
1554
		BTRFS_I(inode)->defrag_compress = BTRFS_COMPRESS_NONE;
1555
		btrfs_inode_unlock(inode, 0);
1556
	}
1557
	return ret;
C
Christoph Hellwig 已提交
1558 1559
}

1560 1561 1562 1563 1564 1565 1566 1567 1568 1569 1570 1571 1572 1573 1574 1575 1576 1577 1578 1579 1580 1581 1582 1583 1584 1585 1586 1587 1588 1589 1590 1591 1592 1593 1594 1595 1596 1597 1598 1599 1600 1601
/*
 * Try to start exclusive operation @type or cancel it if it's running.
 *
 * Return:
 *   0        - normal mode, newly claimed op started
 *  >0        - normal mode, something else is running,
 *              return BTRFS_ERROR_DEV_EXCL_RUN_IN_PROGRESS to user space
 * ECANCELED  - cancel mode, successful cancel
 * ENOTCONN   - cancel mode, operation not running anymore
 */
static int exclop_start_or_cancel_reloc(struct btrfs_fs_info *fs_info,
			enum btrfs_exclusive_operation type, bool cancel)
{
	if (!cancel) {
		/* Start normal op */
		if (!btrfs_exclop_start(fs_info, type))
			return BTRFS_ERROR_DEV_EXCL_RUN_IN_PROGRESS;
		/* Exclusive operation is now claimed */
		return 0;
	}

	/* Cancel running op */
	if (btrfs_exclop_start_try_lock(fs_info, type)) {
		/*
		 * This blocks any exclop finish from setting it to NONE, so we
		 * request cancellation. Either it runs and we will wait for it,
		 * or it has finished and no waiting will happen.
		 */
		atomic_inc(&fs_info->reloc_cancel_req);
		btrfs_exclop_start_unlock(fs_info);

		if (test_bit(BTRFS_FS_RELOC_RUNNING, &fs_info->flags))
			wait_on_bit(&fs_info->flags, BTRFS_FS_RELOC_RUNNING,
				    TASK_INTERRUPTIBLE);

		return -ECANCELED;
	}

	/* Something else is running or none */
	return -ENOTCONN;
}

1602
static noinline int btrfs_ioctl_resize(struct file *file,
1603
					void __user *arg)
C
Christoph Hellwig 已提交
1604
{
1605
	BTRFS_DEV_LOOKUP_ARGS(args);
1606 1607
	struct inode *inode = file_inode(file);
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
C
Christoph Hellwig 已提交
1608 1609 1610
	u64 new_size;
	u64 old_size;
	u64 devid = 1;
1611
	struct btrfs_root *root = BTRFS_I(inode)->root;
C
Christoph Hellwig 已提交
1612 1613 1614 1615
	struct btrfs_ioctl_vol_args *vol_args;
	struct btrfs_trans_handle *trans;
	struct btrfs_device *device = NULL;
	char *sizestr;
1616
	char *retptr;
C
Christoph Hellwig 已提交
1617 1618 1619
	char *devstr = NULL;
	int ret = 0;
	int mod = 0;
D
David Sterba 已提交
1620
	bool cancel;
C
Christoph Hellwig 已提交
1621

1622 1623 1624
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

1625 1626 1627 1628
	ret = mnt_want_write_file(file);
	if (ret)
		return ret;

D
David Sterba 已提交
1629 1630 1631 1632
	/*
	 * Read the arguments before checking exclusivity to be able to
	 * distinguish regular resize and cancel
	 */
L
Li Zefan 已提交
1633
	vol_args = memdup_user(arg, sizeof(*vol_args));
1634 1635
	if (IS_ERR(vol_args)) {
		ret = PTR_ERR(vol_args);
D
David Sterba 已提交
1636
		goto out_drop;
1637
	}
1638
	vol_args->name[BTRFS_PATH_NAME_MAX] = '\0';
C
Christoph Hellwig 已提交
1639
	sizestr = vol_args->name;
D
David Sterba 已提交
1640 1641 1642 1643 1644 1645
	cancel = (strcmp("cancel", sizestr) == 0);
	ret = exclop_start_or_cancel_reloc(fs_info, BTRFS_EXCLOP_RESIZE, cancel);
	if (ret)
		goto out_free;
	/* Exclusive operation is now claimed */

C
Christoph Hellwig 已提交
1646 1647 1648 1649 1650
	devstr = strchr(sizestr, ':');
	if (devstr) {
		sizestr = devstr + 1;
		*devstr = '\0';
		devstr = vol_args->name;
1651 1652
		ret = kstrtoull(devstr, 10, &devid);
		if (ret)
D
David Sterba 已提交
1653
			goto out_finish;
1654 1655
		if (!devid) {
			ret = -EINVAL;
D
David Sterba 已提交
1656
			goto out_finish;
1657
		}
1658
		btrfs_info(fs_info, "resizing devid %llu", devid);
C
Christoph Hellwig 已提交
1659
	}
M
Miao Xie 已提交
1660

1661 1662
	args.devid = devid;
	device = btrfs_find_device(fs_info->fs_devices, &args);
C
Christoph Hellwig 已提交
1663
	if (!device) {
1664 1665
		btrfs_info(fs_info, "resizer unable to find device %llu",
			   devid);
1666
		ret = -ENODEV;
D
David Sterba 已提交
1667
		goto out_finish;
C
Christoph Hellwig 已提交
1668
	}
M
Miao Xie 已提交
1669

1670
	if (!test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
1671
		btrfs_info(fs_info,
1672
			   "resizer unable to apply on readonly device %llu",
1673
		       devid);
1674
		ret = -EPERM;
D
David Sterba 已提交
1675
		goto out_finish;
L
Liu Bo 已提交
1676 1677
	}

C
Christoph Hellwig 已提交
1678 1679 1680 1681 1682 1683 1684 1685 1686 1687
	if (!strcmp(sizestr, "max"))
		new_size = device->bdev->bd_inode->i_size;
	else {
		if (sizestr[0] == '-') {
			mod = -1;
			sizestr++;
		} else if (sizestr[0] == '+') {
			mod = 1;
			sizestr++;
		}
1688 1689
		new_size = memparse(sizestr, &retptr);
		if (*retptr != '\0' || new_size == 0) {
C
Christoph Hellwig 已提交
1690
			ret = -EINVAL;
D
David Sterba 已提交
1691
			goto out_finish;
C
Christoph Hellwig 已提交
1692 1693 1694
		}
	}

1695
	if (test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) {
1696
		ret = -EPERM;
D
David Sterba 已提交
1697
		goto out_finish;
1698 1699
	}

1700
	old_size = btrfs_device_get_total_bytes(device);
C
Christoph Hellwig 已提交
1701 1702 1703 1704

	if (mod < 0) {
		if (new_size > old_size) {
			ret = -EINVAL;
D
David Sterba 已提交
1705
			goto out_finish;
C
Christoph Hellwig 已提交
1706 1707 1708
		}
		new_size = old_size - new_size;
	} else if (mod > 0) {
1709
		if (new_size > ULLONG_MAX - old_size) {
1710
			ret = -ERANGE;
D
David Sterba 已提交
1711
			goto out_finish;
1712
		}
C
Christoph Hellwig 已提交
1713 1714 1715
		new_size = old_size + new_size;
	}

1716
	if (new_size < SZ_256M) {
C
Christoph Hellwig 已提交
1717
		ret = -EINVAL;
D
David Sterba 已提交
1718
		goto out_finish;
C
Christoph Hellwig 已提交
1719 1720 1721
	}
	if (new_size > device->bdev->bd_inode->i_size) {
		ret = -EFBIG;
D
David Sterba 已提交
1722
		goto out_finish;
C
Christoph Hellwig 已提交
1723 1724
	}

1725
	new_size = round_down(new_size, fs_info->sectorsize);
C
Christoph Hellwig 已提交
1726 1727

	if (new_size > old_size) {
1728
		trans = btrfs_start_transaction(root, 0);
1729 1730
		if (IS_ERR(trans)) {
			ret = PTR_ERR(trans);
D
David Sterba 已提交
1731
			goto out_finish;
1732
		}
C
Christoph Hellwig 已提交
1733
		ret = btrfs_grow_device(trans, device, new_size);
1734
		btrfs_commit_transaction(trans);
1735
	} else if (new_size < old_size) {
C
Christoph Hellwig 已提交
1736
		ret = btrfs_shrink_device(device, new_size);
1737
	} /* equal, nothing need to do */
C
Christoph Hellwig 已提交
1738

1739 1740 1741 1742 1743
	if (ret == 0 && new_size != old_size)
		btrfs_info_in_rcu(fs_info,
			"resize device %s (devid %llu) from %llu to %llu",
			rcu_str_deref(device->name), device->devid,
			old_size, new_size);
D
David Sterba 已提交
1744 1745
out_finish:
	btrfs_exclop_finish(fs_info);
1746
out_free:
C
Christoph Hellwig 已提交
1747
	kfree(vol_args);
D
David Sterba 已提交
1748
out_drop:
1749
	mnt_drop_write_file(file);
C
Christoph Hellwig 已提交
1750 1751 1752
	return ret;
}

1753
static noinline int __btrfs_ioctl_snap_create(struct file *file,
1754
				struct user_namespace *mnt_userns,
1755
				const char *name, unsigned long fd, int subvol,
1756
				bool readonly,
1757
				struct btrfs_qgroup_inherit *inherit)
C
Christoph Hellwig 已提交
1758 1759
{
	int namelen;
1760
	int ret = 0;
C
Christoph Hellwig 已提交
1761

1762 1763 1764
	if (!S_ISDIR(file_inode(file)->i_mode))
		return -ENOTDIR;

1765 1766 1767 1768
	ret = mnt_want_write_file(file);
	if (ret)
		goto out;

S
Sage Weil 已提交
1769 1770
	namelen = strlen(name);
	if (strchr(name, '/')) {
C
Christoph Hellwig 已提交
1771
		ret = -EINVAL;
1772
		goto out_drop_write;
C
Christoph Hellwig 已提交
1773 1774
	}

1775 1776 1777
	if (name[0] == '.' &&
	   (namelen == 1 || (name[1] == '.' && namelen == 2))) {
		ret = -EEXIST;
1778
		goto out_drop_write;
1779 1780
	}

1781
	if (subvol) {
1782 1783
		ret = btrfs_mksubvol(&file->f_path, mnt_userns, name,
				     namelen, NULL, readonly, inherit);
1784
	} else {
1785
		struct fd src = fdget(fd);
1786
		struct inode *src_inode;
1787
		if (!src.file) {
1788
			ret = -EINVAL;
1789
			goto out_drop_write;
1790 1791
		}

A
Al Viro 已提交
1792 1793
		src_inode = file_inode(src.file);
		if (src_inode->i_sb != file_inode(file)->i_sb) {
J
Josef Bacik 已提交
1794
			btrfs_info(BTRFS_I(file_inode(file))->root->fs_info,
1795
				   "Snapshot src from another FS");
1796
			ret = -EXDEV;
1797
		} else if (!inode_owner_or_capable(mnt_userns, src_inode)) {
1798 1799 1800 1801 1802
			/*
			 * Subvolume creation is not restricted, but snapshots
			 * are limited to own subvolumes only
			 */
			ret = -EPERM;
1803
		} else {
1804 1805 1806 1807
			ret = btrfs_mksnapshot(&file->f_path, mnt_userns,
					       name, namelen,
					       BTRFS_I(src_inode)->root,
					       readonly, inherit);
1808
		}
1809
		fdput(src);
1810
	}
1811 1812
out_drop_write:
	mnt_drop_write_file(file);
C
Christoph Hellwig 已提交
1813
out:
S
Sage Weil 已提交
1814 1815 1816 1817
	return ret;
}

static noinline int btrfs_ioctl_snap_create(struct file *file,
1818
					    void __user *arg, int subvol)
S
Sage Weil 已提交
1819
{
1820
	struct btrfs_ioctl_vol_args *vol_args;
S
Sage Weil 已提交
1821 1822
	int ret;

1823 1824 1825
	if (!S_ISDIR(file_inode(file)->i_mode))
		return -ENOTDIR;

1826 1827 1828 1829
	vol_args = memdup_user(arg, sizeof(*vol_args));
	if (IS_ERR(vol_args))
		return PTR_ERR(vol_args);
	vol_args->name[BTRFS_PATH_NAME_MAX] = '\0';
S
Sage Weil 已提交
1830

1831 1832 1833
	ret = __btrfs_ioctl_snap_create(file, file_mnt_user_ns(file),
					vol_args->name, vol_args->fd, subvol,
					false, NULL);
1834

1835 1836 1837
	kfree(vol_args);
	return ret;
}
1838

1839 1840 1841 1842 1843
static noinline int btrfs_ioctl_snap_create_v2(struct file *file,
					       void __user *arg, int subvol)
{
	struct btrfs_ioctl_vol_args_v2 *vol_args;
	int ret;
L
Li Zefan 已提交
1844
	bool readonly = false;
A
Arne Jansen 已提交
1845
	struct btrfs_qgroup_inherit *inherit = NULL;
1846

1847 1848 1849
	if (!S_ISDIR(file_inode(file)->i_mode))
		return -ENOTDIR;

1850 1851 1852 1853
	vol_args = memdup_user(arg, sizeof(*vol_args));
	if (IS_ERR(vol_args))
		return PTR_ERR(vol_args);
	vol_args->name[BTRFS_SUBVOL_NAME_MAX] = '\0';
1854

1855
	if (vol_args->flags & ~BTRFS_SUBVOL_CREATE_ARGS_MASK) {
L
Li Zefan 已提交
1856
		ret = -EOPNOTSUPP;
D
Dan Carpenter 已提交
1857
		goto free_args;
S
Sage Weil 已提交
1858
	}
1859

L
Li Zefan 已提交
1860 1861
	if (vol_args->flags & BTRFS_SUBVOL_RDONLY)
		readonly = true;
A
Arne Jansen 已提交
1862
	if (vol_args->flags & BTRFS_SUBVOL_QGROUP_INHERIT) {
1863 1864 1865 1866
		u64 nums;

		if (vol_args->size < sizeof(*inherit) ||
		    vol_args->size > PAGE_SIZE) {
A
Arne Jansen 已提交
1867
			ret = -EINVAL;
D
Dan Carpenter 已提交
1868
			goto free_args;
A
Arne Jansen 已提交
1869 1870 1871 1872
		}
		inherit = memdup_user(vol_args->qgroup_inherit, vol_args->size);
		if (IS_ERR(inherit)) {
			ret = PTR_ERR(inherit);
D
Dan Carpenter 已提交
1873
			goto free_args;
A
Arne Jansen 已提交
1874
		}
1875 1876 1877 1878 1879 1880 1881 1882 1883 1884 1885 1886 1887 1888

		if (inherit->num_qgroups > PAGE_SIZE ||
		    inherit->num_ref_copies > PAGE_SIZE ||
		    inherit->num_excl_copies > PAGE_SIZE) {
			ret = -EINVAL;
			goto free_inherit;
		}

		nums = inherit->num_qgroups + 2 * inherit->num_ref_copies +
		       2 * inherit->num_excl_copies;
		if (vol_args->size != struct_size(inherit, qgroups, nums)) {
			ret = -EINVAL;
			goto free_inherit;
		}
A
Arne Jansen 已提交
1889
	}
1890

1891 1892 1893
	ret = __btrfs_ioctl_snap_create(file, file_mnt_user_ns(file),
					vol_args->name, vol_args->fd, subvol,
					readonly, inherit);
D
Dan Carpenter 已提交
1894 1895 1896
	if (ret)
		goto free_inherit;
free_inherit:
A
Arne Jansen 已提交
1897
	kfree(inherit);
D
Dan Carpenter 已提交
1898 1899
free_args:
	kfree(vol_args);
C
Christoph Hellwig 已提交
1900 1901 1902
	return ret;
}

1903 1904 1905
static noinline int btrfs_ioctl_subvol_getflags(struct file *file,
						void __user *arg)
{
A
Al Viro 已提交
1906
	struct inode *inode = file_inode(file);
1907
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
1908 1909 1910 1911
	struct btrfs_root *root = BTRFS_I(inode)->root;
	int ret = 0;
	u64 flags = 0;

1912
	if (btrfs_ino(BTRFS_I(inode)) != BTRFS_FIRST_FREE_OBJECTID)
1913 1914
		return -EINVAL;

1915
	down_read(&fs_info->subvol_sem);
1916 1917
	if (btrfs_root_readonly(root))
		flags |= BTRFS_SUBVOL_RDONLY;
1918
	up_read(&fs_info->subvol_sem);
1919 1920 1921 1922 1923 1924 1925 1926 1927 1928

	if (copy_to_user(arg, &flags, sizeof(flags)))
		ret = -EFAULT;

	return ret;
}

/*
 * Set or clear the read-only flag of the subvolume containing @file.
 *
 * Only BTRFS_SUBVOL_RDONLY may be passed in the u64 at @arg; any other bit
 * yields -EOPNOTSUPP.  The caller must own the inode (or be capable), and
 * @file must reference the root of a subvolume.  A RO -> RW transition is
 * refused with -EPERM while a send operation is using the subvolume.
 * The in-memory root flags are rolled back if committing the change fails.
 */
static noinline int btrfs_ioctl_subvol_setflags(struct file *file,
					      void __user *arg)
{
	struct inode *inode = file_inode(file);
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_trans_handle *trans;
	u64 root_flags;
	u64 flags;
	int ret = 0;

	if (!inode_owner_or_capable(file_mnt_user_ns(file), inode))
		return -EPERM;

	ret = mnt_want_write_file(file);
	if (ret)
		goto out;

	/* Flags live on the subvolume root item, so require the root inode. */
	if (btrfs_ino(BTRFS_I(inode)) != BTRFS_FIRST_FREE_OBJECTID) {
		ret = -EINVAL;
		goto out_drop_write;
	}

	if (copy_from_user(&flags, arg, sizeof(flags))) {
		ret = -EFAULT;
		goto out_drop_write;
	}

	if (flags & ~BTRFS_SUBVOL_RDONLY) {
		ret = -EOPNOTSUPP;
		goto out_drop_write;
	}

	down_write(&fs_info->subvol_sem);

	/* nothing to do */
	if (!!(flags & BTRFS_SUBVOL_RDONLY) == btrfs_root_readonly(root))
		goto out_drop_sem;

	/* Remember the old flags so we can roll back on failure. */
	root_flags = btrfs_root_flags(&root->root_item);
	if (flags & BTRFS_SUBVOL_RDONLY) {
		btrfs_set_root_flags(&root->root_item,
				     root_flags | BTRFS_ROOT_SUBVOL_RDONLY);
	} else {
		/*
		 * Block RO -> RW transition if this subvolume is involved in
		 * send
		 */
		spin_lock(&root->root_item_lock);
		if (root->send_in_progress == 0) {
			btrfs_set_root_flags(&root->root_item,
				     root_flags & ~BTRFS_ROOT_SUBVOL_RDONLY);
			spin_unlock(&root->root_item_lock);
		} else {
			spin_unlock(&root->root_item_lock);
			btrfs_warn(fs_info,
				   "Attempt to set subvolume %llu read-write during send",
				   root->root_key.objectid);
			ret = -EPERM;
			goto out_drop_sem;
		}
	}

	/* Persist the new flags in the root tree. */
	trans = btrfs_start_transaction(root, 1);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto out_reset;
	}

	ret = btrfs_update_root(trans, fs_info->tree_root,
				&root->root_key, &root->root_item);
	if (ret < 0) {
		btrfs_end_transaction(trans);
		goto out_reset;
	}

	ret = btrfs_commit_transaction(trans);

out_reset:
	/* Undo the in-memory flag change if the update did not make it to disk. */
	if (ret)
		btrfs_set_root_flags(&root->root_item, root_flags);
out_drop_sem:
	up_write(&fs_info->subvol_sem);
out_drop_write:
	mnt_drop_write_file(file);
out:
	return ret;
}

2015 2016 2017
static noinline int key_in_sk(struct btrfs_key *key,
			      struct btrfs_ioctl_search_key *sk)
{
2018 2019 2020 2021 2022 2023 2024 2025 2026
	struct btrfs_key test;
	int ret;

	test.objectid = sk->min_objectid;
	test.type = sk->min_type;
	test.offset = sk->min_offset;

	ret = btrfs_comp_cpu_keys(key, &test);
	if (ret < 0)
2027
		return 0;
2028 2029 2030 2031 2032 2033 2034

	test.objectid = sk->max_objectid;
	test.type = sk->max_type;
	test.offset = sk->max_offset;

	ret = btrfs_comp_cpu_keys(key, &test);
	if (ret > 0)
2035 2036 2037 2038
		return 0;
	return 1;
}

2039
/*
 * Copy all items of the current leaf that fall inside the search key range
 * into the user buffer @ubuf, each preceded by a btrfs_ioctl_search_header.
 *
 * @path:      holds the leaf to copy from (path->nodes[0] / path->slots[0])
 * @key:       current search position; advanced past the leaf on return
 * @sk:        the user-supplied search range and item-count limit
 * @buf_size:  size of @ubuf; updated to the required size on -EOVERFLOW
 * @ubuf:      destination buffer in user space
 * @sk_offset: running offset into @ubuf across calls
 * @num_found: running count of copied items across calls
 *
 * User copies are done with the _nofault variants because the caller holds
 * tree locks; on fault we return 0 so the caller can fault the pages in and
 * retry (see search_ioctl()).
 */
static noinline int copy_to_sk(struct btrfs_path *path,
			       struct btrfs_key *key,
			       struct btrfs_ioctl_search_key *sk,
			       size_t *buf_size,
			       char __user *ubuf,
			       unsigned long *sk_offset,
			       int *num_found)
{
	u64 found_transid;
	struct extent_buffer *leaf;
	struct btrfs_ioctl_search_header sh;
	struct btrfs_key test;
	unsigned long item_off;
	unsigned long item_len;
	int nritems;
	int i;
	int slot;
	int ret = 0;

	leaf = path->nodes[0];
	slot = path->slots[0];
	nritems = btrfs_header_nritems(leaf);

	/* Whole leaf is newer than the requested transid range: skip it. */
	if (btrfs_header_generation(leaf) > sk->max_transid) {
		i = nritems;
		goto advance_key;
	}
	found_transid = btrfs_header_generation(leaf);

	for (i = slot; i < nritems; i++) {
		item_off = btrfs_item_ptr_offset(leaf, i);
		item_len = btrfs_item_size_nr(leaf, i);

		btrfs_item_key_to_cpu(leaf, key, i);
		if (!key_in_sk(key, sk))
			continue;

		/* Item can never fit, even in an empty buffer. */
		if (sizeof(sh) + item_len > *buf_size) {
			if (*num_found) {
				ret = 1;
				goto out;
			}

			/*
			 * return one empty item back for v1, which does not
			 * handle -EOVERFLOW
			 */

			*buf_size = sizeof(sh) + item_len;
			item_len = 0;
			ret = -EOVERFLOW;
		}

		/* No room left in this buffer; stop and report progress. */
		if (sizeof(sh) + item_len + *sk_offset > *buf_size) {
			ret = 1;
			goto out;
		}

		sh.objectid = key->objectid;
		sh.offset = key->offset;
		sh.type = key->type;
		sh.len = item_len;
		sh.transid = found_transid;

		/*
		 * Copy search result header. If we fault then loop again so we
		 * can fault in the pages and -EFAULT there if there's a
		 * problem. Otherwise we'll fault and then copy the buffer in
		 * properly this next time through
		 */
		if (copy_to_user_nofault(ubuf + *sk_offset, &sh, sizeof(sh))) {
			ret = 0;
			goto out;
		}

		*sk_offset += sizeof(sh);

		if (item_len) {
			char __user *up = ubuf + *sk_offset;
			/*
			 * Copy the item, same behavior as above, but reset
			 * *sk_offset so we copy the full thing again.
			 */
			if (read_extent_buffer_to_user_nofault(leaf, up,
						item_off, item_len)) {
				ret = 0;
				*sk_offset -= sizeof(sh);
				goto out;
			}

			*sk_offset += item_len;
		}
		(*num_found)++;

		if (ret) /* -EOVERFLOW from above */
			goto out;

		if (*num_found >= sk->nr_items) {
			ret = 1;
			goto out;
		}
	}
advance_key:
	/* Advance @key just past this leaf, or signal completion (ret = 1). */
	ret = 0;
	test.objectid = sk->max_objectid;
	test.type = sk->max_type;
	test.offset = sk->max_offset;
	if (btrfs_comp_cpu_keys(key, &test) >= 0)
		ret = 1;
	else if (key->offset < (u64)-1)
		key->offset++;
	else if (key->type < (u8)-1) {
		key->offset = 0;
		key->type++;
	} else if (key->objectid < (u64)-1) {
		key->offset = 0;
		key->type = 0;
		key->objectid++;
	} else
		ret = 1;
out:
	/*
	 *  0: all items from this leaf copied, continue with next
	 *  1: * more items can be copied, but unused buffer is too small
	 *     * all items were found
	 *     Either way, it stops the loop which iterates to the next
	 *     leaf
	 *  -EOVERFLOW: item was too large for buffer
	 *  -EFAULT: could not copy extent buffer back to userspace
	 */
	return ret;
}

/*
 * Core of the tree search ioctls: walk the requested tree forward from the
 * minimum key in @sk and copy matching items into @ubuf via copy_to_sk().
 *
 * @inode:    inode the ioctl was invoked on; its root is used if
 *            sk->tree_id == 0
 * @sk:       search range; sk->nr_items is set to the number of items found
 * @buf_size: size of @ubuf; updated to the required size on -EOVERFLOW
 * @ubuf:     destination buffer in user space
 *
 * The user pages are faulted in before each search pass because copy_to_sk()
 * copies with the nofault variants while tree locks are held; a faulting
 * copy there returns 0 and we retry here after fault_in_pages_writeable().
 */
static noinline int search_ioctl(struct inode *inode,
				 struct btrfs_ioctl_search_key *sk,
				 size_t *buf_size,
				 char __user *ubuf)
{
	struct btrfs_fs_info *info = btrfs_sb(inode->i_sb);
	struct btrfs_root *root;
	struct btrfs_key key;
	struct btrfs_path *path;
	int ret;
	int num_found = 0;
	unsigned long sk_offset = 0;

	/* Buffer must hold at least one (empty) search header. */
	if (*buf_size < sizeof(struct btrfs_ioctl_search_header)) {
		*buf_size = sizeof(struct btrfs_ioctl_search_header);
		return -EOVERFLOW;
	}

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	if (sk->tree_id == 0) {
		/* search the root of the inode that was passed */
		root = btrfs_grab_root(BTRFS_I(inode)->root);
	} else {
		root = btrfs_get_fs_root(info, sk->tree_id, true);
		if (IS_ERR(root)) {
			btrfs_free_path(path);
			return PTR_ERR(root);
		}
	}

	key.objectid = sk->min_objectid;
	key.type = sk->min_type;
	key.offset = sk->min_offset;

	while (1) {
		/* Pre-fault the remaining user buffer (see copy_to_sk()). */
		ret = fault_in_pages_writeable(ubuf + sk_offset,
					       *buf_size - sk_offset);
		if (ret)
			break;

		ret = btrfs_search_forward(root, &key, path, sk->min_transid);
		if (ret != 0) {
			if (ret > 0)
				ret = 0;
			goto err;
		}
		ret = copy_to_sk(path, &key, sk, buf_size, ubuf,
				 &sk_offset, &num_found);
		btrfs_release_path(path);
		if (ret)
			break;

	}
	if (ret > 0)
		ret = 0;
err:
	sk->nr_items = num_found;
	btrfs_put_root(root);
	btrfs_free_path(path);
	return ret;
}

/*
 * v1 of the tree search ioctl: fixed-size result buffer embedded in
 * struct btrfs_ioctl_search_args.  Requires CAP_SYS_ADMIN.
 */
static noinline int btrfs_ioctl_tree_search(struct file *file,
					   void __user *argp)
{
	struct btrfs_ioctl_search_args __user *uargs = argp;
	struct btrfs_ioctl_search_key sk;
	size_t buf_size;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (copy_from_user(&sk, &uargs->key, sizeof(sk)))
		return -EFAULT;

	buf_size = sizeof(uargs->buf);
	ret = search_ioctl(file_inode(file), &sk, &buf_size, uargs->buf);

	/*
	 * The v1 interface reports overflow by returning a search header with
	 * len == 0 rather than an error, so squash -EOVERFLOW here.
	 */
	if (ret == -EOVERFLOW)
		ret = 0;

	if (ret == 0 && copy_to_user(&uargs->key, &sk, sizeof(sk)))
		ret = -EFAULT;

	return ret;
}

G
Gerhard Heift 已提交
2271 2272 2273 2274 2275 2276 2277 2278
/*
 * v2 of the tree search ioctl: caller-sized result buffer (capped at 16MiB).
 * On -EOVERFLOW the required buffer size is reported back through
 * uarg->buf_size.  Requires CAP_SYS_ADMIN.
 */
static noinline int btrfs_ioctl_tree_search_v2(struct file *file,
					       void __user *argp)
{
	struct btrfs_ioctl_search_args_v2 __user *uarg = argp;
	struct btrfs_ioctl_search_args_v2 args;
	const size_t buf_limit = SZ_16M;
	size_t buf_size;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	/* Copy the search key and requested buffer size from user space. */
	if (copy_from_user(&args, uarg, sizeof(args)))
		return -EFAULT;

	/* Cap the result buffer at 16MiB. */
	buf_size = args.buf_size;
	if (buf_size > buf_limit)
		buf_size = buf_limit;

	ret = search_ioctl(file_inode(file), &args.key, &buf_size,
			   (char __user *)(&uarg->buf[0]));
	if (ret == 0 && copy_to_user(&uarg->key, &args.key, sizeof(args.key)))
		ret = -EFAULT;
	else if (ret == -EOVERFLOW &&
		 copy_to_user(&uarg->buf_size, &buf_size, sizeof(buf_size)))
		ret = -EFAULT;

	return ret;
}

2307
/*
2308 2309 2310
 * Search INODE_REFs to identify path name of 'dirid' directory
 * in a 'tree_id' tree. and sets path name to 'name'.
 */
2311 2312 2313 2314 2315
static noinline int btrfs_search_path_in_tree(struct btrfs_fs_info *info,
				u64 tree_id, u64 dirid, char *name)
{
	struct btrfs_root *root;
	struct btrfs_key key;
2316
	char *ptr;
2317 2318 2319 2320 2321 2322 2323 2324 2325 2326 2327 2328 2329 2330 2331 2332 2333
	int ret = -1;
	int slot;
	int len;
	int total_len = 0;
	struct btrfs_inode_ref *iref;
	struct extent_buffer *l;
	struct btrfs_path *path;

	if (dirid == BTRFS_FIRST_FREE_OBJECTID) {
		name[0]='\0';
		return 0;
	}

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

2334
	ptr = &name[BTRFS_INO_LOOKUP_PATH_MAX - 1];
2335

D
David Sterba 已提交
2336
	root = btrfs_get_fs_root(info, tree_id, true);
2337
	if (IS_ERR(root)) {
2338
		ret = PTR_ERR(root);
2339 2340 2341
		root = NULL;
		goto out;
	}
2342 2343 2344

	key.objectid = dirid;
	key.type = BTRFS_INODE_REF_KEY;
2345
	key.offset = (u64)-1;
2346

2347
	while (1) {
2348
		ret = btrfs_search_backwards(root, &key, path);
2349 2350
		if (ret < 0)
			goto out;
2351
		else if (ret > 0) {
2352 2353
			ret = -ENOENT;
			goto out;
2354
		}
2355 2356 2357 2358 2359 2360 2361 2362

		l = path->nodes[0];
		slot = path->slots[0];

		iref = btrfs_item_ptr(l, slot, struct btrfs_inode_ref);
		len = btrfs_inode_ref_name_len(l, iref);
		ptr -= len + 1;
		total_len += len + 1;
2363 2364
		if (ptr < name) {
			ret = -ENAMETOOLONG;
2365
			goto out;
2366
		}
2367 2368

		*(ptr + len) = '/';
2369
		read_extent_buffer(l, ptr, (unsigned long)(iref + 1), len);
2370 2371 2372 2373

		if (key.offset == BTRFS_FIRST_FREE_OBJECTID)
			break;

2374
		btrfs_release_path(path);
2375
		key.objectid = key.offset;
2376
		key.offset = (u64)-1;
2377 2378
		dirid = key.objectid;
	}
2379
	memmove(name, ptr, total_len);
2380
	name[total_len] = '\0';
2381 2382
	ret = 0;
out:
2383
	btrfs_put_root(root);
2384
	btrfs_free_path(path);
2385 2386 2387
	return ret;
}

2388 2389
/*
 * Unprivileged variant of the path construction used by the
 * INO_LOOKUP_USER ioctl (see btrfs_ioctl_ino_lookup_user()).
 *
 * Builds the path of args->dirid relative to @inode into args->path,
 * checking MAY_READ | MAY_EXEC (via @mnt_userns) on every traversed
 * directory, and fills args->name with the bottom subvolume's name from
 * its ROOT_REF item.  Returns 0 on success or a negative errno; -EACCES
 * when a permission check fails or the walk escapes @inode's subtree.
 */
static int btrfs_search_path_in_tree_user(struct user_namespace *mnt_userns,
				struct inode *inode,
				struct btrfs_ioctl_ino_lookup_user_args *args)
{
	struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
	struct super_block *sb = inode->i_sb;
	struct btrfs_key upper_limit = BTRFS_I(inode)->location;
	u64 treeid = BTRFS_I(inode)->root->root_key.objectid;
	u64 dirid = args->dirid;
	unsigned long item_off;
	unsigned long item_len;
	struct btrfs_inode_ref *iref;
	struct btrfs_root_ref *rref;
	struct btrfs_root *root = NULL;
	struct btrfs_path *path;
	struct btrfs_key key, key2;
	struct extent_buffer *leaf;
	struct inode *temp_inode;
	char *ptr;
	int slot;
	int len;
	int total_len = 0;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/*
	 * If the bottom subvolume does not exist directly under upper_limit,
	 * construct the path in from the bottom up.
	 */
	if (dirid != upper_limit.objectid) {
		/* Build the path backwards from the end of the buffer. */
		ptr = &args->path[BTRFS_INO_LOOKUP_USER_PATH_MAX - 1];

		root = btrfs_get_fs_root(fs_info, treeid, true);
		if (IS_ERR(root)) {
			ret = PTR_ERR(root);
			goto out;
		}

		key.objectid = dirid;
		key.type = BTRFS_INODE_REF_KEY;
		key.offset = (u64)-1;
		/* One INODE_REF (one path component) per iteration. */
		while (1) {
			ret = btrfs_search_backwards(root, &key, path);
			if (ret < 0)
				goto out_put;
			else if (ret > 0) {
				ret = -ENOENT;
				goto out_put;
			}

			leaf = path->nodes[0];
			slot = path->slots[0];

			iref = btrfs_item_ptr(leaf, slot, struct btrfs_inode_ref);
			len = btrfs_inode_ref_name_len(leaf, iref);
			ptr -= len + 1;
			total_len += len + 1;
			if (ptr < args->path) {
				ret = -ENAMETOOLONG;
				goto out_put;
			}

			*(ptr + len) = '/';
			read_extent_buffer(leaf, ptr,
					(unsigned long)(iref + 1), len);

			/* Check the read+exec permission of this directory */
			ret = btrfs_previous_item(root, path, dirid,
						  BTRFS_INODE_ITEM_KEY);
			if (ret < 0) {
				goto out_put;
			} else if (ret > 0) {
				ret = -ENOENT;
				goto out_put;
			}

			leaf = path->nodes[0];
			slot = path->slots[0];
			btrfs_item_key_to_cpu(leaf, &key2, slot);
			if (key2.objectid != dirid) {
				ret = -ENOENT;
				goto out_put;
			}

			temp_inode = btrfs_iget(sb, key2.objectid, root);
			if (IS_ERR(temp_inode)) {
				ret = PTR_ERR(temp_inode);
				goto out_put;
			}
			ret = inode_permission(mnt_userns, temp_inode,
					       MAY_READ | MAY_EXEC);
			iput(temp_inode);
			if (ret) {
				ret = -EACCES;
				goto out_put;
			}

			/* Reached the inode the ioctl was called on: done. */
			if (key.offset == upper_limit.objectid)
				break;
			/* Hit the subvolume root without passing upper_limit. */
			if (key.objectid == BTRFS_FIRST_FREE_OBJECTID) {
				ret = -EACCES;
				goto out_put;
			}

			btrfs_release_path(path);
			key.objectid = key.offset;
			key.offset = (u64)-1;
			dirid = key.objectid;
		}

		/* Move the backwards-built path to the start of the buffer. */
		memmove(args->path, ptr, total_len);
		args->path[total_len] = '\0';
		btrfs_put_root(root);
		root = NULL;
		btrfs_release_path(path);
	}

	/* Get the bottom subvolume's name from ROOT_REF */
	key.objectid = treeid;
	key.type = BTRFS_ROOT_REF_KEY;
	key.offset = args->treeid;
	ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0);
	if (ret < 0) {
		goto out;
	} else if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	leaf = path->nodes[0];
	slot = path->slots[0];
	btrfs_item_key_to_cpu(leaf, &key, slot);

	item_off = btrfs_item_ptr_offset(leaf, slot);
	item_len = btrfs_item_size_nr(leaf, slot);
	/* Check if dirid in ROOT_REF corresponds to passed dirid */
	rref = btrfs_item_ptr(leaf, slot, struct btrfs_root_ref);
	if (args->dirid != btrfs_root_ref_dirid(leaf, rref)) {
		ret = -EINVAL;
		goto out;
	}

	/* Copy subvolume's name */
	item_off += sizeof(struct btrfs_root_ref);
	item_len -= sizeof(struct btrfs_root_ref);
	read_extent_buffer(leaf, args->name, item_off, item_len);
	args->name[item_len] = 0;

out_put:
	btrfs_put_root(root);
out:
	btrfs_free_path(path);
	return ret;
}

2546 2547 2548
/*
 * INO_LOOKUP ioctl: resolve the path of an inode inside a tree.
 *
 * With objectid == BTRFS_FIRST_FREE_OBJECTID this acts as an unprivileged
 * query for the containing subvolume's root id (the path comes back empty);
 * any other lookup requires CAP_SYS_ADMIN.
 */
static noinline int btrfs_ioctl_ino_lookup(struct file *file,
					   void __user *argp)
{
	struct btrfs_ioctl_ino_lookup_args *args;
	struct inode *inode = file_inode(file);
	int ret = 0;

	args = memdup_user(argp, sizeof(*args));
	if (IS_ERR(args))
		return PTR_ERR(args);

	/*
	 * Unprivileged query to obtain the containing subvolume root id. The
	 * path is reset so it's consistent with btrfs_search_path_in_tree.
	 */
	if (args->treeid == 0)
		args->treeid = BTRFS_I(inode)->root->root_key.objectid;

	if (args->objectid == BTRFS_FIRST_FREE_OBJECTID) {
		args->name[0] = 0;
		goto out;
	}

	if (!capable(CAP_SYS_ADMIN)) {
		ret = -EPERM;
		goto out;
	}

	ret = btrfs_search_path_in_tree(BTRFS_I(inode)->root->fs_info,
					args->treeid, args->objectid,
					args->name);

out:
	if (ret == 0 && copy_to_user(argp, args, sizeof(*args)))
		ret = -EFAULT;

	kfree(args);
	return ret;
}

2588 2589 2590 2591 2592 2593 2594 2595 2596 2597 2598 2599 2600 2601 2602 2603 2604 2605 2606 2607 2608 2609 2610 2611 2612 2613 2614 2615 2616 2617 2618 2619 2620 2621
/*
 * Version of ino_lookup ioctl (unprivileged)
 *
 * The main differences from ino_lookup ioctl are:
 *
 *   1. Read + Exec permission will be checked using inode_permission() during
 *      path construction. -EACCES will be returned in case of failure.
 *   2. Path construction will be stopped at the inode number which corresponds
 *      to the fd with which this ioctl is called. If constructed path does not
 *      exist under fd's inode, -EACCES will be returned.
 *   3. The name of bottom subvolume is also searched and filled.
 */
static int btrfs_ioctl_ino_lookup_user(struct file *file, void __user *argp)
{
	struct inode *inode = file_inode(file);
	struct btrfs_ioctl_ino_lookup_user_args *args;
	int ret;

	args = memdup_user(argp, sizeof(*args));
	if (IS_ERR(args))
		return PTR_ERR(args);

	if (args->dirid == BTRFS_FIRST_FREE_OBJECTID &&
	    BTRFS_I(inode)->location.objectid != BTRFS_FIRST_FREE_OBJECTID) {
		/*
		 * The subvolume does not exist under fd with which this is
		 * called
		 */
		kfree(args);
		return -EACCES;
	}

	ret = btrfs_search_path_in_tree_user(file_mnt_user_ns(file), inode, args);
	if (ret == 0 && copy_to_user(argp, args, sizeof(*args)))
		ret = -EFAULT;

	kfree(args);
	return ret;
}

2631 2632 2633 2634 2635 2636 2637 2638 2639 2640 2641 2642 2643 2644 2645 2646 2647 2648 2649 2650 2651 2652 2653 2654 2655 2656 2657 2658 2659 2660 2661 2662
/*
 * Get the subvolume information in BTRFS_ROOT_ITEM and BTRFS_ROOT_BACKREF.
 *
 * Fills a btrfs_ioctl_get_subvol_info_args with the root item fields
 * (generation, flags, uuids, transids, timestamps) of the subvolume
 * containing @file; for non-toplevel subvolumes the parent id, dirid and
 * name are additionally read from the ROOT_BACKREF item.  The result is
 * copied to @argp.
 */
static int btrfs_ioctl_get_subvol_info(struct file *file, void __user *argp)
{
	struct btrfs_ioctl_get_subvol_info_args *subvol_info;
	struct btrfs_fs_info *fs_info;
	struct btrfs_root *root;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_root_item *root_item;
	struct btrfs_root_ref *rref;
	struct extent_buffer *leaf;
	unsigned long item_off;
	unsigned long item_len;
	struct inode *inode;
	int slot;
	int ret = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	subvol_info = kzalloc(sizeof(*subvol_info), GFP_KERNEL);
	if (!subvol_info) {
		btrfs_free_path(path);
		return -ENOMEM;
	}

	inode = file_inode(file);
	fs_info = BTRFS_I(inode)->root->fs_info;

	/* Get root_item of inode's subvolume */
	key.objectid = BTRFS_I(inode)->root->root_key.objectid;
	root = btrfs_get_fs_root(fs_info, key.objectid, true);
	if (IS_ERR(root)) {
		ret = PTR_ERR(root);
		goto out_free;
	}
	root_item = &root->root_item;

	subvol_info->treeid = key.objectid;

	subvol_info->generation = btrfs_root_generation(root_item);
	subvol_info->flags = btrfs_root_flags(root_item);

	memcpy(subvol_info->uuid, root_item->uuid, BTRFS_UUID_SIZE);
	memcpy(subvol_info->parent_uuid, root_item->parent_uuid,
						    BTRFS_UUID_SIZE);
	memcpy(subvol_info->received_uuid, root_item->received_uuid,
						    BTRFS_UUID_SIZE);

	subvol_info->ctransid = btrfs_root_ctransid(root_item);
	subvol_info->ctime.sec = btrfs_stack_timespec_sec(&root_item->ctime);
	subvol_info->ctime.nsec = btrfs_stack_timespec_nsec(&root_item->ctime);

	subvol_info->otransid = btrfs_root_otransid(root_item);
	subvol_info->otime.sec = btrfs_stack_timespec_sec(&root_item->otime);
	subvol_info->otime.nsec = btrfs_stack_timespec_nsec(&root_item->otime);

	subvol_info->stransid = btrfs_root_stransid(root_item);
	subvol_info->stime.sec = btrfs_stack_timespec_sec(&root_item->stime);
	subvol_info->stime.nsec = btrfs_stack_timespec_nsec(&root_item->stime);

	subvol_info->rtransid = btrfs_root_rtransid(root_item);
	subvol_info->rtime.sec = btrfs_stack_timespec_sec(&root_item->rtime);
	subvol_info->rtime.nsec = btrfs_stack_timespec_nsec(&root_item->rtime);

	/* The top-level subvolume has no backref in the root tree. */
	if (key.objectid != BTRFS_FS_TREE_OBJECTID) {
		/* Search root tree for ROOT_BACKREF of this subvolume */
		key.type = BTRFS_ROOT_BACKREF_KEY;
		key.offset = 0;
		ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0);
		if (ret < 0) {
			goto out;
		} else if (path->slots[0] >=
			   btrfs_header_nritems(path->nodes[0])) {
			ret = btrfs_next_leaf(fs_info->tree_root, path);
			if (ret < 0) {
				goto out;
			} else if (ret > 0) {
				/* A backref must exist for this subvolume. */
				ret = -EUCLEAN;
				goto out;
			}
		}

		leaf = path->nodes[0];
		slot = path->slots[0];
		btrfs_item_key_to_cpu(leaf, &key, slot);
		if (key.objectid == subvol_info->treeid &&
		    key.type == BTRFS_ROOT_BACKREF_KEY) {
			subvol_info->parent_id = key.offset;

			rref = btrfs_item_ptr(leaf, slot, struct btrfs_root_ref);
			subvol_info->dirid = btrfs_root_ref_dirid(leaf, rref);

			/* Subvolume name follows the root_ref structure. */
			item_off = btrfs_item_ptr_offset(leaf, slot)
					+ sizeof(struct btrfs_root_ref);
			item_len = btrfs_item_size_nr(leaf, slot)
					- sizeof(struct btrfs_root_ref);
			read_extent_buffer(leaf, subvol_info->name,
					   item_off, item_len);
		} else {
			ret = -ENOENT;
			goto out;
		}
	}

	if (copy_to_user(argp, subvol_info, sizeof(*subvol_info)))
		ret = -EFAULT;

out:
	btrfs_put_root(root);
out_free:
	btrfs_free_path(path);
	kfree(subvol_info);
	return ret;
}

2748 2749 2750 2751 2752 2753 2754 2755 2756 2757 2758 2759 2760 2761 2762 2763 2764 2765 2766 2767 2768 2769 2770 2771 2772 2773 2774 2775 2776 2777 2778 2779 2780 2781 2782 2783 2784 2785 2786 2787 2788 2789 2790 2791 2792 2793 2794 2795 2796 2797 2798 2799 2800 2801 2802 2803 2804 2805 2806 2807 2808 2809 2810 2811 2812 2813 2814 2815 2816 2817 2818 2819 2820 2821 2822 2823 2824 2825 2826 2827 2828 2829 2830 2831 2832 2833 2834 2835 2836 2837 2838 2839 2840 2841 2842 2843 2844
/*
 * Return ROOT_REF information of the subvolume containing this inode
 * except the subvolume name.
 */
static int btrfs_ioctl_get_subvol_rootref(struct file *file, void __user *argp)
{
	struct btrfs_ioctl_get_subvol_rootref_args *rootrefs;
	struct btrfs_root_ref *rref;
	struct btrfs_root *root;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct extent_buffer *leaf;
	struct inode *inode;
	u64 objectid;
	int slot;
	int ret;
	u8 found;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	rootrefs = memdup_user(argp, sizeof(*rootrefs));
	if (IS_ERR(rootrefs)) {
		btrfs_free_path(path);
		return PTR_ERR(rootrefs);
	}

	inode = file_inode(file);
	root = BTRFS_I(inode)->root->fs_info->tree_root;
	objectid = BTRFS_I(inode)->root->root_key.objectid;

	/* Start at the caller-provided lower bound for pagination. */
	key.objectid = objectid;
	key.type = BTRFS_ROOT_REF_KEY;
	key.offset = rootrefs->min_treeid;
	found = 0;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0) {
		goto out;
	} else if (path->slots[0] >=
		   btrfs_header_nritems(path->nodes[0])) {
		ret = btrfs_next_leaf(root, path);
		if (ret < 0) {
			goto out;
		} else if (ret > 0) {
			/* Running off the end of the tree here is unexpected. */
			ret = -EUCLEAN;
			goto out;
		}
	}
	/* Collect ROOT_REF items until the buffer is full or keys run out. */
	while (1) {
		leaf = path->nodes[0];
		slot = path->slots[0];

		btrfs_item_key_to_cpu(leaf, &key, slot);
		if (key.objectid != objectid || key.type != BTRFS_ROOT_REF_KEY) {
			ret = 0;
			goto out;
		}

		/* Buffer full; caller can resume from min_treeid (below). */
		if (found == BTRFS_MAX_ROOTREF_BUFFER_NUM) {
			ret = -EOVERFLOW;
			goto out;
		}

		rref = btrfs_item_ptr(leaf, slot, struct btrfs_root_ref);
		rootrefs->rootref[found].treeid = key.offset;
		rootrefs->rootref[found].dirid =
				  btrfs_root_ref_dirid(leaf, rref);
		found++;

		ret = btrfs_next_item(root, path);
		if (ret < 0) {
			goto out;
		} else if (ret > 0) {
			ret = -EUCLEAN;
			goto out;
		}
	}

out:
	/* -EOVERFLOW still reports partial results plus a resume point. */
	if (!ret || ret == -EOVERFLOW) {
		rootrefs->num_items = found;
		/* update min_treeid for next search */
		if (found)
			rootrefs->min_treeid =
				rootrefs->rootref[found - 1].treeid + 1;
		if (copy_to_user(argp, rootrefs, sizeof(*rootrefs)))
			ret = -EFAULT;
	}

	kfree(rootrefs);
	btrfs_free_path(path);

	return ret;
}

2845
/*
 * Delete a subvolume or snapshot, implementing both the v1 ioctl (by name,
 * @destroy_v2 == false) and the v2 ioctl (by name or, with
 * BTRFS_SUBVOL_SPEC_BY_ID, by subvolume id).
 *
 * For deletion by id the subvolume may live outside the directory the fd
 * refers to, so a new parent dentry is resolved; on idmapped mounts that
 * case is restricted to immediate ancestors of the fd's inode.  Unprivileged
 * callers are only allowed with the user_subvol_rm_allowed mount option and
 * the usual write/exec and rmdir-style permission checks.
 */
static noinline int btrfs_ioctl_snap_destroy(struct file *file,
					     void __user *arg,
					     bool destroy_v2)
{
	struct dentry *parent = file->f_path.dentry;
	struct btrfs_fs_info *fs_info = btrfs_sb(parent->d_sb);
	struct dentry *dentry;
	struct inode *dir = d_inode(parent);
	struct inode *inode;
	struct btrfs_root *root = BTRFS_I(dir)->root;
	struct btrfs_root *dest = NULL;
	struct btrfs_ioctl_vol_args *vol_args = NULL;
	struct btrfs_ioctl_vol_args_v2 *vol_args2 = NULL;
	struct user_namespace *mnt_userns = file_mnt_user_ns(file);
	char *subvol_name, *subvol_name_ptr = NULL;
	int subvol_namelen;
	int err = 0;
	bool destroy_parent = false;

	if (destroy_v2) {
		vol_args2 = memdup_user(arg, sizeof(*vol_args2));
		if (IS_ERR(vol_args2))
			return PTR_ERR(vol_args2);

		if (vol_args2->flags & ~BTRFS_SUBVOL_DELETE_ARGS_MASK) {
			err = -EOPNOTSUPP;
			goto out;
		}

		/*
		 * If SPEC_BY_ID is not set, we are looking for the subvolume by
		 * name, same as v1 currently does.
		 */
		if (!(vol_args2->flags & BTRFS_SUBVOL_SPEC_BY_ID)) {
			/* Force nul termination of the user-supplied name. */
			vol_args2->name[BTRFS_SUBVOL_NAME_MAX] = 0;
			subvol_name = vol_args2->name;

			err = mnt_want_write_file(file);
			if (err)
				goto out;
		} else {
			struct inode *old_dir;

			if (vol_args2->subvolid < BTRFS_FIRST_FREE_OBJECTID) {
				err = -EINVAL;
				goto out;
			}

			err = mnt_want_write_file(file);
			if (err)
				goto out;

			dentry = btrfs_get_dentry(fs_info->sb,
					BTRFS_FIRST_FREE_OBJECTID,
					vol_args2->subvolid, 0, 0);
			if (IS_ERR(dentry)) {
				err = PTR_ERR(dentry);
				goto out_drop_write;
			}

			/*
			 * Change the default parent since the subvolume being
			 * deleted can be outside of the current mount point.
			 */
			parent = btrfs_get_parent(dentry);

			/*
			 * At this point dentry->d_name can point to '/' if the
			 * subvolume we want to destroy is outside of the
			 * current mount point, so we need to release the
			 * current dentry and execute the lookup to return a new
			 * one with ->d_name pointing to the
			 * <mount point>/subvol_name.
			 */
			dput(dentry);
			if (IS_ERR(parent)) {
				err = PTR_ERR(parent);
				goto out_drop_write;
			}
			old_dir = dir;
			dir = d_inode(parent);

			/*
			 * If v2 was used with SPEC_BY_ID, a new parent was
			 * allocated since the subvolume can be outside of the
			 * current mount point. Later on we need to release this
			 * new parent dentry.
			 */
			destroy_parent = true;

			/*
			 * On idmapped mounts, deletion via subvolid is
			 * restricted to subvolumes that are immediate
			 * ancestors of the inode referenced by the file
			 * descriptor in the ioctl. Otherwise the idmapping
			 * could potentially be abused to delete subvolumes
			 * anywhere in the filesystem the user wouldn't be able
			 * to delete without an idmapped mount.
			 */
			if (old_dir != dir && mnt_userns != &init_user_ns) {
				err = -EOPNOTSUPP;
				goto free_parent;
			}

			subvol_name_ptr = btrfs_get_subvol_name_from_objectid(
						fs_info, vol_args2->subvolid);
			if (IS_ERR(subvol_name_ptr)) {
				err = PTR_ERR(subvol_name_ptr);
				goto free_parent;
			}
			/* subvol_name_ptr is already nul terminated */
			subvol_name = (char *)kbasename(subvol_name_ptr);
		}
	} else {
		vol_args = memdup_user(arg, sizeof(*vol_args));
		if (IS_ERR(vol_args))
			return PTR_ERR(vol_args);

		/* Force nul termination of the user-supplied name. */
		vol_args->name[BTRFS_PATH_NAME_MAX] = 0;
		subvol_name = vol_args->name;

		err = mnt_want_write_file(file);
		if (err)
			goto out;
	}

	subvol_namelen = strlen(subvol_name);

	/* Reject path separators and ".." to keep the lookup confined. */
	if (strchr(subvol_name, '/') ||
	    strncmp(subvol_name, "..", subvol_namelen) == 0) {
		err = -EINVAL;
		goto free_subvol_name;
	}

	if (!S_ISDIR(dir->i_mode)) {
		err = -ENOTDIR;
		goto free_subvol_name;
	}

	err = down_write_killable_nested(&dir->i_rwsem, I_MUTEX_PARENT);
	if (err == -EINTR)
		goto free_subvol_name;
	dentry = lookup_one(mnt_userns, subvol_name, parent, subvol_namelen);
	if (IS_ERR(dentry)) {
		err = PTR_ERR(dentry);
		goto out_unlock_dir;
	}

	if (d_really_is_negative(dentry)) {
		err = -ENOENT;
		goto out_dput;
	}

	inode = d_inode(dentry);
	dest = BTRFS_I(inode)->root;
	if (!capable(CAP_SYS_ADMIN)) {
		/*
		 * Regular user.  Only allow this with a special mount
		 * option, when the user has write+exec access to the
		 * subvol root, and when rmdir(2) would have been
		 * allowed.
		 *
		 * Note that this does _not_ check that the subvol is
		 * empty or doesn't contain data that we wouldn't
		 * otherwise be able to delete.
		 *
		 * Users who want to delete empty subvols should try
		 * rmdir(2).
		 */
		err = -EPERM;
		if (!btrfs_test_opt(fs_info, USER_SUBVOL_RM_ALLOWED))
			goto out_dput;

		/*
		 * Do not allow deletion if the parent dir is the same
		 * as the dir to be deleted.  That means the ioctl
		 * must be called on the dentry referencing the root
		 * of the subvol, not a random directory contained
		 * within it.
		 */
		err = -EINVAL;
		if (root == dest)
			goto out_dput;

		err = inode_permission(mnt_userns, inode, MAY_WRITE | MAY_EXEC);
		if (err)
			goto out_dput;
	}

	/* check if subvolume may be deleted by a user */
	err = btrfs_may_delete(mnt_userns, dir, dentry, 1);
	if (err)
		goto out_dput;

	/* The target must itself be the root of a subvolume. */
	if (btrfs_ino(BTRFS_I(inode)) != BTRFS_FIRST_FREE_OBJECTID) {
		err = -EINVAL;
		goto out_dput;
	}

	btrfs_inode_lock(inode, 0);
	err = btrfs_delete_subvolume(dir, dentry);
	btrfs_inode_unlock(inode, 0);
	if (!err) {
		fsnotify_rmdir(dir, dentry);
		d_delete(dentry);
	}

out_dput:
	dput(dentry);
out_unlock_dir:
	btrfs_inode_unlock(dir, 0);
free_subvol_name:
	kfree(subvol_name_ptr);
free_parent:
	if (destroy_parent)
		dput(parent);
out_drop_write:
	mnt_drop_write_file(file);
out:
	kfree(vol_args2);
	kfree(vol_args);
	return err;
}

C
Chris Mason 已提交
3069
/*
 * Defragment a regular file, or — for a directory — the metadata tree of
 * the containing root.  Root (directory) defrag requires CAP_SYS_ADMIN;
 * for regular files a non-admin caller must have write permission on the
 * inode itself (deliberately not on the file descriptor, see below).
 */
static int btrfs_ioctl_defrag(struct file *file, void __user *argp)
{
	struct inode *inode = file_inode(file);
	struct btrfs_root *root = BTRFS_I(inode)->root;
	/* Zero-initialized: all fields default to 0 unless user args override. */
	struct btrfs_ioctl_defrag_range_args range = {0};
	int ret;

	ret = mnt_want_write_file(file);
	if (ret)
		return ret;

	if (btrfs_root_readonly(root)) {
		ret = -EROFS;
		goto out;
	}

	switch (inode->i_mode & S_IFMT) {
	case S_IFDIR:
		/* Directory: defragment the root's metadata, admin only. */
		if (!capable(CAP_SYS_ADMIN)) {
			ret = -EPERM;
			goto out;
		}
		ret = btrfs_defrag_root(root);
		break;
	case S_IFREG:
		/*
		 * Note that this does not check the file descriptor for write
		 * access. This prevents defragmenting executables that are
		 * running and allows defrag on files open in read-only mode.
		 */
		if (!capable(CAP_SYS_ADMIN) &&
		    inode_permission(&init_user_ns, inode, MAY_WRITE)) {
			ret = -EPERM;
			goto out;
		}

		if (argp) {
			if (copy_from_user(&range, argp, sizeof(range))) {
				ret = -EFAULT;
				goto out;
			}
			/* compression requires us to start the IO */
			if ((range.flags & BTRFS_DEFRAG_RANGE_COMPRESS)) {
				range.flags |= BTRFS_DEFRAG_RANGE_START_IO;
				/* Force every extent to be rewritten. */
				range.extent_thresh = (u32)-1;
			}
		} else {
			/*
			 * No args given: defragment the whole file.  The
			 * remaining fields stay zero from the initializer.
			 */
			range.len = (u64)-1;
		}
		ret = btrfs_defrag_file(file_inode(file), &file->f_ra,
					&range, BTRFS_OLDEST_GENERATION, 0);
		/* Positive return is "sectors defragged"; callers expect 0. */
		if (ret > 0)
			ret = 0;
		break;
	default:
		ret = -EINVAL;
	}
out:
	mnt_drop_write_file(file);
	return ret;
}

3132
/*
 * Add a new device to the filesystem.  Takes the DEV_ADD exclusive
 * operation for the whole duration so it cannot race with balance,
 * device removal or replace.
 */
static long btrfs_ioctl_add_dev(struct btrfs_fs_info *fs_info, void __user *arg)
{
	struct btrfs_ioctl_vol_args *vol_args;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (!btrfs_exclop_start(fs_info, BTRFS_EXCLOP_DEV_ADD))
		return BTRFS_ERROR_DEV_EXCL_RUN_IN_PROGRESS;

	vol_args = memdup_user(arg, sizeof(*vol_args));
	if (IS_ERR(vol_args)) {
		ret = PTR_ERR(vol_args);
		goto out;
	}

	/* Userspace may not have NUL-terminated the path; enforce it. */
	vol_args->name[BTRFS_PATH_NAME_MAX] = '\0';
	ret = btrfs_init_new_device(fs_info, vol_args->name);

	if (!ret)
		btrfs_info(fs_info, "disk added %s", vol_args->name);

	kfree(vol_args);
out:
	btrfs_exclop_finish(fs_info);
	return ret;
}

3161
/*
 * Remove a device (v2 ioctl).  The device may be specified either by
 * devid (BTRFS_DEVICE_SPEC_BY_ID) or by path; the magic path "cancel"
 * requests cancellation of a running device removal instead.
 */
static long btrfs_ioctl_rm_dev_v2(struct file *file, void __user *arg)
{
	struct inode *inode = file_inode(file);
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_ioctl_vol_args_v2 *vol_args;
	struct block_device *bdev = NULL;
	fmode_t mode;
	int ret;
	bool cancel = false;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	ret = mnt_want_write_file(file);
	if (ret)
		return ret;

	vol_args = memdup_user(arg, sizeof(*vol_args));
	if (IS_ERR(vol_args)) {
		ret = PTR_ERR(vol_args);
		goto err_drop;
	}

	/* Reject flags we do not know about. */
	if (vol_args->flags & ~BTRFS_DEVICE_REMOVE_ARGS_MASK) {
		ret = -EOPNOTSUPP;
		goto out;
	}
	vol_args->name[BTRFS_SUBVOL_NAME_MAX] = '\0';
	/* "cancel" only applies when the device is given by path, not id. */
	if (!(vol_args->flags & BTRFS_DEVICE_SPEC_BY_ID) &&
	    strcmp("cancel", vol_args->name) == 0)
		cancel = true;

	ret = exclop_start_or_cancel_reloc(fs_info, BTRFS_EXCLOP_DEV_REMOVE,
					   cancel);
	if (ret)
		goto out;
	/* Exclusive operation is now claimed */

	if (vol_args->flags & BTRFS_DEVICE_SPEC_BY_ID)
		ret = btrfs_rm_device(fs_info, NULL, vol_args->devid, &bdev, &mode);
	else
		ret = btrfs_rm_device(fs_info, vol_args->name, 0, &bdev, &mode);

	btrfs_exclop_finish(fs_info);

	if (!ret) {
		if (vol_args->flags & BTRFS_DEVICE_SPEC_BY_ID)
			btrfs_info(fs_info, "device deleted: id %llu",
					vol_args->devid);
		else
			btrfs_info(fs_info, "device deleted: %s",
					vol_args->name);
	}
out:
	kfree(vol_args);
err_drop:
	mnt_drop_write_file(file);
	/* bdev is closed last, after the write ref on the mount is dropped. */
	if (bdev)
		blkdev_put(bdev, mode);
	return ret;
}

3223
/*
 * Remove a device (v1 ioctl).  The device is identified by path only;
 * the magic path "cancel" requests cancellation of a running removal.
 */
static long btrfs_ioctl_rm_dev(struct file *file, void __user *arg)
{
	struct inode *inode = file_inode(file);
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_ioctl_vol_args *vol_args;
	struct block_device *bdev = NULL;
	fmode_t mode;
	int ret;
	bool cancel;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	ret = mnt_want_write_file(file);
	if (ret)
		return ret;

	vol_args = memdup_user(arg, sizeof(*vol_args));
	if (IS_ERR(vol_args)) {
		ret = PTR_ERR(vol_args);
		goto out_drop_write;
	}
	/* Userspace may not have NUL-terminated the path; enforce it. */
	vol_args->name[BTRFS_PATH_NAME_MAX] = '\0';
	cancel = (strcmp("cancel", vol_args->name) == 0);

	ret = exclop_start_or_cancel_reloc(fs_info, BTRFS_EXCLOP_DEV_REMOVE,
					   cancel);
	if (ret == 0) {
		ret = btrfs_rm_device(fs_info, vol_args->name, 0, &bdev, &mode);
		if (!ret)
			btrfs_info(fs_info, "disk deleted %s", vol_args->name);
		btrfs_exclop_finish(fs_info);
	}

	kfree(vol_args);
out_drop_write:
	mnt_drop_write_file(file);
	/* bdev is closed last, after the write ref on the mount is dropped. */
	if (bdev)
		blkdev_put(bdev, mode);
	return ret;
}

3265 3266
/*
 * Report filesystem-wide information (fsid, device count, node/sector
 * sizes).  Optional extra data (checksum info, generation, metadata
 * UUID) is returned only when the corresponding flag was set by the
 * caller, and echoed back in fi_args->flags.
 */
static long btrfs_ioctl_fs_info(struct btrfs_fs_info *fs_info,
				void __user *arg)
{
	struct btrfs_ioctl_fs_info_args *fi_args;
	struct btrfs_device *device;
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	u64 flags_in;
	int ret = 0;

	fi_args = memdup_user(arg, sizeof(*fi_args));
	if (IS_ERR(fi_args))
		return PTR_ERR(fi_args);

	/* Remember requested flags, then reuse the buffer for the reply. */
	flags_in = fi_args->flags;
	memset(fi_args, 0, sizeof(*fi_args));

	/* Device list is RCU-protected. */
	rcu_read_lock();
	fi_args->num_devices = fs_devices->num_devices;

	list_for_each_entry_rcu(device, &fs_devices->devices, dev_list) {
		if (device->devid > fi_args->max_id)
			fi_args->max_id = device->devid;
	}
	rcu_read_unlock();

	memcpy(&fi_args->fsid, fs_devices->fsid, sizeof(fi_args->fsid));
	fi_args->nodesize = fs_info->nodesize;
	fi_args->sectorsize = fs_info->sectorsize;
	fi_args->clone_alignment = fs_info->sectorsize;

	if (flags_in & BTRFS_FS_INFO_FLAG_CSUM_INFO) {
		fi_args->csum_type = btrfs_super_csum_type(fs_info->super_copy);
		fi_args->csum_size = btrfs_super_csum_size(fs_info->super_copy);
		fi_args->flags |= BTRFS_FS_INFO_FLAG_CSUM_INFO;
	}

	if (flags_in & BTRFS_FS_INFO_FLAG_GENERATION) {
		fi_args->generation = fs_info->generation;
		fi_args->flags |= BTRFS_FS_INFO_FLAG_GENERATION;
	}

	if (flags_in & BTRFS_FS_INFO_FLAG_METADATA_UUID) {
		memcpy(&fi_args->metadata_uuid, fs_devices->metadata_uuid,
		       sizeof(fi_args->metadata_uuid));
		fi_args->flags |= BTRFS_FS_INFO_FLAG_METADATA_UUID;
	}

	if (copy_to_user(arg, fi_args, sizeof(*fi_args)))
		ret = -EFAULT;

	kfree(fi_args);
	return ret;
}

3319 3320
/*
 * Report information about one device, looked up by devid and
 * (optionally) uuid supplied by the caller.
 */
static long btrfs_ioctl_dev_info(struct btrfs_fs_info *fs_info,
				 void __user *arg)
{
	BTRFS_DEV_LOOKUP_ARGS(args);
	struct btrfs_ioctl_dev_info_args *di_args;
	struct btrfs_device *dev;
	int ret = 0;

	di_args = memdup_user(arg, sizeof(*di_args));
	if (IS_ERR(di_args))
		return PTR_ERR(di_args);

	args.devid = di_args->devid;
	/* An all-zero uuid means "match by devid only". */
	if (!btrfs_is_empty_uuid(di_args->uuid))
		args.uuid = di_args->uuid;

	/* Device and its name are only stable under RCU. */
	rcu_read_lock();
	dev = btrfs_find_device(fs_info->fs_devices, &args);
	if (!dev) {
		ret = -ENODEV;
		goto out;
	}

	di_args->devid = dev->devid;
	di_args->bytes_used = btrfs_device_get_bytes_used(dev);
	di_args->total_bytes = btrfs_device_get_total_bytes(dev);
	memcpy(di_args->uuid, dev->uuid, sizeof(di_args->uuid));
	if (dev->name) {
		/* Bounded copy with explicit termination. */
		strncpy(di_args->path, rcu_str_deref(dev->name),
				sizeof(di_args->path) - 1);
		di_args->path[sizeof(di_args->path) - 1] = 0;
	} else {
		di_args->path[0] = '\0';
	}

out:
	rcu_read_unlock();
	if (ret == 0 && copy_to_user(arg, di_args, sizeof(*di_args)))
		ret = -EFAULT;

	kfree(di_args);
	return ret;
}

3363 3364
/*
 * Set the default subvolume that is mounted when no subvol= option is
 * given.  Rewrites the "default" dir item in the tree of tree roots to
 * point at the requested subvolume and sets the DEFAULT_SUBVOL incompat
 * bit.  objectid == 0 means "reset to the top-level FS tree".
 */
static long btrfs_ioctl_default_subvol(struct file *file, void __user *argp)
{
	struct inode *inode = file_inode(file);
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_root *new_root;
	struct btrfs_dir_item *di;
	struct btrfs_trans_handle *trans;
	struct btrfs_path *path = NULL;
	struct btrfs_disk_key disk_key;
	u64 objectid = 0;
	u64 dir_id;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	ret = mnt_want_write_file(file);
	if (ret)
		return ret;

	if (copy_from_user(&objectid, argp, sizeof(objectid))) {
		ret = -EFAULT;
		goto out;
	}

	if (!objectid)
		objectid = BTRFS_FS_TREE_OBJECTID;

	new_root = btrfs_get_fs_root(fs_info, objectid, true);
	if (IS_ERR(new_root)) {
		ret = PTR_ERR(new_root);
		goto out;
	}
	/* Only actual subvolume trees may become the default. */
	if (!is_fstree(new_root->root_key.objectid)) {
		ret = -ENOENT;
		goto out_free;
	}

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out_free;
	}

	trans = btrfs_start_transaction(root, 1);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto out_free;
	}

	dir_id = btrfs_super_root_dir(fs_info->super_copy);
	di = btrfs_lookup_dir_item(trans, fs_info->tree_root, path,
				   dir_id, "default", 7, 1);
	if (IS_ERR_OR_NULL(di)) {
		btrfs_release_path(path);
		btrfs_end_transaction(trans);
		btrfs_err(fs_info,
			  "Umm, you don't have the default diritem, this isn't going to work");
		ret = -ENOENT;
		goto out_free;
	}

	/* Point the "default" dir item at the new subvolume's root key. */
	btrfs_cpu_key_to_disk(&disk_key, &new_root->root_key);
	btrfs_set_dir_item_key(path->nodes[0], di, &disk_key);
	btrfs_mark_buffer_dirty(path->nodes[0]);
	btrfs_release_path(path);

	btrfs_set_fs_incompat(fs_info, DEFAULT_SUBVOL);
	btrfs_end_transaction(trans);
out_free:
	btrfs_put_root(new_root);
	btrfs_free_path(path);
out:
	mnt_drop_write_file(file);
	return ret;
}

3441 3442
static void get_block_group_info(struct list_head *groups_list,
				 struct btrfs_ioctl_space_info *space)
3443
{
3444
	struct btrfs_block_group *block_group;
3445 3446 3447 3448 3449 3450

	space->total_bytes = 0;
	space->used_bytes = 0;
	space->flags = 0;
	list_for_each_entry(block_group, groups_list, list) {
		space->flags = block_group->flags;
3451
		space->total_bytes += block_group->length;
3452
		space->used_bytes += block_group->used;
3453 3454 3455
	}
}

3456 3457
/*
 * Report per-type space usage.  Two-phase protocol: with space_slots == 0
 * the ioctl only returns how many slots the caller needs; otherwise up to
 * space_slots entries (one per occupied type/raid-profile combination,
 * plus one for the global block reserve) are copied back.
 */
static long btrfs_ioctl_space_info(struct btrfs_fs_info *fs_info,
				   void __user *arg)
{
	struct btrfs_ioctl_space_args space_args;
	struct btrfs_ioctl_space_info space;
	struct btrfs_ioctl_space_info *dest;
	struct btrfs_ioctl_space_info *dest_orig;
	struct btrfs_ioctl_space_info __user *user_dest;
	struct btrfs_space_info *info;
	static const u64 types[] = {
		BTRFS_BLOCK_GROUP_DATA,
		BTRFS_BLOCK_GROUP_SYSTEM,
		BTRFS_BLOCK_GROUP_METADATA,
		BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_METADATA
	};
	int num_types = 4;
	int alloc_size;
	int ret = 0;
	u64 slot_count = 0;
	int i, c;

	if (copy_from_user(&space_args,
			   (struct btrfs_ioctl_space_args __user *)arg,
			   sizeof(space_args)))
		return -EFAULT;

	/* First pass: count how many (type, raid level) slots are occupied. */
	for (i = 0; i < num_types; i++) {
		struct btrfs_space_info *tmp;

		info = NULL;
		list_for_each_entry(tmp, &fs_info->space_info, list) {
			if (tmp->flags == types[i]) {
				info = tmp;
				break;
			}
		}

		if (!info)
			continue;

		down_read(&info->groups_sem);
		for (c = 0; c < BTRFS_NR_RAID_TYPES; c++) {
			if (!list_empty(&info->block_groups[c]))
				slot_count++;
		}
		up_read(&info->groups_sem);
	}

	/*
	 * Global block reserve, exported as a space_info
	 */
	slot_count++;

	/* space_slots == 0 means they are asking for a count */
	if (space_args.space_slots == 0) {
		space_args.total_spaces = slot_count;
		goto out;
	}

	slot_count = min_t(u64, space_args.space_slots, slot_count);

	alloc_size = sizeof(*dest) * slot_count;

	/* we generally have at most 6 or so space infos, one for each raid
	 * level.  So, a whole page should be more than enough for everyone
	 */
	if (alloc_size > PAGE_SIZE)
		return -ENOMEM;

	space_args.total_spaces = 0;
	dest = kmalloc(alloc_size, GFP_KERNEL);
	if (!dest)
		return -ENOMEM;
	dest_orig = dest;

	/* now we have a buffer to copy into */
	for (i = 0; i < num_types; i++) {
		struct btrfs_space_info *tmp;

		/* Stop once the caller-provided slots are exhausted. */
		if (!slot_count)
			break;

		info = NULL;
		list_for_each_entry(tmp, &fs_info->space_info, list) {
			if (tmp->flags == types[i]) {
				info = tmp;
				break;
			}
		}

		if (!info)
			continue;
		down_read(&info->groups_sem);
		for (c = 0; c < BTRFS_NR_RAID_TYPES; c++) {
			if (!list_empty(&info->block_groups[c])) {
				get_block_group_info(&info->block_groups[c],
						     &space);
				memcpy(dest, &space, sizeof(space));
				dest++;
				space_args.total_spaces++;
				slot_count--;
			}
			if (!slot_count)
				break;
		}
		up_read(&info->groups_sem);
	}

	/*
	 * Add global block reserve
	 */
	if (slot_count) {
		struct btrfs_block_rsv *block_rsv = &fs_info->global_block_rsv;

		spin_lock(&block_rsv->lock);
		space.total_bytes = block_rsv->size;
		space.used_bytes = block_rsv->size - block_rsv->reserved;
		spin_unlock(&block_rsv->lock);
		space.flags = BTRFS_SPACE_INFO_GLOBAL_RSV;
		memcpy(dest, &space, sizeof(space));
		space_args.total_spaces++;
	}

	/* The slot array lives directly after the args header in user memory. */
	user_dest = (struct btrfs_ioctl_space_info __user *)
		(arg + sizeof(struct btrfs_ioctl_space_args));

	if (copy_to_user(user_dest, dest_orig, alloc_size))
		ret = -EFAULT;

	kfree(dest_orig);
out:
	if (ret == 0 && copy_to_user(arg, &space_args, sizeof(space_args)))
		ret = -EFAULT;

	return ret;
}

3593 3594
/*
 * Kick off an asynchronous commit of the currently running transaction
 * and return its transid to userspace (for use with wait_sync).  If no
 * transaction is running, report the last committed transid instead.
 */
static noinline long btrfs_ioctl_start_sync(struct btrfs_root *root,
					    void __user *argp)
{
	struct btrfs_trans_handle *trans;
	u64 transid;
	int ret;

	trans = btrfs_attach_transaction_barrier(root);
	if (IS_ERR(trans)) {
		if (PTR_ERR(trans) != -ENOENT)
			return PTR_ERR(trans);

		/* No running transaction, don't bother */
		transid = root->fs_info->last_trans_committed;
		goto out;
	}
	transid = trans->transid;
	/* On success the async commit owns the handle; don't end it here. */
	ret = btrfs_commit_transaction_async(trans);
	if (ret) {
		btrfs_end_transaction(trans);
		return ret;
	}
out:
	if (argp)
		if (copy_to_user(argp, &transid, sizeof(transid)))
			return -EFAULT;
	return 0;
}

3622
static noinline long btrfs_ioctl_wait_sync(struct btrfs_fs_info *fs_info,
3623
					   void __user *argp)
3624 3625 3626 3627 3628 3629 3630 3631 3632
{
	u64 transid;

	if (argp) {
		if (copy_from_user(&transid, argp, sizeof(transid)))
			return -EFAULT;
	} else {
		transid = 0;  /* current trans */
	}
3633
	return btrfs_wait_for_commit(fs_info, transid);
3634 3635
}

M
Miao Xie 已提交
3636
/*
 * Start a scrub of one device.  A write reference on the mount is taken
 * only for read-write scrubs (ones that may repair data); progress is
 * always copied back so userspace can resume after cancellation.
 */
static long btrfs_ioctl_scrub(struct file *file, void __user *arg)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(file_inode(file)->i_sb);
	struct btrfs_ioctl_scrub_args *sa;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	sa = memdup_user(arg, sizeof(*sa));
	if (IS_ERR(sa))
		return PTR_ERR(sa);

	if (!(sa->flags & BTRFS_SCRUB_READONLY)) {
		ret = mnt_want_write_file(file);
		if (ret)
			goto out;
	}

	ret = btrfs_scrub_dev(fs_info, sa->devid, sa->start, sa->end,
			      &sa->progress, sa->flags & BTRFS_SCRUB_READONLY,
			      0);

	/*
	 * Copy scrub args to user space even if btrfs_scrub_dev() returned an
	 * error. This is important as it allows user space to know how much
	 * progress scrub has done. For example, if scrub is canceled we get
	 * -ECANCELED from btrfs_scrub_dev() and return that error back to user
	 * space. Later user space can inspect the progress from the structure
	 * btrfs_ioctl_scrub_args and resume scrub from where it left off
	 * previously (btrfs-progs does this).
	 * If we fail to copy the btrfs_ioctl_scrub_args structure to user space
	 * then return -EFAULT to signal the structure was not copied or it may
	 * be corrupt and unreliable due to a partial copy.
	 */
	if (copy_to_user(arg, sa, sizeof(*sa)))
		ret = -EFAULT;

	if (!(sa->flags & BTRFS_SCRUB_READONLY))
		mnt_drop_write_file(file);
out:
	kfree(sa);
	return ret;
}

3681
static long btrfs_ioctl_scrub_cancel(struct btrfs_fs_info *fs_info)
J
Jan Schmidt 已提交
3682 3683 3684 3685
{
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

3686
	return btrfs_scrub_cancel(fs_info);
J
Jan Schmidt 已提交
3687 3688
}

3689
static long btrfs_ioctl_scrub_progress(struct btrfs_fs_info *fs_info,
J
Jan Schmidt 已提交
3690 3691 3692 3693 3694 3695 3696 3697 3698 3699 3700 3701
				       void __user *arg)
{
	struct btrfs_ioctl_scrub_args *sa;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	sa = memdup_user(arg, sizeof(*sa));
	if (IS_ERR(sa))
		return PTR_ERR(sa);

3702
	ret = btrfs_scrub_progress(fs_info, sa->devid, &sa->progress);
J
Jan Schmidt 已提交
3703

3704
	if (ret == 0 && copy_to_user(arg, sa, sizeof(*sa)))
J
Jan Schmidt 已提交
3705 3706 3707 3708 3709 3710
		ret = -EFAULT;

	kfree(sa);
	return ret;
}

3711
static long btrfs_ioctl_get_dev_stats(struct btrfs_fs_info *fs_info,
3712
				      void __user *arg)
3713 3714 3715 3716 3717 3718 3719 3720
{
	struct btrfs_ioctl_get_dev_stats *sa;
	int ret;

	sa = memdup_user(arg, sizeof(*sa));
	if (IS_ERR(sa))
		return PTR_ERR(sa);

3721 3722 3723 3724 3725
	if ((sa->flags & BTRFS_DEV_STATS_RESET) && !capable(CAP_SYS_ADMIN)) {
		kfree(sa);
		return -EPERM;
	}

3726
	ret = btrfs_get_dev_stats(fs_info, sa);
3727

3728
	if (ret == 0 && copy_to_user(arg, sa, sizeof(*sa)))
3729 3730 3731 3732 3733 3734
		ret = -EFAULT;

	kfree(sa);
	return ret;
}

3735 3736
/*
 * Device replace multiplexer: start (under the DEV_REPLACE exclusive
 * operation), query status, or cancel a replace.  The args struct is
 * copied back on success and on -ECANCELED so userspace sees results.
 */
static long btrfs_ioctl_dev_replace(struct btrfs_fs_info *fs_info,
				    void __user *arg)
{
	struct btrfs_ioctl_dev_replace_args *p;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	p = memdup_user(arg, sizeof(*p));
	if (IS_ERR(p))
		return PTR_ERR(p);

	switch (p->cmd) {
	case BTRFS_IOCTL_DEV_REPLACE_CMD_START:
		if (sb_rdonly(fs_info->sb)) {
			ret = -EROFS;
			goto out;
		}
		/* Replace must not run concurrently with other excl. ops. */
		if (!btrfs_exclop_start(fs_info, BTRFS_EXCLOP_DEV_REPLACE)) {
			ret = BTRFS_ERROR_DEV_EXCL_RUN_IN_PROGRESS;
		} else {
			ret = btrfs_dev_replace_by_ioctl(fs_info, p);
			btrfs_exclop_finish(fs_info);
		}
		break;
	case BTRFS_IOCTL_DEV_REPLACE_CMD_STATUS:
		btrfs_dev_replace_status(fs_info, p);
		ret = 0;
		break;
	case BTRFS_IOCTL_DEV_REPLACE_CMD_CANCEL:
		p->result = btrfs_dev_replace_cancel(fs_info);
		ret = 0;
		break;
	default:
		ret = -EINVAL;
		break;
	}

	if ((ret == 0 || ret == -ECANCELED) && copy_to_user(arg, p, sizeof(*p)))
		ret = -EFAULT;
out:
	kfree(p);
	return ret;
}

3781 3782 3783 3784
/*
 * Resolve an inode number to all its paths within the subvolume.  The
 * resulting fspath structure is copied to the user buffer with the path
 * string pointers rewritten as offsets relative to the structure start.
 */
static long btrfs_ioctl_ino_to_path(struct btrfs_root *root, void __user *arg)
{
	int ret = 0;
	int i;
	u64 rel_ptr;
	int size;
	struct btrfs_ioctl_ino_path_args *ipa = NULL;
	struct inode_fs_paths *ipath = NULL;
	struct btrfs_path *path;

	if (!capable(CAP_DAC_READ_SEARCH))
		return -EPERM;

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}

	ipa = memdup_user(arg, sizeof(*ipa));
	if (IS_ERR(ipa)) {
		ret = PTR_ERR(ipa);
		/* NULL so the unified cleanup path can kfree() safely. */
		ipa = NULL;
		goto out;
	}

	size = min_t(u32, ipa->size, 4096);
	ipath = init_ipath(size, root, path);
	if (IS_ERR(ipath)) {
		ret = PTR_ERR(ipath);
		ipath = NULL;
		goto out;
	}

	ret = paths_from_inode(ipa->inum, ipath);
	if (ret < 0)
		goto out;

	/* Convert in-kernel pointers to offsets relative to the fspath. */
	for (i = 0; i < ipath->fspath->elem_cnt; ++i) {
		rel_ptr = ipath->fspath->val[i] -
			  (u64)(unsigned long)ipath->fspath->val;
		ipath->fspath->val[i] = rel_ptr;
	}

	ret = copy_to_user((void __user *)(unsigned long)ipa->fspath,
			   ipath->fspath, size);
	if (ret) {
		ret = -EFAULT;
		goto out;
	}

out:
	btrfs_free_path(path);
	free_ipath(ipath);
	kfree(ipa);

	return ret;
}

/*
 * Backref-walk callback: append one (inum, offset, root) triple to the
 * data container, or account the bytes that would have been needed when
 * the container is full.  Always returns 0 so the walk continues and the
 * total missing space can be reported.
 */
static int build_ino_list(u64 inum, u64 offset, u64 root, void *ctx)
{
	struct btrfs_data_container *inodes = ctx;
	const size_t c = 3 * sizeof(u64);

	if (inodes->bytes_left < c) {
		/* Full: remember how much more room the caller needed. */
		inodes->bytes_missing += c - inodes->bytes_left;
		inodes->bytes_left = 0;
		inodes->elem_missed += 3;
		return 0;
	}

	inodes->bytes_left -= c;
	inodes->val[inodes->elem_cnt++] = inum;
	inodes->val[inodes->elem_cnt++] = offset;
	inodes->val[inodes->elem_cnt++] = root;

	return 0;
}

3860
/*
 * Resolve a logical (disk) address to the inodes referencing it.  The v1
 * ioctl caps the result buffer at 64K and always considers extent
 * offsets; v2 allows up to 16M and the IGNORE_OFFSET flag.
 */
static long btrfs_ioctl_logical_to_ino(struct btrfs_fs_info *fs_info,
					void __user *arg, int version)
{
	int ret = 0;
	int size;
	struct btrfs_ioctl_logical_ino_args *loi;
	struct btrfs_data_container *inodes = NULL;
	struct btrfs_path *path = NULL;
	bool ignore_offset;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	loi = memdup_user(arg, sizeof(*loi));
	if (IS_ERR(loi))
		return PTR_ERR(loi);

	if (version == 1) {
		ignore_offset = false;
		size = min_t(u32, loi->size, SZ_64K);
	} else {
		/* All reserved bits must be 0 for now */
		if (memchr_inv(loi->reserved, 0, sizeof(loi->reserved))) {
			ret = -EINVAL;
			goto out_loi;
		}
		/* Only accept flags we have defined so far */
		if (loi->flags & ~(BTRFS_LOGICAL_INO_ARGS_IGNORE_OFFSET)) {
			ret = -EINVAL;
			goto out_loi;
		}
		ignore_offset = loi->flags & BTRFS_LOGICAL_INO_ARGS_IGNORE_OFFSET;
		size = min_t(u32, loi->size, SZ_16M);
	}

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}

	inodes = init_data_container(size);
	if (IS_ERR(inodes)) {
		ret = PTR_ERR(inodes);
		/* NULL so kvfree() below is safe. */
		inodes = NULL;
		goto out;
	}

	ret = iterate_inodes_from_logical(loi->logical, fs_info, path,
					  build_ino_list, inodes, ignore_offset);
	/* Map "not an extent address" to a stable user-visible error. */
	if (ret == -EINVAL)
		ret = -ENOENT;
	if (ret < 0)
		goto out;

	ret = copy_to_user((void __user *)(unsigned long)loi->inodes, inodes,
			   size);
	if (ret)
		ret = -EFAULT;

out:
	btrfs_free_path(path);
	kvfree(inodes);
out_loi:
	kfree(loi);

	return ret;
}

3929
/*
 * Fill a userspace balance args structure from the in-kernel balance
 * control: flags, run/pause/cancel state, the per-type filter args and
 * the progress statistics (the latter under balance_lock as they are
 * updated concurrently by the running balance).
 */
void btrfs_update_ioctl_balance_args(struct btrfs_fs_info *fs_info,
			       struct btrfs_ioctl_balance_args *bargs)
{
	struct btrfs_balance_control *bctl = fs_info->balance_ctl;

	bargs->flags = bctl->flags;

	if (test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags))
		bargs->state |= BTRFS_BALANCE_STATE_RUNNING;
	if (atomic_read(&fs_info->balance_pause_req))
		bargs->state |= BTRFS_BALANCE_STATE_PAUSE_REQ;
	if (atomic_read(&fs_info->balance_cancel_req))
		bargs->state |= BTRFS_BALANCE_STATE_CANCEL_REQ;

	memcpy(&bargs->data, &bctl->data, sizeof(bargs->data));
	memcpy(&bargs->meta, &bctl->meta, sizeof(bargs->meta));
	memcpy(&bargs->sys, &bctl->sys, sizeof(bargs->sys));

	/* stat counters are written by the balance thread; snapshot them. */
	spin_lock(&fs_info->balance_lock);
	memcpy(&bargs->stat, &bctl->stat, sizeof(bargs->stat));
	spin_unlock(&fs_info->balance_lock);
}

3952
/*
 * Start or resume a balance.  The tricky part is arbitrating with the
 * filesystem-wide exclusive operation: if we cannot claim it, the lock
 * may be held either by another operation (fail), a running balance
 * (-EINPROGRESS) or a paused balance (which we may resume without
 * claiming the exclop ourselves -- it is already held on our behalf).
 */
static long btrfs_ioctl_balance(struct file *file, void __user *arg)
{
	struct btrfs_root *root = BTRFS_I(file_inode(file))->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_ioctl_balance_args *bargs;
	struct btrfs_balance_control *bctl;
	bool need_unlock; /* for mut. excl. ops lock */
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	ret = mnt_want_write_file(file);
	if (ret)
		return ret;

again:
	if (btrfs_exclop_start(fs_info, BTRFS_EXCLOP_BALANCE)) {
		mutex_lock(&fs_info->balance_mutex);
		need_unlock = true;
		goto locked;
	}

	/*
	 * mut. excl. ops lock is locked.  Three possibilities:
	 *   (1) some other op is running
	 *   (2) balance is running
	 *   (3) balance is paused -- special case (think resume)
	 */
	mutex_lock(&fs_info->balance_mutex);
	if (fs_info->balance_ctl) {
		/* this is either (2) or (3) */
		if (!test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)) {
			mutex_unlock(&fs_info->balance_mutex);
			/*
			 * Lock released to allow other waiters to continue,
			 * we'll reexamine the status again.
			 */
			mutex_lock(&fs_info->balance_mutex);

			if (fs_info->balance_ctl &&
			    !test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)) {
				/* this is (3) */
				need_unlock = false;
				goto locked;
			}

			mutex_unlock(&fs_info->balance_mutex);
			goto again;
		} else {
			/* this is (2) */
			mutex_unlock(&fs_info->balance_mutex);
			ret = -EINPROGRESS;
			goto out;
		}
	} else {
		/* this is (1) */
		mutex_unlock(&fs_info->balance_mutex);
		ret = BTRFS_ERROR_DEV_EXCL_RUN_IN_PROGRESS;
		goto out;
	}

locked:

	if (arg) {
		bargs = memdup_user(arg, sizeof(*bargs));
		if (IS_ERR(bargs)) {
			ret = PTR_ERR(bargs);
			goto out_unlock;
		}

		if (bargs->flags & BTRFS_BALANCE_RESUME) {
			if (!fs_info->balance_ctl) {
				ret = -ENOTCONN;
				goto out_bargs;
			}

			/* Reuse the paused balance's control structure. */
			bctl = fs_info->balance_ctl;
			spin_lock(&fs_info->balance_lock);
			bctl->flags |= BTRFS_BALANCE_RESUME;
			spin_unlock(&fs_info->balance_lock);

			goto do_balance;
		}
	} else {
		bargs = NULL;
	}

	if (fs_info->balance_ctl) {
		ret = -EINPROGRESS;
		goto out_bargs;
	}

	bctl = kzalloc(sizeof(*bctl), GFP_KERNEL);
	if (!bctl) {
		ret = -ENOMEM;
		goto out_bargs;
	}

	if (arg) {
		memcpy(&bctl->data, &bargs->data, sizeof(bctl->data));
		memcpy(&bctl->meta, &bargs->meta, sizeof(bctl->meta));
		memcpy(&bctl->sys, &bargs->sys, sizeof(bctl->sys));

		bctl->flags = bargs->flags;
	} else {
		/* balance everything - no filters */
		bctl->flags |= BTRFS_BALANCE_TYPE_MASK;
	}

	if (bctl->flags & ~(BTRFS_BALANCE_ARGS_MASK | BTRFS_BALANCE_TYPE_MASK)) {
		ret = -EINVAL;
		goto out_bctl;
	}

do_balance:
	/*
	 * Ownership of bctl and exclusive operation goes to btrfs_balance.
	 * bctl is freed in reset_balance_state, or, if restriper was paused
	 * all the way until unmount, in free_fs_info.  The flag should be
	 * cleared after reset_balance_state.
	 */
	need_unlock = false;

	ret = btrfs_balance(fs_info, bctl, bargs);
	bctl = NULL;

	if ((ret == 0 || ret == -ECANCELED) && arg) {
		if (copy_to_user(arg, bargs, sizeof(*bargs)))
			ret = -EFAULT;
	}

out_bctl:
	kfree(bctl);
out_bargs:
	kfree(bargs);
out_unlock:
	mutex_unlock(&fs_info->balance_mutex);
	if (need_unlock)
		btrfs_exclop_finish(fs_info);
out:
	mnt_drop_write_file(file);
	return ret;
}

4097
static long btrfs_ioctl_balance_ctl(struct btrfs_fs_info *fs_info, int cmd)
4098 4099 4100 4101 4102 4103
{
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	switch (cmd) {
	case BTRFS_BALANCE_CTL_PAUSE:
4104
		return btrfs_pause_balance(fs_info);
4105
	case BTRFS_BALANCE_CTL_CANCEL:
4106
		return btrfs_cancel_balance(fs_info);
4107 4108 4109 4110 4111
	}

	return -EINVAL;
}

4112
static long btrfs_ioctl_balance_progress(struct btrfs_fs_info *fs_info,
4113 4114 4115 4116 4117 4118 4119 4120 4121 4122 4123 4124 4125 4126
					 void __user *arg)
{
	struct btrfs_ioctl_balance_args *bargs;
	int ret = 0;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	mutex_lock(&fs_info->balance_mutex);
	if (!fs_info->balance_ctl) {
		ret = -ENOTCONN;
		goto out;
	}

4127
	bargs = kzalloc(sizeof(*bargs), GFP_KERNEL);
4128 4129 4130 4131 4132
	if (!bargs) {
		ret = -ENOMEM;
		goto out;
	}

4133
	btrfs_update_ioctl_balance_args(fs_info, bargs);
4134 4135 4136 4137 4138 4139 4140 4141 4142 4143

	if (copy_to_user(arg, bargs, sizeof(*bargs)))
		ret = -EFAULT;

	kfree(bargs);
out:
	mutex_unlock(&fs_info->balance_mutex);
	return ret;
}

4144
/*
 * Enable or disable quotas (qgroups) on the filesystem.
 *
 * The command (BTRFS_QUOTA_CTL_ENABLE/DISABLE) is read from the user-space
 * struct btrfs_ioctl_quota_ctl_args.  Requires CAP_SYS_ADMIN and write
 * access to the mount; subvol_sem is held for write across the state change.
 */
static long btrfs_ioctl_quota_ctl(struct file *file, void __user *arg)
{
	struct inode *inode = file_inode(file);
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_ioctl_quota_ctl_args *sa;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	ret = mnt_want_write_file(file);
	if (ret)
		return ret;

	sa = memdup_user(arg, sizeof(*sa));
	if (IS_ERR(sa)) {
		ret = PTR_ERR(sa);
		goto drop_write;
	}

	down_write(&fs_info->subvol_sem);

	switch (sa->cmd) {
	case BTRFS_QUOTA_CTL_ENABLE:
		ret = btrfs_quota_enable(fs_info);
		break;
	case BTRFS_QUOTA_CTL_DISABLE:
		ret = btrfs_quota_disable(fs_info);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	kfree(sa);
	up_write(&fs_info->subvol_sem);
drop_write:
	mnt_drop_write_file(file);
	return ret;
}

4185
/*
 * Add or remove a qgroup relation (sa->assign selects add vs. delete)
 * between qgroups sa->src and sa->dst, then refresh the qgroup status
 * and info items.
 *
 * Requires CAP_SYS_ADMIN and write access to the mount.  Errors from
 * btrfs_run_qgroups() are fatal for qgroup consistency and are reported
 * via btrfs_handle_fs_error().
 */
static long btrfs_ioctl_qgroup_assign(struct file *file, void __user *arg)
{
	struct inode *inode = file_inode(file);
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_ioctl_qgroup_assign_args *sa;
	struct btrfs_trans_handle *trans;
	int ret;
	int err;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	ret = mnt_want_write_file(file);
	if (ret)
		return ret;

	sa = memdup_user(arg, sizeof(*sa));
	if (IS_ERR(sa)) {
		ret = PTR_ERR(sa);
		goto drop_write;
	}

	trans = btrfs_join_transaction(root);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto out;
	}

	if (sa->assign) {
		ret = btrfs_add_qgroup_relation(trans, sa->src, sa->dst);
	} else {
		ret = btrfs_del_qgroup_relation(trans, sa->src, sa->dst);
	}

	/* update qgroup status and info */
	err = btrfs_run_qgroups(trans);
	if (err < 0)
		btrfs_handle_fs_error(fs_info, err,
				      "failed to update qgroup status and info");
	err = btrfs_end_transaction(trans);
	if (err && !ret)
		ret = err;

out:
	kfree(sa);
drop_write:
	mnt_drop_write_file(file);
	return ret;
}

4236
/*
 * Create or remove a qgroup (sa->create selects which) identified by
 * sa->qgroupid.  A qgroupid of 0 is rejected with -EINVAL.
 *
 * Requires CAP_SYS_ADMIN and write access to the mount.
 */
static long btrfs_ioctl_qgroup_create(struct file *file, void __user *arg)
{
	struct inode *inode = file_inode(file);
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_ioctl_qgroup_create_args *sa;
	struct btrfs_trans_handle *trans;
	int ret;
	int err;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	ret = mnt_want_write_file(file);
	if (ret)
		return ret;

	sa = memdup_user(arg, sizeof(*sa));
	if (IS_ERR(sa)) {
		ret = PTR_ERR(sa);
		goto drop_write;
	}

	if (!sa->qgroupid) {
		ret = -EINVAL;
		goto out;
	}

	trans = btrfs_join_transaction(root);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto out;
	}

	if (sa->create) {
		ret = btrfs_create_qgroup(trans, sa->qgroupid);
	} else {
		ret = btrfs_remove_qgroup(trans, sa->qgroupid);
	}

	err = btrfs_end_transaction(trans);
	if (err && !ret)
		ret = err;

out:
	kfree(sa);
drop_write:
	mnt_drop_write_file(file);
	return ret;
}

4286
/*
 * Set the limits (sa->lim) on a qgroup.  A qgroupid of 0 means "the
 * qgroup of the current subvolume" (root->root_key.objectid).
 *
 * Requires CAP_SYS_ADMIN and write access to the mount.
 */
static long btrfs_ioctl_qgroup_limit(struct file *file, void __user *arg)
{
	struct inode *inode = file_inode(file);
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_ioctl_qgroup_limit_args *sa;
	struct btrfs_trans_handle *trans;
	int ret;
	int err;
	u64 qgroupid;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	ret = mnt_want_write_file(file);
	if (ret)
		return ret;

	sa = memdup_user(arg, sizeof(*sa));
	if (IS_ERR(sa)) {
		ret = PTR_ERR(sa);
		goto drop_write;
	}

	trans = btrfs_join_transaction(root);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto out;
	}

	qgroupid = sa->qgroupid;
	if (!qgroupid) {
		/* take the current subvol as qgroup */
		qgroupid = root->root_key.objectid;
	}

	ret = btrfs_limit_qgroup(trans, qgroupid, &sa->lim);

	err = btrfs_end_transaction(trans);
	if (err && !ret)
		ret = err;

out:
	kfree(sa);
drop_write:
	mnt_drop_write_file(file);
	return ret;
}

J
Jan Schmidt 已提交
4334 4335
/*
 * Start a qgroup rescan.  The args struct carries no options yet, so any
 * non-zero flags are rejected with -EINVAL.
 *
 * Requires CAP_SYS_ADMIN and write access to the mount.
 */
static long btrfs_ioctl_quota_rescan(struct file *file, void __user *arg)
{
	struct inode *inode = file_inode(file);
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_ioctl_quota_rescan_args *qsa;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	ret = mnt_want_write_file(file);
	if (ret)
		return ret;

	qsa = memdup_user(arg, sizeof(*qsa));
	if (IS_ERR(qsa)) {
		ret = PTR_ERR(qsa);
		goto drop_write;
	}

	if (qsa->flags) {
		ret = -EINVAL;
		goto out;
	}

	ret = btrfs_qgroup_rescan(fs_info);

out:
	kfree(qsa);
drop_write:
	mnt_drop_write_file(file);
	return ret;
}

4368 4369
/*
 * Report whether a qgroup rescan is running and, if so, its progress
 * (the objectid the rescan has reached).  flags == 1 means "rescan in
 * progress"; a zeroed struct means idle.  Requires CAP_SYS_ADMIN.
 */
static long btrfs_ioctl_quota_rescan_status(struct btrfs_fs_info *fs_info,
						void __user *arg)
{
	struct btrfs_ioctl_quota_rescan_args qsa = {0};

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN) {
		qsa.flags = 1;
		qsa.progress = fs_info->qgroup_rescan_progress.objectid;
	}

	if (copy_to_user(arg, &qsa, sizeof(qsa)))
		return -EFAULT;

	return 0;
}

4387 4388
/*
 * Block until the currently running qgroup rescan (if any) completes.
 * The @arg pointer is unused.  Requires CAP_SYS_ADMIN.
 */
static long btrfs_ioctl_quota_rescan_wait(struct btrfs_fs_info *fs_info,
						void __user *arg)
{
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	/* true: the wait is interruptible */
	return btrfs_qgroup_wait_for_completion(fs_info, true);
}

4396
static long _btrfs_ioctl_set_received_subvol(struct file *file,
4397
					    struct user_namespace *mnt_userns,
4398
					    struct btrfs_ioctl_received_subvol_args *sa)
4399
{
A
Al Viro 已提交
4400
	struct inode *inode = file_inode(file);
4401
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
4402 4403 4404
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_root_item *root_item = &root->root_item;
	struct btrfs_trans_handle *trans;
4405
	struct timespec64 ct = current_time(inode);
4406
	int ret = 0;
4407
	int received_uuid_changed;
4408

4409
	if (!inode_owner_or_capable(mnt_userns, inode))
4410 4411
		return -EPERM;

4412 4413 4414 4415
	ret = mnt_want_write_file(file);
	if (ret < 0)
		return ret;

4416
	down_write(&fs_info->subvol_sem);
4417

4418
	if (btrfs_ino(BTRFS_I(inode)) != BTRFS_FIRST_FREE_OBJECTID) {
4419 4420 4421 4422 4423 4424 4425 4426 4427
		ret = -EINVAL;
		goto out;
	}

	if (btrfs_root_readonly(root)) {
		ret = -EROFS;
		goto out;
	}

4428 4429 4430 4431 4432
	/*
	 * 1 - root item
	 * 2 - uuid items (received uuid + subvol uuid)
	 */
	trans = btrfs_start_transaction(root, 3);
4433 4434 4435 4436 4437 4438 4439 4440 4441 4442
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		trans = NULL;
		goto out;
	}

	sa->rtransid = trans->transid;
	sa->rtime.sec = ct.tv_sec;
	sa->rtime.nsec = ct.tv_nsec;

4443 4444 4445
	received_uuid_changed = memcmp(root_item->received_uuid, sa->uuid,
				       BTRFS_UUID_SIZE);
	if (received_uuid_changed &&
4446
	    !btrfs_is_empty_uuid(root_item->received_uuid)) {
4447
		ret = btrfs_uuid_tree_remove(trans, root_item->received_uuid,
4448 4449 4450 4451 4452 4453 4454 4455
					  BTRFS_UUID_KEY_RECEIVED_SUBVOL,
					  root->root_key.objectid);
		if (ret && ret != -ENOENT) {
		        btrfs_abort_transaction(trans, ret);
		        btrfs_end_transaction(trans);
		        goto out;
		}
	}
4456 4457 4458
	memcpy(root_item->received_uuid, sa->uuid, BTRFS_UUID_SIZE);
	btrfs_set_root_stransid(root_item, sa->stransid);
	btrfs_set_root_rtransid(root_item, sa->rtransid);
4459 4460 4461 4462
	btrfs_set_stack_timespec_sec(&root_item->stime, sa->stime.sec);
	btrfs_set_stack_timespec_nsec(&root_item->stime, sa->stime.nsec);
	btrfs_set_stack_timespec_sec(&root_item->rtime, sa->rtime.sec);
	btrfs_set_stack_timespec_nsec(&root_item->rtime, sa->rtime.nsec);
4463

4464
	ret = btrfs_update_root(trans, fs_info->tree_root,
4465 4466
				&root->root_key, &root->root_item);
	if (ret < 0) {
4467
		btrfs_end_transaction(trans);
4468
		goto out;
4469 4470
	}
	if (received_uuid_changed && !btrfs_is_empty_uuid(sa->uuid)) {
4471
		ret = btrfs_uuid_tree_add(trans, sa->uuid,
4472 4473 4474
					  BTRFS_UUID_KEY_RECEIVED_SUBVOL,
					  root->root_key.objectid);
		if (ret < 0 && ret != -EEXIST) {
4475
			btrfs_abort_transaction(trans, ret);
4476
			btrfs_end_transaction(trans);
4477
			goto out;
4478 4479
		}
	}
4480
	ret = btrfs_commit_transaction(trans);
4481
out:
4482
	up_write(&fs_info->subvol_sem);
4483 4484 4485 4486 4487 4488 4489 4490 4491 4492 4493 4494 4495
	mnt_drop_write_file(file);
	return ret;
}

#ifdef CONFIG_64BIT
/*
 * 32-bit compat entry for BTRFS_IOC_SET_RECEIVED_SUBVOL: converts the
 * packed 32-bit args (whose timespec is 4 bytes smaller) to the native
 * 64-bit layout, runs the common implementation, then converts the
 * results back and copies them to user space.
 */
static long btrfs_ioctl_set_received_subvol_32(struct file *file,
						void __user *arg)
{
	struct btrfs_ioctl_received_subvol_args_32 *args32 = NULL;
	struct btrfs_ioctl_received_subvol_args *args64 = NULL;
	int ret = 0;

	args32 = memdup_user(arg, sizeof(*args32));
	if (IS_ERR(args32))
		return PTR_ERR(args32);

	args64 = kmalloc(sizeof(*args64), GFP_KERNEL);
	if (!args64) {
		ret = -ENOMEM;
		goto out;
	}

	memcpy(args64->uuid, args32->uuid, BTRFS_UUID_SIZE);
	args64->stransid = args32->stransid;
	args64->rtransid = args32->rtransid;
	args64->stime.sec = args32->stime.sec;
	args64->stime.nsec = args32->stime.nsec;
	args64->rtime.sec = args32->rtime.sec;
	args64->rtime.nsec = args32->rtime.nsec;
	args64->flags = args32->flags;

	ret = _btrfs_ioctl_set_received_subvol(file, file_mnt_user_ns(file), args64);
	if (ret)
		goto out;

	memcpy(args32->uuid, args64->uuid, BTRFS_UUID_SIZE);
	args32->stransid = args64->stransid;
	args32->rtransid = args64->rtransid;
	args32->stime.sec = args64->stime.sec;
	args32->stime.nsec = args64->stime.nsec;
	args32->rtime.sec = args64->rtime.sec;
	args32->rtime.nsec = args64->rtime.nsec;
	args32->flags = args64->flags;

	ret = copy_to_user(arg, args32, sizeof(*args32));
	if (ret)
		ret = -EFAULT;

out:
	kfree(args32);
	kfree(args64);
	return ret;
}
#endif

/*
 * Native entry for BTRFS_IOC_SET_RECEIVED_SUBVOL: copy the args from
 * user space, run the common implementation, and copy the updated args
 * (receive transid/time filled in) back to user space.
 */
static long btrfs_ioctl_set_received_subvol(struct file *file,
					    void __user *arg)
{
	struct btrfs_ioctl_received_subvol_args *sa = NULL;
	int ret = 0;

	sa = memdup_user(arg, sizeof(*sa));
	if (IS_ERR(sa))
		return PTR_ERR(sa);

	ret = _btrfs_ioctl_set_received_subvol(file, file_mnt_user_ns(file), sa);

	if (ret)
		goto out;

	ret = copy_to_user(arg, sa, sizeof(*sa));
	if (ret)
		ret = -EFAULT;

out:
	kfree(sa);
	return ret;
}

4562 4563
/*
 * Copy the filesystem label to user space (FS_IOC_GETFSLABEL).
 *
 * The label is read from the in-memory superblock copy under super_lock
 * so a concurrent SETFSLABEL cannot be observed half-written.  A label
 * occupying all BTRFS_LABEL_SIZE bytes has no NUL terminator; in that
 * case the last byte is dropped and a warning is logged.
 */
static int btrfs_ioctl_get_fslabel(struct btrfs_fs_info *fs_info,
					void __user *arg)
{
	size_t len;
	int ret;
	char label[BTRFS_LABEL_SIZE];

	spin_lock(&fs_info->super_lock);
	memcpy(label, fs_info->super_copy->label, BTRFS_LABEL_SIZE);
	spin_unlock(&fs_info->super_lock);

	len = strnlen(label, BTRFS_LABEL_SIZE);

	if (len == BTRFS_LABEL_SIZE) {
		btrfs_warn(fs_info,
			   "label is too long, return the first %zu bytes",
			   --len);
	}

	ret = copy_to_user(arg, label, len);

	return ret ? -EFAULT : 0;
}

4586 4587
/*
 * Set the filesystem label (FS_IOC_SETFSLABEL).
 *
 * The new label must leave room for a NUL terminator (at most
 * BTRFS_LABEL_SIZE - 1 bytes).  The superblock copy is updated under
 * super_lock and the change is made durable by committing a transaction.
 * Requires CAP_SYS_ADMIN and write access to the mount.
 */
static int btrfs_ioctl_set_fslabel(struct file *file, void __user *arg)
{
	struct inode *inode = file_inode(file);
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_super_block *super_block = fs_info->super_copy;
	struct btrfs_trans_handle *trans;
	char label[BTRFS_LABEL_SIZE];
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (copy_from_user(label, arg, sizeof(label)))
		return -EFAULT;

	if (strnlen(label, BTRFS_LABEL_SIZE) == BTRFS_LABEL_SIZE) {
		btrfs_err(fs_info,
			  "unable to set label with more than %d bytes",
			  BTRFS_LABEL_SIZE - 1);
		return -EINVAL;
	}

	ret = mnt_want_write_file(file);
	if (ret)
		return ret;

	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto out_unlock;
	}

	spin_lock(&fs_info->super_lock);
	strcpy(super_block->label, label);
	spin_unlock(&fs_info->super_lock);
	ret = btrfs_commit_transaction(trans);

out_unlock:
	mnt_drop_write_file(file);
	return ret;
}

4629 4630 4631 4632 4633
#define INIT_FEATURE_FLAGS(suffix) \
	{ .compat_flags = BTRFS_FEATURE_COMPAT_##suffix, \
	  .compat_ro_flags = BTRFS_FEATURE_COMPAT_RO_##suffix, \
	  .incompat_flags = BTRFS_FEATURE_INCOMPAT_##suffix }

4634
int btrfs_ioctl_get_supported_features(void __user *arg)
4635
{
D
David Sterba 已提交
4636
	static const struct btrfs_ioctl_feature_flags features[3] = {
4637 4638 4639 4640 4641 4642 4643 4644 4645 4646 4647
		INIT_FEATURE_FLAGS(SUPP),
		INIT_FEATURE_FLAGS(SAFE_SET),
		INIT_FEATURE_FLAGS(SAFE_CLEAR)
	};

	if (copy_to_user(arg, &features, sizeof(features)))
		return -EFAULT;

	return 0;
}

4648 4649
/*
 * Report the feature bits currently set on this filesystem, read from
 * the in-memory superblock copy.
 */
static int btrfs_ioctl_get_features(struct btrfs_fs_info *fs_info,
					void __user *arg)
{
	struct btrfs_super_block *super_block = fs_info->super_copy;
	struct btrfs_ioctl_feature_flags features;

	features.compat_flags = btrfs_super_compat_flags(super_block);
	features.compat_ro_flags = btrfs_super_compat_ro_flags(super_block);
	features.incompat_flags = btrfs_super_incompat_flags(super_block);

	if (copy_to_user(arg, &features, sizeof(features)))
		return -EFAULT;

	return 0;
}

4664
/*
 * Validate a requested online feature-bit change for one feature set.
 *
 * @change_mask:     bits the caller wants to modify
 * @flags:           the desired values for those bits
 * @supported_flags: bits this kernel understands at all
 * @safe_set:        bits that may be turned on while mounted
 * @safe_clear:      bits that may be turned off while mounted
 *
 * Returns -EOPNOTSUPP for unknown bits, -EPERM for bits that cannot be
 * changed online, 0 if the whole change is allowed.  Logs the offending
 * bits by name when a printable form is available.
 */
static int check_feature_bits(struct btrfs_fs_info *fs_info,
			      enum btrfs_feature_set set,
			      u64 change_mask, u64 flags, u64 supported_flags,
			      u64 safe_set, u64 safe_clear)
{
	const char *type = btrfs_feature_set_name(set);
	char *names;
	u64 disallowed, unsupported;
	u64 set_mask = flags & change_mask;
	u64 clear_mask = ~flags & change_mask;

	unsupported = set_mask & ~supported_flags;
	if (unsupported) {
		names = btrfs_printable_features(set, unsupported);
		if (names) {
			btrfs_warn(fs_info,
				   "this kernel does not support the %s feature bit%s",
				   names, strchr(names, ',') ? "s" : "");
			kfree(names);
		} else
			btrfs_warn(fs_info,
				   "this kernel does not support %s bits 0x%llx",
				   type, unsupported);
		return -EOPNOTSUPP;
	}

	disallowed = set_mask & ~safe_set;
	if (disallowed) {
		names = btrfs_printable_features(set, disallowed);
		if (names) {
			btrfs_warn(fs_info,
				   "can't set the %s feature bit%s while mounted",
				   names, strchr(names, ',') ? "s" : "");
			kfree(names);
		} else
			btrfs_warn(fs_info,
				   "can't set %s bits 0x%llx while mounted",
				   type, disallowed);
		return -EPERM;
	}

	disallowed = clear_mask & ~safe_clear;
	if (disallowed) {
		names = btrfs_printable_features(set, disallowed);
		if (names) {
			btrfs_warn(fs_info,
				   "can't clear the %s feature bit%s while mounted",
				   names, strchr(names, ',') ? "s" : "");
			kfree(names);
		} else
			btrfs_warn(fs_info,
				   "can't clear %s bits 0x%llx while mounted",
				   type, disallowed);
		return -EPERM;
	}

	return 0;
}

4723 4724
#define check_feature(fs_info, change_mask, flags, mask_base)	\
check_feature_bits(fs_info, FEAT_##mask_base, change_mask, flags,	\
4725 4726 4727 4728 4729 4730
		   BTRFS_FEATURE_ ## mask_base ## _SUPP,	\
		   BTRFS_FEATURE_ ## mask_base ## _SAFE_SET,	\
		   BTRFS_FEATURE_ ## mask_base ## _SAFE_CLEAR)

static int btrfs_ioctl_set_features(struct file *file, void __user *arg)
{
4731 4732 4733 4734
	struct inode *inode = file_inode(file);
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_super_block *super_block = fs_info->super_copy;
4735 4736 4737 4738 4739 4740 4741 4742 4743 4744 4745 4746 4747 4748 4749 4750
	struct btrfs_ioctl_feature_flags flags[2];
	struct btrfs_trans_handle *trans;
	u64 newflags;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (copy_from_user(flags, arg, sizeof(flags)))
		return -EFAULT;

	/* Nothing to do */
	if (!flags[0].compat_flags && !flags[0].compat_ro_flags &&
	    !flags[0].incompat_flags)
		return 0;

4751
	ret = check_feature(fs_info, flags[0].compat_flags,
4752 4753 4754 4755
			    flags[1].compat_flags, COMPAT);
	if (ret)
		return ret;

4756
	ret = check_feature(fs_info, flags[0].compat_ro_flags,
4757 4758 4759 4760
			    flags[1].compat_ro_flags, COMPAT_RO);
	if (ret)
		return ret;

4761
	ret = check_feature(fs_info, flags[0].incompat_flags,
4762 4763 4764 4765
			    flags[1].incompat_flags, INCOMPAT);
	if (ret)
		return ret;

4766 4767 4768 4769
	ret = mnt_want_write_file(file);
	if (ret)
		return ret;

4770
	trans = btrfs_start_transaction(root, 0);
4771 4772 4773 4774
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto out_drop_write;
	}
4775

4776
	spin_lock(&fs_info->super_lock);
4777 4778 4779 4780 4781 4782 4783 4784 4785 4786 4787 4788 4789 4790
	newflags = btrfs_super_compat_flags(super_block);
	newflags |= flags[0].compat_flags & flags[1].compat_flags;
	newflags &= ~(flags[0].compat_flags & ~flags[1].compat_flags);
	btrfs_set_super_compat_flags(super_block, newflags);

	newflags = btrfs_super_compat_ro_flags(super_block);
	newflags |= flags[0].compat_ro_flags & flags[1].compat_ro_flags;
	newflags &= ~(flags[0].compat_ro_flags & ~flags[1].compat_ro_flags);
	btrfs_set_super_compat_ro_flags(super_block, newflags);

	newflags = btrfs_super_incompat_flags(super_block);
	newflags |= flags[0].incompat_flags & flags[1].incompat_flags;
	newflags &= ~(flags[0].incompat_flags & ~flags[1].incompat_flags);
	btrfs_set_super_incompat_flags(super_block, newflags);
4791
	spin_unlock(&fs_info->super_lock);
4792

4793
	ret = btrfs_commit_transaction(trans);
4794 4795 4796 4797
out_drop_write:
	mnt_drop_write_file(file);

	return ret;
4798 4799
}

4800 4801 4802 4803 4804 4805 4806 4807 4808 4809 4810 4811 4812 4813 4814 4815 4816 4817 4818 4819 4820 4821 4822 4823 4824 4825 4826 4827 4828 4829 4830 4831 4832 4833 4834
/*
 * Copy the send args from user space and run btrfs_ioctl_send().
 *
 * @compat selects the 32-bit layout: when set (and the kernel is 64-bit
 * with compat support), the smaller struct btrfs_ioctl_send_args_32 is
 * converted to the native layout, with compat_ptr() fixing up the
 * clone_sources user pointer.
 */
static int _btrfs_ioctl_send(struct file *file, void __user *argp, bool compat)
{
	struct btrfs_ioctl_send_args *arg;
	int ret;

	if (compat) {
#if defined(CONFIG_64BIT) && defined(CONFIG_COMPAT)
		struct btrfs_ioctl_send_args_32 args32;

		ret = copy_from_user(&args32, argp, sizeof(args32));
		if (ret)
			return -EFAULT;
		arg = kzalloc(sizeof(*arg), GFP_KERNEL);
		if (!arg)
			return -ENOMEM;
		arg->send_fd = args32.send_fd;
		arg->clone_sources_count = args32.clone_sources_count;
		arg->clone_sources = compat_ptr(args32.clone_sources);
		arg->parent_root = args32.parent_root;
		arg->flags = args32.flags;
		memcpy(arg->reserved, args32.reserved,
		       sizeof(args32.reserved));
#else
		return -ENOTTY;
#endif
	} else {
		arg = memdup_user(argp, sizeof(*arg));
		if (IS_ERR(arg))
			return PTR_ERR(arg);
	}
	ret = btrfs_ioctl_send(file, arg);
	kfree(arg);
	return ret;
}

C
Christoph Hellwig 已提交
4835 4836 4837
long btrfs_ioctl(struct file *file, unsigned int
		cmd, unsigned long arg)
{
4838 4839 4840
	struct inode *inode = file_inode(file);
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_root *root = BTRFS_I(inode)->root;
4841
	void __user *argp = (void __user *)arg;
C
Christoph Hellwig 已提交
4842 4843

	switch (cmd) {
4844 4845
	case FS_IOC_GETVERSION:
		return btrfs_ioctl_getversion(file, argp);
4846
	case FS_IOC_GETFSLABEL:
4847
		return btrfs_ioctl_get_fslabel(fs_info, argp);
4848 4849
	case FS_IOC_SETFSLABEL:
		return btrfs_ioctl_set_fslabel(file, argp);
4850
	case FITRIM:
4851
		return btrfs_ioctl_fitrim(fs_info, argp);
C
Christoph Hellwig 已提交
4852
	case BTRFS_IOC_SNAP_CREATE:
4853
		return btrfs_ioctl_snap_create(file, argp, 0);
4854
	case BTRFS_IOC_SNAP_CREATE_V2:
4855
		return btrfs_ioctl_snap_create_v2(file, argp, 0);
4856
	case BTRFS_IOC_SUBVOL_CREATE:
4857
		return btrfs_ioctl_snap_create(file, argp, 1);
A
Arne Jansen 已提交
4858 4859
	case BTRFS_IOC_SUBVOL_CREATE_V2:
		return btrfs_ioctl_snap_create_v2(file, argp, 1);
4860
	case BTRFS_IOC_SNAP_DESTROY:
4861 4862 4863
		return btrfs_ioctl_snap_destroy(file, argp, false);
	case BTRFS_IOC_SNAP_DESTROY_V2:
		return btrfs_ioctl_snap_destroy(file, argp, true);
4864 4865 4866 4867
	case BTRFS_IOC_SUBVOL_GETFLAGS:
		return btrfs_ioctl_subvol_getflags(file, argp);
	case BTRFS_IOC_SUBVOL_SETFLAGS:
		return btrfs_ioctl_subvol_setflags(file, argp);
4868 4869
	case BTRFS_IOC_DEFAULT_SUBVOL:
		return btrfs_ioctl_default_subvol(file, argp);
C
Christoph Hellwig 已提交
4870
	case BTRFS_IOC_DEFRAG:
C
Chris Mason 已提交
4871 4872 4873
		return btrfs_ioctl_defrag(file, NULL);
	case BTRFS_IOC_DEFRAG_RANGE:
		return btrfs_ioctl_defrag(file, argp);
C
Christoph Hellwig 已提交
4874
	case BTRFS_IOC_RESIZE:
4875
		return btrfs_ioctl_resize(file, argp);
C
Christoph Hellwig 已提交
4876
	case BTRFS_IOC_ADD_DEV:
4877
		return btrfs_ioctl_add_dev(fs_info, argp);
C
Christoph Hellwig 已提交
4878
	case BTRFS_IOC_RM_DEV:
4879
		return btrfs_ioctl_rm_dev(file, argp);
4880 4881
	case BTRFS_IOC_RM_DEV_V2:
		return btrfs_ioctl_rm_dev_v2(file, argp);
J
Jan Schmidt 已提交
4882
	case BTRFS_IOC_FS_INFO:
4883
		return btrfs_ioctl_fs_info(fs_info, argp);
J
Jan Schmidt 已提交
4884
	case BTRFS_IOC_DEV_INFO:
4885
		return btrfs_ioctl_dev_info(fs_info, argp);
C
Christoph Hellwig 已提交
4886
	case BTRFS_IOC_BALANCE:
4887
		return btrfs_ioctl_balance(file, NULL);
4888 4889
	case BTRFS_IOC_TREE_SEARCH:
		return btrfs_ioctl_tree_search(file, argp);
G
Gerhard Heift 已提交
4890 4891
	case BTRFS_IOC_TREE_SEARCH_V2:
		return btrfs_ioctl_tree_search_v2(file, argp);
4892 4893
	case BTRFS_IOC_INO_LOOKUP:
		return btrfs_ioctl_ino_lookup(file, argp);
4894 4895 4896
	case BTRFS_IOC_INO_PATHS:
		return btrfs_ioctl_ino_to_path(root, argp);
	case BTRFS_IOC_LOGICAL_INO:
4897 4898 4899
		return btrfs_ioctl_logical_to_ino(fs_info, argp, 1);
	case BTRFS_IOC_LOGICAL_INO_V2:
		return btrfs_ioctl_logical_to_ino(fs_info, argp, 2);
J
Josef Bacik 已提交
4900
	case BTRFS_IOC_SPACE_INFO:
4901
		return btrfs_ioctl_space_info(fs_info, argp);
4902 4903 4904
	case BTRFS_IOC_SYNC: {
		int ret;

4905
		ret = btrfs_start_delalloc_roots(fs_info, LONG_MAX, false);
4906 4907
		if (ret)
			return ret;
4908
		ret = btrfs_sync_fs(inode->i_sb, 1);
4909 4910
		/*
		 * The transaction thread may want to do more work,
4911
		 * namely it pokes the cleaner kthread that will start
4912 4913
		 * processing uncleaned subvols.
		 */
4914
		wake_up_process(fs_info->transaction_kthread);
4915 4916
		return ret;
	}
4917
	case BTRFS_IOC_START_SYNC:
4918
		return btrfs_ioctl_start_sync(root, argp);
4919
	case BTRFS_IOC_WAIT_SYNC:
4920
		return btrfs_ioctl_wait_sync(fs_info, argp);
J
Jan Schmidt 已提交
4921
	case BTRFS_IOC_SCRUB:
M
Miao Xie 已提交
4922
		return btrfs_ioctl_scrub(file, argp);
J
Jan Schmidt 已提交
4923
	case BTRFS_IOC_SCRUB_CANCEL:
4924
		return btrfs_ioctl_scrub_cancel(fs_info);
J
Jan Schmidt 已提交
4925
	case BTRFS_IOC_SCRUB_PROGRESS:
4926
		return btrfs_ioctl_scrub_progress(fs_info, argp);
4927
	case BTRFS_IOC_BALANCE_V2:
4928
		return btrfs_ioctl_balance(file, argp);
4929
	case BTRFS_IOC_BALANCE_CTL:
4930
		return btrfs_ioctl_balance_ctl(fs_info, arg);
4931
	case BTRFS_IOC_BALANCE_PROGRESS:
4932
		return btrfs_ioctl_balance_progress(fs_info, argp);
4933 4934
	case BTRFS_IOC_SET_RECEIVED_SUBVOL:
		return btrfs_ioctl_set_received_subvol(file, argp);
4935 4936 4937 4938
#ifdef CONFIG_64BIT
	case BTRFS_IOC_SET_RECEIVED_SUBVOL_32:
		return btrfs_ioctl_set_received_subvol_32(file, argp);
#endif
4939
	case BTRFS_IOC_SEND:
4940 4941 4942 4943 4944
		return _btrfs_ioctl_send(file, argp, false);
#if defined(CONFIG_64BIT) && defined(CONFIG_COMPAT)
	case BTRFS_IOC_SEND_32:
		return _btrfs_ioctl_send(file, argp, true);
#endif
4945
	case BTRFS_IOC_GET_DEV_STATS:
4946
		return btrfs_ioctl_get_dev_stats(fs_info, argp);
A
Arne Jansen 已提交
4947
	case BTRFS_IOC_QUOTA_CTL:
4948
		return btrfs_ioctl_quota_ctl(file, argp);
A
Arne Jansen 已提交
4949
	case BTRFS_IOC_QGROUP_ASSIGN:
4950
		return btrfs_ioctl_qgroup_assign(file, argp);
A
Arne Jansen 已提交
4951
	case BTRFS_IOC_QGROUP_CREATE:
4952
		return btrfs_ioctl_qgroup_create(file, argp);
A
Arne Jansen 已提交
4953
	case BTRFS_IOC_QGROUP_LIMIT:
4954
		return btrfs_ioctl_qgroup_limit(file, argp);
J
Jan Schmidt 已提交
4955 4956 4957
	case BTRFS_IOC_QUOTA_RESCAN:
		return btrfs_ioctl_quota_rescan(file, argp);
	case BTRFS_IOC_QUOTA_RESCAN_STATUS:
4958
		return btrfs_ioctl_quota_rescan_status(fs_info, argp);
4959
	case BTRFS_IOC_QUOTA_RESCAN_WAIT:
4960
		return btrfs_ioctl_quota_rescan_wait(fs_info, argp);
4961
	case BTRFS_IOC_DEV_REPLACE:
4962
		return btrfs_ioctl_dev_replace(fs_info, argp);
4963
	case BTRFS_IOC_GET_SUPPORTED_FEATURES:
4964
		return btrfs_ioctl_get_supported_features(argp);
4965
	case BTRFS_IOC_GET_FEATURES:
4966
		return btrfs_ioctl_get_features(fs_info, argp);
4967 4968
	case BTRFS_IOC_SET_FEATURES:
		return btrfs_ioctl_set_features(file, argp);
4969 4970
	case BTRFS_IOC_GET_SUBVOL_INFO:
		return btrfs_ioctl_get_subvol_info(file, argp);
4971 4972
	case BTRFS_IOC_GET_SUBVOL_ROOTREF:
		return btrfs_ioctl_get_subvol_rootref(file, argp);
4973 4974
	case BTRFS_IOC_INO_LOOKUP_USER:
		return btrfs_ioctl_ino_lookup_user(file, argp);
B
Boris Burkov 已提交
4975 4976 4977 4978
	case FS_IOC_ENABLE_VERITY:
		return fsverity_ioctl_enable(file, (const void __user *)argp);
	case FS_IOC_MEASURE_VERITY:
		return fsverity_ioctl_measure(file, argp);
C
Christoph Hellwig 已提交
4979 4980 4981 4982
	}

	return -ENOTTY;
}
4983 4984 4985 4986

#ifdef CONFIG_COMPAT
/*
 * Compat (32-bit user space on 64-bit kernel) ioctl entry: remap the few
 * commands whose numbers differ, then forward to btrfs_ioctl() with the
 * pointer argument converted via compat_ptr().
 */
long btrfs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	/*
	 * These all access 32-bit values anyway so no further
	 * handling is necessary.
	 */
	switch (cmd) {
	case FS_IOC32_GETVERSION:
		cmd = FS_IOC_GETVERSION;
		break;
	}

	return btrfs_ioctl(file, cmd, (unsigned long) compat_ptr(arg));
}
#endif