ioctl.c 129.2 KB
Newer Older
1
// SPDX-License-Identifier: GPL-2.0
C
Christoph Hellwig 已提交
2 3 4 5 6 7 8 9
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 */

#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/file.h>
#include <linux/fs.h>
10
#include <linux/fsnotify.h>
C
Christoph Hellwig 已提交
11 12 13 14 15
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/time.h>
#include <linux/string.h>
#include <linux/backing-dev.h>
16 17
#include <linux/mount.h>
#include <linux/namei.h>
C
Christoph Hellwig 已提交
18 19
#include <linux/writeback.h>
#include <linux/compat.h>
20
#include <linux/security.h>
C
Christoph Hellwig 已提交
21
#include <linux/xattr.h>
22
#include <linux/mm.h>
23
#include <linux/slab.h>
24
#include <linux/blkdev.h>
25
#include <linux/uuid.h>
26
#include <linux/btrfs.h>
M
Mark Fasheh 已提交
27
#include <linux/uaccess.h>
28
#include <linux/iversion.h>
M
Miklos Szeredi 已提交
29
#include <linux/fileattr.h>
B
Boris Burkov 已提交
30
#include <linux/fsverity.h>
C
Christoph Hellwig 已提交
31 32
#include "ctree.h"
#include "disk-io.h"
33
#include "export.h"
C
Christoph Hellwig 已提交
34 35 36 37
#include "transaction.h"
#include "btrfs_inode.h"
#include "print-tree.h"
#include "volumes.h"
38
#include "locking.h"
39
#include "backref.h"
40
#include "rcu-string.h"
41
#include "send.h"
42
#include "dev-replace.h"
43
#include "props.h"
44
#include "sysfs.h"
J
Josef Bacik 已提交
45
#include "qgroup.h"
46
#include "tree-log.h"
47
#include "compression.h"
48
#include "space-info.h"
49
#include "delalloc-space.h"
50
#include "block-group.h"
51
#include "subpage.h"
C
Christoph Hellwig 已提交
52

53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77
#ifdef CONFIG_64BIT
/*
 * If we have a 32-bit userspace and 64-bit kernel, then the UAPI
 * structures are incorrect, as the timespec structure from userspace
 * is 4 bytes too small. We define these alternatives here to teach
 * the kernel about the 32-bit struct packing.
 */
struct btrfs_ioctl_timespec_32 {
	__u64 sec;
	__u32 nsec;
} __attribute__ ((__packed__));

/* 32-bit layout of the received-subvol args; uses the packed timespec above. */
struct btrfs_ioctl_received_subvol_args_32 {
	char	uuid[BTRFS_UUID_SIZE];	/* in */
	__u64	stransid;		/* in */
	__u64	rtransid;		/* out */
	struct btrfs_ioctl_timespec_32 stime; /* in */
	struct btrfs_ioctl_timespec_32 rtime; /* out */
	__u64	flags;			/* in */
	__u64	reserved[16];		/* in */
} __attribute__ ((__packed__));

/* Compat ioctl number: same command (37) but with the 32-bit arg layout. */
#define BTRFS_IOC_SET_RECEIVED_SUBVOL_32 _IOWR(BTRFS_IOCTL_MAGIC, 37, \
				struct btrfs_ioctl_received_subvol_args_32)
#endif

78 79 80 81 82 83 84 85 86 87 88 89 90
#if defined(CONFIG_64BIT) && defined(CONFIG_COMPAT)
/*
 * 32-bit layout of struct btrfs_ioctl_send_args: clone_sources is a
 * 32-bit user pointer (compat_uptr_t) here instead of a 64-bit one.
 */
struct btrfs_ioctl_send_args_32 {
	__s64 send_fd;			/* in */
	__u64 clone_sources_count;	/* in */
	compat_uptr_t clone_sources;	/* in */
	__u64 parent_root;		/* in */
	__u64 flags;			/* in */
	__u64 reserved[4];		/* in */
} __attribute__ ((__packed__));

/* Compat ioctl number: same command (38) but with the 32-bit arg layout. */
#define BTRFS_IOC_SEND_32 _IOW(BTRFS_IOCTL_MAGIC, 38, \
			       struct btrfs_ioctl_send_args_32)
#endif
91

92
/* Mask out flags that are inappropriate for the given type of inode. */
93 94
static unsigned int btrfs_mask_fsflags_for_type(struct inode *inode,
		unsigned int flags)
95
{
96
	if (S_ISDIR(inode->i_mode))
97
		return flags;
98
	else if (S_ISREG(inode->i_mode))
99 100 101 102 103 104
		return flags & ~FS_DIRSYNC_FL;
	else
		return flags & (FS_NODUMP_FL | FS_NOATIME_FL);
}

/*
105 106
 * Export internal inode flags to the format expected by the FS_IOC_GETFLAGS
 * ioctl.
107
 */
108
static unsigned int btrfs_inode_flags_to_fsflags(struct btrfs_inode *binode)
109 110
{
	unsigned int iflags = 0;
111
	u32 flags = binode->flags;
B
Boris Burkov 已提交
112
	u32 ro_flags = binode->ro_flags;
113 114 115 116 117 118 119 120 121 122 123 124 125

	if (flags & BTRFS_INODE_SYNC)
		iflags |= FS_SYNC_FL;
	if (flags & BTRFS_INODE_IMMUTABLE)
		iflags |= FS_IMMUTABLE_FL;
	if (flags & BTRFS_INODE_APPEND)
		iflags |= FS_APPEND_FL;
	if (flags & BTRFS_INODE_NODUMP)
		iflags |= FS_NODUMP_FL;
	if (flags & BTRFS_INODE_NOATIME)
		iflags |= FS_NOATIME_FL;
	if (flags & BTRFS_INODE_DIRSYNC)
		iflags |= FS_DIRSYNC_FL;
L
Li Zefan 已提交
126 127
	if (flags & BTRFS_INODE_NODATACOW)
		iflags |= FS_NOCOW_FL;
B
Boris Burkov 已提交
128 129
	if (ro_flags & BTRFS_INODE_RO_VERITY)
		iflags |= FS_VERITY_FL;
L
Li Zefan 已提交
130

131
	if (flags & BTRFS_INODE_NOCOMPRESS)
L
Li Zefan 已提交
132
		iflags |= FS_NOCOMP_FL;
133 134
	else if (flags & BTRFS_INODE_COMPRESS)
		iflags |= FS_COMPR_FL;
135 136 137 138 139 140 141

	return iflags;
}

/*
 * Update inode->i_flags based on the btrfs internal flags.
 */
142
void btrfs_sync_inode_flags_to_i_flags(struct inode *inode)
143
{
144
	struct btrfs_inode *binode = BTRFS_I(inode);
145
	unsigned int new_fl = 0;
146

147
	if (binode->flags & BTRFS_INODE_SYNC)
148
		new_fl |= S_SYNC;
149
	if (binode->flags & BTRFS_INODE_IMMUTABLE)
150
		new_fl |= S_IMMUTABLE;
151
	if (binode->flags & BTRFS_INODE_APPEND)
152
		new_fl |= S_APPEND;
153
	if (binode->flags & BTRFS_INODE_NOATIME)
154
		new_fl |= S_NOATIME;
155
	if (binode->flags & BTRFS_INODE_DIRSYNC)
156
		new_fl |= S_DIRSYNC;
B
Boris Burkov 已提交
157 158
	if (binode->ro_flags & BTRFS_INODE_RO_VERITY)
		new_fl |= S_VERITY;
159 160

	set_mask_bits(&inode->i_flags,
B
Boris Burkov 已提交
161 162
		      S_SYNC | S_APPEND | S_IMMUTABLE | S_NOATIME | S_DIRSYNC |
		      S_VERITY, new_fl);
163 164
}

165 166 167 168 169
/*
 * Check if @flags are a supported and valid set of FS_*_FL flags and that
 * the old and new flags are not conflicting
 */
static int check_fsflags(unsigned int old_flags, unsigned int flags)
{
	const unsigned int supported = FS_IMMUTABLE_FL | FS_APPEND_FL |
				       FS_NOATIME_FL | FS_NODUMP_FL |
				       FS_SYNC_FL | FS_DIRSYNC_FL |
				       FS_NOCOMP_FL | FS_COMPR_FL |
				       FS_NOCOW_FL;

	/* Any flag outside the supported set is rejected outright. */
	if (flags & ~supported)
		return -EOPNOTSUPP;

	/* COMPR and NOCOMP on new/old are valid */
	if ((flags & FS_NOCOMP_FL) && (flags & FS_COMPR_FL))
		return -EINVAL;

	if ((flags & FS_COMPR_FL) && (flags & FS_NOCOW_FL))
		return -EINVAL;

	/* NOCOW and compression options are mutually exclusive */
	if ((old_flags & FS_NOCOW_FL) && (flags & (FS_COMPR_FL | FS_NOCOMP_FL)))
		return -EINVAL;
	if ((flags & FS_NOCOW_FL) && (old_flags & (FS_COMPR_FL | FS_NOCOMP_FL)))
		return -EINVAL;

	return 0;
}

194 195 196 197 198 199 200 201 202
/* Reject flag combinations that this particular filesystem cannot support. */
static int check_fsflags_compatible(struct btrfs_fs_info *fs_info,
				    unsigned int flags)
{
	/* NOCOW is not supported on zoned filesystems. */
	if ((flags & FS_NOCOW_FL) && btrfs_is_zoned(fs_info))
		return -EPERM;

	return 0;
}

M
Miklos Szeredi 已提交
203 204 205 206 207
/*
 * Set flags/xflags from the internal inode flags. The remaining items of
 * fsxattr are zeroed.
 */
int btrfs_fileattr_get(struct dentry *dentry, struct fileattr *fa)
{
	fileattr_fill_flags(fa,
			btrfs_inode_flags_to_fsflags(BTRFS_I(d_inode(dentry))));
	return 0;
}

/*
 * Apply new FS_*_FL attribute flags to an inode (fileattr_set hook).
 *
 * Validates the requested flags, translates them into BTRFS_INODE_* bits,
 * persists the compression property when needed, and commits the new flags
 * in a transaction.  Returns 0 or a negative errno.
 */
int btrfs_fileattr_set(struct user_namespace *mnt_userns,
		       struct dentry *dentry, struct fileattr *fa)
{
	struct inode *inode = d_inode(dentry);
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_inode *binode = BTRFS_I(inode);
	struct btrfs_root *root = binode->root;
	struct btrfs_trans_handle *trans;
	unsigned int fsflags, old_fsflags;
	int ret;
	const char *comp = NULL;	/* compression property value to set, if any */
	u32 binode_flags;		/* new flags staged here, applied at update_flags */

	if (btrfs_root_readonly(root))
		return -EROFS;

	if (fileattr_has_fsx(fa))
		return -EOPNOTSUPP;

	fsflags = btrfs_mask_fsflags_for_type(inode, fa->flags);
	old_fsflags = btrfs_inode_flags_to_fsflags(binode);
	ret = check_fsflags(old_fsflags, fsflags);
	if (ret)
		return ret;

	ret = check_fsflags_compatible(fs_info, fsflags);
	if (ret)
		return ret;

	/* Stage the simple one-to-one flag translations. */
	binode_flags = binode->flags;
	if (fsflags & FS_SYNC_FL)
		binode_flags |= BTRFS_INODE_SYNC;
	else
		binode_flags &= ~BTRFS_INODE_SYNC;
	if (fsflags & FS_IMMUTABLE_FL)
		binode_flags |= BTRFS_INODE_IMMUTABLE;
	else
		binode_flags &= ~BTRFS_INODE_IMMUTABLE;
	if (fsflags & FS_APPEND_FL)
		binode_flags |= BTRFS_INODE_APPEND;
	else
		binode_flags &= ~BTRFS_INODE_APPEND;
	if (fsflags & FS_NODUMP_FL)
		binode_flags |= BTRFS_INODE_NODUMP;
	else
		binode_flags &= ~BTRFS_INODE_NODUMP;
	if (fsflags & FS_NOATIME_FL)
		binode_flags |= BTRFS_INODE_NOATIME;
	else
		binode_flags &= ~BTRFS_INODE_NOATIME;

	/* If coming from FS_IOC_FSSETXATTR then skip unconverted flags */
	if (!fa->flags_valid) {
		/* 1 item for the inode */
		trans = btrfs_start_transaction(root, 1);
		if (IS_ERR(trans))
			return PTR_ERR(trans);
		goto update_flags;
	}

	if (fsflags & FS_DIRSYNC_FL)
		binode_flags |= BTRFS_INODE_DIRSYNC;
	else
		binode_flags &= ~BTRFS_INODE_DIRSYNC;
	if (fsflags & FS_NOCOW_FL) {
		if (S_ISREG(inode->i_mode)) {
			/*
			 * It's safe to turn csums off here, no extents exist.
			 * Otherwise we want the flag to reflect the real COW
			 * status of the file and will not set it.
			 */
			if (inode->i_size == 0)
				binode_flags |= BTRFS_INODE_NODATACOW |
						BTRFS_INODE_NODATASUM;
		} else {
			binode_flags |= BTRFS_INODE_NODATACOW;
		}
	} else {
		/*
		 * Revert back under same assumptions as above
		 */
		if (S_ISREG(inode->i_mode)) {
			if (inode->i_size == 0)
				binode_flags &= ~(BTRFS_INODE_NODATACOW |
						  BTRFS_INODE_NODATASUM);
		} else {
			binode_flags &= ~BTRFS_INODE_NODATACOW;
		}
	}

	/*
	 * The COMPRESS flag can only be changed by users, while the NOCOMPRESS
	 * flag may be changed automatically if compression code won't make
	 * things smaller.
	 */
	if (fsflags & FS_NOCOMP_FL) {
		binode_flags &= ~BTRFS_INODE_COMPRESS;
		binode_flags |= BTRFS_INODE_NOCOMPRESS;
	} else if (fsflags & FS_COMPR_FL) {

		/* Compressed swapfiles are not supported. */
		if (IS_SWAPFILE(inode))
			return -ETXTBSY;

		binode_flags |= BTRFS_INODE_COMPRESS;
		binode_flags &= ~BTRFS_INODE_NOCOMPRESS;

		/* Fall back to zlib if the mount-level type has no valid name. */
		comp = btrfs_compress_type2str(fs_info->compress_type);
		if (!comp || comp[0] == 0)
			comp = btrfs_compress_type2str(BTRFS_COMPRESS_ZLIB);
	} else {
		binode_flags &= ~(BTRFS_INODE_COMPRESS | BTRFS_INODE_NOCOMPRESS);
	}

	/*
	 * 1 for inode item
	 * 2 for properties
	 */
	trans = btrfs_start_transaction(root, 3);
	if (IS_ERR(trans))
		return PTR_ERR(trans);

	if (comp) {
		ret = btrfs_set_prop(trans, inode, "btrfs.compression", comp,
				     strlen(comp), 0);
		if (ret) {
			btrfs_abort_transaction(trans, ret);
			goto out_end_trans;
		}
	} else {
		/* Clearing a property that isn't set returns -ENODATA; not an error. */
		ret = btrfs_set_prop(trans, inode, "btrfs.compression", NULL,
				     0, 0);
		if (ret && ret != -ENODATA) {
			btrfs_abort_transaction(trans, ret);
			goto out_end_trans;
		}
	}

update_flags:
	binode->flags = binode_flags;
	btrfs_sync_inode_flags_to_i_flags(inode);
	inode_inc_iversion(inode);
	inode->i_ctime = current_time(inode);
	ret = btrfs_update_inode(trans, root, BTRFS_I(inode));

 out_end_trans:
	btrfs_end_transaction(trans);
	return ret;
}

364 365 366
/*
 * Start exclusive operation @type, return true on success
 */
367 368 369
bool btrfs_exclop_start(struct btrfs_fs_info *fs_info,
			enum btrfs_exclusive_operation type)
{
370 371 372 373 374 375 376 377 378 379
	bool ret = false;

	spin_lock(&fs_info->super_lock);
	if (fs_info->exclusive_operation == BTRFS_EXCLOP_NONE) {
		fs_info->exclusive_operation = type;
		ret = true;
	}
	spin_unlock(&fs_info->super_lock);

	return ret;
380 381
}

382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407
/*
 * Conditionally allow to enter the exclusive operation in case it's compatible
 * with the running one.  This must be paired with btrfs_exclop_start_unlock and
 * btrfs_exclop_finish.
 *
 * Compatibility:
 * - the same type is already running
 * - not BTRFS_EXCLOP_NONE - this is intentionally incompatible and the caller
 *   must check the condition first that would allow none -> @type
 */
bool btrfs_exclop_start_try_lock(struct btrfs_fs_info *fs_info,
				 enum btrfs_exclusive_operation type)
{
	spin_lock(&fs_info->super_lock);
	/* NOTE: on success we return with super_lock still held. */
	if (fs_info->exclusive_operation == type)
		return true;

	spin_unlock(&fs_info->super_lock);
	return false;
}

/* Drop the lock taken by a successful btrfs_exclop_start_try_lock(). */
void btrfs_exclop_start_unlock(struct btrfs_fs_info *fs_info)
{
	spin_unlock(&fs_info->super_lock);
}

408 409
/* End the current exclusive operation and notify sysfs watchers. */
void btrfs_exclop_finish(struct btrfs_fs_info *fs_info)
{
	spin_lock(&fs_info->super_lock);
	/* WRITE_ONCE pairs with lockless readers of exclusive_operation. */
	WRITE_ONCE(fs_info->exclusive_operation, BTRFS_EXCLOP_NONE);
	spin_unlock(&fs_info->super_lock);
	sysfs_notify(&fs_info->fs_devices->fsid_kobj, NULL, "exclusive_operation");
}

416 417
/* Return the inode generation to userspace (FS_IOC_GETVERSION). */
static int btrfs_ioctl_getversion(struct file *file, int __user *arg)
{
	return put_user(file_inode(file)->i_generation, arg);
}
C
Christoph Hellwig 已提交
422

423 424
/*
 * FITRIM ioctl: discard free space on all devices that support it.
 * Requires CAP_SYS_ADMIN; returns 0 or a negative errno and copies the
 * updated range (trimmed byte count) back to userspace.
 */
static noinline int btrfs_ioctl_fitrim(struct btrfs_fs_info *fs_info,
					void __user *arg)
{
	struct btrfs_device *device;
	struct request_queue *q;
	struct fstrim_range range;
	u64 minlen = ULLONG_MAX;	/* smallest discard granularity seen */
	u64 num_devices = 0;		/* devices that support discard */
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	/*
	 * btrfs_trim_block_group() depends on space cache, which is not
	 * available in zoned filesystem. So, disallow fitrim on a zoned
	 * filesystem for now.
	 */
	if (btrfs_is_zoned(fs_info))
		return -EOPNOTSUPP;

	/*
	 * If the fs is mounted with nologreplay, which requires it to be
	 * mounted in RO mode as well, we can not allow discard on free space
	 * inside block groups, because log trees refer to extents that are not
	 * pinned in a block group's free space cache (pinning the extents is
	 * precisely the first phase of replaying a log tree).
	 */
	if (btrfs_test_opt(fs_info, NOLOGREPLAY))
		return -EROFS;

	/* Walk the device list under RCU to count discard-capable devices. */
	rcu_read_lock();
	list_for_each_entry_rcu(device, &fs_info->fs_devices->devices,
				dev_list) {
		if (!device->bdev)
			continue;
		q = bdev_get_queue(device->bdev);
		if (blk_queue_discard(q)) {
			num_devices++;
			minlen = min_t(u64, q->limits.discard_granularity,
				     minlen);
		}
	}
	rcu_read_unlock();

	if (!num_devices)
		return -EOPNOTSUPP;
	if (copy_from_user(&range, arg, sizeof(range)))
		return -EFAULT;

	/*
	 * NOTE: Don't truncate the range using super->total_bytes.  Bytenr of
	 * block group is in the logical address space, which can be any
	 * sectorsize aligned bytenr in  the range [0, U64_MAX].
	 */
	if (range.len < fs_info->sb->s_blocksize)
		return -EINVAL;

	range.minlen = max(range.minlen, minlen);
	ret = btrfs_trim_fs(fs_info, &range);
	if (ret < 0)
		return ret;

	/* Report back how much was actually trimmed (range.len updated). */
	if (copy_to_user(arg, &range, sizeof(range)))
		return -EFAULT;

	return 0;
}

492
/* Return 1 if the BTRFS_UUID_SIZE-byte uuid is all zeroes, 0 otherwise. */
int __pure btrfs_is_empty_uuid(u8 *uuid)
{
	int i;

	for (i = 0; i < BTRFS_UUID_SIZE; i++)
		if (uuid[i] != 0)
			return 0;

	return 1;
}

503 504
/*
 * Create a new, empty subvolume named @name under directory @dir and
 * instantiate @dentry with its root inode.
 *
 * Builds the new root's tree block and root item, inserts the root and the
 * directory entries in one committed transaction.  Returns 0 or a negative
 * errno; on failure before the transaction starts, resources are released
 * via the fail_free path.
 */
static noinline int create_subvol(struct user_namespace *mnt_userns,
				  struct inode *dir, struct dentry *dentry,
				  const char *name, int namelen,
				  struct btrfs_qgroup_inherit *inherit)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb);
	struct btrfs_trans_handle *trans;
	struct btrfs_key key;
	struct btrfs_root_item *root_item;
	struct btrfs_inode_item *inode_item;
	struct extent_buffer *leaf;
	struct btrfs_root *root = BTRFS_I(dir)->root;
	struct btrfs_root *new_root;
	struct btrfs_block_rsv block_rsv;
	struct timespec64 cur_time = current_time(dir);
	struct inode *inode;
	int ret;
	int err;
	dev_t anon_dev = 0;	/* anonymous bdev for the new root; 0 once owned by new_root */
	u64 objectid;
	u64 index = 0;

	root_item = kzalloc(sizeof(*root_item), GFP_KERNEL);
	if (!root_item)
		return -ENOMEM;

	ret = btrfs_get_free_objectid(fs_info->tree_root, &objectid);
	if (ret)
		goto fail_free;

	ret = get_anon_bdev(&anon_dev);
	if (ret < 0)
		goto fail_free;

	/*
	 * Don't create subvolume whose level is not zero. Or qgroup will be
	 * screwed up since it assumes subvolume qgroup's level to be 0.
	 */
	if (btrfs_qgroup_level(objectid)) {
		ret = -ENOSPC;
		goto fail_free;
	}

	btrfs_init_block_rsv(&block_rsv, BTRFS_BLOCK_RSV_TEMP);
	/*
	 * The same as the snapshot creation, please see the comment
	 * of create_snapshot().
	 */
	ret = btrfs_subvolume_reserve_metadata(root, &block_rsv, 8, false);
	if (ret)
		goto fail_free;

	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		btrfs_subvolume_release_metadata(root, &block_rsv);
		goto fail_free;
	}
	trans->block_rsv = &block_rsv;
	trans->bytes_reserved = block_rsv.size;

	ret = btrfs_qgroup_inherit(trans, 0, objectid, inherit);
	if (ret)
		goto fail;

	/* Allocate the (empty) root node of the new subvolume's tree. */
	leaf = btrfs_alloc_tree_block(trans, root, 0, objectid, NULL, 0, 0, 0,
				      BTRFS_NESTING_NORMAL);
	if (IS_ERR(leaf)) {
		ret = PTR_ERR(leaf);
		goto fail;
	}

	btrfs_mark_buffer_dirty(leaf);

	/* Fill in the root item describing the new subvolume. */
	inode_item = &root_item->inode;
	btrfs_set_stack_inode_generation(inode_item, 1);
	btrfs_set_stack_inode_size(inode_item, 3);
	btrfs_set_stack_inode_nlink(inode_item, 1);
	btrfs_set_stack_inode_nbytes(inode_item,
				     fs_info->nodesize);
	btrfs_set_stack_inode_mode(inode_item, S_IFDIR | 0755);

	btrfs_set_root_flags(root_item, 0);
	btrfs_set_root_limit(root_item, 0);
	btrfs_set_stack_inode_flags(inode_item, BTRFS_INODE_ROOT_ITEM_INIT);

	btrfs_set_root_bytenr(root_item, leaf->start);
	btrfs_set_root_generation(root_item, trans->transid);
	btrfs_set_root_level(root_item, 0);
	btrfs_set_root_refs(root_item, 1);
	btrfs_set_root_used(root_item, leaf->len);
	btrfs_set_root_last_snapshot(root_item, 0);

	btrfs_set_root_generation_v2(root_item,
			btrfs_root_generation(root_item));
	generate_random_guid(root_item->uuid);
	btrfs_set_stack_timespec_sec(&root_item->otime, cur_time.tv_sec);
	btrfs_set_stack_timespec_nsec(&root_item->otime, cur_time.tv_nsec);
	root_item->ctime = root_item->otime;
	btrfs_set_root_ctransid(root_item, trans->transid);
	btrfs_set_root_otransid(root_item, trans->transid);

	btrfs_tree_unlock(leaf);

	btrfs_set_root_dirid(root_item, BTRFS_FIRST_FREE_OBJECTID);

	key.objectid = objectid;
	key.offset = 0;
	key.type = BTRFS_ROOT_ITEM_KEY;
	ret = btrfs_insert_root(trans, fs_info->tree_root, &key,
				root_item);
	if (ret) {
		/*
		 * Since we don't abort the transaction in this case, free the
		 * tree block so that we don't leak space and leave the
		 * filesystem in an inconsistent state (an extent item in the
		 * extent tree without backreferences). Also no need to have
		 * the tree block locked since it is not in any tree at this
		 * point, so no other task can find it and use it.
		 */
		btrfs_free_tree_block(trans, root, leaf, 0, 1);
		free_extent_buffer(leaf);
		goto fail;
	}

	free_extent_buffer(leaf);
	leaf = NULL;

	key.offset = (u64)-1;
	new_root = btrfs_get_new_fs_root(fs_info, objectid, anon_dev);
	if (IS_ERR(new_root)) {
		free_anon_bdev(anon_dev);
		ret = PTR_ERR(new_root);
		btrfs_abort_transaction(trans, ret);
		goto fail;
	}
	/* Freeing will be done in btrfs_put_root() of new_root */
	anon_dev = 0;

	ret = btrfs_record_root_in_trans(trans, new_root);
	if (ret) {
		btrfs_put_root(new_root);
		btrfs_abort_transaction(trans, ret);
		goto fail;
	}

	ret = btrfs_create_subvol_root(trans, new_root, root, mnt_userns);
	btrfs_put_root(new_root);
	if (ret) {
		/* We potentially lose an unused inode item here */
		btrfs_abort_transaction(trans, ret);
		goto fail;
	}

	/*
	 * insert the directory item
	 */
	ret = btrfs_set_inode_index(BTRFS_I(dir), &index);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto fail;
	}

	ret = btrfs_insert_dir_item(trans, name, namelen, BTRFS_I(dir), &key,
				    BTRFS_FT_DIR, index);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto fail;
	}

	btrfs_i_size_write(BTRFS_I(dir), dir->i_size + namelen * 2);
	ret = btrfs_update_inode(trans, root, BTRFS_I(dir));
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto fail;
	}

	ret = btrfs_add_root_ref(trans, objectid, root->root_key.objectid,
				 btrfs_ino(BTRFS_I(dir)), index, name, namelen);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto fail;
	}

	ret = btrfs_uuid_tree_add(trans, root_item->uuid,
				  BTRFS_UUID_KEY_SUBVOL, objectid);
	if (ret)
		btrfs_abort_transaction(trans, ret);

fail:
	kfree(root_item);
	trans->block_rsv = NULL;
	trans->bytes_reserved = 0;
	btrfs_subvolume_release_metadata(root, &block_rsv);

	/* Commit regardless of earlier errors; keep the first error code. */
	err = btrfs_commit_transaction(trans);
	if (err && !ret)
		ret = err;

	if (!ret) {
		inode = btrfs_lookup_dentry(dir, dentry);
		if (IS_ERR(inode))
			return PTR_ERR(inode);
		d_instantiate(dentry, inode);
	}
	return ret;

fail_free:
	if (anon_dev)
		free_anon_bdev(anon_dev);
	kfree(root_item);
	return ret;
}

717
/*
 * Snapshot subvolume @root as a new entry @dentry under directory @dir.
 *
 * The actual snapshot work happens during transaction commit via the
 * pending_snapshot list; this function sets up the request, commits, and
 * then instantiates the dentry.  Returns 0 or a negative errno.
 */
static int create_snapshot(struct btrfs_root *root, struct inode *dir,
			   struct dentry *dentry, bool readonly,
			   struct btrfs_qgroup_inherit *inherit)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb);
	struct inode *inode;
	struct btrfs_pending_snapshot *pending_snapshot;
	struct btrfs_trans_handle *trans;
	int ret;

	/* Only shareable (fs-tree) roots can be snapshotted. */
	if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state))
		return -EINVAL;

	if (atomic_read(&root->nr_swapfiles)) {
		btrfs_warn(fs_info,
			   "cannot snapshot subvolume with active swapfile");
		return -ETXTBSY;
	}

	pending_snapshot = kzalloc(sizeof(*pending_snapshot), GFP_KERNEL);
	if (!pending_snapshot)
		return -ENOMEM;

	ret = get_anon_bdev(&pending_snapshot->anon_dev);
	if (ret < 0)
		goto free_pending;
	pending_snapshot->root_item = kzalloc(sizeof(struct btrfs_root_item),
			GFP_KERNEL);
	pending_snapshot->path = btrfs_alloc_path();
	if (!pending_snapshot->root_item || !pending_snapshot->path) {
		ret = -ENOMEM;
		goto free_pending;
	}

	btrfs_init_block_rsv(&pending_snapshot->block_rsv,
			     BTRFS_BLOCK_RSV_TEMP);
	/*
	 * 1 - parent dir inode
	 * 2 - dir entries
	 * 1 - root item
	 * 2 - root ref/backref
	 * 1 - root of snapshot
	 * 1 - UUID item
	 */
	ret = btrfs_subvolume_reserve_metadata(BTRFS_I(dir)->root,
					&pending_snapshot->block_rsv, 8,
					false);
	if (ret)
		goto free_pending;

	pending_snapshot->dentry = dentry;
	pending_snapshot->root = root;
	pending_snapshot->readonly = readonly;
	pending_snapshot->dir = dir;
	pending_snapshot->inherit = inherit;

	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto fail;
	}

	/* Queue the request; the commit path performs the snapshot. */
	spin_lock(&fs_info->trans_lock);
	list_add(&pending_snapshot->list,
		 &trans->transaction->pending_snapshots);
	spin_unlock(&fs_info->trans_lock);

	ret = btrfs_commit_transaction(trans);
	if (ret)
		goto fail;

	/* Error recorded by the commit-time snapshot worker, if any. */
	ret = pending_snapshot->error;
	if (ret)
		goto fail;

	ret = btrfs_orphan_cleanup(pending_snapshot->snap);
	if (ret)
		goto fail;

	inode = btrfs_lookup_dentry(d_inode(dentry->d_parent), dentry);
	if (IS_ERR(inode)) {
		ret = PTR_ERR(inode);
		goto fail;
	}

	d_instantiate(dentry, inode);
	ret = 0;
	/* anon_dev is now owned by the new root; don't free it below. */
	pending_snapshot->anon_dev = 0;
fail:
	/* Prevent double freeing of anon_dev */
	if (ret && pending_snapshot->snap)
		pending_snapshot->snap->anon_dev = 0;
	btrfs_put_root(pending_snapshot->snap);
	btrfs_subvolume_release_metadata(root, &pending_snapshot->block_rsv);
free_pending:
	if (pending_snapshot->anon_dev)
		free_anon_bdev(pending_snapshot->anon_dev);
	kfree(pending_snapshot->root_item);
	btrfs_free_path(pending_snapshot->path);
	kfree(pending_snapshot);

	return ret;
}

821 822 823 824 825 826 827 828 829 830 831
/*  copy of may_delete in fs/namei.c
 *	Check whether we can remove a link victim from directory dir, check
 *  whether the type of victim is right.
 *  1. We can't do it if dir is read-only (done in permission())
 *  2. We should have write and exec permissions on dir
 *  3. We can't remove anything from append-only dir
 *  4. We can't do anything with immutable dir (done in permission())
 *  5. If the sticky bit on dir is set we should either
 *	a. be owner of dir, or
 *	b. be owner of victim, or
 *	c. have CAP_FOWNER capability
 *  6. If the victim is append-only or immutable we can't do anything with
 *     links pointing to it.
 *  7. If we were asked to remove a directory and victim isn't one - ENOTDIR.
 *  8. If we were asked to remove a non-directory and victim isn't one - EISDIR.
 *  9. We can't remove a root or mountpoint.
 * 10. We don't allow removal of NFS sillyrenamed files; it's handled by
 *     nfs_async_unlink().
 */

static int btrfs_may_delete(struct user_namespace *mnt_userns,
			    struct inode *dir, struct dentry *victim, int isdir)
{
	int error;

	if (d_really_is_negative(victim))
		return -ENOENT;

	/* The victim must actually be a child of @dir. */
	BUG_ON(d_inode(victim->d_parent) != dir);
	audit_inode_child(dir, victim, AUDIT_TYPE_CHILD_DELETE);

	error = inode_permission(mnt_userns, dir, MAY_WRITE | MAY_EXEC);
	if (error)
		return error;
	if (IS_APPEND(dir))
		return -EPERM;
	if (check_sticky(mnt_userns, dir, d_inode(victim)) ||
	    IS_APPEND(d_inode(victim)) || IS_IMMUTABLE(d_inode(victim)) ||
	    IS_SWAPFILE(d_inode(victim)))
		return -EPERM;
	if (isdir) {
		if (!d_is_dir(victim))
			return -ENOTDIR;
		if (IS_ROOT(victim))
			return -EBUSY;
	} else if (d_is_dir(victim))
		return -EISDIR;
	if (IS_DEADDIR(dir))
		return -ENOENT;
	if (victim->d_flags & DCACHE_NFSFS_RENAMED)
		return -EBUSY;
	return 0;
}

875
/* copy of may_create in fs/namei.c */
static inline int btrfs_may_create(struct user_namespace *mnt_userns,
				   struct inode *dir, struct dentry *child)
{
	/* The target name must not already exist. */
	if (d_really_is_positive(child))
		return -EEXIST;
	if (IS_DEADDIR(dir))
		return -ENOENT;
	/* The caller's fs uid/gid must be representable in this user namespace. */
	if (!fsuidgid_has_mapping(dir->i_sb, mnt_userns))
		return -EOVERFLOW;
	return inode_permission(mnt_userns, dir, MAY_WRITE | MAY_EXEC);
}

/*
 * Create a new subvolume below @parent.  This is largely modeled after
 * sys_mkdirat and vfs_mkdir, but we only do a single component lookup
 * inside this filesystem so it's quite a bit simpler.
 *
 * If @snap_src is non-NULL a snapshot of that root is created instead of a
 * fresh subvolume.  Returns 0 or a negative errno.
 */
static noinline int btrfs_mksubvol(const struct path *parent,
				   struct user_namespace *mnt_userns,
				   const char *name, int namelen,
				   struct btrfs_root *snap_src,
				   bool readonly,
				   struct btrfs_qgroup_inherit *inherit)
{
	struct inode *dir = d_inode(parent->dentry);
	struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb);
	struct dentry *dentry;
	int error;

	/* Take the parent directory lock; bail if interrupted by a signal. */
	error = down_write_killable_nested(&dir->i_rwsem, I_MUTEX_PARENT);
	if (error == -EINTR)
		return error;

	dentry = lookup_one(mnt_userns, name, parent->dentry, namelen);
	error = PTR_ERR(dentry);
	if (IS_ERR(dentry))
		goto out_unlock;

	error = btrfs_may_create(mnt_userns, dir, dentry);
	if (error)
		goto out_dput;

	/*
	 * even if this name doesn't exist, we may get hash collisions.
	 * check for them now when we can safely fail
	 */
	error = btrfs_check_dir_item_collision(BTRFS_I(dir)->root,
					       dir->i_ino, name,
					       namelen);
	if (error)
		goto out_dput;

	down_read(&fs_info->subvol_sem);

	/* A root with zero refs is being deleted; don't create inside it. */
	if (btrfs_root_refs(&BTRFS_I(dir)->root->root_item) == 0)
		goto out_up_read;

	if (snap_src)
		error = create_snapshot(snap_src, dir, dentry, readonly, inherit);
	else
		error = create_subvol(mnt_userns, dir, dentry, name, namelen, inherit);

	if (!error)
		fsnotify_mkdir(dir, dentry);
out_up_read:
	up_read(&fs_info->subvol_sem);
out_dput:
	dput(dentry);
out_unlock:
	btrfs_inode_unlock(dir, 0);
	return error;
}

949
/*
 * Wrapper around btrfs_mksubvol() for snapshots: flush delalloc and ordered
 * I/O first, and force new writes to COW while the snapshot is being taken.
 */
static noinline int btrfs_mksnapshot(const struct path *parent,
				   struct user_namespace *mnt_userns,
				   const char *name, int namelen,
				   struct btrfs_root *root,
				   bool readonly,
				   struct btrfs_qgroup_inherit *inherit)
{
	int ret;
	bool snapshot_force_cow = false;

	/*
	 * Force new buffered writes to reserve space even when NOCOW is
	 * possible. This is to avoid later writeback (running dealloc) to
	 * fallback to COW mode and unexpectedly fail with ENOSPC.
	 */
	btrfs_drew_read_lock(&root->snapshot_lock);

	ret = btrfs_start_delalloc_snapshot(root, false);
	if (ret)
		goto out;

	/*
	 * All previous writes have started writeback in NOCOW mode, so now
	 * we force future writes to fallback to COW mode during snapshot
	 * creation.
	 */
	atomic_inc(&root->snapshot_force_cow);
	snapshot_force_cow = true;

	btrfs_wait_ordered_extents(root, U64_MAX, 0, (u64)-1);

	ret = btrfs_mksubvol(parent, mnt_userns, name, namelen,
			     root, readonly, inherit);
out:
	if (snapshot_force_cow)
		atomic_dec(&root->snapshot_force_cow);
	btrfs_drew_read_unlock(&root->snapshot_lock);
	return ret;
}

C
Chris Mason 已提交
989 990 991 992 993 994 995
/*
 * When we're defragging a range, we don't want to kick it off again
 * if it is really just waiting for delalloc to send it down.
 * If we find a nice big extent or delalloc range for the bytes in the
 * file you want to defrag, we return 0 to let you know to skip this
 * part of the file
 */
static int check_defrag_in_cache(struct inode *inode, u64 offset, u32 thresh)
{
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	struct extent_map *em = NULL;
	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
	const u32 sectorsize = btrfs_sb(inode->i_sb)->sectorsize;
	u64 end;

	/* Look up the extent map covering @offset under the tree's read lock. */
	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, offset, sectorsize);
	read_unlock(&em_tree->lock);

	if (em) {
		end = extent_map_end(em);
		free_extent_map(em);
		/* Extent already larger than the threshold: skip defrag. */
		if (end - offset > thresh)
			return 0;
	}
	/* if we already have a nice delalloc here, just stop */
	thresh /= 2;
	end = count_range_bits(io_tree, &offset, offset + thresh,
			       thresh, EXTENT_DELALLOC, 1);
	if (end >= thresh)
		return 0;
	return 1;
}

/*
 * helper function to walk through a file and find extents
 * newer than a specific transid, and smaller than thresh.
 *
 * This is used by the defragging code to find new and small
 * extents
 *
 * Returns 0 and stores the found file offset in *@off, or -ENOENT when
 * no matching extent exists past *@off.
 */
static int find_new_extents(struct btrfs_root *root,
			    struct inode *inode, u64 newer_than,
			    u64 *off, u32 thresh)
{
	struct btrfs_path *path;
	struct btrfs_key min_key;
	struct extent_buffer *leaf;
	struct btrfs_file_extent_item *extent;
	int type;
	int ret;
	u64 ino = btrfs_ino(BTRFS_I(inode));

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	min_key.objectid = ino;
	min_key.type = BTRFS_EXTENT_DATA_KEY;
	min_key.offset = *off;

	while (1) {
		ret = btrfs_search_forward(root, &min_key, path, newer_than);
		if (ret != 0)
			goto none;
process_slot:
		if (min_key.objectid != ino)
			goto none;
		if (min_key.type != BTRFS_EXTENT_DATA_KEY)
			goto none;

		leaf = path->nodes[0];
		extent = btrfs_item_ptr(leaf, path->slots[0],
					struct btrfs_file_extent_item);

		/* A small regular extent not already covered by cache/delalloc
		 * is a defrag candidate. */
		type = btrfs_file_extent_type(leaf, extent);
		if (type == BTRFS_FILE_EXTENT_REG &&
		    btrfs_file_extent_num_bytes(leaf, extent) < thresh &&
		    check_defrag_in_cache(inode, min_key.offset, thresh)) {
			*off = min_key.offset;
			btrfs_free_path(path);
			return 0;
		}

		path->slots[0]++;
		if (path->slots[0] < btrfs_header_nritems(leaf)) {
			btrfs_item_key_to_cpu(leaf, &min_key, path->slots[0]);
			goto process_slot;
		}

		if (min_key.offset == (u64)-1)
			goto none;

		min_key.offset++;
		btrfs_release_path(path);
	}
none:
	btrfs_free_path(path);
	return -ENOENT;
}

L
Li Zefan 已提交
1090
static struct extent_map *defrag_lookup_extent(struct inode *inode, u64 start)
1091 1092
{
	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
L
Li Zefan 已提交
1093 1094
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	struct extent_map *em;
1095
	const u32 sectorsize = BTRFS_I(inode)->root->fs_info->sectorsize;
1096

L
Li Zefan 已提交
1097 1098 1099 1100
	/*
	 * hopefully we have this extent in the tree already, try without
	 * the full extent lock
	 */
1101
	read_lock(&em_tree->lock);
1102
	em = lookup_extent_mapping(em_tree, start, sectorsize);
1103 1104
	read_unlock(&em_tree->lock);

L
Li Zefan 已提交
1105
	if (!em) {
1106
		struct extent_state *cached = NULL;
1107
		u64 end = start + sectorsize - 1;
1108

L
Li Zefan 已提交
1109
		/* get the big lock and read metadata off disk */
1110
		lock_extent_bits(io_tree, start, end, &cached);
1111
		em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, start, sectorsize);
1112
		unlock_extent_cached(io_tree, start, end, &cached);
L
Li Zefan 已提交
1113 1114 1115 1116 1117 1118 1119

		if (IS_ERR(em))
			return NULL;
	}

	return em;
}
1120

L
Li Zefan 已提交
1121 1122 1123 1124 1125 1126 1127 1128 1129 1130
static bool defrag_check_next_extent(struct inode *inode, struct extent_map *em)
{
	struct extent_map *next;
	bool ret = true;

	/* this is the last extent */
	if (em->start + em->len >= i_size_read(inode))
		return false;

	next = defrag_lookup_extent(inode, em->start + em->len);
1131 1132 1133
	if (!next || next->block_start >= EXTENT_MAP_LAST_BYTE)
		ret = false;
	else if ((em->block_start + em->block_len == next->block_start) &&
1134
		 (em->block_len > SZ_128K && next->block_len > SZ_128K))
L
Li Zefan 已提交
1135 1136 1137
		ret = false;

	free_extent_map(next);
1138 1139 1140
	return ret;
}

1141
static int should_defrag_range(struct inode *inode, u64 start, u32 thresh,
1142 1143
			       u64 *last_len, u64 *skip, u64 *defrag_end,
			       int compress)
1144
{
L
Li Zefan 已提交
1145
	struct extent_map *em;
1146
	int ret = 1;
L
Li Zefan 已提交
1147
	bool next_mergeable = true;
1148
	bool prev_mergeable = true;
1149 1150

	/*
1151
	 * make sure that once we start defragging an extent, we keep on
1152 1153 1154 1155 1156 1157 1158
	 * defragging it
	 */
	if (start < *defrag_end)
		return 1;

	*skip = 0;

L
Li Zefan 已提交
1159 1160 1161
	em = defrag_lookup_extent(inode, start);
	if (!em)
		return 0;
1162 1163

	/* this will cover holes, and inline extents */
1164
	if (em->block_start >= EXTENT_MAP_LAST_BYTE) {
1165
		ret = 0;
1166 1167 1168
		goto out;
	}

1169 1170 1171
	if (!*defrag_end)
		prev_mergeable = false;

L
Li Zefan 已提交
1172
	next_mergeable = defrag_check_next_extent(inode, em);
1173
	/*
L
Li Zefan 已提交
1174 1175
	 * we hit a real extent, if it is big or the next extent is not a
	 * real extent, don't bother defragging it
1176
	 */
1177
	if (!compress && (*last_len == 0 || *last_len >= thresh) &&
1178
	    (em->len >= thresh || (!next_mergeable && !prev_mergeable)))
1179
		ret = 0;
1180
out:
1181 1182 1183 1184 1185 1186 1187 1188 1189 1190 1191 1192 1193 1194 1195 1196 1197 1198 1199 1200
	/*
	 * last_len ends up being a counter of how many bytes we've defragged.
	 * every time we choose not to defrag an extent, we reset *last_len
	 * so that the next tiny extent will force a defrag.
	 *
	 * The end result of this is that tiny extents before a single big
	 * extent will force at least part of that big extent to be defragged.
	 */
	if (ret) {
		*defrag_end = extent_map_end(em);
	} else {
		*last_len = 0;
		*skip = extent_map_end(em);
		*defrag_end = 0;
	}

	free_extent_map(em);
	return ret;
}

1201 1202 1203 1204 1205 1206 1207 1208 1209 1210 1211 1212 1213 1214 1215 1216 1217 1218 1219 1220 1221 1222 1223 1224 1225 1226 1227 1228 1229 1230 1231 1232 1233 1234 1235 1236 1237 1238 1239 1240 1241 1242 1243 1244 1245 1246 1247 1248 1249 1250 1251 1252 1253 1254 1255 1256 1257 1258 1259 1260 1261 1262 1263 1264 1265 1266 1267 1268 1269 1270 1271 1272 1273 1274 1275 1276 1277 1278 1279 1280 1281 1282
/*
 * Prepare one page to be defragged.
 *
 * This will ensure:
 *
 * - Returned page is locked and has been set up properly.
 * - No ordered extent exists in the page.
 * - The page is uptodate.
 *
 * NOTE: Caller should also wait for page writeback after the cluster is
 * prepared, here we don't do writeback wait for each page.
 */
static struct page *defrag_prepare_one_page(struct btrfs_inode *inode,
					    pgoff_t index)
{
	struct address_space *mapping = inode->vfs_inode.i_mapping;
	gfp_t mask = btrfs_alloc_write_mask(mapping);
	u64 page_start = (u64)index << PAGE_SHIFT;
	u64 page_end = page_start + PAGE_SIZE - 1;
	struct extent_state *cached_state = NULL;
	struct page *page;
	int ret;

again:
	page = find_or_create_page(mapping, index, mask);
	if (!page)
		return ERR_PTR(-ENOMEM);

	ret = set_page_extent_mapped(page);
	if (ret < 0) {
		unlock_page(page);
		put_page(page);
		return ERR_PTR(ret);
	}

	/* Wait for any existing ordered extent in the range */
	while (1) {
		struct btrfs_ordered_extent *ordered;

		lock_extent_bits(&inode->io_tree, page_start, page_end, &cached_state);
		ordered = btrfs_lookup_ordered_range(inode, page_start, PAGE_SIZE);
		unlock_extent_cached(&inode->io_tree, page_start, page_end,
				     &cached_state);
		if (!ordered)
			break;

		unlock_page(page);
		btrfs_start_ordered_extent(ordered, 1);
		btrfs_put_ordered_extent(ordered);
		lock_page(page);
		/*
		 * We unlocked the page above, so we need check if it was
		 * released or not.
		 */
		if (page->mapping != mapping || !PagePrivate(page)) {
			unlock_page(page);
			put_page(page);
			goto again;
		}
	}

	/*
	 * Now the page range has no ordered extent any more.  Read the page to
	 * make it uptodate.
	 */
	if (!PageUptodate(page)) {
		btrfs_readpage(NULL, page);
		lock_page(page);
		if (page->mapping != mapping || !PagePrivate(page)) {
			unlock_page(page);
			put_page(page);
			goto again;
		}
		if (!PageUptodate(page)) {
			unlock_page(page);
			put_page(page);
			return ERR_PTR(-EIO);
		}
	}
	return page;
}

C
Chris Mason 已提交
1283 1284 1285 1286 1287 1288 1289 1290 1291 1292 1293 1294 1295 1296 1297
/*
 * it doesn't do much good to defrag one or two pages
 * at a time.  This pulls in a nice chunk of pages
 * to COW and defrag.
 *
 * It also makes sure the delalloc code has enough
 * dirty data to avoid making new small extents as part
 * of the defrag
 *
 * It's a good idea to start RA on this range
 * before calling this.
 */
static int cluster_pages_for_defrag(struct inode *inode,
				    struct page **pages,
				    unsigned long start_index,
1298
				    unsigned long num_pages)
C
Christoph Hellwig 已提交
1299
{
C
Chris Mason 已提交
1300 1301 1302 1303
	unsigned long file_end;
	u64 isize = i_size_read(inode);
	u64 page_start;
	u64 page_end;
1304
	u64 page_cnt;
1305
	u64 start = (u64)start_index << PAGE_SHIFT;
1306
	u64 search_start;
C
Chris Mason 已提交
1307 1308 1309 1310
	int ret;
	int i;
	int i_done;
	struct extent_state *cached_state = NULL;
1311
	struct extent_changeset *data_reserved = NULL;
C
Chris Mason 已提交
1312

1313
	file_end = (isize - 1) >> PAGE_SHIFT;
1314 1315 1316 1317
	if (!isize || start_index > file_end)
		return 0;

	page_cnt = min_t(u64, (u64)num_pages, (u64)file_end - start_index + 1);
C
Chris Mason 已提交
1318

1319
	ret = btrfs_delalloc_reserve_space(BTRFS_I(inode), &data_reserved,
1320
			start, page_cnt << PAGE_SHIFT);
C
Chris Mason 已提交
1321 1322 1323 1324 1325
	if (ret)
		return ret;
	i_done = 0;

	/* step one, lock all the pages */
1326
	for (i = 0; i < page_cnt; i++) {
C
Chris Mason 已提交
1327 1328
		struct page *page;

1329 1330 1331
		page = defrag_prepare_one_page(BTRFS_I(inode), start_index + i);
		if (IS_ERR(page)) {
			ret = PTR_ERR(page);
1332 1333
			break;
		}
C
Chris Mason 已提交
1334 1335 1336 1337 1338 1339
		pages[i] = page;
		i_done++;
	}
	if (!i_done || ret)
		goto out;

1340
	if (!(inode->i_sb->s_flags & SB_ACTIVE))
C
Chris Mason 已提交
1341 1342 1343
		goto out;

	/*
1344 1345
	 * Now we have a nice long stream of locked and up to date pages, let's
	 * wait on them.
C
Chris Mason 已提交
1346 1347 1348 1349 1350
	 */
	for (i = 0; i < i_done; i++)
		wait_on_page_writeback(pages[i]);

	page_start = page_offset(pages[0]);
1351
	page_end = page_offset(pages[i_done - 1]) + PAGE_SIZE;
C
Chris Mason 已提交
1352 1353

	lock_extent_bits(&BTRFS_I(inode)->io_tree,
1354
			 page_start, page_end - 1, &cached_state);
1355 1356 1357 1358 1359 1360 1361 1362 1363 1364 1365 1366 1367 1368 1369 1370 1371 1372 1373 1374 1375 1376 1377 1378 1379 1380 1381 1382 1383 1384 1385 1386 1387 1388

	/*
	 * When defragmenting we skip ranges that have holes or inline extents,
	 * (check should_defrag_range()), to avoid unnecessary IO and wasting
	 * space. At btrfs_defrag_file(), we check if a range should be defragged
	 * before locking the inode and then, if it should, we trigger a sync
	 * page cache readahead - we lock the inode only after that to avoid
	 * blocking for too long other tasks that possibly want to operate on
	 * other file ranges. But before we were able to get the inode lock,
	 * some other task may have punched a hole in the range, or we may have
	 * now an inline extent, in which case we should not defrag. So check
	 * for that here, where we have the inode and the range locked, and bail
	 * out if that happened.
	 */
	search_start = page_start;
	while (search_start < page_end) {
		struct extent_map *em;

		em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, search_start,
				      page_end - search_start);
		if (IS_ERR(em)) {
			ret = PTR_ERR(em);
			goto out_unlock_range;
		}
		if (em->block_start >= EXTENT_MAP_LAST_BYTE) {
			free_extent_map(em);
			/* Ok, 0 means we did not defrag anything */
			ret = 0;
			goto out_unlock_range;
		}
		search_start = extent_map_end(em);
		free_extent_map(em);
	}

C
Chris Mason 已提交
1389
	clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start,
1390 1391
			  page_end - 1, EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING |
			  EXTENT_DEFRAG, 0, 0, &cached_state);
C
Chris Mason 已提交
1392

1393
	if (i_done != page_cnt) {
1394
		spin_lock(&BTRFS_I(inode)->lock);
1395
		btrfs_mod_outstanding_extents(BTRFS_I(inode), 1);
1396
		spin_unlock(&BTRFS_I(inode)->lock);
1397
		btrfs_delalloc_release_space(BTRFS_I(inode), data_reserved,
1398
				start, (page_cnt - i_done) << PAGE_SHIFT, true);
C
Chris Mason 已提交
1399 1400 1401
	}


1402
	set_extent_defrag(&BTRFS_I(inode)->io_tree, page_start, page_end - 1,
1403
			  &cached_state);
C
Chris Mason 已提交
1404 1405

	unlock_extent_cached(&BTRFS_I(inode)->io_tree,
1406
			     page_start, page_end - 1, &cached_state);
C
Chris Mason 已提交
1407 1408 1409 1410 1411 1412

	for (i = 0; i < i_done; i++) {
		clear_page_dirty_for_io(pages[i]);
		ClearPageChecked(pages[i]);
		set_page_dirty(pages[i]);
		unlock_page(pages[i]);
1413
		put_page(pages[i]);
C
Chris Mason 已提交
1414
	}
1415
	btrfs_delalloc_release_extents(BTRFS_I(inode), page_cnt << PAGE_SHIFT);
1416
	extent_changeset_free(data_reserved);
C
Chris Mason 已提交
1417
	return i_done;
1418 1419 1420 1421

out_unlock_range:
	unlock_extent_cached(&BTRFS_I(inode)->io_tree,
			     page_start, page_end - 1, &cached_state);
C
Chris Mason 已提交
1422 1423 1424
out:
	for (i = 0; i < i_done; i++) {
		unlock_page(pages[i]);
1425
		put_page(pages[i]);
C
Chris Mason 已提交
1426
	}
1427
	btrfs_delalloc_release_space(BTRFS_I(inode), data_reserved,
1428
			start, page_cnt << PAGE_SHIFT, true);
1429
	btrfs_delalloc_release_extents(BTRFS_I(inode), page_cnt << PAGE_SHIFT);
1430
	extent_changeset_free(data_reserved);
C
Chris Mason 已提交
1431 1432 1433 1434
	return ret;

}

1435 1436 1437 1438 1439 1440 1441 1442 1443 1444 1445 1446 1447 1448 1449 1450 1451 1452 1453 1454 1455 1456 1457 1458 1459 1460 1461 1462 1463 1464 1465 1466 1467 1468 1469 1470 1471 1472 1473 1474 1475 1476 1477 1478 1479 1480 1481 1482 1483 1484 1485 1486 1487 1488 1489 1490 1491 1492 1493 1494 1495 1496 1497 1498 1499 1500 1501 1502 1503 1504 1505 1506 1507 1508 1509 1510 1511 1512 1513 1514 1515 1516 1517 1518 1519 1520 1521 1522 1523 1524 1525 1526 1527 1528 1529 1530 1531 1532 1533 1534 1535 1536 1537 1538 1539 1540 1541 1542 1543 1544 1545 1546 1547 1548 1549 1550 1551 1552 1553 1554
struct defrag_target_range {
	struct list_head list;
	u64 start;
	u64 len;
};

/*
 * Collect all valid target extents.
 *
 * @start:	   file offset to lookup
 * @len:	   length to lookup
 * @extent_thresh: file extent size threshold, any extent size >= this value
 *		   will be ignored
 * @newer_than:    only defrag extents newer than this value
 * @do_compress:   whether the defrag is doing compression
 *		   if true, @extent_thresh will be ignored and all regular
 *		   file extents meeting @newer_than will be targets.
 * @target_list:   list of targets file extents
 */
static int defrag_collect_targets(struct btrfs_inode *inode,
				  u64 start, u64 len, u32 extent_thresh,
				  u64 newer_than, bool do_compress,
				  struct list_head *target_list)
{
	u64 cur = start;
	int ret = 0;

	while (cur < start + len) {
		struct extent_map *em;
		struct defrag_target_range *new;
		bool next_mergeable = true;
		u64 range_len;

		em = defrag_lookup_extent(&inode->vfs_inode, cur);
		if (!em)
			break;

		/* Skip hole/inline/preallocated extents */
		if (em->block_start >= EXTENT_MAP_LAST_BYTE ||
		    test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
			goto next;

		/* Skip older extent */
		if (em->generation < newer_than)
			goto next;

		/*
		 * For do_compress case, we want to compress all valid file
		 * extents, thus no @extent_thresh or mergeable check.
		 */
		if (do_compress)
			goto add;

		/* Skip too large extent */
		if (em->len >= extent_thresh)
			goto next;

		next_mergeable = defrag_check_next_extent(&inode->vfs_inode, em);
		if (!next_mergeable) {
			struct defrag_target_range *last;

			/* Empty target list, no way to merge with last entry */
			if (list_empty(target_list))
				goto next;
			last = list_entry(target_list->prev,
					  struct defrag_target_range, list);
			/* Not mergeable with last entry */
			if (last->start + last->len != cur)
				goto next;

			/* Mergeable, fall through to add it to @target_list. */
		}

add:
		range_len = min(extent_map_end(em), start + len) - cur;
		/*
		 * This one is a good target, check if it can be merged into
		 * last range of the target list.
		 */
		if (!list_empty(target_list)) {
			struct defrag_target_range *last;

			last = list_entry(target_list->prev,
					  struct defrag_target_range, list);
			ASSERT(last->start + last->len <= cur);
			if (last->start + last->len == cur) {
				/* Mergeable, enlarge the last entry */
				last->len += range_len;
				goto next;
			}
			/* Fall through to allocate a new entry */
		}

		/* Allocate new defrag_target_range */
		new = kmalloc(sizeof(*new), GFP_NOFS);
		if (!new) {
			free_extent_map(em);
			ret = -ENOMEM;
			break;
		}
		new->start = cur;
		new->len = range_len;
		list_add_tail(&new->list, target_list);

next:
		cur = extent_map_end(em);
		free_extent_map(em);
	}
	if (ret < 0) {
		struct defrag_target_range *entry;
		struct defrag_target_range *tmp;

		list_for_each_entry_safe(entry, tmp, target_list, list) {
			list_del_init(&entry->list);
			kfree(entry);
		}
	}
	return ret;
}

1555 1556 1557 1558 1559 1560 1561 1562 1563 1564 1565 1566 1567 1568 1569 1570 1571 1572 1573 1574 1575 1576 1577 1578 1579 1580 1581 1582 1583 1584 1585 1586 1587 1588 1589 1590 1591 1592 1593 1594 1595 1596 1597 1598 1599 1600 1601 1602 1603 1604 1605 1606 1607 1608
#define CLUSTER_SIZE	(SZ_256K)

/*
 * Defrag one contiguous target range.
 *
 * @inode:	target inode
 * @target:	target range to defrag
 * @pages:	locked pages covering the defrag range
 * @nr_pages:	number of locked pages
 *
 * Caller should ensure:
 *
 * - Pages are prepared
 *   Pages should be locked, no ordered extent in the pages range,
 *   no writeback.
 *
 * - Extent bits are locked
 */
static int defrag_one_locked_target(struct btrfs_inode *inode,
				    struct defrag_target_range *target,
				    struct page **pages, int nr_pages,
				    struct extent_state **cached_state)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct extent_changeset *data_reserved = NULL;
	const u64 start = target->start;
	const u64 len = target->len;
	unsigned long last_index = (start + len - 1) >> PAGE_SHIFT;
	unsigned long start_index = start >> PAGE_SHIFT;
	unsigned long first_index = page_index(pages[0]);
	int ret = 0;
	int i;

	ASSERT(last_index - first_index + 1 <= nr_pages);

	ret = btrfs_delalloc_reserve_space(inode, &data_reserved, start, len);
	if (ret < 0)
		return ret;
	clear_extent_bit(&inode->io_tree, start, start + len - 1,
			 EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING |
			 EXTENT_DEFRAG, 0, 0, cached_state);
	set_extent_defrag(&inode->io_tree, start, start + len - 1, cached_state);

	/* Update the page status */
	for (i = start_index - first_index; i <= last_index - first_index; i++) {
		ClearPageChecked(pages[i]);
		btrfs_page_clamp_set_dirty(fs_info, pages[i], start, len);
	}
	btrfs_delalloc_release_extents(inode, len);
	extent_changeset_free(data_reserved);

	return ret;
}

1609 1610 1611 1612 1613 1614 1615 1616 1617 1618 1619
/*
 * Entry point to file defragmentation.
 *
 * @inode:	   inode to be defragged
 * @ra:		   readahead state (can be NUL)
 * @range:	   defrag options including range and flags
 * @newer_than:	   minimum transid to defrag
 * @max_to_defrag: max number of sectors to be defragged, if 0, the whole inode
 *		   will be defragged.
 */
int btrfs_defrag_file(struct inode *inode, struct file_ra_state *ra,
C
Chris Mason 已提交
1620 1621 1622
		      struct btrfs_ioctl_defrag_range_args *range,
		      u64 newer_than, unsigned long max_to_defrag)
{
1623
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
C
Chris Mason 已提交
1624
	struct btrfs_root *root = BTRFS_I(inode)->root;
C
Christoph Hellwig 已提交
1625
	unsigned long last_index;
1626
	u64 isize = i_size_read(inode);
1627 1628 1629
	u64 last_len = 0;
	u64 skip = 0;
	u64 defrag_end = 0;
C
Chris Mason 已提交
1630
	u64 newer_off = range->start;
C
Christoph Hellwig 已提交
1631
	unsigned long i;
1632
	unsigned long ra_index = 0;
C
Christoph Hellwig 已提交
1633
	int ret;
C
Chris Mason 已提交
1634
	int defrag_count = 0;
1635
	int compress_type = BTRFS_COMPRESS_ZLIB;
1636
	u32 extent_thresh = range->extent_thresh;
1637
	unsigned long max_cluster = SZ_256K >> PAGE_SHIFT;
1638
	unsigned long cluster = max_cluster;
1639
	u64 new_align = ~((u64)SZ_128K - 1);
C
Chris Mason 已提交
1640
	struct page **pages = NULL;
1641
	bool do_compress = range->flags & BTRFS_DEFRAG_RANGE_COMPRESS;
1642
	bool ra_allocated = false;
C
Chris Mason 已提交
1643

1644 1645 1646 1647 1648
	if (isize == 0)
		return 0;

	if (range->start >= isize)
		return -EINVAL;
1649

1650
	if (do_compress) {
1651
		if (range->compress_type >= BTRFS_NR_COMPRESS_TYPES)
1652 1653 1654 1655
			return -EINVAL;
		if (range->compress_type)
			compress_type = range->compress_type;
	}
C
Christoph Hellwig 已提交
1656

1657
	if (extent_thresh == 0)
1658
		extent_thresh = SZ_256K;
1659

C
Chris Mason 已提交
1660
	/*
1661
	 * If we were not given a ra, allocate a readahead context. As
1662 1663
	 * readahead is just an optimization, defrag will work without it so
	 * we don't error out.
C
Chris Mason 已提交
1664
	 */
1665 1666
	if (!ra) {
		ra_allocated = true;
1667
		ra = kzalloc(sizeof(*ra), GFP_KERNEL);
1668 1669
		if (ra)
			file_ra_state_init(ra, inode->i_mapping);
C
Chris Mason 已提交
1670 1671
	}

1672
	pages = kmalloc_array(max_cluster, sizeof(struct page *), GFP_KERNEL);
C
Chris Mason 已提交
1673 1674 1675 1676 1677 1678
	if (!pages) {
		ret = -ENOMEM;
		goto out_ra;
	}

	/* find the last page to defrag */
C
Chris Mason 已提交
1679
	if (range->start + range->len > range->start) {
1680
		last_index = min_t(u64, isize - 1,
1681
			 range->start + range->len - 1) >> PAGE_SHIFT;
C
Chris Mason 已提交
1682
	} else {
1683
		last_index = (isize - 1) >> PAGE_SHIFT;
C
Chris Mason 已提交
1684 1685
	}

C
Chris Mason 已提交
1686 1687
	if (newer_than) {
		ret = find_new_extents(root, inode, newer_than,
1688
				       &newer_off, SZ_64K);
C
Chris Mason 已提交
1689 1690 1691 1692 1693 1694
		if (!ret) {
			range->start = newer_off;
			/*
			 * we always align our defrag to help keep
			 * the extents in the file evenly spaced
			 */
1695
			i = (newer_off & new_align) >> PAGE_SHIFT;
C
Chris Mason 已提交
1696 1697 1698
		} else
			goto out_ra;
	} else {
1699
		i = range->start >> PAGE_SHIFT;
C
Chris Mason 已提交
1700 1701
	}
	if (!max_to_defrag)
1702
		max_to_defrag = last_index - i + 1;
C
Chris Mason 已提交
1703

L
Li Zefan 已提交
1704 1705 1706 1707 1708 1709 1710
	/*
	 * make writeback starts from i, so the defrag range can be
	 * written sequentially.
	 */
	if (i < inode->i_mapping->writeback_index)
		inode->i_mapping->writeback_index = i;

1711
	while (i <= last_index && defrag_count < max_to_defrag &&
1712
	       (i < DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE))) {
C
Chris Mason 已提交
1713 1714 1715 1716
		/*
		 * make sure we stop running if someone unmounts
		 * the FS
		 */
1717
		if (!(inode->i_sb->s_flags & SB_ACTIVE))
C
Chris Mason 已提交
1718 1719
			break;

1720 1721
		if (btrfs_defrag_cancelled(fs_info)) {
			btrfs_debug(fs_info, "defrag_file cancelled");
1722
			ret = -EAGAIN;
1723
			goto error;
1724 1725
		}

1726
		if (!should_defrag_range(inode, (u64)i << PAGE_SHIFT,
L
Li Zefan 已提交
1727
					 extent_thresh, &last_len, &skip,
1728
					 &defrag_end, do_compress)){
1729 1730 1731 1732 1733
			unsigned long next;
			/*
			 * the should_defrag function tells us how much to skip
			 * bump our counter by the suggested amount
			 */
1734
			next = DIV_ROUND_UP(skip, PAGE_SIZE);
1735 1736 1737
			i = max(i + 1, next);
			continue;
		}
1738 1739

		if (!newer_than) {
1740 1741
			cluster = (PAGE_ALIGN(defrag_end) >>
				   PAGE_SHIFT) - i;
1742 1743 1744 1745 1746 1747 1748
			cluster = min(cluster, max_cluster);
		} else {
			cluster = max_cluster;
		}

		if (i + cluster > ra_index) {
			ra_index = max(i, ra_index);
1749
			if (ra)
1750
				page_cache_sync_readahead(inode->i_mapping, ra,
1751
						NULL, ra_index, cluster);
1752
			ra_index += cluster;
1753
		}
1754

1755
		btrfs_inode_lock(inode, 0);
1756 1757 1758 1759 1760 1761 1762
		if (IS_SWAPFILE(inode)) {
			ret = -ETXTBSY;
		} else {
			if (do_compress)
				BTRFS_I(inode)->defrag_compress = compress_type;
			ret = cluster_pages_for_defrag(inode, pages, i, cluster);
		}
1763
		if (ret < 0) {
1764
			btrfs_inode_unlock(inode, 0);
C
Chris Mason 已提交
1765
			goto out_ra;
1766
		}
C
Chris Mason 已提交
1767 1768

		defrag_count += ret;
1769
		balance_dirty_pages_ratelimited(inode->i_mapping);
1770
		btrfs_inode_unlock(inode, 0);
C
Chris Mason 已提交
1771 1772 1773 1774 1775

		if (newer_than) {
			if (newer_off == (u64)-1)
				break;

1776 1777 1778
			if (ret > 0)
				i += ret;

C
Chris Mason 已提交
1779
			newer_off = max(newer_off + 1,
1780
					(u64)i << PAGE_SHIFT);
C
Chris Mason 已提交
1781

1782 1783
			ret = find_new_extents(root, inode, newer_than,
					       &newer_off, SZ_64K);
C
Chris Mason 已提交
1784 1785
			if (!ret) {
				range->start = newer_off;
1786
				i = (newer_off & new_align) >> PAGE_SHIFT;
C
Chris Mason 已提交
1787 1788
			} else {
				break;
C
Christoph Hellwig 已提交
1789
			}
C
Chris Mason 已提交
1790
		} else {
1791
			if (ret > 0) {
L
Li Zefan 已提交
1792
				i += ret;
1793
				last_len += ret << PAGE_SHIFT;
1794
			} else {
L
Li Zefan 已提交
1795
				i++;
1796 1797
				last_len = 0;
			}
C
Christoph Hellwig 已提交
1798 1799 1800
		}
	}

1801 1802
	ret = defrag_count;
error:
1803
	if ((range->flags & BTRFS_DEFRAG_RANGE_START_IO)) {
C
Chris Mason 已提交
1804
		filemap_flush(inode->i_mapping);
1805 1806 1807 1808
		if (test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
			     &BTRFS_I(inode)->runtime_flags))
			filemap_flush(inode->i_mapping);
	}
C
Chris Mason 已提交
1809

1810
	if (range->compress_type == BTRFS_COMPRESS_LZO) {
1811
		btrfs_set_fs_incompat(fs_info, COMPRESS_LZO);
N
Nick Terrell 已提交
1812 1813
	} else if (range->compress_type == BTRFS_COMPRESS_ZSTD) {
		btrfs_set_fs_incompat(fs_info, COMPRESS_ZSTD);
1814 1815
	}

C
Chris Mason 已提交
1816
out_ra:
1817
	if (do_compress) {
1818
		btrfs_inode_lock(inode, 0);
1819
		BTRFS_I(inode)->defrag_compress = BTRFS_COMPRESS_NONE;
1820
		btrfs_inode_unlock(inode, 0);
1821
	}
1822
	if (ra_allocated)
C
Chris Mason 已提交
1823 1824
		kfree(ra);
	kfree(pages);
1825
	return ret;
C
Christoph Hellwig 已提交
1826 1827
}

1828 1829 1830 1831 1832 1833 1834 1835 1836 1837 1838 1839 1840 1841 1842 1843 1844 1845 1846 1847 1848 1849 1850 1851 1852 1853 1854 1855 1856 1857 1858 1859 1860 1861 1862 1863 1864 1865 1866 1867 1868 1869
/*
 * Try to start exclusive operation @type or cancel it if it's running.
 *
 * Return:
 *   0        - normal mode, newly claimed op started
 *  >0        - normal mode, something else is running,
 *              return BTRFS_ERROR_DEV_EXCL_RUN_IN_PROGRESS to user space
 * ECANCELED  - cancel mode, successful cancel
 * ENOTCONN   - cancel mode, operation not running anymore
 */
static int exclop_start_or_cancel_reloc(struct btrfs_fs_info *fs_info,
			enum btrfs_exclusive_operation type, bool cancel)
{
	if (!cancel) {
		/* Start normal op */
		if (!btrfs_exclop_start(fs_info, type))
			return BTRFS_ERROR_DEV_EXCL_RUN_IN_PROGRESS;
		/* Exclusive operation is now claimed */
		return 0;
	}

	/* Cancel running op */
	if (btrfs_exclop_start_try_lock(fs_info, type)) {
		/*
		 * This blocks any exclop finish from setting it to NONE, so we
		 * request cancellation. Either it runs and we will wait for it,
		 * or it has finished and no waiting will happen.
		 */
		atomic_inc(&fs_info->reloc_cancel_req);
		btrfs_exclop_start_unlock(fs_info);

		if (test_bit(BTRFS_FS_RELOC_RUNNING, &fs_info->flags))
			wait_on_bit(&fs_info->flags, BTRFS_FS_RELOC_RUNNING,
				    TASK_INTERRUPTIBLE);

		return -ECANCELED;
	}

	/* Something else is running or none */
	return -ENOTCONN;
}

1870
static noinline int btrfs_ioctl_resize(struct file *file,
1871
					void __user *arg)
C
Christoph Hellwig 已提交
1872
{
1873 1874
	struct inode *inode = file_inode(file);
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
C
Christoph Hellwig 已提交
1875 1876 1877
	u64 new_size;
	u64 old_size;
	u64 devid = 1;
1878
	struct btrfs_root *root = BTRFS_I(inode)->root;
C
Christoph Hellwig 已提交
1879 1880 1881 1882
	struct btrfs_ioctl_vol_args *vol_args;
	struct btrfs_trans_handle *trans;
	struct btrfs_device *device = NULL;
	char *sizestr;
1883
	char *retptr;
C
Christoph Hellwig 已提交
1884 1885 1886
	char *devstr = NULL;
	int ret = 0;
	int mod = 0;
D
David Sterba 已提交
1887
	bool cancel;
C
Christoph Hellwig 已提交
1888

1889 1890 1891
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

1892 1893 1894 1895
	ret = mnt_want_write_file(file);
	if (ret)
		return ret;

D
David Sterba 已提交
1896 1897 1898 1899
	/*
	 * Read the arguments before checking exclusivity to be able to
	 * distinguish regular resize and cancel
	 */
L
Li Zefan 已提交
1900
	vol_args = memdup_user(arg, sizeof(*vol_args));
1901 1902
	if (IS_ERR(vol_args)) {
		ret = PTR_ERR(vol_args);
D
David Sterba 已提交
1903
		goto out_drop;
1904
	}
1905
	vol_args->name[BTRFS_PATH_NAME_MAX] = '\0';
C
Christoph Hellwig 已提交
1906
	sizestr = vol_args->name;
D
David Sterba 已提交
1907 1908 1909 1910 1911 1912
	cancel = (strcmp("cancel", sizestr) == 0);
	ret = exclop_start_or_cancel_reloc(fs_info, BTRFS_EXCLOP_RESIZE, cancel);
	if (ret)
		goto out_free;
	/* Exclusive operation is now claimed */

C
Christoph Hellwig 已提交
1913 1914 1915 1916 1917
	devstr = strchr(sizestr, ':');
	if (devstr) {
		sizestr = devstr + 1;
		*devstr = '\0';
		devstr = vol_args->name;
1918 1919
		ret = kstrtoull(devstr, 10, &devid);
		if (ret)
D
David Sterba 已提交
1920
			goto out_finish;
1921 1922
		if (!devid) {
			ret = -EINVAL;
D
David Sterba 已提交
1923
			goto out_finish;
1924
		}
1925
		btrfs_info(fs_info, "resizing devid %llu", devid);
C
Christoph Hellwig 已提交
1926
	}
M
Miao Xie 已提交
1927

1928
	device = btrfs_find_device(fs_info->fs_devices, devid, NULL, NULL);
C
Christoph Hellwig 已提交
1929
	if (!device) {
1930 1931
		btrfs_info(fs_info, "resizer unable to find device %llu",
			   devid);
1932
		ret = -ENODEV;
D
David Sterba 已提交
1933
		goto out_finish;
C
Christoph Hellwig 已提交
1934
	}
M
Miao Xie 已提交
1935

1936
	if (!test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
1937
		btrfs_info(fs_info,
1938
			   "resizer unable to apply on readonly device %llu",
1939
		       devid);
1940
		ret = -EPERM;
D
David Sterba 已提交
1941
		goto out_finish;
L
Liu Bo 已提交
1942 1943
	}

C
Christoph Hellwig 已提交
1944 1945 1946 1947 1948 1949 1950 1951 1952 1953
	if (!strcmp(sizestr, "max"))
		new_size = device->bdev->bd_inode->i_size;
	else {
		if (sizestr[0] == '-') {
			mod = -1;
			sizestr++;
		} else if (sizestr[0] == '+') {
			mod = 1;
			sizestr++;
		}
1954 1955
		new_size = memparse(sizestr, &retptr);
		if (*retptr != '\0' || new_size == 0) {
C
Christoph Hellwig 已提交
1956
			ret = -EINVAL;
D
David Sterba 已提交
1957
			goto out_finish;
C
Christoph Hellwig 已提交
1958 1959 1960
		}
	}

1961
	if (test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) {
1962
		ret = -EPERM;
D
David Sterba 已提交
1963
		goto out_finish;
1964 1965
	}

1966
	old_size = btrfs_device_get_total_bytes(device);
C
Christoph Hellwig 已提交
1967 1968 1969 1970

	if (mod < 0) {
		if (new_size > old_size) {
			ret = -EINVAL;
D
David Sterba 已提交
1971
			goto out_finish;
C
Christoph Hellwig 已提交
1972 1973 1974
		}
		new_size = old_size - new_size;
	} else if (mod > 0) {
1975
		if (new_size > ULLONG_MAX - old_size) {
1976
			ret = -ERANGE;
D
David Sterba 已提交
1977
			goto out_finish;
1978
		}
C
Christoph Hellwig 已提交
1979 1980 1981
		new_size = old_size + new_size;
	}

1982
	if (new_size < SZ_256M) {
C
Christoph Hellwig 已提交
1983
		ret = -EINVAL;
D
David Sterba 已提交
1984
		goto out_finish;
C
Christoph Hellwig 已提交
1985 1986 1987
	}
	if (new_size > device->bdev->bd_inode->i_size) {
		ret = -EFBIG;
D
David Sterba 已提交
1988
		goto out_finish;
C
Christoph Hellwig 已提交
1989 1990
	}

1991
	new_size = round_down(new_size, fs_info->sectorsize);
C
Christoph Hellwig 已提交
1992 1993

	if (new_size > old_size) {
1994
		trans = btrfs_start_transaction(root, 0);
1995 1996
		if (IS_ERR(trans)) {
			ret = PTR_ERR(trans);
D
David Sterba 已提交
1997
			goto out_finish;
1998
		}
C
Christoph Hellwig 已提交
1999
		ret = btrfs_grow_device(trans, device, new_size);
2000
		btrfs_commit_transaction(trans);
2001
	} else if (new_size < old_size) {
C
Christoph Hellwig 已提交
2002
		ret = btrfs_shrink_device(device, new_size);
2003
	} /* equal, nothing need to do */
C
Christoph Hellwig 已提交
2004

2005 2006 2007 2008 2009
	if (ret == 0 && new_size != old_size)
		btrfs_info_in_rcu(fs_info,
			"resize device %s (devid %llu) from %llu to %llu",
			rcu_str_deref(device->name), device->devid,
			old_size, new_size);
D
David Sterba 已提交
2010 2011
out_finish:
	btrfs_exclop_finish(fs_info);
2012
out_free:
C
Christoph Hellwig 已提交
2013
	kfree(vol_args);
D
David Sterba 已提交
2014
out_drop:
2015
	mnt_drop_write_file(file);
C
Christoph Hellwig 已提交
2016 2017 2018
	return ret;
}

2019
static noinline int __btrfs_ioctl_snap_create(struct file *file,
2020
				struct user_namespace *mnt_userns,
2021
				const char *name, unsigned long fd, int subvol,
2022
				bool readonly,
2023
				struct btrfs_qgroup_inherit *inherit)
C
Christoph Hellwig 已提交
2024 2025
{
	int namelen;
2026
	int ret = 0;
C
Christoph Hellwig 已提交
2027

2028 2029 2030
	if (!S_ISDIR(file_inode(file)->i_mode))
		return -ENOTDIR;

2031 2032 2033 2034
	ret = mnt_want_write_file(file);
	if (ret)
		goto out;

S
Sage Weil 已提交
2035 2036
	namelen = strlen(name);
	if (strchr(name, '/')) {
C
Christoph Hellwig 已提交
2037
		ret = -EINVAL;
2038
		goto out_drop_write;
C
Christoph Hellwig 已提交
2039 2040
	}

2041 2042 2043
	if (name[0] == '.' &&
	   (namelen == 1 || (name[1] == '.' && namelen == 2))) {
		ret = -EEXIST;
2044
		goto out_drop_write;
2045 2046
	}

2047
	if (subvol) {
2048 2049
		ret = btrfs_mksubvol(&file->f_path, mnt_userns, name,
				     namelen, NULL, readonly, inherit);
2050
	} else {
2051
		struct fd src = fdget(fd);
2052
		struct inode *src_inode;
2053
		if (!src.file) {
2054
			ret = -EINVAL;
2055
			goto out_drop_write;
2056 2057
		}

A
Al Viro 已提交
2058 2059
		src_inode = file_inode(src.file);
		if (src_inode->i_sb != file_inode(file)->i_sb) {
J
Josef Bacik 已提交
2060
			btrfs_info(BTRFS_I(file_inode(file))->root->fs_info,
2061
				   "Snapshot src from another FS");
2062
			ret = -EXDEV;
2063
		} else if (!inode_owner_or_capable(mnt_userns, src_inode)) {
2064 2065 2066 2067 2068
			/*
			 * Subvolume creation is not restricted, but snapshots
			 * are limited to own subvolumes only
			 */
			ret = -EPERM;
2069
		} else {
2070 2071 2072 2073
			ret = btrfs_mksnapshot(&file->f_path, mnt_userns,
					       name, namelen,
					       BTRFS_I(src_inode)->root,
					       readonly, inherit);
2074
		}
2075
		fdput(src);
2076
	}
2077 2078
out_drop_write:
	mnt_drop_write_file(file);
C
Christoph Hellwig 已提交
2079
out:
S
Sage Weil 已提交
2080 2081 2082 2083
	return ret;
}

static noinline int btrfs_ioctl_snap_create(struct file *file,
2084
					    void __user *arg, int subvol)
S
Sage Weil 已提交
2085
{
2086
	struct btrfs_ioctl_vol_args *vol_args;
S
Sage Weil 已提交
2087 2088
	int ret;

2089 2090 2091
	if (!S_ISDIR(file_inode(file)->i_mode))
		return -ENOTDIR;

2092 2093 2094 2095
	vol_args = memdup_user(arg, sizeof(*vol_args));
	if (IS_ERR(vol_args))
		return PTR_ERR(vol_args);
	vol_args->name[BTRFS_PATH_NAME_MAX] = '\0';
S
Sage Weil 已提交
2096

2097 2098 2099
	ret = __btrfs_ioctl_snap_create(file, file_mnt_user_ns(file),
					vol_args->name, vol_args->fd, subvol,
					false, NULL);
2100

2101 2102 2103
	kfree(vol_args);
	return ret;
}
2104

2105 2106 2107 2108 2109
/*
 * ioctl entry point for snapshot/subvolume creation with v2 arguments
 * (struct btrfs_ioctl_vol_args_v2): supports BTRFS_SUBVOL_RDONLY and an
 * optional qgroup inheritance description.
 *
 * Fix over the previous version: the redundant "if (ret) goto free_inherit;"
 * immediately before the free_inherit: label is removed — control fell
 * through to the label in both cases, so the conditional was dead code.
 */
static noinline int btrfs_ioctl_snap_create_v2(struct file *file,
					       void __user *arg, int subvol)
{
	struct btrfs_ioctl_vol_args_v2 *vol_args;
	int ret;
	bool readonly = false;
	struct btrfs_qgroup_inherit *inherit = NULL;

	if (!S_ISDIR(file_inode(file)->i_mode))
		return -ENOTDIR;

	vol_args = memdup_user(arg, sizeof(*vol_args));
	if (IS_ERR(vol_args))
		return PTR_ERR(vol_args);
	/* Terminate the name in case userspace did not. */
	vol_args->name[BTRFS_SUBVOL_NAME_MAX] = '\0';

	if (vol_args->flags & ~BTRFS_SUBVOL_CREATE_ARGS_MASK) {
		ret = -EOPNOTSUPP;
		goto free_args;
	}

	if (vol_args->flags & BTRFS_SUBVOL_RDONLY)
		readonly = true;
	if (vol_args->flags & BTRFS_SUBVOL_QGROUP_INHERIT) {
		u64 nums;

		/* The inherit blob must at least hold the fixed header. */
		if (vol_args->size < sizeof(*inherit) ||
		    vol_args->size > PAGE_SIZE) {
			ret = -EINVAL;
			goto free_args;
		}
		inherit = memdup_user(vol_args->qgroup_inherit, vol_args->size);
		if (IS_ERR(inherit)) {
			ret = PTR_ERR(inherit);
			goto free_args;
		}

		/*
		 * Sanity-check the counts individually before summing them so
		 * the struct_size() check below cannot be fooled by overflow.
		 */
		if (inherit->num_qgroups > PAGE_SIZE ||
		    inherit->num_ref_copies > PAGE_SIZE ||
		    inherit->num_excl_copies > PAGE_SIZE) {
			ret = -EINVAL;
			goto free_inherit;
		}

		nums = inherit->num_qgroups + 2 * inherit->num_ref_copies +
		       2 * inherit->num_excl_copies;
		if (vol_args->size != struct_size(inherit, qgroups, nums)) {
			ret = -EINVAL;
			goto free_inherit;
		}
	}

	ret = __btrfs_ioctl_snap_create(file, file_mnt_user_ns(file),
					vol_args->name, vol_args->fd, subvol,
					readonly, inherit);
free_inherit:
	kfree(inherit);
free_args:
	kfree(vol_args);
	return ret;
}

2169 2170 2171
/*
 * Return the subvolume flags (currently only BTRFS_SUBVOL_RDONLY) of the
 * subvolume that @file's inode is the root of.
 */
static noinline int btrfs_ioctl_subvol_getflags(struct file *file,
						void __user *arg)
{
	struct inode *inode = file_inode(file);
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_root *root = BTRFS_I(inode)->root;
	u64 flags = 0;

	/* Only the root inode of a subvolume may be queried. */
	if (btrfs_ino(BTRFS_I(inode)) != BTRFS_FIRST_FREE_OBJECTID)
		return -EINVAL;

	down_read(&fs_info->subvol_sem);
	if (btrfs_root_readonly(root))
		flags |= BTRFS_SUBVOL_RDONLY;
	up_read(&fs_info->subvol_sem);

	if (copy_to_user(arg, &flags, sizeof(flags)))
		return -EFAULT;

	return 0;
}

/*
 * Set or clear the BTRFS_SUBVOL_RDONLY flag on the subvolume whose root inode
 * backs @file, and persist the new root flags through a committed
 * transaction.  Only the inode owner (or a capable caller) may do this, and
 * only BTRFS_SUBVOL_RDONLY is a supported flag.
 */
static noinline int btrfs_ioctl_subvol_setflags(struct file *file,
					      void __user *arg)
{
	struct inode *inode = file_inode(file);
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_trans_handle *trans;
	u64 root_flags;
	u64 flags;
	int ret = 0;

	if (!inode_owner_or_capable(file_mnt_user_ns(file), inode))
		return -EPERM;

	ret = mnt_want_write_file(file);
	if (ret)
		goto out;

	/* Only the root inode of a subvolume may have its flags changed. */
	if (btrfs_ino(BTRFS_I(inode)) != BTRFS_FIRST_FREE_OBJECTID) {
		ret = -EINVAL;
		goto out_drop_write;
	}

	if (copy_from_user(&flags, arg, sizeof(flags))) {
		ret = -EFAULT;
		goto out_drop_write;
	}

	/* BTRFS_SUBVOL_RDONLY is the only flag this ioctl supports. */
	if (flags & ~BTRFS_SUBVOL_RDONLY) {
		ret = -EOPNOTSUPP;
		goto out_drop_write;
	}

	down_write(&fs_info->subvol_sem);

	/* nothing to do */
	if (!!(flags & BTRFS_SUBVOL_RDONLY) == btrfs_root_readonly(root))
		goto out_drop_sem;

	/* Saved so the change can be rolled back if the update fails below. */
	root_flags = btrfs_root_flags(&root->root_item);
	if (flags & BTRFS_SUBVOL_RDONLY) {
		btrfs_set_root_flags(&root->root_item,
				     root_flags | BTRFS_ROOT_SUBVOL_RDONLY);
	} else {
		/*
		 * Block RO -> RW transition if this subvolume is involved in
		 * send
		 */
		spin_lock(&root->root_item_lock);
		if (root->send_in_progress == 0) {
			btrfs_set_root_flags(&root->root_item,
				     root_flags & ~BTRFS_ROOT_SUBVOL_RDONLY);
			spin_unlock(&root->root_item_lock);
		} else {
			spin_unlock(&root->root_item_lock);
			btrfs_warn(fs_info,
				   "Attempt to set subvolume %llu read-write during send",
				   root->root_key.objectid);
			ret = -EPERM;
			goto out_drop_sem;
		}
	}

	trans = btrfs_start_transaction(root, 1);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto out_reset;
	}

	/* Write the updated root item into the tree of tree roots. */
	ret = btrfs_update_root(trans, fs_info->tree_root,
				&root->root_key, &root->root_item);
	if (ret < 0) {
		btrfs_end_transaction(trans);
		goto out_reset;
	}

	ret = btrfs_commit_transaction(trans);

out_reset:
	/* On any failure restore the in-memory flags we modified above. */
	if (ret)
		btrfs_set_root_flags(&root->root_item, root_flags);
out_drop_sem:
	up_write(&fs_info->subvol_sem);
out_drop_write:
	mnt_drop_write_file(file);
out:
	return ret;
}

2281 2282 2283
static noinline int key_in_sk(struct btrfs_key *key,
			      struct btrfs_ioctl_search_key *sk)
{
2284 2285 2286 2287 2288 2289 2290 2291 2292
	struct btrfs_key test;
	int ret;

	test.objectid = sk->min_objectid;
	test.type = sk->min_type;
	test.offset = sk->min_offset;

	ret = btrfs_comp_cpu_keys(key, &test);
	if (ret < 0)
2293
		return 0;
2294 2295 2296 2297 2298 2299 2300

	test.objectid = sk->max_objectid;
	test.type = sk->max_type;
	test.offset = sk->max_offset;

	ret = btrfs_comp_cpu_keys(key, &test);
	if (ret > 0)
2301 2302 2303 2304
		return 0;
	return 1;
}

2305
/*
 * Copy the items of the current leaf that fall inside @sk's key range into
 * the userspace buffer @ubuf, each preceded by a btrfs_ioctl_search_header.
 *
 * @path:      search path positioned at the leaf to copy from
 * @key:       in: current search position; out: advanced past this leaf
 * @sk:        the search key range and item-count limit from userspace
 * @buf_size:  size of @ubuf; updated to the required size on -EOVERFLOW
 * @ubuf:      userspace destination buffer
 * @sk_offset: running byte offset into @ubuf, updated as items are copied
 * @num_found: running count of copied items, updated here
 *
 * All userspace copies use the _nofault variants because the caller holds
 * tree locks; on fault we return 0 so the caller can fault the pages in and
 * retry (see search_ioctl()).
 */
static noinline int copy_to_sk(struct btrfs_path *path,
			       struct btrfs_key *key,
			       struct btrfs_ioctl_search_key *sk,
			       size_t *buf_size,
			       char __user *ubuf,
			       unsigned long *sk_offset,
			       int *num_found)
{
	u64 found_transid;
	struct extent_buffer *leaf;
	struct btrfs_ioctl_search_header sh;
	struct btrfs_key test;
	unsigned long item_off;
	unsigned long item_len;
	int nritems;
	int i;
	int slot;
	int ret = 0;

	leaf = path->nodes[0];
	slot = path->slots[0];
	nritems = btrfs_header_nritems(leaf);

	/* Leaf newer than the requested transid range: skip all its items. */
	if (btrfs_header_generation(leaf) > sk->max_transid) {
		i = nritems;
		goto advance_key;
	}
	found_transid = btrfs_header_generation(leaf);

	for (i = slot; i < nritems; i++) {
		item_off = btrfs_item_ptr_offset(leaf, i);
		item_len = btrfs_item_size_nr(leaf, i);

		btrfs_item_key_to_cpu(leaf, key, i);
		if (!key_in_sk(key, sk))
			continue;

		/* Item larger than the whole buffer. */
		if (sizeof(sh) + item_len > *buf_size) {
			if (*num_found) {
				ret = 1;
				goto out;
			}

			/*
			 * return one empty item back for v1, which does not
			 * handle -EOVERFLOW
			 */

			*buf_size = sizeof(sh) + item_len;
			item_len = 0;
			ret = -EOVERFLOW;
		}

		/* No room left in the buffer for this item. */
		if (sizeof(sh) + item_len + *sk_offset > *buf_size) {
			ret = 1;
			goto out;
		}

		sh.objectid = key->objectid;
		sh.offset = key->offset;
		sh.type = key->type;
		sh.len = item_len;
		sh.transid = found_transid;

		/*
		 * Copy search result header. If we fault then loop again so we
		 * can fault in the pages and -EFAULT there if there's a
		 * problem. Otherwise we'll fault and then copy the buffer in
		 * properly this next time through
		 */
		if (copy_to_user_nofault(ubuf + *sk_offset, &sh, sizeof(sh))) {
			ret = 0;
			goto out;
		}

		*sk_offset += sizeof(sh);

		if (item_len) {
			char __user *up = ubuf + *sk_offset;
			/*
			 * Copy the item, same behavior as above, but reset
			 * *sk_offset so we copy the full thing again.
			 */
			if (read_extent_buffer_to_user_nofault(leaf, up,
						item_off, item_len)) {
				ret = 0;
				*sk_offset -= sizeof(sh);
				goto out;
			}

			*sk_offset += item_len;
		}
		(*num_found)++;

		if (ret) /* -EOVERFLOW from above */
			goto out;

		if (*num_found >= sk->nr_items) {
			ret = 1;
			goto out;
		}
	}
advance_key:
	/* Advance the search position past this leaf for the next iteration. */
	ret = 0;
	test.objectid = sk->max_objectid;
	test.type = sk->max_type;
	test.offset = sk->max_offset;
	if (btrfs_comp_cpu_keys(key, &test) >= 0)
		ret = 1;
	else if (key->offset < (u64)-1)
		key->offset++;
	else if (key->type < (u8)-1) {
		key->offset = 0;
		key->type++;
	} else if (key->objectid < (u64)-1) {
		key->offset = 0;
		key->type = 0;
		key->objectid++;
	} else
		ret = 1;
out:
	/*
	 *  0: all items from this leaf copied, continue with next
	 *  1: * more items can be copied, but unused buffer is too small
	 *     * all items were found
	 *     Either way, it stops the loop which iterates to the next
	 *     leaf
	 *  -EOVERFLOW: item was too large for buffer
	 *  -EFAULT: could not copy extent buffer back to userspace
	 */
	return ret;
}

/*
 * Core of the TREE_SEARCH ioctls: walk @sk's key range in the requested tree
 * and copy matching items into @ubuf via copy_to_sk().
 *
 * @inode:    inode the ioctl was called on; its root is used when
 *            sk->tree_id == 0
 * @sk:       search range/limits; sk->nr_items is updated with the number of
 *            items actually copied
 * @buf_size: size of @ubuf; updated to the required size on -EOVERFLOW
 * @ubuf:     userspace result buffer
 *
 * copy_to_sk() copies with page faults disabled (it runs with tree locks
 * held), so this loop faults the destination pages in up front and retries
 * whenever copy_to_sk() had to bail out on a fault.
 * NOTE(review): fault_in_pages_writeable() probes at page granularity;
 * on hardware with sub-page fault granularity this retry loop may not make
 * progress — confirm against later upstream fixes.
 */
static noinline int search_ioctl(struct inode *inode,
				 struct btrfs_ioctl_search_key *sk,
				 size_t *buf_size,
				 char __user *ubuf)
{
	struct btrfs_fs_info *info = btrfs_sb(inode->i_sb);
	struct btrfs_root *root;
	struct btrfs_key key;
	struct btrfs_path *path;
	int ret;
	int num_found = 0;
	unsigned long sk_offset = 0;

	/* The buffer must hold at least one (empty) search header. */
	if (*buf_size < sizeof(struct btrfs_ioctl_search_header)) {
		*buf_size = sizeof(struct btrfs_ioctl_search_header);
		return -EOVERFLOW;
	}

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	if (sk->tree_id == 0) {
		/* search the root of the inode that was passed */
		root = btrfs_grab_root(BTRFS_I(inode)->root);
	} else {
		root = btrfs_get_fs_root(info, sk->tree_id, true);
		if (IS_ERR(root)) {
			btrfs_free_path(path);
			return PTR_ERR(root);
		}
	}

	key.objectid = sk->min_objectid;
	key.type = sk->min_type;
	key.offset = sk->min_offset;

	while (1) {
		/* Pre-fault the remaining buffer; copy_to_sk() cannot fault. */
		ret = fault_in_pages_writeable(ubuf + sk_offset,
					       *buf_size - sk_offset);
		if (ret)
			break;

		ret = btrfs_search_forward(root, &key, path, sk->min_transid);
		if (ret != 0) {
			if (ret > 0)
				ret = 0;
			goto err;
		}
		ret = copy_to_sk(path, &key, sk, buf_size, ubuf,
				 &sk_offset, &num_found);
		btrfs_release_path(path);
		if (ret)
			break;

	}
	if (ret > 0)
		ret = 0;
err:
	sk->nr_items = num_found;
	btrfs_put_root(root);
	btrfs_free_path(path);
	return ret;
}

/*
 * ioctl entry point for BTRFS_IOC_TREE_SEARCH (v1, fixed-size result
 * buffer embedded in the argument struct).  Requires CAP_SYS_ADMIN.
 */
static noinline int btrfs_ioctl_tree_search(struct file *file,
					   void __user *argp)
{
	struct btrfs_ioctl_search_args __user *uargs = argp;
	struct btrfs_ioctl_search_key sk;
	size_t buf_size;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (copy_from_user(&sk, &uargs->key, sizeof(sk)))
		return -EFAULT;

	buf_size = sizeof(uargs->buf);
	ret = search_ioctl(file_inode(file), &sk, &buf_size, uargs->buf);

	/*
	 * In the origin implementation an overflow is handled by returning a
	 * search header with a len of zero, so reset ret.
	 */
	if (ret == -EOVERFLOW)
		ret = 0;

	if (ret == 0 && copy_to_user(&uargs->key, &sk, sizeof(sk)))
		ret = -EFAULT;

	return ret;
}

G
Gerhard Heift 已提交
2537 2538 2539 2540 2541 2542 2543 2544
/*
 * ioctl entry point for BTRFS_IOC_TREE_SEARCH_V2: like v1 but with a
 * caller-sized result buffer (capped at 16MiB) and proper -EOVERFLOW
 * reporting that includes the required buffer size.
 */
static noinline int btrfs_ioctl_tree_search_v2(struct file *file,
					       void __user *argp)
{
	struct btrfs_ioctl_search_args_v2 __user *uarg = argp;
	struct btrfs_ioctl_search_args_v2 args;
	const size_t buf_limit = SZ_16M;
	size_t buf_size;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	/* Copy in the search key together with the buffer size. */
	if (copy_from_user(&args, uarg, sizeof(args)))
		return -EFAULT;

	/* Cap the result buffer at 16MiB. */
	buf_size = args.buf_size;
	if (buf_size > buf_limit)
		buf_size = buf_limit;

	ret = search_ioctl(file_inode(file), &args.key, &buf_size,
			   (char __user *)(&uarg->buf[0]));
	if (ret == 0 && copy_to_user(&uarg->key, &args.key, sizeof(args.key)))
		ret = -EFAULT;
	else if (ret == -EOVERFLOW &&
		copy_to_user(&uarg->buf_size, &buf_size, sizeof(buf_size)))
		ret = -EFAULT;

	return ret;
}

2573
/*
 * Search INODE_REFs to identify path name of 'dirid' directory
 * in a 'tree_id' tree. and sets path name to 'name'.
 *
 * The path is built backwards: starting from @dirid, each INODE_REF gives the
 * component name and the parent directory's objectid (key.offset), so the
 * name buffer is filled from its end towards the front and moved into place
 * once the subvolume root is reached.
 */
static noinline int btrfs_search_path_in_tree(struct btrfs_fs_info *info,
				u64 tree_id, u64 dirid, char *name)
{
	struct btrfs_root *root;
	struct btrfs_key key;
	char *ptr;
	int ret = -1;
	int slot;
	int len;
	int total_len = 0;
	struct btrfs_inode_ref *iref;
	struct extent_buffer *l;
	struct btrfs_path *path;

	/* The subvolume root itself has an empty path. */
	if (dirid == BTRFS_FIRST_FREE_OBJECTID) {
		name[0]='\0';
		return 0;
	}

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/* Components are written backwards starting at the end of the buffer. */
	ptr = &name[BTRFS_INO_LOOKUP_PATH_MAX - 1];

	root = btrfs_get_fs_root(info, tree_id, true);
	if (IS_ERR(root)) {
		ret = PTR_ERR(root);
		root = NULL;
		goto out;
	}

	key.objectid = dirid;
	key.type = BTRFS_INODE_REF_KEY;
	key.offset = (u64)-1;

	while (1) {
		ret = btrfs_search_backwards(root, &key, path);
		if (ret < 0)
			goto out;
		else if (ret > 0) {
			ret = -ENOENT;
			goto out;
		}

		l = path->nodes[0];
		slot = path->slots[0];

		/* Prepend "<name>/" for this component. */
		iref = btrfs_item_ptr(l, slot, struct btrfs_inode_ref);
		len = btrfs_inode_ref_name_len(l, iref);
		ptr -= len + 1;
		total_len += len + 1;
		if (ptr < name) {
			ret = -ENAMETOOLONG;
			goto out;
		}

		*(ptr + len) = '/';
		read_extent_buffer(l, ptr, (unsigned long)(iref + 1), len);

		/* key.offset is the parent dir; stop at the subvolume root. */
		if (key.offset == BTRFS_FIRST_FREE_OBJECTID)
			break;

		btrfs_release_path(path);
		key.objectid = key.offset;
		key.offset = (u64)-1;
		dirid = key.objectid;
	}
	/* Shift the backwards-built path to the start of the buffer. */
	memmove(name, ptr, total_len);
	name[total_len] = '\0';
	ret = 0;
out:
	btrfs_put_root(root);
	btrfs_free_path(path);
	return ret;
}

2654 2655
/*
 * Unprivileged variant of btrfs_search_path_in_tree(), used by the
 * INO_LOOKUP_USER ioctl.
 *
 * Builds the path of @args->dirid relative to @inode (the fd's inode) in
 * @args->path, checking MAY_READ | MAY_EXEC with inode_permission() on every
 * directory that becomes part of the result, and finally resolves the bottom
 * subvolume's name from its ROOT_REF into @args->name.
 *
 * Returns 0 on success, -EACCES when a permission check fails or the path
 * leaves the subtree below @inode, or another negative errno.
 */
static int btrfs_search_path_in_tree_user(struct user_namespace *mnt_userns,
				struct inode *inode,
				struct btrfs_ioctl_ino_lookup_user_args *args)
{
	struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
	struct super_block *sb = inode->i_sb;
	struct btrfs_key upper_limit = BTRFS_I(inode)->location;
	u64 treeid = BTRFS_I(inode)->root->root_key.objectid;
	u64 dirid = args->dirid;
	unsigned long item_off;
	unsigned long item_len;
	struct btrfs_inode_ref *iref;
	struct btrfs_root_ref *rref;
	struct btrfs_root *root = NULL;
	struct btrfs_path *path;
	struct btrfs_key key, key2;
	struct extent_buffer *leaf;
	struct inode *temp_inode;
	char *ptr;
	int slot;
	int len;
	int total_len = 0;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/*
	 * If the bottom subvolume does not exist directly under upper_limit,
	 * construct the path in from the bottom up.
	 */
	if (dirid != upper_limit.objectid) {
		/* Components are written backwards from the buffer's end. */
		ptr = &args->path[BTRFS_INO_LOOKUP_USER_PATH_MAX - 1];

		root = btrfs_get_fs_root(fs_info, treeid, true);
		if (IS_ERR(root)) {
			ret = PTR_ERR(root);
			goto out;
		}

		key.objectid = dirid;
		key.type = BTRFS_INODE_REF_KEY;
		key.offset = (u64)-1;
		while (1) {
			ret = btrfs_search_backwards(root, &key, path);
			if (ret < 0)
				goto out_put;
			else if (ret > 0) {
				ret = -ENOENT;
				goto out_put;
			}

			leaf = path->nodes[0];
			slot = path->slots[0];

			/* Prepend "<name>/" for this component. */
			iref = btrfs_item_ptr(leaf, slot, struct btrfs_inode_ref);
			len = btrfs_inode_ref_name_len(leaf, iref);
			ptr -= len + 1;
			total_len += len + 1;
			if (ptr < args->path) {
				ret = -ENAMETOOLONG;
				goto out_put;
			}

			*(ptr + len) = '/';
			read_extent_buffer(leaf, ptr,
					(unsigned long)(iref + 1), len);

			/* Check the read+exec permission of this directory */
			ret = btrfs_previous_item(root, path, dirid,
						  BTRFS_INODE_ITEM_KEY);
			if (ret < 0) {
				goto out_put;
			} else if (ret > 0) {
				ret = -ENOENT;
				goto out_put;
			}

			leaf = path->nodes[0];
			slot = path->slots[0];
			btrfs_item_key_to_cpu(leaf, &key2, slot);
			if (key2.objectid != dirid) {
				ret = -ENOENT;
				goto out_put;
			}

			temp_inode = btrfs_iget(sb, key2.objectid, root);
			if (IS_ERR(temp_inode)) {
				ret = PTR_ERR(temp_inode);
				goto out_put;
			}
			ret = inode_permission(mnt_userns, temp_inode,
					       MAY_READ | MAY_EXEC);
			iput(temp_inode);
			if (ret) {
				ret = -EACCES;
				goto out_put;
			}

			/* Reached the fd's inode: path is complete. */
			if (key.offset == upper_limit.objectid)
				break;
			/* Hit the subvolume root without passing the fd's inode. */
			if (key.objectid == BTRFS_FIRST_FREE_OBJECTID) {
				ret = -EACCES;
				goto out_put;
			}

			btrfs_release_path(path);
			key.objectid = key.offset;
			key.offset = (u64)-1;
			dirid = key.objectid;
		}

		/* Shift the backwards-built path to the buffer's start. */
		memmove(args->path, ptr, total_len);
		args->path[total_len] = '\0';
		btrfs_put_root(root);
		root = NULL;
		btrfs_release_path(path);
	}

	/* Get the bottom subvolume's name from ROOT_REF */
	key.objectid = treeid;
	key.type = BTRFS_ROOT_REF_KEY;
	key.offset = args->treeid;
	ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0);
	if (ret < 0) {
		goto out;
	} else if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	leaf = path->nodes[0];
	slot = path->slots[0];
	btrfs_item_key_to_cpu(leaf, &key, slot);

	item_off = btrfs_item_ptr_offset(leaf, slot);
	item_len = btrfs_item_size_nr(leaf, slot);
	/* Check if dirid in ROOT_REF corresponds to passed dirid */
	rref = btrfs_item_ptr(leaf, slot, struct btrfs_root_ref);
	if (args->dirid != btrfs_root_ref_dirid(leaf, rref)) {
		ret = -EINVAL;
		goto out;
	}

	/* Copy subvolume's name */
	item_off += sizeof(struct btrfs_root_ref);
	item_len -= sizeof(struct btrfs_root_ref);
	read_extent_buffer(leaf, args->name, item_off, item_len);
	args->name[item_len] = 0;

out_put:
	btrfs_put_root(root);
out:
	btrfs_free_path(path);
	return ret;
}

2812 2813 2814
/*
 * ioctl entry point for BTRFS_IOC_INO_LOOKUP: resolve the path of an inode
 * inside a given tree.  Resolving the containing subvolume id (treeid == 0)
 * is unprivileged; constructing an arbitrary path needs CAP_SYS_ADMIN.
 */
static noinline int btrfs_ioctl_ino_lookup(struct file *file,
					   void __user *argp)
{
	struct btrfs_ioctl_ino_lookup_args *args;
	struct inode *inode;
	int ret = 0;

	args = memdup_user(argp, sizeof(*args));
	if (IS_ERR(args))
		return PTR_ERR(args);

	inode = file_inode(file);

	/*
	 * Unprivileged query to obtain the containing subvolume root id. The
	 * path is reset so it's consistent with btrfs_search_path_in_tree.
	 */
	if (args->treeid == 0)
		args->treeid = BTRFS_I(inode)->root->root_key.objectid;

	/* The subvolume root itself has an empty path. */
	if (args->objectid == BTRFS_FIRST_FREE_OBJECTID) {
		args->name[0] = 0;
		goto out;
	}

	/* Anything beyond the root-id query is privileged. */
	if (!capable(CAP_SYS_ADMIN)) {
		ret = -EPERM;
		goto out;
	}

	ret = btrfs_search_path_in_tree(BTRFS_I(inode)->root->fs_info,
					args->treeid, args->objectid,
					args->name);

out:
	if (ret == 0 && copy_to_user(argp, args, sizeof(*args)))
		ret = -EFAULT;

	kfree(args);
	return ret;
}

2854 2855 2856 2857 2858 2859 2860 2861 2862 2863 2864 2865 2866 2867 2868 2869 2870 2871 2872 2873 2874 2875 2876 2877 2878 2879 2880 2881 2882 2883 2884 2885 2886 2887
/*
 * Unprivileged version of the ino_lookup ioctl.
 *
 * The main differences from ino_lookup ioctl are:
 *
 *   1. Read + Exec permission will be checked using inode_permission() during
 *      path construction. -EACCES will be returned in case of failure.
 *   2. Path construction will be stopped at the inode number which corresponds
 *      to the fd with which this ioctl is called. If constructed path does not
 *      exist under fd's inode, -EACCES will be returned.
 *   3. The name of bottom subvolume is also searched and filled.
 */
static int btrfs_ioctl_ino_lookup_user(struct file *file, void __user *argp)
{
	struct btrfs_ioctl_ino_lookup_user_args *args;
	struct inode *inode = file_inode(file);
	int ret;

	args = memdup_user(argp, sizeof(*args));
	if (IS_ERR(args))
		return PTR_ERR(args);

	if (args->dirid == BTRFS_FIRST_FREE_OBJECTID &&
	    BTRFS_I(inode)->location.objectid != BTRFS_FIRST_FREE_OBJECTID) {
		/*
		 * The subvolume does not exist under fd with which this is
		 * called
		 */
		kfree(args);
		return -EACCES;
	}

	ret = btrfs_search_path_in_tree_user(file_mnt_user_ns(file), inode, args);

	if (ret == 0 && copy_to_user(argp, args, sizeof(*args)))
		ret = -EFAULT;

	kfree(args);
	return ret;
}

2897 2898 2899 2900 2901 2902 2903 2904 2905 2906 2907 2908 2909 2910 2911 2912 2913 2914 2915 2916 2917 2918 2919 2920 2921 2922 2923 2924 2925 2926 2927 2928
/* Get the subvolume information in BTRFS_ROOT_ITEM and BTRFS_ROOT_BACKREF */
static int btrfs_ioctl_get_subvol_info(struct file *file, void __user *argp)
{
	struct btrfs_ioctl_get_subvol_info_args *subvol_info;
	struct btrfs_fs_info *fs_info;
	struct btrfs_root *root;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_root_item *root_item;
	struct btrfs_root_ref *rref;
	struct extent_buffer *leaf;
	unsigned long item_off;
	unsigned long item_len;
	struct inode *inode;
	int slot;
	int ret = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	subvol_info = kzalloc(sizeof(*subvol_info), GFP_KERNEL);
	if (!subvol_info) {
		btrfs_free_path(path);
		return -ENOMEM;
	}

	inode = file_inode(file);
	fs_info = BTRFS_I(inode)->root->fs_info;

	/* Get root_item of inode's subvolume */
	key.objectid = BTRFS_I(inode)->root->root_key.objectid;
	root = btrfs_get_fs_root(fs_info, key.objectid, true);
	if (IS_ERR(root)) {
		ret = PTR_ERR(root);
		goto out_free;
	}
	root_item = &root->root_item;

	subvol_info->treeid = key.objectid;

	subvol_info->generation = btrfs_root_generation(root_item);
	subvol_info->flags = btrfs_root_flags(root_item);

	/* Copy the three UUIDs identifying this subvolume and its lineage. */
	memcpy(subvol_info->uuid, root_item->uuid, BTRFS_UUID_SIZE);
	memcpy(subvol_info->parent_uuid, root_item->parent_uuid,
						    BTRFS_UUID_SIZE);
	memcpy(subvol_info->received_uuid, root_item->received_uuid,
						    BTRFS_UUID_SIZE);

	/* Transids and timestamps for change/creation/send/receive. */
	subvol_info->ctransid = btrfs_root_ctransid(root_item);
	subvol_info->ctime.sec = btrfs_stack_timespec_sec(&root_item->ctime);
	subvol_info->ctime.nsec = btrfs_stack_timespec_nsec(&root_item->ctime);

	subvol_info->otransid = btrfs_root_otransid(root_item);
	subvol_info->otime.sec = btrfs_stack_timespec_sec(&root_item->otime);
	subvol_info->otime.nsec = btrfs_stack_timespec_nsec(&root_item->otime);

	subvol_info->stransid = btrfs_root_stransid(root_item);
	subvol_info->stime.sec = btrfs_stack_timespec_sec(&root_item->stime);
	subvol_info->stime.nsec = btrfs_stack_timespec_nsec(&root_item->stime);

	subvol_info->rtransid = btrfs_root_rtransid(root_item);
	subvol_info->rtime.sec = btrfs_stack_timespec_sec(&root_item->rtime);
	subvol_info->rtime.nsec = btrfs_stack_timespec_nsec(&root_item->rtime);

	/* The top-level subvolume has no parent, hence no ROOT_BACKREF. */
	if (key.objectid != BTRFS_FS_TREE_OBJECTID) {
		/* Search root tree for ROOT_BACKREF of this subvolume */
		key.type = BTRFS_ROOT_BACKREF_KEY;
		key.offset = 0;
		ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0);
		if (ret < 0) {
			goto out;
		} else if (path->slots[0] >=
			   btrfs_header_nritems(path->nodes[0])) {
			ret = btrfs_next_leaf(fs_info->tree_root, path);
			if (ret < 0) {
				goto out;
			} else if (ret > 0) {
				ret = -EUCLEAN;
				goto out;
			}
		}

		leaf = path->nodes[0];
		slot = path->slots[0];
		btrfs_item_key_to_cpu(leaf, &key, slot);
		if (key.objectid == subvol_info->treeid &&
		    key.type == BTRFS_ROOT_BACKREF_KEY) {
			subvol_info->parent_id = key.offset;

			rref = btrfs_item_ptr(leaf, slot, struct btrfs_root_ref);
			subvol_info->dirid = btrfs_root_ref_dirid(leaf, rref);

			/* The subvolume name follows the fixed root_ref part. */
			item_off = btrfs_item_ptr_offset(leaf, slot)
					+ sizeof(struct btrfs_root_ref);
			item_len = btrfs_item_size_nr(leaf, slot)
					- sizeof(struct btrfs_root_ref);
			read_extent_buffer(leaf, subvol_info->name,
					   item_off, item_len);
		} else {
			ret = -ENOENT;
			goto out;
		}
	}

	if (copy_to_user(argp, subvol_info, sizeof(*subvol_info)))
		ret = -EFAULT;

out:
	btrfs_put_root(root);
out_free:
	btrfs_free_path(path);
	kfree(subvol_info);
	return ret;
}

3014 3015 3016 3017 3018 3019 3020 3021 3022 3023 3024 3025 3026 3027 3028 3029 3030 3031 3032 3033 3034 3035 3036 3037 3038 3039 3040 3041 3042 3043 3044 3045 3046 3047 3048 3049 3050 3051 3052 3053 3054 3055 3056 3057 3058 3059 3060 3061 3062 3063 3064 3065 3066 3067 3068 3069 3070 3071 3072 3073 3074 3075 3076 3077 3078 3079 3080 3081 3082 3083 3084 3085 3086 3087 3088 3089 3090 3091 3092 3093 3094 3095 3096 3097 3098 3099 3100 3101 3102 3103 3104 3105 3106 3107 3108 3109 3110
/*
 * Return ROOT_REF information of the subvolume containing this inode
 * except the subvolume name.
 *
 * Fills rootrefs->rootref[] with (treeid, dirid) pairs of child subvolumes,
 * starting at rootrefs->min_treeid.  When more children exist than fit in
 * the fixed-size array, -EOVERFLOW is returned and min_treeid is advanced so
 * userspace can call again to continue the listing.
 */
static int btrfs_ioctl_get_subvol_rootref(struct file *file, void __user *argp)
{
	struct btrfs_ioctl_get_subvol_rootref_args *rootrefs;
	struct btrfs_root_ref *rref;
	struct btrfs_root *root;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct extent_buffer *leaf;
	struct inode *inode;
	u64 objectid;
	int slot;
	int ret;
	u8 found;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	rootrefs = memdup_user(argp, sizeof(*rootrefs));
	if (IS_ERR(rootrefs)) {
		btrfs_free_path(path);
		return PTR_ERR(rootrefs);
	}

	inode = file_inode(file);
	root = BTRFS_I(inode)->root->fs_info->tree_root;
	objectid = BTRFS_I(inode)->root->root_key.objectid;

	/* ROOT_REF keys of this subvolume, starting at min_treeid. */
	key.objectid = objectid;
	key.type = BTRFS_ROOT_REF_KEY;
	key.offset = rootrefs->min_treeid;
	found = 0;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0) {
		goto out;
	} else if (path->slots[0] >=
		   btrfs_header_nritems(path->nodes[0])) {
		ret = btrfs_next_leaf(root, path);
		if (ret < 0) {
			goto out;
		} else if (ret > 0) {
			ret = -EUCLEAN;
			goto out;
		}
	}
	while (1) {
		leaf = path->nodes[0];
		slot = path->slots[0];

		btrfs_item_key_to_cpu(leaf, &key, slot);
		/* Past the last ROOT_REF of this subvolume: done. */
		if (key.objectid != objectid || key.type != BTRFS_ROOT_REF_KEY) {
			ret = 0;
			goto out;
		}

		/* Result array full; report overflow so the caller resumes. */
		if (found == BTRFS_MAX_ROOTREF_BUFFER_NUM) {
			ret = -EOVERFLOW;
			goto out;
		}

		rref = btrfs_item_ptr(leaf, slot, struct btrfs_root_ref);
		rootrefs->rootref[found].treeid = key.offset;
		rootrefs->rootref[found].dirid =
				  btrfs_root_ref_dirid(leaf, rref);
		found++;

		ret = btrfs_next_item(root, path);
		if (ret < 0) {
			goto out;
		} else if (ret > 0) {
			ret = -EUCLEAN;
			goto out;
		}
	}

out:
	if (!ret || ret == -EOVERFLOW) {
		rootrefs->num_items = found;
		/* update min_treeid for next search */
		if (found)
			rootrefs->min_treeid =
				rootrefs->rootref[found - 1].treeid + 1;
		if (copy_to_user(argp, rootrefs, sizeof(*rootrefs)))
			ret = -EFAULT;
	}

	kfree(rootrefs);
	btrfs_free_path(path);

	return ret;
}

3111
static noinline int btrfs_ioctl_snap_destroy(struct file *file,
3112 3113
					     void __user *arg,
					     bool destroy_v2)
3114
{
A
Al Viro 已提交
3115
	struct dentry *parent = file->f_path.dentry;
3116
	struct btrfs_fs_info *fs_info = btrfs_sb(parent->d_sb);
3117
	struct dentry *dentry;
3118
	struct inode *dir = d_inode(parent);
3119 3120 3121
	struct inode *inode;
	struct btrfs_root *root = BTRFS_I(dir)->root;
	struct btrfs_root *dest = NULL;
3122 3123
	struct btrfs_ioctl_vol_args *vol_args = NULL;
	struct btrfs_ioctl_vol_args_v2 *vol_args2 = NULL;
3124
	struct user_namespace *mnt_userns = file_mnt_user_ns(file);
3125 3126
	char *subvol_name, *subvol_name_ptr = NULL;
	int subvol_namelen;
3127
	int err = 0;
3128
	bool destroy_parent = false;
3129

3130 3131 3132 3133
	if (destroy_v2) {
		vol_args2 = memdup_user(arg, sizeof(*vol_args2));
		if (IS_ERR(vol_args2))
			return PTR_ERR(vol_args2);
3134

3135 3136 3137 3138
		if (vol_args2->flags & ~BTRFS_SUBVOL_DELETE_ARGS_MASK) {
			err = -EOPNOTSUPP;
			goto out;
		}
3139

3140 3141 3142 3143 3144 3145 3146 3147 3148 3149 3150 3151
		/*
		 * If SPEC_BY_ID is not set, we are looking for the subvolume by
		 * name, same as v1 currently does.
		 */
		if (!(vol_args2->flags & BTRFS_SUBVOL_SPEC_BY_ID)) {
			vol_args2->name[BTRFS_SUBVOL_NAME_MAX] = 0;
			subvol_name = vol_args2->name;

			err = mnt_want_write_file(file);
			if (err)
				goto out;
		} else {
3152
			struct inode *old_dir;
3153

3154 3155 3156 3157 3158 3159 3160 3161 3162 3163 3164 3165 3166 3167 3168 3169 3170 3171 3172 3173 3174 3175 3176 3177 3178 3179 3180 3181 3182 3183 3184 3185 3186 3187 3188 3189
			if (vol_args2->subvolid < BTRFS_FIRST_FREE_OBJECTID) {
				err = -EINVAL;
				goto out;
			}

			err = mnt_want_write_file(file);
			if (err)
				goto out;

			dentry = btrfs_get_dentry(fs_info->sb,
					BTRFS_FIRST_FREE_OBJECTID,
					vol_args2->subvolid, 0, 0);
			if (IS_ERR(dentry)) {
				err = PTR_ERR(dentry);
				goto out_drop_write;
			}

			/*
			 * Change the default parent since the subvolume being
			 * deleted can be outside of the current mount point.
			 */
			parent = btrfs_get_parent(dentry);

			/*
			 * At this point dentry->d_name can point to '/' if the
			 * subvolume we want to destroy is outsite of the
			 * current mount point, so we need to release the
			 * current dentry and execute the lookup to return a new
			 * one with ->d_name pointing to the
			 * <mount point>/subvol_name.
			 */
			dput(dentry);
			if (IS_ERR(parent)) {
				err = PTR_ERR(parent);
				goto out_drop_write;
			}
3190
			old_dir = dir;
3191 3192 3193 3194 3195 3196 3197 3198 3199 3200
			dir = d_inode(parent);

			/*
			 * If v2 was used with SPEC_BY_ID, a new parent was
			 * allocated since the subvolume can be outside of the
			 * current mount point. Later on we need to release this
			 * new parent dentry.
			 */
			destroy_parent = true;

3201 3202 3203 3204 3205 3206 3207 3208 3209 3210 3211 3212 3213 3214
			/*
			 * On idmapped mounts, deletion via subvolid is
			 * restricted to subvolumes that are immediate
			 * ancestors of the inode referenced by the file
			 * descriptor in the ioctl. Otherwise the idmapping
			 * could potentially be abused to delete subvolumes
			 * anywhere in the filesystem the user wouldn't be able
			 * to delete without an idmapped mount.
			 */
			if (old_dir != dir && mnt_userns != &init_user_ns) {
				err = -EOPNOTSUPP;
				goto free_parent;
			}

3215 3216 3217 3218 3219 3220
			subvol_name_ptr = btrfs_get_subvol_name_from_objectid(
						fs_info, vol_args2->subvolid);
			if (IS_ERR(subvol_name_ptr)) {
				err = PTR_ERR(subvol_name_ptr);
				goto free_parent;
			}
D
David Sterba 已提交
3221
			/* subvol_name_ptr is already nul terminated */
3222 3223 3224 3225 3226 3227 3228 3229 3230 3231 3232 3233 3234
			subvol_name = (char *)kbasename(subvol_name_ptr);
		}
	} else {
		vol_args = memdup_user(arg, sizeof(*vol_args));
		if (IS_ERR(vol_args))
			return PTR_ERR(vol_args);

		vol_args->name[BTRFS_PATH_NAME_MAX] = 0;
		subvol_name = vol_args->name;

		err = mnt_want_write_file(file);
		if (err)
			goto out;
3235 3236
	}

3237
	subvol_namelen = strlen(subvol_name);
3238

3239 3240 3241 3242 3243 3244 3245 3246 3247 3248
	if (strchr(subvol_name, '/') ||
	    strncmp(subvol_name, "..", subvol_namelen) == 0) {
		err = -EINVAL;
		goto free_subvol_name;
	}

	if (!S_ISDIR(dir->i_mode)) {
		err = -ENOTDIR;
		goto free_subvol_name;
	}
3249

3250 3251
	err = down_write_killable_nested(&dir->i_rwsem, I_MUTEX_PARENT);
	if (err == -EINTR)
3252
		goto free_subvol_name;
3253
	dentry = lookup_one(mnt_userns, subvol_name, parent, subvol_namelen);
3254 3255 3256 3257 3258
	if (IS_ERR(dentry)) {
		err = PTR_ERR(dentry);
		goto out_unlock_dir;
	}

3259
	if (d_really_is_negative(dentry)) {
3260 3261 3262 3263
		err = -ENOENT;
		goto out_dput;
	}

3264
	inode = d_inode(dentry);
3265
	dest = BTRFS_I(inode)->root;
3266
	if (!capable(CAP_SYS_ADMIN)) {
3267 3268 3269 3270 3271 3272 3273 3274 3275 3276 3277 3278 3279 3280
		/*
		 * Regular user.  Only allow this with a special mount
		 * option, when the user has write+exec access to the
		 * subvol root, and when rmdir(2) would have been
		 * allowed.
		 *
		 * Note that this is _not_ check that the subvol is
		 * empty or doesn't contain data that we wouldn't
		 * otherwise be able to delete.
		 *
		 * Users who want to delete empty subvols should try
		 * rmdir(2).
		 */
		err = -EPERM;
3281
		if (!btrfs_test_opt(fs_info, USER_SUBVOL_RM_ALLOWED))
3282 3283 3284 3285 3286 3287 3288 3289 3290 3291 3292 3293 3294
			goto out_dput;

		/*
		 * Do not allow deletion if the parent dir is the same
		 * as the dir to be deleted.  That means the ioctl
		 * must be called on the dentry referencing the root
		 * of the subvol, not a random directory contained
		 * within it.
		 */
		err = -EINVAL;
		if (root == dest)
			goto out_dput;

3295
		err = inode_permission(mnt_userns, inode, MAY_WRITE | MAY_EXEC);
3296 3297 3298 3299
		if (err)
			goto out_dput;
	}

3300
	/* check if subvolume may be deleted by a user */
3301
	err = btrfs_may_delete(mnt_userns, dir, dentry, 1);
3302 3303 3304
	if (err)
		goto out_dput;

3305
	if (btrfs_ino(BTRFS_I(inode)) != BTRFS_FIRST_FREE_OBJECTID) {
3306 3307 3308 3309
		err = -EINVAL;
		goto out_dput;
	}

3310
	btrfs_inode_lock(inode, 0);
3311
	err = btrfs_delete_subvolume(dir, dentry);
3312
	btrfs_inode_unlock(inode, 0);
3313 3314
	if (!err) {
		fsnotify_rmdir(dir, dentry);
3315
		d_delete(dentry);
3316
	}
3317

3318 3319 3320
out_dput:
	dput(dentry);
out_unlock_dir:
3321
	btrfs_inode_unlock(dir, 0);
3322 3323 3324 3325 3326
free_subvol_name:
	kfree(subvol_name_ptr);
free_parent:
	if (destroy_parent)
		dput(parent);
3327
out_drop_write:
A
Al Viro 已提交
3328
	mnt_drop_write_file(file);
3329
out:
3330
	kfree(vol_args2);
3331 3332 3333 3334
	kfree(vol_args);
	return err;
}

C
Chris Mason 已提交
3335
static int btrfs_ioctl_defrag(struct file *file, void __user *argp)
C
Christoph Hellwig 已提交
3336
{
A
Al Viro 已提交
3337
	struct inode *inode = file_inode(file);
C
Christoph Hellwig 已提交
3338
	struct btrfs_root *root = BTRFS_I(inode)->root;
3339
	struct btrfs_ioctl_defrag_range_args range = {0};
Y
Yan Zheng 已提交
3340 3341
	int ret;

3342 3343 3344
	ret = mnt_want_write_file(file);
	if (ret)
		return ret;
L
Li Zefan 已提交
3345

3346 3347 3348
	if (btrfs_root_readonly(root)) {
		ret = -EROFS;
		goto out;
3349
	}
C
Christoph Hellwig 已提交
3350

3351 3352 3353 3354 3355 3356
	/* Subpage defrag will be supported in later commits */
	if (root->fs_info->sectorsize < PAGE_SIZE) {
		ret = -ENOTTY;
		goto out;
	}

C
Christoph Hellwig 已提交
3357 3358
	switch (inode->i_mode & S_IFMT) {
	case S_IFDIR:
3359 3360 3361 3362
		if (!capable(CAP_SYS_ADMIN)) {
			ret = -EPERM;
			goto out;
		}
3363
		ret = btrfs_defrag_root(root);
C
Christoph Hellwig 已提交
3364 3365
		break;
	case S_IFREG:
3366 3367 3368 3369 3370 3371
		/*
		 * Note that this does not check the file descriptor for write
		 * access. This prevents defragmenting executables that are
		 * running and allows defrag on files open in read-only mode.
		 */
		if (!capable(CAP_SYS_ADMIN) &&
3372
		    inode_permission(&init_user_ns, inode, MAY_WRITE)) {
3373
			ret = -EPERM;
3374 3375
			goto out;
		}
C
Chris Mason 已提交
3376 3377

		if (argp) {
3378
			if (copy_from_user(&range, argp, sizeof(range))) {
C
Chris Mason 已提交
3379
				ret = -EFAULT;
3380
				goto out;
C
Chris Mason 已提交
3381 3382
			}
			/* compression requires us to start the IO */
3383 3384 3385
			if ((range.flags & BTRFS_DEFRAG_RANGE_COMPRESS)) {
				range.flags |= BTRFS_DEFRAG_RANGE_START_IO;
				range.extent_thresh = (u32)-1;
C
Chris Mason 已提交
3386 3387 3388
			}
		} else {
			/* the rest are all set to zero by kzalloc */
3389
			range.len = (u64)-1;
C
Chris Mason 已提交
3390
		}
3391
		ret = btrfs_defrag_file(file_inode(file), &file->f_ra,
3392
					&range, BTRFS_OLDEST_GENERATION, 0);
C
Chris Mason 已提交
3393 3394
		if (ret > 0)
			ret = 0;
C
Christoph Hellwig 已提交
3395
		break;
3396 3397
	default:
		ret = -EINVAL;
C
Christoph Hellwig 已提交
3398
	}
3399
out:
3400
	mnt_drop_write_file(file);
3401
	return ret;
C
Christoph Hellwig 已提交
3402 3403
}

3404
static long btrfs_ioctl_add_dev(struct btrfs_fs_info *fs_info, void __user *arg)
C
Christoph Hellwig 已提交
3405 3406 3407 3408
{
	struct btrfs_ioctl_vol_args *vol_args;
	int ret;

3409 3410 3411
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

3412
	if (!btrfs_exclop_start(fs_info, BTRFS_EXCLOP_DEV_ADD))
3413
		return BTRFS_ERROR_DEV_EXCL_RUN_IN_PROGRESS;
3414

L
Li Zefan 已提交
3415
	vol_args = memdup_user(arg, sizeof(*vol_args));
3416 3417 3418 3419
	if (IS_ERR(vol_args)) {
		ret = PTR_ERR(vol_args);
		goto out;
	}
C
Christoph Hellwig 已提交
3420

3421
	vol_args->name[BTRFS_PATH_NAME_MAX] = '\0';
3422
	ret = btrfs_init_new_device(fs_info, vol_args->name);
C
Christoph Hellwig 已提交
3423

A
Anand Jain 已提交
3424
	if (!ret)
3425
		btrfs_info(fs_info, "disk added %s", vol_args->name);
A
Anand Jain 已提交
3426

C
Christoph Hellwig 已提交
3427
	kfree(vol_args);
3428
out:
3429
	btrfs_exclop_finish(fs_info);
C
Christoph Hellwig 已提交
3430 3431 3432
	return ret;
}

3433
static long btrfs_ioctl_rm_dev_v2(struct file *file, void __user *arg)
C
Christoph Hellwig 已提交
3434
{
3435 3436
	struct inode *inode = file_inode(file);
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
3437
	struct btrfs_ioctl_vol_args_v2 *vol_args;
3438 3439
	struct block_device *bdev = NULL;
	fmode_t mode;
C
Christoph Hellwig 已提交
3440
	int ret;
D
David Sterba 已提交
3441
	bool cancel = false;
C
Christoph Hellwig 已提交
3442

3443 3444 3445
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

3446 3447 3448
	ret = mnt_want_write_file(file);
	if (ret)
		return ret;
Y
Yan Zheng 已提交
3449

L
Li Zefan 已提交
3450
	vol_args = memdup_user(arg, sizeof(*vol_args));
3451 3452
	if (IS_ERR(vol_args)) {
		ret = PTR_ERR(vol_args);
D
Dan Carpenter 已提交
3453
		goto err_drop;
3454
	}
C
Christoph Hellwig 已提交
3455

3456
	if (vol_args->flags & ~BTRFS_DEVICE_REMOVE_ARGS_MASK) {
3457 3458 3459
		ret = -EOPNOTSUPP;
		goto out;
	}
D
David Sterba 已提交
3460 3461 3462 3463
	vol_args->name[BTRFS_SUBVOL_NAME_MAX] = '\0';
	if (!(vol_args->flags & BTRFS_DEVICE_SPEC_BY_ID) &&
	    strcmp("cancel", vol_args->name) == 0)
		cancel = true;
C
Christoph Hellwig 已提交
3464

D
David Sterba 已提交
3465 3466 3467
	ret = exclop_start_or_cancel_reloc(fs_info, BTRFS_EXCLOP_DEV_REMOVE,
					   cancel);
	if (ret)
3468
		goto out;
D
David Sterba 已提交
3469
	/* Exclusive operation is now claimed */
3470

D
David Sterba 已提交
3471
	if (vol_args->flags & BTRFS_DEVICE_SPEC_BY_ID)
3472
		ret = btrfs_rm_device(fs_info, NULL, vol_args->devid, &bdev, &mode);
D
David Sterba 已提交
3473
	else
3474
		ret = btrfs_rm_device(fs_info, vol_args->name, 0, &bdev, &mode);
D
David Sterba 已提交
3475

3476
	btrfs_exclop_finish(fs_info);
3477

3478
	if (!ret) {
3479
		if (vol_args->flags & BTRFS_DEVICE_SPEC_BY_ID)
3480
			btrfs_info(fs_info, "device deleted: id %llu",
3481 3482
					vol_args->devid);
		else
3483
			btrfs_info(fs_info, "device deleted: %s",
3484 3485
					vol_args->name);
	}
3486 3487
out:
	kfree(vol_args);
D
Dan Carpenter 已提交
3488
err_drop:
3489
	mnt_drop_write_file(file);
3490 3491
	if (bdev)
		blkdev_put(bdev, mode);
C
Christoph Hellwig 已提交
3492 3493 3494
	return ret;
}

3495
static long btrfs_ioctl_rm_dev(struct file *file, void __user *arg)
C
Christoph Hellwig 已提交
3496
{
3497 3498
	struct inode *inode = file_inode(file);
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
C
Christoph Hellwig 已提交
3499
	struct btrfs_ioctl_vol_args *vol_args;
3500 3501
	struct block_device *bdev = NULL;
	fmode_t mode;
C
Christoph Hellwig 已提交
3502
	int ret;
D
David Sterba 已提交
3503
	bool cancel;
C
Christoph Hellwig 已提交
3504

3505 3506 3507
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

3508 3509 3510
	ret = mnt_want_write_file(file);
	if (ret)
		return ret;
Y
Yan Zheng 已提交
3511

3512 3513 3514
	vol_args = memdup_user(arg, sizeof(*vol_args));
	if (IS_ERR(vol_args)) {
		ret = PTR_ERR(vol_args);
D
David Sterba 已提交
3515
		goto out_drop_write;
3516
	}
3517
	vol_args->name[BTRFS_PATH_NAME_MAX] = '\0';
D
David Sterba 已提交
3518 3519 3520 3521 3522
	cancel = (strcmp("cancel", vol_args->name) == 0);

	ret = exclop_start_or_cancel_reloc(fs_info, BTRFS_EXCLOP_DEV_REMOVE,
					   cancel);
	if (ret == 0) {
3523
		ret = btrfs_rm_device(fs_info, vol_args->name, 0, &bdev, &mode);
D
David Sterba 已提交
3524 3525 3526 3527
		if (!ret)
			btrfs_info(fs_info, "disk deleted %s", vol_args->name);
		btrfs_exclop_finish(fs_info);
	}
3528 3529

	kfree(vol_args);
3530
out_drop_write:
3531
	mnt_drop_write_file(file);
3532 3533
	if (bdev)
		blkdev_put(bdev, mode);
C
Christoph Hellwig 已提交
3534 3535 3536
	return ret;
}

3537 3538
static long btrfs_ioctl_fs_info(struct btrfs_fs_info *fs_info,
				void __user *arg)
J
Jan Schmidt 已提交
3539
{
3540
	struct btrfs_ioctl_fs_info_args *fi_args;
J
Jan Schmidt 已提交
3541
	struct btrfs_device *device;
3542
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
3543
	u64 flags_in;
3544
	int ret = 0;
J
Jan Schmidt 已提交
3545

3546 3547 3548 3549 3550 3551
	fi_args = memdup_user(arg, sizeof(*fi_args));
	if (IS_ERR(fi_args))
		return PTR_ERR(fi_args);

	flags_in = fi_args->flags;
	memset(fi_args, 0, sizeof(*fi_args));
3552

3553
	rcu_read_lock();
3554
	fi_args->num_devices = fs_devices->num_devices;
J
Jan Schmidt 已提交
3555

3556
	list_for_each_entry_rcu(device, &fs_devices->devices, dev_list) {
3557 3558
		if (device->devid > fi_args->max_id)
			fi_args->max_id = device->devid;
J
Jan Schmidt 已提交
3559
	}
3560
	rcu_read_unlock();
J
Jan Schmidt 已提交
3561

3562
	memcpy(&fi_args->fsid, fs_devices->fsid, sizeof(fi_args->fsid));
3563 3564 3565
	fi_args->nodesize = fs_info->nodesize;
	fi_args->sectorsize = fs_info->sectorsize;
	fi_args->clone_alignment = fs_info->sectorsize;
3566

3567 3568 3569 3570 3571 3572
	if (flags_in & BTRFS_FS_INFO_FLAG_CSUM_INFO) {
		fi_args->csum_type = btrfs_super_csum_type(fs_info->super_copy);
		fi_args->csum_size = btrfs_super_csum_size(fs_info->super_copy);
		fi_args->flags |= BTRFS_FS_INFO_FLAG_CSUM_INFO;
	}

3573 3574 3575 3576 3577
	if (flags_in & BTRFS_FS_INFO_FLAG_GENERATION) {
		fi_args->generation = fs_info->generation;
		fi_args->flags |= BTRFS_FS_INFO_FLAG_GENERATION;
	}

3578 3579 3580 3581 3582 3583
	if (flags_in & BTRFS_FS_INFO_FLAG_METADATA_UUID) {
		memcpy(&fi_args->metadata_uuid, fs_devices->metadata_uuid,
		       sizeof(fi_args->metadata_uuid));
		fi_args->flags |= BTRFS_FS_INFO_FLAG_METADATA_UUID;
	}

3584 3585
	if (copy_to_user(arg, fi_args, sizeof(*fi_args)))
		ret = -EFAULT;
J
Jan Schmidt 已提交
3586

3587 3588
	kfree(fi_args);
	return ret;
J
Jan Schmidt 已提交
3589 3590
}

3591 3592
static long btrfs_ioctl_dev_info(struct btrfs_fs_info *fs_info,
				 void __user *arg)
J
Jan Schmidt 已提交
3593 3594 3595 3596 3597 3598 3599 3600 3601 3602
{
	struct btrfs_ioctl_dev_info_args *di_args;
	struct btrfs_device *dev;
	int ret = 0;
	char *s_uuid = NULL;

	di_args = memdup_user(arg, sizeof(*di_args));
	if (IS_ERR(di_args))
		return PTR_ERR(di_args);

3603
	if (!btrfs_is_empty_uuid(di_args->uuid))
J
Jan Schmidt 已提交
3604 3605
		s_uuid = di_args->uuid;

3606
	rcu_read_lock();
3607
	dev = btrfs_find_device(fs_info->fs_devices, di_args->devid, s_uuid,
3608
				NULL);
J
Jan Schmidt 已提交
3609 3610 3611 3612 3613 3614 3615

	if (!dev) {
		ret = -ENODEV;
		goto out;
	}

	di_args->devid = dev->devid;
3616 3617
	di_args->bytes_used = btrfs_device_get_bytes_used(dev);
	di_args->total_bytes = btrfs_device_get_total_bytes(dev);
J
Jan Schmidt 已提交
3618
	memcpy(di_args->uuid, dev->uuid, sizeof(di_args->uuid));
3619
	if (dev->name) {
3620 3621
		strncpy(di_args->path, rcu_str_deref(dev->name),
				sizeof(di_args->path) - 1);
3622 3623
		di_args->path[sizeof(di_args->path) - 1] = 0;
	} else {
3624
		di_args->path[0] = '\0';
3625
	}
J
Jan Schmidt 已提交
3626 3627

out:
3628
	rcu_read_unlock();
J
Jan Schmidt 已提交
3629 3630 3631 3632 3633 3634 3635
	if (ret == 0 && copy_to_user(arg, di_args, sizeof(*di_args)))
		ret = -EFAULT;

	kfree(di_args);
	return ret;
}

3636 3637
static long btrfs_ioctl_default_subvol(struct file *file, void __user *argp)
{
A
Al Viro 已提交
3638
	struct inode *inode = file_inode(file);
3639
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
3640 3641 3642 3643
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_root *new_root;
	struct btrfs_dir_item *di;
	struct btrfs_trans_handle *trans;
3644
	struct btrfs_path *path = NULL;
3645 3646 3647
	struct btrfs_disk_key disk_key;
	u64 objectid = 0;
	u64 dir_id;
3648
	int ret;
3649 3650 3651 3652

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

3653 3654 3655 3656 3657 3658 3659 3660
	ret = mnt_want_write_file(file);
	if (ret)
		return ret;

	if (copy_from_user(&objectid, argp, sizeof(objectid))) {
		ret = -EFAULT;
		goto out;
	}
3661 3662

	if (!objectid)
3663
		objectid = BTRFS_FS_TREE_OBJECTID;
3664

D
David Sterba 已提交
3665
	new_root = btrfs_get_fs_root(fs_info, objectid, true);
3666 3667 3668 3669
	if (IS_ERR(new_root)) {
		ret = PTR_ERR(new_root);
		goto out;
	}
3670 3671 3672 3673
	if (!is_fstree(new_root->root_key.objectid)) {
		ret = -ENOENT;
		goto out_free;
	}
3674 3675

	path = btrfs_alloc_path();
3676 3677
	if (!path) {
		ret = -ENOMEM;
3678
		goto out_free;
3679
	}
3680 3681

	trans = btrfs_start_transaction(root, 1);
3682
	if (IS_ERR(trans)) {
3683
		ret = PTR_ERR(trans);
3684
		goto out_free;
3685 3686
	}

3687 3688
	dir_id = btrfs_super_root_dir(fs_info->super_copy);
	di = btrfs_lookup_dir_item(trans, fs_info->tree_root, path,
3689
				   dir_id, "default", 7, 1);
3690
	if (IS_ERR_OR_NULL(di)) {
3691
		btrfs_release_path(path);
3692
		btrfs_end_transaction(trans);
3693
		btrfs_err(fs_info,
J
Jeff Mahoney 已提交
3694
			  "Umm, you don't have the default diritem, this isn't going to work");
3695
		ret = -ENOENT;
3696
		goto out_free;
3697 3698 3699 3700 3701
	}

	btrfs_cpu_key_to_disk(&disk_key, &new_root->root_key);
	btrfs_set_dir_item_key(path->nodes[0], di, &disk_key);
	btrfs_mark_buffer_dirty(path->nodes[0]);
3702
	btrfs_release_path(path);
3703

3704
	btrfs_set_fs_incompat(fs_info, DEFAULT_SUBVOL);
3705
	btrfs_end_transaction(trans);
3706
out_free:
3707
	btrfs_put_root(new_root);
3708
	btrfs_free_path(path);
3709 3710 3711
out:
	mnt_drop_write_file(file);
	return ret;
3712 3713
}

3714 3715
static void get_block_group_info(struct list_head *groups_list,
				 struct btrfs_ioctl_space_info *space)
3716
{
3717
	struct btrfs_block_group *block_group;
3718 3719 3720 3721 3722 3723

	space->total_bytes = 0;
	space->used_bytes = 0;
	space->flags = 0;
	list_for_each_entry(block_group, groups_list, list) {
		space->flags = block_group->flags;
3724
		space->total_bytes += block_group->length;
3725
		space->used_bytes += block_group->used;
3726 3727 3728
	}
}

3729 3730
static long btrfs_ioctl_space_info(struct btrfs_fs_info *fs_info,
				   void __user *arg)
J
Josef Bacik 已提交
3731 3732 3733 3734
{
	struct btrfs_ioctl_space_args space_args;
	struct btrfs_ioctl_space_info space;
	struct btrfs_ioctl_space_info *dest;
3735
	struct btrfs_ioctl_space_info *dest_orig;
3736
	struct btrfs_ioctl_space_info __user *user_dest;
J
Josef Bacik 已提交
3737
	struct btrfs_space_info *info;
3738 3739 3740 3741 3742 3743
	static const u64 types[] = {
		BTRFS_BLOCK_GROUP_DATA,
		BTRFS_BLOCK_GROUP_SYSTEM,
		BTRFS_BLOCK_GROUP_METADATA,
		BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_METADATA
	};
3744
	int num_types = 4;
3745
	int alloc_size;
J
Josef Bacik 已提交
3746
	int ret = 0;
3747
	u64 slot_count = 0;
3748
	int i, c;
J
Josef Bacik 已提交
3749 3750 3751 3752 3753 3754

	if (copy_from_user(&space_args,
			   (struct btrfs_ioctl_space_args __user *)arg,
			   sizeof(space_args)))
		return -EFAULT;

3755 3756 3757 3758
	for (i = 0; i < num_types; i++) {
		struct btrfs_space_info *tmp;

		info = NULL;
3759
		list_for_each_entry(tmp, &fs_info->space_info, list) {
3760 3761 3762 3763 3764 3765 3766 3767 3768 3769 3770 3771 3772 3773 3774 3775
			if (tmp->flags == types[i]) {
				info = tmp;
				break;
			}
		}

		if (!info)
			continue;

		down_read(&info->groups_sem);
		for (c = 0; c < BTRFS_NR_RAID_TYPES; c++) {
			if (!list_empty(&info->block_groups[c]))
				slot_count++;
		}
		up_read(&info->groups_sem);
	}
3776

3777 3778 3779 3780 3781
	/*
	 * Global block reserve, exported as a space_info
	 */
	slot_count++;

3782 3783 3784 3785 3786
	/* space_slots == 0 means they are asking for a count */
	if (space_args.space_slots == 0) {
		space_args.total_spaces = slot_count;
		goto out;
	}
3787

3788
	slot_count = min_t(u64, space_args.space_slots, slot_count);
3789

3790
	alloc_size = sizeof(*dest) * slot_count;
3791

3792 3793 3794
	/* we generally have at most 6 or so space infos, one for each raid
	 * level.  So, a whole page should be more than enough for everyone
	 */
3795
	if (alloc_size > PAGE_SIZE)
3796 3797
		return -ENOMEM;

J
Josef Bacik 已提交
3798
	space_args.total_spaces = 0;
3799
	dest = kmalloc(alloc_size, GFP_KERNEL);
3800 3801 3802
	if (!dest)
		return -ENOMEM;
	dest_orig = dest;
J
Josef Bacik 已提交
3803

3804
	/* now we have a buffer to copy into */
3805 3806 3807
	for (i = 0; i < num_types; i++) {
		struct btrfs_space_info *tmp;

3808 3809 3810
		if (!slot_count)
			break;

3811
		info = NULL;
3812
		list_for_each_entry(tmp, &fs_info->space_info, list) {
3813 3814 3815 3816 3817
			if (tmp->flags == types[i]) {
				info = tmp;
				break;
			}
		}
3818

3819 3820 3821 3822 3823
		if (!info)
			continue;
		down_read(&info->groups_sem);
		for (c = 0; c < BTRFS_NR_RAID_TYPES; c++) {
			if (!list_empty(&info->block_groups[c])) {
3824 3825
				get_block_group_info(&info->block_groups[c],
						     &space);
3826 3827 3828
				memcpy(dest, &space, sizeof(space));
				dest++;
				space_args.total_spaces++;
3829
				slot_count--;
3830
			}
3831 3832
			if (!slot_count)
				break;
3833 3834
		}
		up_read(&info->groups_sem);
J
Josef Bacik 已提交
3835 3836
	}

3837 3838 3839 3840
	/*
	 * Add global block reserve
	 */
	if (slot_count) {
3841
		struct btrfs_block_rsv *block_rsv = &fs_info->global_block_rsv;
3842 3843 3844 3845 3846 3847 3848 3849 3850 3851

		spin_lock(&block_rsv->lock);
		space.total_bytes = block_rsv->size;
		space.used_bytes = block_rsv->size - block_rsv->reserved;
		spin_unlock(&block_rsv->lock);
		space.flags = BTRFS_SPACE_INFO_GLOBAL_RSV;
		memcpy(dest, &space, sizeof(space));
		space_args.total_spaces++;
	}

D
Daniel J Blueman 已提交
3852
	user_dest = (struct btrfs_ioctl_space_info __user *)
3853 3854 3855 3856 3857 3858 3859 3860
		(arg + sizeof(struct btrfs_ioctl_space_args));

	if (copy_to_user(user_dest, dest_orig, alloc_size))
		ret = -EFAULT;

	kfree(dest_orig);
out:
	if (ret == 0 && copy_to_user(arg, &space_args, sizeof(space_args)))
J
Josef Bacik 已提交
3861 3862 3863 3864 3865
		ret = -EFAULT;

	return ret;
}

3866 3867
static noinline long btrfs_ioctl_start_sync(struct btrfs_root *root,
					    void __user *argp)
3868 3869 3870
{
	struct btrfs_trans_handle *trans;
	u64 transid;
T
Tsutomu Itoh 已提交
3871
	int ret;
3872

M
Miao Xie 已提交
3873
	trans = btrfs_attach_transaction_barrier(root);
3874 3875 3876 3877 3878 3879 3880 3881
	if (IS_ERR(trans)) {
		if (PTR_ERR(trans) != -ENOENT)
			return PTR_ERR(trans);

		/* No running transaction, don't bother */
		transid = root->fs_info->last_trans_committed;
		goto out;
	}
3882
	transid = trans->transid;
3883
	ret = btrfs_commit_transaction_async(trans);
3884
	if (ret) {
3885
		btrfs_end_transaction(trans);
T
Tsutomu Itoh 已提交
3886
		return ret;
3887
	}
3888
out:
3889 3890 3891 3892 3893 3894
	if (argp)
		if (copy_to_user(argp, &transid, sizeof(transid)))
			return -EFAULT;
	return 0;
}

3895
static noinline long btrfs_ioctl_wait_sync(struct btrfs_fs_info *fs_info,
3896
					   void __user *argp)
3897 3898 3899 3900 3901 3902 3903 3904 3905
{
	u64 transid;

	if (argp) {
		if (copy_from_user(&transid, argp, sizeof(transid)))
			return -EFAULT;
	} else {
		transid = 0;  /* current trans */
	}
3906
	return btrfs_wait_for_commit(fs_info, transid);
3907 3908
}

M
Miao Xie 已提交
3909
static long btrfs_ioctl_scrub(struct file *file, void __user *arg)
J
Jan Schmidt 已提交
3910
{
3911
	struct btrfs_fs_info *fs_info = btrfs_sb(file_inode(file)->i_sb);
J
Jan Schmidt 已提交
3912
	struct btrfs_ioctl_scrub_args *sa;
M
Miao Xie 已提交
3913
	int ret;
J
Jan Schmidt 已提交
3914 3915 3916 3917 3918 3919 3920 3921

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	sa = memdup_user(arg, sizeof(*sa));
	if (IS_ERR(sa))
		return PTR_ERR(sa);

M
Miao Xie 已提交
3922 3923 3924 3925 3926 3927
	if (!(sa->flags & BTRFS_SCRUB_READONLY)) {
		ret = mnt_want_write_file(file);
		if (ret)
			goto out;
	}

3928
	ret = btrfs_scrub_dev(fs_info, sa->devid, sa->start, sa->end,
3929 3930
			      &sa->progress, sa->flags & BTRFS_SCRUB_READONLY,
			      0);
J
Jan Schmidt 已提交
3931

3932 3933 3934 3935 3936 3937 3938 3939 3940 3941 3942 3943 3944
	/*
	 * Copy scrub args to user space even if btrfs_scrub_dev() returned an
	 * error. This is important as it allows user space to know how much
	 * progress scrub has done. For example, if scrub is canceled we get
	 * -ECANCELED from btrfs_scrub_dev() and return that error back to user
	 * space. Later user space can inspect the progress from the structure
	 * btrfs_ioctl_scrub_args and resume scrub from where it left off
	 * previously (btrfs-progs does this).
	 * If we fail to copy the btrfs_ioctl_scrub_args structure to user space
	 * then return -EFAULT to signal the structure was not copied or it may
	 * be corrupt and unreliable due to a partial copy.
	 */
	if (copy_to_user(arg, sa, sizeof(*sa)))
J
Jan Schmidt 已提交
3945 3946
		ret = -EFAULT;

M
Miao Xie 已提交
3947 3948 3949
	if (!(sa->flags & BTRFS_SCRUB_READONLY))
		mnt_drop_write_file(file);
out:
J
Jan Schmidt 已提交
3950 3951 3952 3953
	kfree(sa);
	return ret;
}

3954
static long btrfs_ioctl_scrub_cancel(struct btrfs_fs_info *fs_info)
J
Jan Schmidt 已提交
3955 3956 3957 3958
{
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

3959
	return btrfs_scrub_cancel(fs_info);
J
Jan Schmidt 已提交
3960 3961
}

3962
static long btrfs_ioctl_scrub_progress(struct btrfs_fs_info *fs_info,
J
Jan Schmidt 已提交
3963 3964 3965 3966 3967 3968 3969 3970 3971 3972 3973 3974
				       void __user *arg)
{
	struct btrfs_ioctl_scrub_args *sa;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	sa = memdup_user(arg, sizeof(*sa));
	if (IS_ERR(sa))
		return PTR_ERR(sa);

3975
	ret = btrfs_scrub_progress(fs_info, sa->devid, &sa->progress);
J
Jan Schmidt 已提交
3976

3977
	if (ret == 0 && copy_to_user(arg, sa, sizeof(*sa)))
J
Jan Schmidt 已提交
3978 3979 3980 3981 3982 3983
		ret = -EFAULT;

	kfree(sa);
	return ret;
}

3984
static long btrfs_ioctl_get_dev_stats(struct btrfs_fs_info *fs_info,
3985
				      void __user *arg)
3986 3987 3988 3989 3990 3991 3992 3993
{
	struct btrfs_ioctl_get_dev_stats *sa;
	int ret;

	sa = memdup_user(arg, sizeof(*sa));
	if (IS_ERR(sa))
		return PTR_ERR(sa);

3994 3995 3996 3997 3998
	if ((sa->flags & BTRFS_DEV_STATS_RESET) && !capable(CAP_SYS_ADMIN)) {
		kfree(sa);
		return -EPERM;
	}

3999
	ret = btrfs_get_dev_stats(fs_info, sa);
4000

4001
	if (ret == 0 && copy_to_user(arg, sa, sizeof(*sa)))
4002 4003 4004 4005 4006 4007
		ret = -EFAULT;

	kfree(sa);
	return ret;
}

4008 4009
static long btrfs_ioctl_dev_replace(struct btrfs_fs_info *fs_info,
				    void __user *arg)
4010 4011 4012 4013 4014 4015 4016 4017 4018 4019 4020 4021 4022
{
	struct btrfs_ioctl_dev_replace_args *p;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	p = memdup_user(arg, sizeof(*p));
	if (IS_ERR(p))
		return PTR_ERR(p);

	switch (p->cmd) {
	case BTRFS_IOCTL_DEV_REPLACE_CMD_START:
4023
		if (sb_rdonly(fs_info->sb)) {
4024 4025 4026
			ret = -EROFS;
			goto out;
		}
4027
		if (!btrfs_exclop_start(fs_info, BTRFS_EXCLOP_DEV_REPLACE)) {
4028
			ret = BTRFS_ERROR_DEV_EXCL_RUN_IN_PROGRESS;
4029
		} else {
4030
			ret = btrfs_dev_replace_by_ioctl(fs_info, p);
4031
			btrfs_exclop_finish(fs_info);
4032 4033 4034
		}
		break;
	case BTRFS_IOCTL_DEV_REPLACE_CMD_STATUS:
4035
		btrfs_dev_replace_status(fs_info, p);
4036 4037 4038
		ret = 0;
		break;
	case BTRFS_IOCTL_DEV_REPLACE_CMD_CANCEL:
4039
		p->result = btrfs_dev_replace_cancel(fs_info);
4040
		ret = 0;
4041 4042 4043 4044 4045 4046
		break;
	default:
		ret = -EINVAL;
		break;
	}

4047
	if ((ret == 0 || ret == -ECANCELED) && copy_to_user(arg, p, sizeof(*p)))
4048
		ret = -EFAULT;
4049
out:
4050 4051 4052 4053
	kfree(p);
	return ret;
}

4054 4055 4056 4057
static long btrfs_ioctl_ino_to_path(struct btrfs_root *root, void __user *arg)
{
	int ret = 0;
	int i;
4058
	u64 rel_ptr;
4059
	int size;
4060
	struct btrfs_ioctl_ino_path_args *ipa = NULL;
4061 4062 4063
	struct inode_fs_paths *ipath = NULL;
	struct btrfs_path *path;

4064
	if (!capable(CAP_DAC_READ_SEARCH))
4065 4066 4067 4068 4069 4070 4071 4072 4073 4074 4075 4076 4077 4078 4079 4080 4081 4082 4083 4084 4085 4086 4087 4088 4089 4090 4091 4092
		return -EPERM;

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}

	ipa = memdup_user(arg, sizeof(*ipa));
	if (IS_ERR(ipa)) {
		ret = PTR_ERR(ipa);
		ipa = NULL;
		goto out;
	}

	size = min_t(u32, ipa->size, 4096);
	ipath = init_ipath(size, root, path);
	if (IS_ERR(ipath)) {
		ret = PTR_ERR(ipath);
		ipath = NULL;
		goto out;
	}

	ret = paths_from_inode(ipa->inum, ipath);
	if (ret < 0)
		goto out;

	for (i = 0; i < ipath->fspath->elem_cnt; ++i) {
4093 4094
		rel_ptr = ipath->fspath->val[i] -
			  (u64)(unsigned long)ipath->fspath->val;
4095
		ipath->fspath->val[i] = rel_ptr;
4096 4097
	}

4098 4099
	ret = copy_to_user((void __user *)(unsigned long)ipa->fspath,
			   ipath->fspath, size);
4100 4101 4102 4103 4104 4105 4106 4107 4108 4109 4110 4111 4112 4113 4114 4115 4116 4117 4118 4119 4120 4121 4122 4123 4124 4125 4126 4127 4128 4129 4130 4131 4132
	if (ret) {
		ret = -EFAULT;
		goto out;
	}

out:
	btrfs_free_path(path);
	free_ipath(ipath);
	kfree(ipa);

	return ret;
}

/*
 * Backref-iteration callback: append one (inum, offset, root) triple to the
 * btrfs_data_container passed in ctx, or account it as missed when the
 * container has no room left. Always returns 0 so iteration continues.
 */
static int build_ino_list(u64 inum, u64 offset, u64 root, void *ctx)
{
	struct btrfs_data_container *inodes = ctx;
	const size_t entry_size = 3 * sizeof(u64);

	if (inodes->bytes_left < entry_size) {
		/* No room: record how much data had to be dropped. */
		inodes->bytes_missing += entry_size - inodes->bytes_left;
		inodes->bytes_left = 0;
		inodes->elem_missed += 3;
		return 0;
	}

	inodes->bytes_left -= entry_size;
	inodes->val[inodes->elem_cnt++] = inum;
	inodes->val[inodes->elem_cnt++] = offset;
	inodes->val[inodes->elem_cnt++] = root;

	return 0;
}

4133
static long btrfs_ioctl_logical_to_ino(struct btrfs_fs_info *fs_info,
4134
					void __user *arg, int version)
4135 4136 4137 4138 4139 4140
{
	int ret = 0;
	int size;
	struct btrfs_ioctl_logical_ino_args *loi;
	struct btrfs_data_container *inodes = NULL;
	struct btrfs_path *path = NULL;
4141
	bool ignore_offset;
4142 4143 4144 4145 4146

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	loi = memdup_user(arg, sizeof(*loi));
4147 4148
	if (IS_ERR(loi))
		return PTR_ERR(loi);
4149

4150 4151
	if (version == 1) {
		ignore_offset = false;
4152
		size = min_t(u32, loi->size, SZ_64K);
4153 4154 4155 4156 4157 4158 4159 4160 4161 4162 4163 4164
	} else {
		/* All reserved bits must be 0 for now */
		if (memchr_inv(loi->reserved, 0, sizeof(loi->reserved))) {
			ret = -EINVAL;
			goto out_loi;
		}
		/* Only accept flags we have defined so far */
		if (loi->flags & ~(BTRFS_LOGICAL_INO_ARGS_IGNORE_OFFSET)) {
			ret = -EINVAL;
			goto out_loi;
		}
		ignore_offset = loi->flags & BTRFS_LOGICAL_INO_ARGS_IGNORE_OFFSET;
4165
		size = min_t(u32, loi->size, SZ_16M);
4166 4167
	}

4168 4169 4170 4171 4172 4173 4174 4175 4176 4177 4178 4179 4180
	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}

	inodes = init_data_container(size);
	if (IS_ERR(inodes)) {
		ret = PTR_ERR(inodes);
		inodes = NULL;
		goto out;
	}

4181
	ret = iterate_inodes_from_logical(loi->logical, fs_info, path,
4182
					  build_ino_list, inodes, ignore_offset);
L
Liu Bo 已提交
4183
	if (ret == -EINVAL)
4184 4185 4186 4187
		ret = -ENOENT;
	if (ret < 0)
		goto out;

4188 4189
	ret = copy_to_user((void __user *)(unsigned long)loi->inodes, inodes,
			   size);
4190 4191 4192 4193 4194
	if (ret)
		ret = -EFAULT;

out:
	btrfs_free_path(path);
4195
	kvfree(inodes);
4196
out_loi:
4197 4198 4199 4200 4201
	kfree(loi);

	return ret;
}

4202
void btrfs_update_ioctl_balance_args(struct btrfs_fs_info *fs_info,
4203 4204 4205 4206 4207 4208
			       struct btrfs_ioctl_balance_args *bargs)
{
	struct btrfs_balance_control *bctl = fs_info->balance_ctl;

	bargs->flags = bctl->flags;

4209
	if (test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags))
4210 4211 4212
		bargs->state |= BTRFS_BALANCE_STATE_RUNNING;
	if (atomic_read(&fs_info->balance_pause_req))
		bargs->state |= BTRFS_BALANCE_STATE_PAUSE_REQ;
4213 4214
	if (atomic_read(&fs_info->balance_cancel_req))
		bargs->state |= BTRFS_BALANCE_STATE_CANCEL_REQ;
4215

4216 4217 4218
	memcpy(&bargs->data, &bctl->data, sizeof(bargs->data));
	memcpy(&bargs->meta, &bctl->meta, sizeof(bargs->meta));
	memcpy(&bargs->sys, &bctl->sys, sizeof(bargs->sys));
4219

4220 4221 4222
	spin_lock(&fs_info->balance_lock);
	memcpy(&bargs->stat, &bctl->stat, sizeof(bargs->stat));
	spin_unlock(&fs_info->balance_lock);
4223 4224
}

4225
static long btrfs_ioctl_balance(struct file *file, void __user *arg)
4226
{
A
Al Viro 已提交
4227
	struct btrfs_root *root = BTRFS_I(file_inode(file))->root;
4228 4229 4230
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_ioctl_balance_args *bargs;
	struct btrfs_balance_control *bctl;
4231
	bool need_unlock; /* for mut. excl. ops lock */
4232 4233 4234 4235 4236
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

4237
	ret = mnt_want_write_file(file);
4238 4239 4240
	if (ret)
		return ret;

4241
again:
4242
	if (btrfs_exclop_start(fs_info, BTRFS_EXCLOP_BALANCE)) {
4243 4244 4245 4246 4247 4248
		mutex_lock(&fs_info->balance_mutex);
		need_unlock = true;
		goto locked;
	}

	/*
4249
	 * mut. excl. ops lock is locked.  Three possibilities:
4250 4251 4252 4253
	 *   (1) some other op is running
	 *   (2) balance is running
	 *   (3) balance is paused -- special case (think resume)
	 */
4254
	mutex_lock(&fs_info->balance_mutex);
4255 4256
	if (fs_info->balance_ctl) {
		/* this is either (2) or (3) */
4257
		if (!test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)) {
4258
			mutex_unlock(&fs_info->balance_mutex);
4259 4260 4261 4262
			/*
			 * Lock released to allow other waiters to continue,
			 * we'll reexamine the status again.
			 */
4263 4264 4265
			mutex_lock(&fs_info->balance_mutex);

			if (fs_info->balance_ctl &&
4266
			    !test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)) {
4267 4268 4269 4270 4271 4272 4273 4274 4275 4276 4277 4278 4279 4280 4281 4282
				/* this is (3) */
				need_unlock = false;
				goto locked;
			}

			mutex_unlock(&fs_info->balance_mutex);
			goto again;
		} else {
			/* this is (2) */
			mutex_unlock(&fs_info->balance_mutex);
			ret = -EINPROGRESS;
			goto out;
		}
	} else {
		/* this is (1) */
		mutex_unlock(&fs_info->balance_mutex);
4283
		ret = BTRFS_ERROR_DEV_EXCL_RUN_IN_PROGRESS;
4284 4285 4286 4287
		goto out;
	}

locked:
4288 4289 4290 4291 4292

	if (arg) {
		bargs = memdup_user(arg, sizeof(*bargs));
		if (IS_ERR(bargs)) {
			ret = PTR_ERR(bargs);
4293
			goto out_unlock;
4294
		}
4295 4296 4297 4298 4299 4300 4301 4302 4303 4304 4305 4306 4307 4308

		if (bargs->flags & BTRFS_BALANCE_RESUME) {
			if (!fs_info->balance_ctl) {
				ret = -ENOTCONN;
				goto out_bargs;
			}

			bctl = fs_info->balance_ctl;
			spin_lock(&fs_info->balance_lock);
			bctl->flags |= BTRFS_BALANCE_RESUME;
			spin_unlock(&fs_info->balance_lock);

			goto do_balance;
		}
4309 4310 4311 4312
	} else {
		bargs = NULL;
	}

4313
	if (fs_info->balance_ctl) {
4314 4315 4316 4317
		ret = -EINPROGRESS;
		goto out_bargs;
	}

4318
	bctl = kzalloc(sizeof(*bctl), GFP_KERNEL);
4319 4320 4321 4322 4323 4324 4325 4326 4327 4328 4329
	if (!bctl) {
		ret = -ENOMEM;
		goto out_bargs;
	}

	if (arg) {
		memcpy(&bctl->data, &bargs->data, sizeof(bctl->data));
		memcpy(&bctl->meta, &bargs->meta, sizeof(bctl->meta));
		memcpy(&bctl->sys, &bargs->sys, sizeof(bctl->sys));

		bctl->flags = bargs->flags;
4330 4331 4332
	} else {
		/* balance everything - no filters */
		bctl->flags |= BTRFS_BALANCE_TYPE_MASK;
4333 4334
	}

4335 4336
	if (bctl->flags & ~(BTRFS_BALANCE_ARGS_MASK | BTRFS_BALANCE_TYPE_MASK)) {
		ret = -EINVAL;
4337
		goto out_bctl;
4338 4339
	}

4340
do_balance:
4341
	/*
4342 4343 4344 4345
	 * Ownership of bctl and exclusive operation goes to btrfs_balance.
	 * bctl is freed in reset_balance_state, or, if restriper was paused
	 * all the way until unmount, in free_fs_info.  The flag should be
	 * cleared after reset_balance_state.
4346
	 */
4347 4348
	need_unlock = false;

4349
	ret = btrfs_balance(fs_info, bctl, bargs);
4350
	bctl = NULL;
4351

4352
	if ((ret == 0 || ret == -ECANCELED) && arg) {
4353 4354 4355 4356
		if (copy_to_user(arg, bargs, sizeof(*bargs)))
			ret = -EFAULT;
	}

4357 4358
out_bctl:
	kfree(bctl);
4359 4360
out_bargs:
	kfree(bargs);
4361
out_unlock:
4362
	mutex_unlock(&fs_info->balance_mutex);
4363
	if (need_unlock)
4364
		btrfs_exclop_finish(fs_info);
4365
out:
4366
	mnt_drop_write_file(file);
4367 4368 4369
	return ret;
}

4370
static long btrfs_ioctl_balance_ctl(struct btrfs_fs_info *fs_info, int cmd)
4371 4372 4373 4374 4375 4376
{
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	switch (cmd) {
	case BTRFS_BALANCE_CTL_PAUSE:
4377
		return btrfs_pause_balance(fs_info);
4378
	case BTRFS_BALANCE_CTL_CANCEL:
4379
		return btrfs_cancel_balance(fs_info);
4380 4381 4382 4383 4384
	}

	return -EINVAL;
}

4385
static long btrfs_ioctl_balance_progress(struct btrfs_fs_info *fs_info,
4386 4387 4388 4389 4390 4391 4392 4393 4394 4395 4396 4397 4398 4399
					 void __user *arg)
{
	struct btrfs_ioctl_balance_args *bargs;
	int ret = 0;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	mutex_lock(&fs_info->balance_mutex);
	if (!fs_info->balance_ctl) {
		ret = -ENOTCONN;
		goto out;
	}

4400
	bargs = kzalloc(sizeof(*bargs), GFP_KERNEL);
4401 4402 4403 4404 4405
	if (!bargs) {
		ret = -ENOMEM;
		goto out;
	}

4406
	btrfs_update_ioctl_balance_args(fs_info, bargs);
4407 4408 4409 4410 4411 4412 4413 4414 4415 4416

	if (copy_to_user(arg, bargs, sizeof(*bargs)))
		ret = -EFAULT;

	kfree(bargs);
out:
	mutex_unlock(&fs_info->balance_mutex);
	return ret;
}

4417
static long btrfs_ioctl_quota_ctl(struct file *file, void __user *arg)
A
Arne Jansen 已提交
4418
{
4419 4420
	struct inode *inode = file_inode(file);
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
A
Arne Jansen 已提交
4421 4422 4423 4424 4425 4426
	struct btrfs_ioctl_quota_ctl_args *sa;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

4427 4428 4429
	ret = mnt_want_write_file(file);
	if (ret)
		return ret;
A
Arne Jansen 已提交
4430 4431

	sa = memdup_user(arg, sizeof(*sa));
4432 4433 4434 4435
	if (IS_ERR(sa)) {
		ret = PTR_ERR(sa);
		goto drop_write;
	}
A
Arne Jansen 已提交
4436

4437
	down_write(&fs_info->subvol_sem);
A
Arne Jansen 已提交
4438 4439 4440

	switch (sa->cmd) {
	case BTRFS_QUOTA_CTL_ENABLE:
4441
		ret = btrfs_quota_enable(fs_info);
A
Arne Jansen 已提交
4442 4443
		break;
	case BTRFS_QUOTA_CTL_DISABLE:
4444
		ret = btrfs_quota_disable(fs_info);
A
Arne Jansen 已提交
4445 4446 4447 4448 4449 4450 4451
		break;
	default:
		ret = -EINVAL;
		break;
	}

	kfree(sa);
4452
	up_write(&fs_info->subvol_sem);
4453 4454
drop_write:
	mnt_drop_write_file(file);
A
Arne Jansen 已提交
4455 4456 4457
	return ret;
}

4458
static long btrfs_ioctl_qgroup_assign(struct file *file, void __user *arg)
A
Arne Jansen 已提交
4459
{
4460 4461 4462
	struct inode *inode = file_inode(file);
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_root *root = BTRFS_I(inode)->root;
A
Arne Jansen 已提交
4463 4464 4465 4466 4467 4468 4469 4470
	struct btrfs_ioctl_qgroup_assign_args *sa;
	struct btrfs_trans_handle *trans;
	int ret;
	int err;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

4471 4472 4473
	ret = mnt_want_write_file(file);
	if (ret)
		return ret;
A
Arne Jansen 已提交
4474 4475

	sa = memdup_user(arg, sizeof(*sa));
4476 4477 4478 4479
	if (IS_ERR(sa)) {
		ret = PTR_ERR(sa);
		goto drop_write;
	}
A
Arne Jansen 已提交
4480 4481 4482 4483 4484 4485 4486 4487

	trans = btrfs_join_transaction(root);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto out;
	}

	if (sa->assign) {
4488
		ret = btrfs_add_qgroup_relation(trans, sa->src, sa->dst);
A
Arne Jansen 已提交
4489
	} else {
4490
		ret = btrfs_del_qgroup_relation(trans, sa->src, sa->dst);
A
Arne Jansen 已提交
4491 4492
	}

4493
	/* update qgroup status and info */
4494
	err = btrfs_run_qgroups(trans);
4495
	if (err < 0)
4496 4497
		btrfs_handle_fs_error(fs_info, err,
				      "failed to update qgroup status and info");
4498
	err = btrfs_end_transaction(trans);
A
Arne Jansen 已提交
4499 4500 4501 4502 4503
	if (err && !ret)
		ret = err;

out:
	kfree(sa);
4504 4505
drop_write:
	mnt_drop_write_file(file);
A
Arne Jansen 已提交
4506 4507 4508
	return ret;
}

4509
static long btrfs_ioctl_qgroup_create(struct file *file, void __user *arg)
A
Arne Jansen 已提交
4510
{
4511 4512
	struct inode *inode = file_inode(file);
	struct btrfs_root *root = BTRFS_I(inode)->root;
A
Arne Jansen 已提交
4513 4514 4515 4516 4517 4518 4519 4520
	struct btrfs_ioctl_qgroup_create_args *sa;
	struct btrfs_trans_handle *trans;
	int ret;
	int err;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

4521 4522 4523
	ret = mnt_want_write_file(file);
	if (ret)
		return ret;
A
Arne Jansen 已提交
4524 4525

	sa = memdup_user(arg, sizeof(*sa));
4526 4527 4528 4529
	if (IS_ERR(sa)) {
		ret = PTR_ERR(sa);
		goto drop_write;
	}
A
Arne Jansen 已提交
4530

M
Miao Xie 已提交
4531 4532 4533 4534 4535
	if (!sa->qgroupid) {
		ret = -EINVAL;
		goto out;
	}

A
Arne Jansen 已提交
4536 4537 4538 4539 4540 4541 4542
	trans = btrfs_join_transaction(root);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto out;
	}

	if (sa->create) {
4543
		ret = btrfs_create_qgroup(trans, sa->qgroupid);
A
Arne Jansen 已提交
4544
	} else {
4545
		ret = btrfs_remove_qgroup(trans, sa->qgroupid);
A
Arne Jansen 已提交
4546 4547
	}

4548
	err = btrfs_end_transaction(trans);
A
Arne Jansen 已提交
4549 4550 4551 4552 4553
	if (err && !ret)
		ret = err;

out:
	kfree(sa);
4554 4555
drop_write:
	mnt_drop_write_file(file);
A
Arne Jansen 已提交
4556 4557 4558
	return ret;
}

4559
static long btrfs_ioctl_qgroup_limit(struct file *file, void __user *arg)
A
Arne Jansen 已提交
4560
{
4561 4562
	struct inode *inode = file_inode(file);
	struct btrfs_root *root = BTRFS_I(inode)->root;
A
Arne Jansen 已提交
4563 4564 4565 4566 4567 4568 4569 4570 4571
	struct btrfs_ioctl_qgroup_limit_args *sa;
	struct btrfs_trans_handle *trans;
	int ret;
	int err;
	u64 qgroupid;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

4572 4573 4574
	ret = mnt_want_write_file(file);
	if (ret)
		return ret;
A
Arne Jansen 已提交
4575 4576

	sa = memdup_user(arg, sizeof(*sa));
4577 4578 4579 4580
	if (IS_ERR(sa)) {
		ret = PTR_ERR(sa);
		goto drop_write;
	}
A
Arne Jansen 已提交
4581 4582 4583 4584 4585 4586 4587 4588 4589 4590 4591 4592 4593

	trans = btrfs_join_transaction(root);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto out;
	}

	qgroupid = sa->qgroupid;
	if (!qgroupid) {
		/* take the current subvol as qgroup */
		qgroupid = root->root_key.objectid;
	}

4594
	ret = btrfs_limit_qgroup(trans, qgroupid, &sa->lim);
A
Arne Jansen 已提交
4595

4596
	err = btrfs_end_transaction(trans);
A
Arne Jansen 已提交
4597 4598 4599 4600 4601
	if (err && !ret)
		ret = err;

out:
	kfree(sa);
4602 4603
drop_write:
	mnt_drop_write_file(file);
A
Arne Jansen 已提交
4604 4605 4606
	return ret;
}

J
Jan Schmidt 已提交
4607 4608
static long btrfs_ioctl_quota_rescan(struct file *file, void __user *arg)
{
4609 4610
	struct inode *inode = file_inode(file);
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
J
Jan Schmidt 已提交
4611 4612 4613 4614 4615 4616 4617 4618 4619 4620 4621 4622 4623 4624 4625 4626 4627 4628 4629 4630 4631
	struct btrfs_ioctl_quota_rescan_args *qsa;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	ret = mnt_want_write_file(file);
	if (ret)
		return ret;

	qsa = memdup_user(arg, sizeof(*qsa));
	if (IS_ERR(qsa)) {
		ret = PTR_ERR(qsa);
		goto drop_write;
	}

	if (qsa->flags) {
		ret = -EINVAL;
		goto out;
	}

4632
	ret = btrfs_qgroup_rescan(fs_info);
J
Jan Schmidt 已提交
4633 4634 4635 4636 4637 4638 4639 4640

out:
	kfree(qsa);
drop_write:
	mnt_drop_write_file(file);
	return ret;
}

4641 4642
static long btrfs_ioctl_quota_rescan_status(struct btrfs_fs_info *fs_info,
						void __user *arg)
J
Jan Schmidt 已提交
4643
{
4644
	struct btrfs_ioctl_quota_rescan_args qsa = {0};
J
Jan Schmidt 已提交
4645 4646 4647 4648

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

4649
	if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN) {
4650 4651
		qsa.flags = 1;
		qsa.progress = fs_info->qgroup_rescan_progress.objectid;
J
Jan Schmidt 已提交
4652 4653
	}

4654
	if (copy_to_user(arg, &qsa, sizeof(qsa)))
4655
		return -EFAULT;
J
Jan Schmidt 已提交
4656

4657
	return 0;
J
Jan Schmidt 已提交
4658 4659
}

4660 4661
static long btrfs_ioctl_quota_rescan_wait(struct btrfs_fs_info *fs_info,
						void __user *arg)
4662 4663 4664 4665
{
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

4666
	return btrfs_qgroup_wait_for_completion(fs_info, true);
4667 4668
}

4669
static long _btrfs_ioctl_set_received_subvol(struct file *file,
4670
					    struct user_namespace *mnt_userns,
4671
					    struct btrfs_ioctl_received_subvol_args *sa)
4672
{
A
Al Viro 已提交
4673
	struct inode *inode = file_inode(file);
4674
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
4675 4676 4677
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_root_item *root_item = &root->root_item;
	struct btrfs_trans_handle *trans;
4678
	struct timespec64 ct = current_time(inode);
4679
	int ret = 0;
4680
	int received_uuid_changed;
4681

4682
	if (!inode_owner_or_capable(mnt_userns, inode))
4683 4684
		return -EPERM;

4685 4686 4687 4688
	ret = mnt_want_write_file(file);
	if (ret < 0)
		return ret;

4689
	down_write(&fs_info->subvol_sem);
4690

4691
	if (btrfs_ino(BTRFS_I(inode)) != BTRFS_FIRST_FREE_OBJECTID) {
4692 4693 4694 4695 4696 4697 4698 4699 4700
		ret = -EINVAL;
		goto out;
	}

	if (btrfs_root_readonly(root)) {
		ret = -EROFS;
		goto out;
	}

4701 4702 4703 4704 4705
	/*
	 * 1 - root item
	 * 2 - uuid items (received uuid + subvol uuid)
	 */
	trans = btrfs_start_transaction(root, 3);
4706 4707 4708 4709 4710 4711 4712 4713 4714 4715
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		trans = NULL;
		goto out;
	}

	sa->rtransid = trans->transid;
	sa->rtime.sec = ct.tv_sec;
	sa->rtime.nsec = ct.tv_nsec;

4716 4717 4718
	received_uuid_changed = memcmp(root_item->received_uuid, sa->uuid,
				       BTRFS_UUID_SIZE);
	if (received_uuid_changed &&
4719
	    !btrfs_is_empty_uuid(root_item->received_uuid)) {
4720
		ret = btrfs_uuid_tree_remove(trans, root_item->received_uuid,
4721 4722 4723 4724 4725 4726 4727 4728
					  BTRFS_UUID_KEY_RECEIVED_SUBVOL,
					  root->root_key.objectid);
		if (ret && ret != -ENOENT) {
		        btrfs_abort_transaction(trans, ret);
		        btrfs_end_transaction(trans);
		        goto out;
		}
	}
4729 4730 4731
	memcpy(root_item->received_uuid, sa->uuid, BTRFS_UUID_SIZE);
	btrfs_set_root_stransid(root_item, sa->stransid);
	btrfs_set_root_rtransid(root_item, sa->rtransid);
4732 4733 4734 4735
	btrfs_set_stack_timespec_sec(&root_item->stime, sa->stime.sec);
	btrfs_set_stack_timespec_nsec(&root_item->stime, sa->stime.nsec);
	btrfs_set_stack_timespec_sec(&root_item->rtime, sa->rtime.sec);
	btrfs_set_stack_timespec_nsec(&root_item->rtime, sa->rtime.nsec);
4736

4737
	ret = btrfs_update_root(trans, fs_info->tree_root,
4738 4739
				&root->root_key, &root->root_item);
	if (ret < 0) {
4740
		btrfs_end_transaction(trans);
4741
		goto out;
4742 4743
	}
	if (received_uuid_changed && !btrfs_is_empty_uuid(sa->uuid)) {
4744
		ret = btrfs_uuid_tree_add(trans, sa->uuid,
4745 4746 4747
					  BTRFS_UUID_KEY_RECEIVED_SUBVOL,
					  root->root_key.objectid);
		if (ret < 0 && ret != -EEXIST) {
4748
			btrfs_abort_transaction(trans, ret);
4749
			btrfs_end_transaction(trans);
4750
			goto out;
4751 4752
		}
	}
4753
	ret = btrfs_commit_transaction(trans);
4754
out:
4755
	up_write(&fs_info->subvol_sem);
4756 4757 4758 4759 4760 4761 4762 4763 4764 4765 4766 4767 4768
	mnt_drop_write_file(file);
	return ret;
}

#ifdef CONFIG_64BIT
/*
 * 32-bit compat entry point for BTRFS_IOC_SET_RECEIVED_SUBVOL: convert
 * the packed 32-bit argument struct to the native 64-bit layout, call
 * the common implementation, then convert the (updated) result back and
 * copy it to userspace.
 */
static long btrfs_ioctl_set_received_subvol_32(struct file *file,
						void __user *arg)
{
	struct btrfs_ioctl_received_subvol_args_32 *args32 = NULL;
	struct btrfs_ioctl_received_subvol_args *args64 = NULL;
	int ret = 0;

	args32 = memdup_user(arg, sizeof(*args32));
	if (IS_ERR(args32))
		return PTR_ERR(args32);

	args64 = kmalloc(sizeof(*args64), GFP_KERNEL);
	if (!args64) {
		ret = -ENOMEM;
		goto out;
	}

	memcpy(args64->uuid, args32->uuid, BTRFS_UUID_SIZE);
	args64->stransid = args32->stransid;
	args64->rtransid = args32->rtransid;
	args64->stime.sec = args32->stime.sec;
	args64->stime.nsec = args32->stime.nsec;
	args64->rtime.sec = args32->rtime.sec;
	args64->rtime.nsec = args32->rtime.nsec;
	args64->flags = args32->flags;

	ret = _btrfs_ioctl_set_received_subvol(file, file_mnt_user_ns(file), args64);
	if (ret)
		goto out;

	memcpy(args32->uuid, args64->uuid, BTRFS_UUID_SIZE);
	args32->stransid = args64->stransid;
	args32->rtransid = args64->rtransid;
	args32->stime.sec = args64->stime.sec;
	args32->stime.nsec = args64->stime.nsec;
	args32->rtime.sec = args64->rtime.sec;
	args32->rtime.nsec = args64->rtime.nsec;
	args32->flags = args64->flags;

	ret = copy_to_user(arg, args32, sizeof(*args32));
	if (ret)
		ret = -EFAULT;

out:
	kfree(args32);
	kfree(args64);
	return ret;
}
#endif

/*
 * Native entry point for BTRFS_IOC_SET_RECEIVED_SUBVOL: copy the
 * argument in, run the common implementation, and copy the updated
 * argument (receive transid/time) back to userspace.
 */
static long btrfs_ioctl_set_received_subvol(struct file *file,
					    void __user *arg)
{
	struct btrfs_ioctl_received_subvol_args *sa = NULL;
	int ret = 0;

	sa = memdup_user(arg, sizeof(*sa));
	if (IS_ERR(sa))
		return PTR_ERR(sa);

	ret = _btrfs_ioctl_set_received_subvol(file, file_mnt_user_ns(file), sa);

	if (ret)
		goto out;

	ret = copy_to_user(arg, sa, sizeof(*sa));
	if (ret)
		ret = -EFAULT;

out:
	kfree(sa);
	return ret;
}

4835 4836
static int btrfs_ioctl_get_fslabel(struct btrfs_fs_info *fs_info,
					void __user *arg)
4837
{
4838
	size_t len;
4839
	int ret;
4840 4841
	char label[BTRFS_LABEL_SIZE];

4842 4843 4844
	spin_lock(&fs_info->super_lock);
	memcpy(label, fs_info->super_copy->label, BTRFS_LABEL_SIZE);
	spin_unlock(&fs_info->super_lock);
4845 4846

	len = strnlen(label, BTRFS_LABEL_SIZE);
4847 4848

	if (len == BTRFS_LABEL_SIZE) {
4849 4850 4851
		btrfs_warn(fs_info,
			   "label is too long, return the first %zu bytes",
			   --len);
4852 4853 4854 4855 4856 4857 4858
	}

	ret = copy_to_user(arg, label, len);

	return ret ? -EFAULT : 0;
}

4859 4860
static int btrfs_ioctl_set_fslabel(struct file *file, void __user *arg)
{
4861 4862 4863 4864
	struct inode *inode = file_inode(file);
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_super_block *super_block = fs_info->super_copy;
4865 4866 4867 4868 4869 4870 4871 4872 4873 4874 4875
	struct btrfs_trans_handle *trans;
	char label[BTRFS_LABEL_SIZE];
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (copy_from_user(label, arg, sizeof(label)))
		return -EFAULT;

	if (strnlen(label, BTRFS_LABEL_SIZE) == BTRFS_LABEL_SIZE) {
4876
		btrfs_err(fs_info,
J
Jeff Mahoney 已提交
4877 4878
			  "unable to set label with more than %d bytes",
			  BTRFS_LABEL_SIZE - 1);
4879 4880 4881 4882 4883 4884 4885 4886 4887 4888 4889 4890 4891
		return -EINVAL;
	}

	ret = mnt_want_write_file(file);
	if (ret)
		return ret;

	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto out_unlock;
	}

4892
	spin_lock(&fs_info->super_lock);
4893
	strcpy(super_block->label, label);
4894
	spin_unlock(&fs_info->super_lock);
4895
	ret = btrfs_commit_transaction(trans);
4896 4897 4898 4899 4900 4901

out_unlock:
	mnt_drop_write_file(file);
	return ret;
}

4902 4903 4904 4905 4906
#define INIT_FEATURE_FLAGS(suffix) \
	{ .compat_flags = BTRFS_FEATURE_COMPAT_##suffix, \
	  .compat_ro_flags = BTRFS_FEATURE_COMPAT_RO_##suffix, \
	  .incompat_flags = BTRFS_FEATURE_INCOMPAT_##suffix }

4907
int btrfs_ioctl_get_supported_features(void __user *arg)
4908
{
D
David Sterba 已提交
4909
	static const struct btrfs_ioctl_feature_flags features[3] = {
4910 4911 4912 4913 4914 4915 4916 4917 4918 4919 4920
		INIT_FEATURE_FLAGS(SUPP),
		INIT_FEATURE_FLAGS(SAFE_SET),
		INIT_FEATURE_FLAGS(SAFE_CLEAR)
	};

	if (copy_to_user(arg, &features, sizeof(features)))
		return -EFAULT;

	return 0;
}

4921 4922
static int btrfs_ioctl_get_features(struct btrfs_fs_info *fs_info,
					void __user *arg)
4923
{
4924
	struct btrfs_super_block *super_block = fs_info->super_copy;
4925 4926 4927 4928 4929 4930 4931 4932 4933 4934 4935 4936
	struct btrfs_ioctl_feature_flags features;

	features.compat_flags = btrfs_super_compat_flags(super_block);
	features.compat_ro_flags = btrfs_super_compat_ro_flags(super_block);
	features.incompat_flags = btrfs_super_incompat_flags(super_block);

	if (copy_to_user(arg, &features, sizeof(features)))
		return -EFAULT;

	return 0;
}

4937
static int check_feature_bits(struct btrfs_fs_info *fs_info,
4938
			      enum btrfs_feature_set set,
4939 4940 4941
			      u64 change_mask, u64 flags, u64 supported_flags,
			      u64 safe_set, u64 safe_clear)
{
4942
	const char *type = btrfs_feature_set_name(set);
4943
	char *names;
4944 4945 4946 4947 4948 4949
	u64 disallowed, unsupported;
	u64 set_mask = flags & change_mask;
	u64 clear_mask = ~flags & change_mask;

	unsupported = set_mask & ~supported_flags;
	if (unsupported) {
4950 4951
		names = btrfs_printable_features(set, unsupported);
		if (names) {
4952 4953 4954
			btrfs_warn(fs_info,
				   "this kernel does not support the %s feature bit%s",
				   names, strchr(names, ',') ? "s" : "");
4955 4956
			kfree(names);
		} else
4957 4958 4959
			btrfs_warn(fs_info,
				   "this kernel does not support %s bits 0x%llx",
				   type, unsupported);
4960 4961 4962 4963 4964
		return -EOPNOTSUPP;
	}

	disallowed = set_mask & ~safe_set;
	if (disallowed) {
4965 4966
		names = btrfs_printable_features(set, disallowed);
		if (names) {
4967 4968 4969
			btrfs_warn(fs_info,
				   "can't set the %s feature bit%s while mounted",
				   names, strchr(names, ',') ? "s" : "");
4970 4971
			kfree(names);
		} else
4972 4973 4974
			btrfs_warn(fs_info,
				   "can't set %s bits 0x%llx while mounted",
				   type, disallowed);
4975 4976 4977 4978 4979
		return -EPERM;
	}

	disallowed = clear_mask & ~safe_clear;
	if (disallowed) {
4980 4981
		names = btrfs_printable_features(set, disallowed);
		if (names) {
4982 4983 4984
			btrfs_warn(fs_info,
				   "can't clear the %s feature bit%s while mounted",
				   names, strchr(names, ',') ? "s" : "");
4985 4986
			kfree(names);
		} else
4987 4988 4989
			btrfs_warn(fs_info,
				   "can't clear %s bits 0x%llx while mounted",
				   type, disallowed);
4990 4991 4992 4993 4994 4995
		return -EPERM;
	}

	return 0;
}

4996 4997
#define check_feature(fs_info, change_mask, flags, mask_base)	\
check_feature_bits(fs_info, FEAT_##mask_base, change_mask, flags,	\
4998 4999 5000 5001 5002 5003
		   BTRFS_FEATURE_ ## mask_base ## _SUPP,	\
		   BTRFS_FEATURE_ ## mask_base ## _SAFE_SET,	\
		   BTRFS_FEATURE_ ## mask_base ## _SAFE_CLEAR)

static int btrfs_ioctl_set_features(struct file *file, void __user *arg)
{
5004 5005 5006 5007
	struct inode *inode = file_inode(file);
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_super_block *super_block = fs_info->super_copy;
5008 5009 5010 5011 5012 5013 5014 5015 5016 5017 5018 5019 5020 5021 5022 5023
	struct btrfs_ioctl_feature_flags flags[2];
	struct btrfs_trans_handle *trans;
	u64 newflags;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (copy_from_user(flags, arg, sizeof(flags)))
		return -EFAULT;

	/* Nothing to do */
	if (!flags[0].compat_flags && !flags[0].compat_ro_flags &&
	    !flags[0].incompat_flags)
		return 0;

5024
	ret = check_feature(fs_info, flags[0].compat_flags,
5025 5026 5027 5028
			    flags[1].compat_flags, COMPAT);
	if (ret)
		return ret;

5029
	ret = check_feature(fs_info, flags[0].compat_ro_flags,
5030 5031 5032 5033
			    flags[1].compat_ro_flags, COMPAT_RO);
	if (ret)
		return ret;

5034
	ret = check_feature(fs_info, flags[0].incompat_flags,
5035 5036 5037 5038
			    flags[1].incompat_flags, INCOMPAT);
	if (ret)
		return ret;

5039 5040 5041 5042
	ret = mnt_want_write_file(file);
	if (ret)
		return ret;

5043
	trans = btrfs_start_transaction(root, 0);
5044 5045 5046 5047
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto out_drop_write;
	}
5048

5049
	spin_lock(&fs_info->super_lock);
5050 5051 5052 5053 5054 5055 5056 5057 5058 5059 5060 5061 5062 5063
	newflags = btrfs_super_compat_flags(super_block);
	newflags |= flags[0].compat_flags & flags[1].compat_flags;
	newflags &= ~(flags[0].compat_flags & ~flags[1].compat_flags);
	btrfs_set_super_compat_flags(super_block, newflags);

	newflags = btrfs_super_compat_ro_flags(super_block);
	newflags |= flags[0].compat_ro_flags & flags[1].compat_ro_flags;
	newflags &= ~(flags[0].compat_ro_flags & ~flags[1].compat_ro_flags);
	btrfs_set_super_compat_ro_flags(super_block, newflags);

	newflags = btrfs_super_incompat_flags(super_block);
	newflags |= flags[0].incompat_flags & flags[1].incompat_flags;
	newflags &= ~(flags[0].incompat_flags & ~flags[1].incompat_flags);
	btrfs_set_super_incompat_flags(super_block, newflags);
5064
	spin_unlock(&fs_info->super_lock);
5065

5066
	ret = btrfs_commit_transaction(trans);
5067 5068 5069 5070
out_drop_write:
	mnt_drop_write_file(file);

	return ret;
5071 5072
}

5073 5074 5075 5076 5077 5078 5079 5080 5081 5082 5083 5084 5085 5086 5087 5088 5089 5090 5091 5092 5093 5094 5095 5096 5097 5098 5099 5100 5101 5102 5103 5104 5105 5106 5107
static int _btrfs_ioctl_send(struct file *file, void __user *argp, bool compat)
{
	struct btrfs_ioctl_send_args *arg;
	int ret;

	if (compat) {
#if defined(CONFIG_64BIT) && defined(CONFIG_COMPAT)
		struct btrfs_ioctl_send_args_32 args32;

		ret = copy_from_user(&args32, argp, sizeof(args32));
		if (ret)
			return -EFAULT;
		arg = kzalloc(sizeof(*arg), GFP_KERNEL);
		if (!arg)
			return -ENOMEM;
		arg->send_fd = args32.send_fd;
		arg->clone_sources_count = args32.clone_sources_count;
		arg->clone_sources = compat_ptr(args32.clone_sources);
		arg->parent_root = args32.parent_root;
		arg->flags = args32.flags;
		memcpy(arg->reserved, args32.reserved,
		       sizeof(args32.reserved));
#else
		return -ENOTTY;
#endif
	} else {
		arg = memdup_user(argp, sizeof(*arg));
		if (IS_ERR(arg))
			return PTR_ERR(arg);
	}
	ret = btrfs_ioctl_send(file, arg);
	kfree(arg);
	return ret;
}

C
Christoph Hellwig 已提交
5108 5109 5110
long btrfs_ioctl(struct file *file, unsigned int
		cmd, unsigned long arg)
{
5111 5112 5113
	struct inode *inode = file_inode(file);
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_root *root = BTRFS_I(inode)->root;
5114
	void __user *argp = (void __user *)arg;
C
Christoph Hellwig 已提交
5115 5116

	switch (cmd) {
5117 5118
	case FS_IOC_GETVERSION:
		return btrfs_ioctl_getversion(file, argp);
5119
	case FS_IOC_GETFSLABEL:
5120
		return btrfs_ioctl_get_fslabel(fs_info, argp);
5121 5122
	case FS_IOC_SETFSLABEL:
		return btrfs_ioctl_set_fslabel(file, argp);
5123
	case FITRIM:
5124
		return btrfs_ioctl_fitrim(fs_info, argp);
C
Christoph Hellwig 已提交
5125
	case BTRFS_IOC_SNAP_CREATE:
5126
		return btrfs_ioctl_snap_create(file, argp, 0);
5127
	case BTRFS_IOC_SNAP_CREATE_V2:
5128
		return btrfs_ioctl_snap_create_v2(file, argp, 0);
5129
	case BTRFS_IOC_SUBVOL_CREATE:
5130
		return btrfs_ioctl_snap_create(file, argp, 1);
A
Arne Jansen 已提交
5131 5132
	case BTRFS_IOC_SUBVOL_CREATE_V2:
		return btrfs_ioctl_snap_create_v2(file, argp, 1);
5133
	case BTRFS_IOC_SNAP_DESTROY:
5134 5135 5136
		return btrfs_ioctl_snap_destroy(file, argp, false);
	case BTRFS_IOC_SNAP_DESTROY_V2:
		return btrfs_ioctl_snap_destroy(file, argp, true);
5137 5138 5139 5140
	case BTRFS_IOC_SUBVOL_GETFLAGS:
		return btrfs_ioctl_subvol_getflags(file, argp);
	case BTRFS_IOC_SUBVOL_SETFLAGS:
		return btrfs_ioctl_subvol_setflags(file, argp);
5141 5142
	case BTRFS_IOC_DEFAULT_SUBVOL:
		return btrfs_ioctl_default_subvol(file, argp);
C
Christoph Hellwig 已提交
5143
	case BTRFS_IOC_DEFRAG:
C
Chris Mason 已提交
5144 5145 5146
		return btrfs_ioctl_defrag(file, NULL);
	case BTRFS_IOC_DEFRAG_RANGE:
		return btrfs_ioctl_defrag(file, argp);
C
Christoph Hellwig 已提交
5147
	case BTRFS_IOC_RESIZE:
5148
		return btrfs_ioctl_resize(file, argp);
C
Christoph Hellwig 已提交
5149
	case BTRFS_IOC_ADD_DEV:
5150
		return btrfs_ioctl_add_dev(fs_info, argp);
C
Christoph Hellwig 已提交
5151
	case BTRFS_IOC_RM_DEV:
5152
		return btrfs_ioctl_rm_dev(file, argp);
5153 5154
	case BTRFS_IOC_RM_DEV_V2:
		return btrfs_ioctl_rm_dev_v2(file, argp);
J
Jan Schmidt 已提交
5155
	case BTRFS_IOC_FS_INFO:
5156
		return btrfs_ioctl_fs_info(fs_info, argp);
J
Jan Schmidt 已提交
5157
	case BTRFS_IOC_DEV_INFO:
5158
		return btrfs_ioctl_dev_info(fs_info, argp);
C
Christoph Hellwig 已提交
5159
	case BTRFS_IOC_BALANCE:
5160
		return btrfs_ioctl_balance(file, NULL);
5161 5162
	case BTRFS_IOC_TREE_SEARCH:
		return btrfs_ioctl_tree_search(file, argp);
G
Gerhard Heift 已提交
5163 5164
	case BTRFS_IOC_TREE_SEARCH_V2:
		return btrfs_ioctl_tree_search_v2(file, argp);
5165 5166
	case BTRFS_IOC_INO_LOOKUP:
		return btrfs_ioctl_ino_lookup(file, argp);
5167 5168 5169
	case BTRFS_IOC_INO_PATHS:
		return btrfs_ioctl_ino_to_path(root, argp);
	case BTRFS_IOC_LOGICAL_INO:
5170 5171 5172
		return btrfs_ioctl_logical_to_ino(fs_info, argp, 1);
	case BTRFS_IOC_LOGICAL_INO_V2:
		return btrfs_ioctl_logical_to_ino(fs_info, argp, 2);
J
Josef Bacik 已提交
5173
	case BTRFS_IOC_SPACE_INFO:
5174
		return btrfs_ioctl_space_info(fs_info, argp);
5175 5176 5177
	case BTRFS_IOC_SYNC: {
		int ret;

5178
		ret = btrfs_start_delalloc_roots(fs_info, LONG_MAX, false);
5179 5180
		if (ret)
			return ret;
5181
		ret = btrfs_sync_fs(inode->i_sb, 1);
5182 5183
		/*
		 * The transaction thread may want to do more work,
5184
		 * namely it pokes the cleaner kthread that will start
5185 5186
		 * processing uncleaned subvols.
		 */
5187
		wake_up_process(fs_info->transaction_kthread);
5188 5189
		return ret;
	}
5190
	case BTRFS_IOC_START_SYNC:
5191
		return btrfs_ioctl_start_sync(root, argp);
5192
	case BTRFS_IOC_WAIT_SYNC:
5193
		return btrfs_ioctl_wait_sync(fs_info, argp);
J
Jan Schmidt 已提交
5194
	case BTRFS_IOC_SCRUB:
M
Miao Xie 已提交
5195
		return btrfs_ioctl_scrub(file, argp);
J
Jan Schmidt 已提交
5196
	case BTRFS_IOC_SCRUB_CANCEL:
5197
		return btrfs_ioctl_scrub_cancel(fs_info);
J
Jan Schmidt 已提交
5198
	case BTRFS_IOC_SCRUB_PROGRESS:
5199
		return btrfs_ioctl_scrub_progress(fs_info, argp);
5200
	case BTRFS_IOC_BALANCE_V2:
5201
		return btrfs_ioctl_balance(file, argp);
5202
	case BTRFS_IOC_BALANCE_CTL:
5203
		return btrfs_ioctl_balance_ctl(fs_info, arg);
5204
	case BTRFS_IOC_BALANCE_PROGRESS:
5205
		return btrfs_ioctl_balance_progress(fs_info, argp);
5206 5207
	case BTRFS_IOC_SET_RECEIVED_SUBVOL:
		return btrfs_ioctl_set_received_subvol(file, argp);
5208 5209 5210 5211
#ifdef CONFIG_64BIT
	case BTRFS_IOC_SET_RECEIVED_SUBVOL_32:
		return btrfs_ioctl_set_received_subvol_32(file, argp);
#endif
5212
	case BTRFS_IOC_SEND:
5213 5214 5215 5216 5217
		return _btrfs_ioctl_send(file, argp, false);
#if defined(CONFIG_64BIT) && defined(CONFIG_COMPAT)
	case BTRFS_IOC_SEND_32:
		return _btrfs_ioctl_send(file, argp, true);
#endif
5218
	case BTRFS_IOC_GET_DEV_STATS:
5219
		return btrfs_ioctl_get_dev_stats(fs_info, argp);
A
Arne Jansen 已提交
5220
	case BTRFS_IOC_QUOTA_CTL:
5221
		return btrfs_ioctl_quota_ctl(file, argp);
A
Arne Jansen 已提交
5222
	case BTRFS_IOC_QGROUP_ASSIGN:
5223
		return btrfs_ioctl_qgroup_assign(file, argp);
A
Arne Jansen 已提交
5224
	case BTRFS_IOC_QGROUP_CREATE:
5225
		return btrfs_ioctl_qgroup_create(file, argp);
A
Arne Jansen 已提交
5226
	case BTRFS_IOC_QGROUP_LIMIT:
5227
		return btrfs_ioctl_qgroup_limit(file, argp);
J
Jan Schmidt 已提交
5228 5229 5230
	case BTRFS_IOC_QUOTA_RESCAN:
		return btrfs_ioctl_quota_rescan(file, argp);
	case BTRFS_IOC_QUOTA_RESCAN_STATUS:
5231
		return btrfs_ioctl_quota_rescan_status(fs_info, argp);
5232
	case BTRFS_IOC_QUOTA_RESCAN_WAIT:
5233
		return btrfs_ioctl_quota_rescan_wait(fs_info, argp);
5234
	case BTRFS_IOC_DEV_REPLACE:
5235
		return btrfs_ioctl_dev_replace(fs_info, argp);
5236
	case BTRFS_IOC_GET_SUPPORTED_FEATURES:
5237
		return btrfs_ioctl_get_supported_features(argp);
5238
	case BTRFS_IOC_GET_FEATURES:
5239
		return btrfs_ioctl_get_features(fs_info, argp);
5240 5241
	case BTRFS_IOC_SET_FEATURES:
		return btrfs_ioctl_set_features(file, argp);
5242 5243
	case BTRFS_IOC_GET_SUBVOL_INFO:
		return btrfs_ioctl_get_subvol_info(file, argp);
5244 5245
	case BTRFS_IOC_GET_SUBVOL_ROOTREF:
		return btrfs_ioctl_get_subvol_rootref(file, argp);
5246 5247
	case BTRFS_IOC_INO_LOOKUP_USER:
		return btrfs_ioctl_ino_lookup_user(file, argp);
B
Boris Burkov 已提交
5248 5249 5250 5251
	case FS_IOC_ENABLE_VERITY:
		return fsverity_ioctl_enable(file, (const void __user *)argp);
	case FS_IOC_MEASURE_VERITY:
		return fsverity_ioctl_measure(file, argp);
C
Christoph Hellwig 已提交
5252 5253 5254 5255
	}

	return -ENOTTY;
}
5256 5257 5258 5259

#ifdef CONFIG_COMPAT
long btrfs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	/*
	 * Compat entry point for 32-bit userspace on a 64-bit kernel.
	 *
	 * The only command needing translation is FS_IOC32_GETVERSION,
	 * which is remapped to its native counterpart; it only carries a
	 * 32-bit value, so no argument conversion beyond compat_ptr() is
	 * required before handing off to the native ioctl handler.
	 */
	if (cmd == FS_IOC32_GETVERSION)
		cmd = FS_IOC_GETVERSION;

	return btrfs_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
}
#endif