ioctl.c 146.1 KB
Newer Older
1
// SPDX-License-Identifier: GPL-2.0
C
Christoph Hellwig 已提交
2 3 4 5 6 7 8 9
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 */

#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/file.h>
#include <linux/fs.h>
10
#include <linux/fsnotify.h>
C
Christoph Hellwig 已提交
11 12 13 14 15
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/time.h>
#include <linux/string.h>
#include <linux/backing-dev.h>
16 17
#include <linux/mount.h>
#include <linux/namei.h>
C
Christoph Hellwig 已提交
18 19
#include <linux/writeback.h>
#include <linux/compat.h>
20
#include <linux/security.h>
C
Christoph Hellwig 已提交
21
#include <linux/xattr.h>
22
#include <linux/mm.h>
23
#include <linux/slab.h>
24
#include <linux/blkdev.h>
25
#include <linux/uuid.h>
26
#include <linux/btrfs.h>
M
Mark Fasheh 已提交
27
#include <linux/uaccess.h>
28
#include <linux/iversion.h>
C
Christoph Hellwig 已提交
29 30 31 32 33 34
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "print-tree.h"
#include "volumes.h"
35
#include "locking.h"
36
#include "inode-map.h"
37
#include "backref.h"
38
#include "rcu-string.h"
39
#include "send.h"
40
#include "dev-replace.h"
41
#include "props.h"
42
#include "sysfs.h"
J
Josef Bacik 已提交
43
#include "qgroup.h"
44
#include "tree-log.h"
45
#include "compression.h"
C
Christoph Hellwig 已提交
46

47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71
#ifdef CONFIG_64BIT
/* If we have a 32-bit userspace and 64-bit kernel, then the UAPI
 * structures are incorrect, as the timespec structure from userspace
 * is 4 bytes too small. We define these alternatives here to teach
 * the kernel about the 32-bit struct packing.
 */
struct btrfs_ioctl_timespec_32 {
	__u64 sec;
	__u32 nsec;
} __attribute__ ((__packed__));

struct btrfs_ioctl_received_subvol_args_32 {
	char	uuid[BTRFS_UUID_SIZE];	/* in */
	__u64	stransid;		/* in */
	__u64	rtransid;		/* out */
	struct btrfs_ioctl_timespec_32 stime; /* in */
	struct btrfs_ioctl_timespec_32 rtime; /* out */
	__u64	flags;			/* in */
	__u64	reserved[16];		/* in */
} __attribute__ ((__packed__));

#define BTRFS_IOC_SET_RECEIVED_SUBVOL_32 _IOWR(BTRFS_IOCTL_MAGIC, 37, \
				struct btrfs_ioctl_received_subvol_args_32)
#endif

72 73 74 75 76 77 78 79 80 81 82 83 84
#if defined(CONFIG_64BIT) && defined(CONFIG_COMPAT)
struct btrfs_ioctl_send_args_32 {
	__s64 send_fd;			/* in */
	__u64 clone_sources_count;	/* in */
	compat_uptr_t clone_sources;	/* in */
	__u64 parent_root;		/* in */
	__u64 flags;			/* in */
	__u64 reserved[4];		/* in */
} __attribute__ ((__packed__));

#define BTRFS_IOC_SEND_32 _IOW(BTRFS_IOCTL_MAGIC, 38, \
			       struct btrfs_ioctl_send_args_32)
#endif
85

M
Mark Fasheh 已提交
86
static int btrfs_clone(struct inode *src, struct inode *inode,
87 88
		       u64 off, u64 olen, u64 olen_aligned, u64 destoff,
		       int no_time_update);
M
Mark Fasheh 已提交
89

90
/* Mask out flags that are inappropriate for the given type of inode. */
91 92
static unsigned int btrfs_mask_fsflags_for_type(struct inode *inode,
		unsigned int flags)
93
{
94
	if (S_ISDIR(inode->i_mode))
95
		return flags;
96
	else if (S_ISREG(inode->i_mode))
97 98 99 100 101 102
		return flags & ~FS_DIRSYNC_FL;
	else
		return flags & (FS_NODUMP_FL | FS_NOATIME_FL);
}

/*
103 104
 * Export internal inode flags to the format expected by the FS_IOC_GETFLAGS
 * ioctl.
105
 */
106
static unsigned int btrfs_inode_flags_to_fsflags(unsigned int flags)
107 108 109 110 111 112 113 114 115 116 117 118 119 120 121
{
	unsigned int iflags = 0;

	if (flags & BTRFS_INODE_SYNC)
		iflags |= FS_SYNC_FL;
	if (flags & BTRFS_INODE_IMMUTABLE)
		iflags |= FS_IMMUTABLE_FL;
	if (flags & BTRFS_INODE_APPEND)
		iflags |= FS_APPEND_FL;
	if (flags & BTRFS_INODE_NODUMP)
		iflags |= FS_NODUMP_FL;
	if (flags & BTRFS_INODE_NOATIME)
		iflags |= FS_NOATIME_FL;
	if (flags & BTRFS_INODE_DIRSYNC)
		iflags |= FS_DIRSYNC_FL;
L
Li Zefan 已提交
122 123 124
	if (flags & BTRFS_INODE_NODATACOW)
		iflags |= FS_NOCOW_FL;

125
	if (flags & BTRFS_INODE_NOCOMPRESS)
L
Li Zefan 已提交
126
		iflags |= FS_NOCOMP_FL;
127 128
	else if (flags & BTRFS_INODE_COMPRESS)
		iflags |= FS_COMPR_FL;
129 130 131 132 133 134 135

	return iflags;
}

/*
 * Update inode->i_flags based on the btrfs internal flags.
 */
136
void btrfs_sync_inode_flags_to_i_flags(struct inode *inode)
137
{
138
	struct btrfs_inode *binode = BTRFS_I(inode);
139
	unsigned int new_fl = 0;
140

141
	if (binode->flags & BTRFS_INODE_SYNC)
142
		new_fl |= S_SYNC;
143
	if (binode->flags & BTRFS_INODE_IMMUTABLE)
144
		new_fl |= S_IMMUTABLE;
145
	if (binode->flags & BTRFS_INODE_APPEND)
146
		new_fl |= S_APPEND;
147
	if (binode->flags & BTRFS_INODE_NOATIME)
148
		new_fl |= S_NOATIME;
149
	if (binode->flags & BTRFS_INODE_DIRSYNC)
150 151 152 153 154
		new_fl |= S_DIRSYNC;

	set_mask_bits(&inode->i_flags,
		      S_SYNC | S_APPEND | S_IMMUTABLE | S_NOATIME | S_DIRSYNC,
		      new_fl);
155 156 157 158
}

static int btrfs_ioctl_getflags(struct file *file, void __user *arg)
{
159 160
	struct btrfs_inode *binode = BTRFS_I(file_inode(file));
	unsigned int flags = btrfs_inode_flags_to_fsflags(binode->flags);
161 162 163 164 165 166

	if (copy_to_user(arg, &flags, sizeof(flags)))
		return -EFAULT;
	return 0;
}

167 168
/* Check if @flags are a supported and valid set of FS_*_FL flags */
static int check_fsflags(unsigned int flags)
169 170 171 172
{
	if (flags & ~(FS_IMMUTABLE_FL | FS_APPEND_FL | \
		      FS_NOATIME_FL | FS_NODUMP_FL | \
		      FS_SYNC_FL | FS_DIRSYNC_FL | \
L
Li Zefan 已提交
173 174
		      FS_NOCOMP_FL | FS_COMPR_FL |
		      FS_NOCOW_FL))
175 176 177 178 179 180 181 182
		return -EOPNOTSUPP;

	if ((flags & FS_NOCOMP_FL) && (flags & FS_COMPR_FL))
		return -EINVAL;

	return 0;
}

183 184
static int btrfs_ioctl_setflags(struct file *file, void __user *arg)
{
A
Al Viro 已提交
185
	struct inode *inode = file_inode(file);
186
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
187 188
	struct btrfs_inode *binode = BTRFS_I(inode);
	struct btrfs_root *root = binode->root;
189
	struct btrfs_trans_handle *trans;
190
	unsigned int fsflags, old_fsflags;
191
	int ret;
192 193
	u64 old_flags;
	unsigned int old_i_flags;
194
	umode_t mode;
195

196 197 198
	if (!inode_owner_or_capable(inode))
		return -EPERM;

L
Li Zefan 已提交
199 200 201
	if (btrfs_root_readonly(root))
		return -EROFS;

202
	if (copy_from_user(&fsflags, arg, sizeof(fsflags)))
203 204
		return -EFAULT;

205
	ret = check_fsflags(fsflags);
206 207
	if (ret)
		return ret;
C
Christoph Hellwig 已提交
208

209 210 211 212
	ret = mnt_want_write_file(file);
	if (ret)
		return ret;

A
Al Viro 已提交
213
	inode_lock(inode);
214

215 216
	old_flags = binode->flags;
	old_i_flags = inode->i_flags;
217
	mode = inode->i_mode;
218

219 220 221
	fsflags = btrfs_mask_fsflags_for_type(inode, fsflags);
	old_fsflags = btrfs_inode_flags_to_fsflags(binode->flags);
	if ((fsflags ^ old_fsflags) & (FS_APPEND_FL | FS_IMMUTABLE_FL)) {
222 223 224 225 226 227
		if (!capable(CAP_LINUX_IMMUTABLE)) {
			ret = -EPERM;
			goto out_unlock;
		}
	}

228 229
	if (fsflags & FS_SYNC_FL)
		binode->flags |= BTRFS_INODE_SYNC;
230
	else
231 232 233
		binode->flags &= ~BTRFS_INODE_SYNC;
	if (fsflags & FS_IMMUTABLE_FL)
		binode->flags |= BTRFS_INODE_IMMUTABLE;
234
	else
235 236 237
		binode->flags &= ~BTRFS_INODE_IMMUTABLE;
	if (fsflags & FS_APPEND_FL)
		binode->flags |= BTRFS_INODE_APPEND;
238
	else
239 240 241
		binode->flags &= ~BTRFS_INODE_APPEND;
	if (fsflags & FS_NODUMP_FL)
		binode->flags |= BTRFS_INODE_NODUMP;
242
	else
243 244 245
		binode->flags &= ~BTRFS_INODE_NODUMP;
	if (fsflags & FS_NOATIME_FL)
		binode->flags |= BTRFS_INODE_NOATIME;
246
	else
247 248 249
		binode->flags &= ~BTRFS_INODE_NOATIME;
	if (fsflags & FS_DIRSYNC_FL)
		binode->flags |= BTRFS_INODE_DIRSYNC;
250
	else
251 252
		binode->flags &= ~BTRFS_INODE_DIRSYNC;
	if (fsflags & FS_NOCOW_FL) {
253 254 255 256 257 258 259
		if (S_ISREG(mode)) {
			/*
			 * It's safe to turn csums off here, no extents exist.
			 * Otherwise we want the flag to reflect the real COW
			 * status of the file and will not set it.
			 */
			if (inode->i_size == 0)
260 261
				binode->flags |= BTRFS_INODE_NODATACOW
					      | BTRFS_INODE_NODATASUM;
262
		} else {
263
			binode->flags |= BTRFS_INODE_NODATACOW;
264 265 266
		}
	} else {
		/*
267
		 * Revert back under same assumptions as above
268 269 270
		 */
		if (S_ISREG(mode)) {
			if (inode->i_size == 0)
271
				binode->flags &= ~(BTRFS_INODE_NODATACOW
272 273
				             | BTRFS_INODE_NODATASUM);
		} else {
274
			binode->flags &= ~BTRFS_INODE_NODATACOW;
275 276
		}
	}
277

278 279 280 281 282
	/*
	 * The COMPRESS flag can only be changed by users, while the NOCOMPRESS
	 * flag may be changed automatically if compression code won't make
	 * things smaller.
	 */
283 284 285
	if (fsflags & FS_NOCOMP_FL) {
		binode->flags &= ~BTRFS_INODE_COMPRESS;
		binode->flags |= BTRFS_INODE_NOCOMPRESS;
286 287 288 289

		ret = btrfs_set_prop(inode, "btrfs.compression", NULL, 0, 0);
		if (ret && ret != -ENODATA)
			goto out_drop;
290
	} else if (fsflags & FS_COMPR_FL) {
291 292
		const char *comp;

293 294
		binode->flags |= BTRFS_INODE_COMPRESS;
		binode->flags &= ~BTRFS_INODE_NOCOMPRESS;
295

296 297 298 299
		comp = btrfs_compress_type2str(fs_info->compress_type);
		if (!comp || comp[0] == 0)
			comp = btrfs_compress_type2str(BTRFS_COMPRESS_ZLIB);

300 301 302 303 304
		ret = btrfs_set_prop(inode, "btrfs.compression",
				     comp, strlen(comp), 0);
		if (ret)
			goto out_drop;

L
Li Zefan 已提交
305
	} else {
306 307 308
		ret = btrfs_set_prop(inode, "btrfs.compression", NULL, 0, 0);
		if (ret && ret != -ENODATA)
			goto out_drop;
309
		binode->flags &= ~(BTRFS_INODE_COMPRESS | BTRFS_INODE_NOCOMPRESS);
310
	}
311

312
	trans = btrfs_start_transaction(root, 1);
313 314 315 316
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto out_drop;
	}
317

318
	btrfs_sync_inode_flags_to_i_flags(inode);
319
	inode_inc_iversion(inode);
320
	inode->i_ctime = current_time(inode);
321 322
	ret = btrfs_update_inode(trans, root, inode);

323
	btrfs_end_transaction(trans);
324 325
 out_drop:
	if (ret) {
326 327
		binode->flags = old_flags;
		inode->i_flags = old_i_flags;
328
	}
329 330

 out_unlock:
A
Al Viro 已提交
331
	inode_unlock(inode);
332
	mnt_drop_write_file(file);
333
	return ret;
334 335
}

336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367
/*
 * Translate btrfs internal inode flags to xflags as expected by the
 * FS_IOC_FSGETXATT ioctl. Filter only the supported ones, unknown flags are
 * silently dropped.
 */
static unsigned int btrfs_inode_flags_to_xflags(unsigned int flags)
{
	unsigned int xflags = 0;

	if (flags & BTRFS_INODE_APPEND)
		xflags |= FS_XFLAG_APPEND;
	if (flags & BTRFS_INODE_IMMUTABLE)
		xflags |= FS_XFLAG_IMMUTABLE;
	if (flags & BTRFS_INODE_NOATIME)
		xflags |= FS_XFLAG_NOATIME;
	if (flags & BTRFS_INODE_NODUMP)
		xflags |= FS_XFLAG_NODUMP;
	if (flags & BTRFS_INODE_SYNC)
		xflags |= FS_XFLAG_SYNC;

	return xflags;
}

/* Check if @flags are a supported and valid set of FS_XFLAGS_* flags */
static int check_xflags(unsigned int flags)
{
	if (flags & ~(FS_XFLAG_APPEND | FS_XFLAG_IMMUTABLE | FS_XFLAG_NOATIME |
		      FS_XFLAG_NODUMP | FS_XFLAG_SYNC))
		return -EOPNOTSUPP;
	return 0;
}

368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385
/*
 * Set the xflags from the internal inode flags. The remaining items of fsxattr
 * are zeroed.
 */
static int btrfs_ioctl_fsgetxattr(struct file *file, void __user *arg)
{
	struct btrfs_inode *binode = BTRFS_I(file_inode(file));
	struct fsxattr fa;

	memset(&fa, 0, sizeof(fa));
	fa.fsx_xflags = btrfs_inode_flags_to_xflags(binode->flags);

	if (copy_to_user(arg, &fa, sizeof(fa)))
		return -EFAULT;

	return 0;
}

386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426 427 428 429 430 431 432 433 434 435 436 437 438 439 440 441 442 443 444 445 446 447 448 449 450 451 452 453 454 455 456 457 458 459 460 461 462 463 464 465 466 467 468 469 470 471 472 473 474 475 476 477
static int btrfs_ioctl_fssetxattr(struct file *file, void __user *arg)
{
	struct inode *inode = file_inode(file);
	struct btrfs_inode *binode = BTRFS_I(inode);
	struct btrfs_root *root = binode->root;
	struct btrfs_trans_handle *trans;
	struct fsxattr fa;
	unsigned old_flags;
	unsigned old_i_flags;
	int ret = 0;

	if (!inode_owner_or_capable(inode))
		return -EPERM;

	if (btrfs_root_readonly(root))
		return -EROFS;

	memset(&fa, 0, sizeof(fa));
	if (copy_from_user(&fa, arg, sizeof(fa)))
		return -EFAULT;

	ret = check_xflags(fa.fsx_xflags);
	if (ret)
		return ret;

	if (fa.fsx_extsize != 0 || fa.fsx_projid != 0 || fa.fsx_cowextsize != 0)
		return -EOPNOTSUPP;

	ret = mnt_want_write_file(file);
	if (ret)
		return ret;

	inode_lock(inode);

	old_flags = binode->flags;
	old_i_flags = inode->i_flags;

	/* We need the capabilities to change append-only or immutable inode */
	if (((old_flags & (BTRFS_INODE_APPEND | BTRFS_INODE_IMMUTABLE)) ||
	     (fa.fsx_xflags & (FS_XFLAG_APPEND | FS_XFLAG_IMMUTABLE))) &&
	    !capable(CAP_LINUX_IMMUTABLE)) {
		ret = -EPERM;
		goto out_unlock;
	}

	if (fa.fsx_xflags & FS_XFLAG_SYNC)
		binode->flags |= BTRFS_INODE_SYNC;
	else
		binode->flags &= ~BTRFS_INODE_SYNC;
	if (fa.fsx_xflags & FS_XFLAG_IMMUTABLE)
		binode->flags |= BTRFS_INODE_IMMUTABLE;
	else
		binode->flags &= ~BTRFS_INODE_IMMUTABLE;
	if (fa.fsx_xflags & FS_XFLAG_APPEND)
		binode->flags |= BTRFS_INODE_APPEND;
	else
		binode->flags &= ~BTRFS_INODE_APPEND;
	if (fa.fsx_xflags & FS_XFLAG_NODUMP)
		binode->flags |= BTRFS_INODE_NODUMP;
	else
		binode->flags &= ~BTRFS_INODE_NODUMP;
	if (fa.fsx_xflags & FS_XFLAG_NOATIME)
		binode->flags |= BTRFS_INODE_NOATIME;
	else
		binode->flags &= ~BTRFS_INODE_NOATIME;

	/* 1 item for the inode */
	trans = btrfs_start_transaction(root, 1);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto out_unlock;
	}

	btrfs_sync_inode_flags_to_i_flags(inode);
	inode_inc_iversion(inode);
	inode->i_ctime = current_time(inode);
	ret = btrfs_update_inode(trans, root, inode);

	btrfs_end_transaction(trans);

out_unlock:
	if (ret) {
		binode->flags = old_flags;
		inode->i_flags = old_i_flags;
	}

	inode_unlock(inode);
	mnt_drop_write_file(file);

	return ret;
}

478 479
static int btrfs_ioctl_getversion(struct file *file, int __user *arg)
{
A
Al Viro 已提交
480
	struct inode *inode = file_inode(file);
481 482 483

	return put_user(inode->i_generation, arg);
}
C
Christoph Hellwig 已提交
484

485 486
static noinline int btrfs_ioctl_fitrim(struct file *file, void __user *arg)
{
487 488
	struct inode *inode = file_inode(file);
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
489 490 491 492 493 494 495 496 497 498
	struct btrfs_device *device;
	struct request_queue *q;
	struct fstrim_range range;
	u64 minlen = ULLONG_MAX;
	u64 num_devices = 0;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

499 500 501
	rcu_read_lock();
	list_for_each_entry_rcu(device, &fs_info->fs_devices->devices,
				dev_list) {
502 503 504 505 506
		if (!device->bdev)
			continue;
		q = bdev_get_queue(device->bdev);
		if (blk_queue_discard(q)) {
			num_devices++;
507
			minlen = min_t(u64, q->limits.discard_granularity,
508 509 510
				     minlen);
		}
	}
511
	rcu_read_unlock();
512

513 514 515 516
	if (!num_devices)
		return -EOPNOTSUPP;
	if (copy_from_user(&range, arg, sizeof(range)))
		return -EFAULT;
517 518 519 520 521 522 523

	/*
	 * NOTE: Don't truncate the range using super->total_bytes.  Bytenr of
	 * block group is in the logical address space, which can be any
	 * sectorsize aligned bytenr in  the range [0, U64_MAX].
	 */
	if (range.len < fs_info->sb->s_blocksize)
524
		return -EINVAL;
525 526

	range.minlen = max(range.minlen, minlen);
527
	ret = btrfs_trim_fs(fs_info, &range);
528 529 530 531 532 533 534 535 536
	if (ret < 0)
		return ret;

	if (copy_to_user(arg, &range, sizeof(range)))
		return -EFAULT;

	return 0;
}

537 538
int btrfs_is_empty_uuid(u8 *uuid)
{
C
Chris Mason 已提交
539 540 541 542 543 544 545
	int i;

	for (i = 0; i < BTRFS_UUID_SIZE; i++) {
		if (uuid[i])
			return 0;
	}
	return 1;
546 547
}

548
static noinline int create_subvol(struct inode *dir,
549
				  struct dentry *dentry,
550
				  const char *name, int namelen,
A
Arne Jansen 已提交
551
				  u64 *async_transid,
552
				  struct btrfs_qgroup_inherit *inherit)
C
Christoph Hellwig 已提交
553
{
554
	struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb);
C
Christoph Hellwig 已提交
555 556
	struct btrfs_trans_handle *trans;
	struct btrfs_key key;
557
	struct btrfs_root_item *root_item;
C
Christoph Hellwig 已提交
558 559
	struct btrfs_inode_item *inode_item;
	struct extent_buffer *leaf;
560
	struct btrfs_root *root = BTRFS_I(dir)->root;
561
	struct btrfs_root *new_root;
562
	struct btrfs_block_rsv block_rsv;
563
	struct timespec64 cur_time = current_time(dir);
564
	struct inode *inode;
C
Christoph Hellwig 已提交
565 566 567 568
	int ret;
	int err;
	u64 objectid;
	u64 new_dirid = BTRFS_FIRST_FREE_OBJECTID;
569
	u64 index = 0;
570
	uuid_le new_uuid;
C
Christoph Hellwig 已提交
571

572 573 574 575
	root_item = kzalloc(sizeof(*root_item), GFP_KERNEL);
	if (!root_item)
		return -ENOMEM;

576
	ret = btrfs_find_free_objectid(fs_info->tree_root, &objectid);
577
	if (ret)
578
		goto fail_free;
579

580 581
	/*
	 * Don't create subvolume whose level is not zero. Or qgroup will be
582
	 * screwed up since it assumes subvolume qgroup's level to be 0.
583
	 */
584 585 586 587
	if (btrfs_qgroup_level(objectid)) {
		ret = -ENOSPC;
		goto fail_free;
	}
588

589
	btrfs_init_block_rsv(&block_rsv, BTRFS_BLOCK_RSV_TEMP);
J
Josef Bacik 已提交
590
	/*
591 592
	 * The same as the snapshot creation, please see the comment
	 * of create_snapshot().
J
Josef Bacik 已提交
593
	 */
594
	ret = btrfs_subvolume_reserve_metadata(root, &block_rsv, 8, false);
595
	if (ret)
596
		goto fail_free;
597 598 599 600

	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
601
		btrfs_subvolume_release_metadata(fs_info, &block_rsv);
602
		goto fail_free;
603 604 605
	}
	trans->block_rsv = &block_rsv;
	trans->bytes_reserved = block_rsv.size;
C
Christoph Hellwig 已提交
606

607
	ret = btrfs_qgroup_inherit(trans, 0, objectid, inherit);
A
Arne Jansen 已提交
608 609 610
	if (ret)
		goto fail;

611
	leaf = btrfs_alloc_tree_block(trans, root, 0, objectid, NULL, 0, 0, 0);
612 613 614 615
	if (IS_ERR(leaf)) {
		ret = PTR_ERR(leaf);
		goto fail;
	}
C
Christoph Hellwig 已提交
616 617 618

	btrfs_mark_buffer_dirty(leaf);

619
	inode_item = &root_item->inode;
620 621 622
	btrfs_set_stack_inode_generation(inode_item, 1);
	btrfs_set_stack_inode_size(inode_item, 3);
	btrfs_set_stack_inode_nlink(inode_item, 1);
623
	btrfs_set_stack_inode_nbytes(inode_item,
624
				     fs_info->nodesize);
625
	btrfs_set_stack_inode_mode(inode_item, S_IFDIR | 0755);
C
Christoph Hellwig 已提交
626

627 628
	btrfs_set_root_flags(root_item, 0);
	btrfs_set_root_limit(root_item, 0);
629
	btrfs_set_stack_inode_flags(inode_item, BTRFS_INODE_ROOT_ITEM_INIT);
630

631 632 633 634 635 636
	btrfs_set_root_bytenr(root_item, leaf->start);
	btrfs_set_root_generation(root_item, trans->transid);
	btrfs_set_root_level(root_item, 0);
	btrfs_set_root_refs(root_item, 1);
	btrfs_set_root_used(root_item, leaf->len);
	btrfs_set_root_last_snapshot(root_item, 0);
C
Christoph Hellwig 已提交
637

638 639
	btrfs_set_root_generation_v2(root_item,
			btrfs_root_generation(root_item));
640
	uuid_le_gen(&new_uuid);
641 642 643 644 645 646
	memcpy(root_item->uuid, new_uuid.b, BTRFS_UUID_SIZE);
	btrfs_set_stack_timespec_sec(&root_item->otime, cur_time.tv_sec);
	btrfs_set_stack_timespec_nsec(&root_item->otime, cur_time.tv_nsec);
	root_item->ctime = root_item->otime;
	btrfs_set_root_ctransid(root_item, trans->transid);
	btrfs_set_root_otransid(root_item, trans->transid);
C
Christoph Hellwig 已提交
647

648
	btrfs_tree_unlock(leaf);
C
Christoph Hellwig 已提交
649 650 651
	free_extent_buffer(leaf);
	leaf = NULL;

652
	btrfs_set_root_dirid(root_item, new_dirid);
C
Christoph Hellwig 已提交
653 654

	key.objectid = objectid;
655
	key.offset = 0;
656
	key.type = BTRFS_ROOT_ITEM_KEY;
657
	ret = btrfs_insert_root(trans, fs_info->tree_root, &key,
658
				root_item);
C
Christoph Hellwig 已提交
659 660 661
	if (ret)
		goto fail;

662
	key.offset = (u64)-1;
663
	new_root = btrfs_read_fs_root_no_name(fs_info, &key);
664 665
	if (IS_ERR(new_root)) {
		ret = PTR_ERR(new_root);
666
		btrfs_abort_transaction(trans, ret);
667 668
		goto fail;
	}
669 670 671

	btrfs_record_root_in_trans(trans, new_root);

672
	ret = btrfs_create_subvol_root(trans, new_root, root, new_dirid);
673 674
	if (ret) {
		/* We potentially lose an unused inode item here */
675
		btrfs_abort_transaction(trans, ret);
676 677 678
		goto fail;
	}

679 680 681 682
	mutex_lock(&new_root->objectid_mutex);
	new_root->highest_objectid = new_dirid;
	mutex_unlock(&new_root->objectid_mutex);

C
Christoph Hellwig 已提交
683 684 685
	/*
	 * insert the directory item
	 */
686
	ret = btrfs_set_inode_index(BTRFS_I(dir), &index);
687
	if (ret) {
688
		btrfs_abort_transaction(trans, ret);
689 690
		goto fail;
	}
691

692
	ret = btrfs_insert_dir_item(trans, name, namelen, BTRFS_I(dir), &key,
693
				    BTRFS_FT_DIR, index);
694
	if (ret) {
695
		btrfs_abort_transaction(trans, ret);
C
Christoph Hellwig 已提交
696
		goto fail;
697
	}
698

699
	btrfs_i_size_write(BTRFS_I(dir), dir->i_size + namelen * 2);
700 701 702
	ret = btrfs_update_inode(trans, root, dir);
	BUG_ON(ret);

703
	ret = btrfs_add_root_ref(trans, objectid, root->root_key.objectid,
704
				 btrfs_ino(BTRFS_I(dir)), index, name, namelen);
705
	BUG_ON(ret);
C
Christoph Hellwig 已提交
706

707
	ret = btrfs_uuid_tree_add(trans, root_item->uuid,
708
				  BTRFS_UUID_KEY_SUBVOL, objectid);
709
	if (ret)
710
		btrfs_abort_transaction(trans, ret);
711

C
Christoph Hellwig 已提交
712
fail:
713
	kfree(root_item);
714 715
	trans->block_rsv = NULL;
	trans->bytes_reserved = 0;
716
	btrfs_subvolume_release_metadata(fs_info, &block_rsv);
717

S
Sage Weil 已提交
718 719
	if (async_transid) {
		*async_transid = trans->transid;
720
		err = btrfs_commit_transaction_async(trans, 1);
721
		if (err)
722
			err = btrfs_commit_transaction(trans);
S
Sage Weil 已提交
723
	} else {
724
		err = btrfs_commit_transaction(trans);
S
Sage Weil 已提交
725
	}
C
Christoph Hellwig 已提交
726 727
	if (err && !ret)
		ret = err;
728

729 730
	if (!ret) {
		inode = btrfs_lookup_dentry(dir, dentry);
731 732
		if (IS_ERR(inode))
			return PTR_ERR(inode);
733 734
		d_instantiate(dentry, inode);
	}
C
Christoph Hellwig 已提交
735
	return ret;
736 737 738 739

fail_free:
	kfree(root_item);
	return ret;
C
Christoph Hellwig 已提交
740 741
}

742
static int create_snapshot(struct btrfs_root *root, struct inode *dir,
743
			   struct dentry *dentry,
744 745
			   u64 *async_transid, bool readonly,
			   struct btrfs_qgroup_inherit *inherit)
C
Christoph Hellwig 已提交
746
{
747
	struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb);
748
	struct inode *inode;
C
Christoph Hellwig 已提交
749 750
	struct btrfs_pending_snapshot *pending_snapshot;
	struct btrfs_trans_handle *trans;
751
	int ret;
752
	bool snapshot_force_cow = false;
C
Christoph Hellwig 已提交
753

754
	if (!test_bit(BTRFS_ROOT_REF_COWS, &root->state))
C
Christoph Hellwig 已提交
755 756
		return -EINVAL;

757
	pending_snapshot = kzalloc(sizeof(*pending_snapshot), GFP_KERNEL);
758 759 760
	if (!pending_snapshot)
		return -ENOMEM;

761
	pending_snapshot->root_item = kzalloc(sizeof(struct btrfs_root_item),
762
			GFP_KERNEL);
763 764
	pending_snapshot->path = btrfs_alloc_path();
	if (!pending_snapshot->root_item || !pending_snapshot->path) {
765 766 767 768
		ret = -ENOMEM;
		goto free_pending;
	}

769 770 771 772 773
	/*
	 * Force new buffered writes to reserve space even when NOCOW is
	 * possible. This is to avoid later writeback (running dealloc) to
	 * fallback to COW mode and unexpectedly fail with ENOSPC.
	 */
774
	atomic_inc(&root->will_be_snapshotted);
775
	smp_mb__after_atomic();
776 777 778
	/* wait for no snapshot writes */
	wait_event(root->subv_writers->wait,
		   percpu_counter_sum(&root->subv_writers->counter) == 0);
779

780
	ret = btrfs_start_delalloc_inodes(root);
781
	if (ret)
782
		goto dec_and_free;
783

784 785 786 787 788 789 790 791
	/*
	 * All previous writes have started writeback in NOCOW mode, so now
	 * we force future writes to fallback to COW mode during snapshot
	 * creation.
	 */
	atomic_inc(&root->snapshot_force_cow);
	snapshot_force_cow = true;

792
	btrfs_wait_ordered_extents(root, U64_MAX, 0, (u64)-1);
793

794 795
	btrfs_init_block_rsv(&pending_snapshot->block_rsv,
			     BTRFS_BLOCK_RSV_TEMP);
796 797 798 799 800 801
	/*
	 * 1 - parent dir inode
	 * 2 - dir entries
	 * 1 - root item
	 * 2 - root ref/backref
	 * 1 - root of snapshot
802
	 * 1 - UUID item
803 804
	 */
	ret = btrfs_subvolume_reserve_metadata(BTRFS_I(dir)->root,
805
					&pending_snapshot->block_rsv, 8,
806
					false);
807
	if (ret)
808
		goto dec_and_free;
809

810
	pending_snapshot->dentry = dentry;
C
Christoph Hellwig 已提交
811
	pending_snapshot->root = root;
L
Li Zefan 已提交
812
	pending_snapshot->readonly = readonly;
813
	pending_snapshot->dir = dir;
814
	pending_snapshot->inherit = inherit;
815

816
	trans = btrfs_start_transaction(root, 0);
817 818 819 820 821
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto fail;
	}

822
	spin_lock(&fs_info->trans_lock);
C
Christoph Hellwig 已提交
823 824
	list_add(&pending_snapshot->list,
		 &trans->transaction->pending_snapshots);
825
	spin_unlock(&fs_info->trans_lock);
S
Sage Weil 已提交
826 827
	if (async_transid) {
		*async_transid = trans->transid;
828
		ret = btrfs_commit_transaction_async(trans, 1);
829
		if (ret)
830
			ret = btrfs_commit_transaction(trans);
S
Sage Weil 已提交
831
	} else {
832
		ret = btrfs_commit_transaction(trans);
S
Sage Weil 已提交
833
	}
834
	if (ret)
835
		goto fail;
836 837 838 839 840

	ret = pending_snapshot->error;
	if (ret)
		goto fail;

841 842 843 844
	ret = btrfs_orphan_cleanup(pending_snapshot->snap);
	if (ret)
		goto fail;

845
	inode = btrfs_lookup_dentry(d_inode(dentry->d_parent), dentry);
846 847 848 849
	if (IS_ERR(inode)) {
		ret = PTR_ERR(inode);
		goto fail;
	}
850

851 852 853
	d_instantiate(dentry, inode);
	ret = 0;
fail:
854
	btrfs_subvolume_release_metadata(fs_info, &pending_snapshot->block_rsv);
855
dec_and_free:
856 857
	if (snapshot_force_cow)
		atomic_dec(&root->snapshot_force_cow);
858
	if (atomic_dec_and_test(&root->will_be_snapshotted))
859
		wake_up_var(&root->will_be_snapshotted);
860 861
free_pending:
	kfree(pending_snapshot->root_item);
862
	btrfs_free_path(pending_snapshot->path);
863 864
	kfree(pending_snapshot);

C
Christoph Hellwig 已提交
865 866 867
	return ret;
}

868 869 870 871 872 873 874 875 876 877 878
/*  copy of may_delete in fs/namei.c()
 *	Check whether we can remove a link victim from directory dir, check
 *  whether the type of victim is right.
 *  1. We can't do it if dir is read-only (done in permission())
 *  2. We should have write and exec permissions on dir
 *  3. We can't remove anything from append-only dir
 *  4. We can't do anything with immutable dir (done in permission())
 *  5. If the sticky bit on dir is set we should either
 *	a. be owner of dir, or
 *	b. be owner of victim, or
 *	c. have CAP_FOWNER capability
879
 *  6. If the victim is append-only or immutable we can't do anything with
880 881 882 883 884 885 886 887
 *     links pointing to it.
 *  7. If we were asked to remove a directory and victim isn't one - ENOTDIR.
 *  8. If we were asked to remove a non-directory and victim isn't one - EISDIR.
 *  9. We can't remove a root or mountpoint.
 * 10. We don't allow removal of NFS sillyrenamed files; it's handled by
 *     nfs_async_unlink().
 */

888
static int btrfs_may_delete(struct inode *dir, struct dentry *victim, int isdir)
889 890 891
{
	int error;

892
	if (d_really_is_negative(victim))
893 894
		return -ENOENT;

895
	BUG_ON(d_inode(victim->d_parent) != dir);
896
	audit_inode_child(dir, victim, AUDIT_TYPE_CHILD_DELETE);
897 898 899 900 901 902

	error = inode_permission(dir, MAY_WRITE | MAY_EXEC);
	if (error)
		return error;
	if (IS_APPEND(dir))
		return -EPERM;
903 904
	if (check_sticky(dir, d_inode(victim)) || IS_APPEND(d_inode(victim)) ||
	    IS_IMMUTABLE(d_inode(victim)) || IS_SWAPFILE(d_inode(victim)))
905 906
		return -EPERM;
	if (isdir) {
907
		if (!d_is_dir(victim))
908 909 910
			return -ENOTDIR;
		if (IS_ROOT(victim))
			return -EBUSY;
911
	} else if (d_is_dir(victim))
912 913 914 915 916 917 918 919
		return -EISDIR;
	if (IS_DEADDIR(dir))
		return -ENOENT;
	if (victim->d_flags & DCACHE_NFSFS_RENAMED)
		return -EBUSY;
	return 0;
}

920 921 922
/* copy of may_create in fs/namei.c() */
static inline int btrfs_may_create(struct inode *dir, struct dentry *child)
{
923
	if (d_really_is_positive(child))
924 925 926 927 928 929 930 931 932 933 934
		return -EEXIST;
	if (IS_DEADDIR(dir))
		return -ENOENT;
	return inode_permission(dir, MAY_WRITE | MAY_EXEC);
}

/*
 * Create a new subvolume below @parent.  This is largely modeled after
 * sys_mkdirat and vfs_mkdir, but we only do a single component lookup
 * inside this filesystem so it's quite a bit simpler.
 */
A
Al Viro 已提交
935
static noinline int btrfs_mksubvol(const struct path *parent,
936
				   const char *name, int namelen,
S
Sage Weil 已提交
937
				   struct btrfs_root *snap_src,
A
Arne Jansen 已提交
938
				   u64 *async_transid, bool readonly,
939
				   struct btrfs_qgroup_inherit *inherit)
940
{
941 942
	struct inode *dir = d_inode(parent->dentry);
	struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb);
943 944 945
	struct dentry *dentry;
	int error;

946 947 948
	error = down_write_killable_nested(&dir->i_rwsem, I_MUTEX_PARENT);
	if (error == -EINTR)
		return error;
949 950 951 952 953 954

	dentry = lookup_one_len(name, parent->dentry, namelen);
	error = PTR_ERR(dentry);
	if (IS_ERR(dentry))
		goto out_unlock;

955
	error = btrfs_may_create(dir, dentry);
956
	if (error)
957
		goto out_dput;
958

C
Chris Mason 已提交
959 960 961 962 963 964 965 966 967 968
	/*
	 * even if this name doesn't exist, we may get hash collisions.
	 * check for them now when we can safely fail
	 */
	error = btrfs_check_dir_item_collision(BTRFS_I(dir)->root,
					       dir->i_ino, name,
					       namelen);
	if (error)
		goto out_dput;

969
	down_read(&fs_info->subvol_sem);
970 971 972 973

	if (btrfs_root_refs(&BTRFS_I(dir)->root->root_item) == 0)
		goto out_up_read;

974
	if (snap_src) {
975
		error = create_snapshot(snap_src, dir, dentry,
A
Arne Jansen 已提交
976
					async_transid, readonly, inherit);
977
	} else {
978 979
		error = create_subvol(dir, dentry, name, namelen,
				      async_transid, inherit);
980
	}
981 982 983
	if (!error)
		fsnotify_mkdir(dir, dentry);
out_up_read:
984
	up_read(&fs_info->subvol_sem);
985 986 987
out_dput:
	dput(dentry);
out_unlock:
A
Al Viro 已提交
988
	inode_unlock(dir);
989 990 991
	return error;
}

C
Chris Mason 已提交
992 993 994 995 996 997 998
/*
 * When we're defragging a range, we don't want to kick it off again
 * if it is really just waiting for delalloc to send it down.
 * If we find a nice big extent or delalloc range for the bytes in the
 * file you want to defrag, we return 0 to let you know to skip this
 * part of the file
 */
999
static int check_defrag_in_cache(struct inode *inode, u64 offset, u32 thresh)
C
Chris Mason 已提交
1000 1001 1002 1003 1004 1005 1006
{
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	struct extent_map *em = NULL;
	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
	u64 end;

	read_lock(&em_tree->lock);
1007
	em = lookup_extent_mapping(em_tree, offset, PAGE_SIZE);
C
Chris Mason 已提交
1008 1009 1010 1011 1012 1013 1014 1015 1016 1017 1018 1019 1020 1021 1022 1023 1024 1025 1026 1027 1028 1029 1030 1031 1032 1033
	read_unlock(&em_tree->lock);

	if (em) {
		end = extent_map_end(em);
		free_extent_map(em);
		if (end - offset > thresh)
			return 0;
	}
	/* if we already have a nice delalloc here, just stop */
	thresh /= 2;
	end = count_range_bits(io_tree, &offset, offset + thresh,
			       thresh, EXTENT_DELALLOC, 1);
	if (end >= thresh)
		return 0;
	return 1;
}

/*
 * helper function to walk through a file and find extents
 * newer than a specific transid, and smaller than thresh.
 *
 * This is used by the defragging code to find new and small
 * extents
 */
static int find_new_extents(struct btrfs_root *root,
			    struct inode *inode, u64 newer_than,
1034
			    u64 *off, u32 thresh)
C
Chris Mason 已提交
1035 1036 1037 1038 1039 1040 1041
{
	struct btrfs_path *path;
	struct btrfs_key min_key;
	struct extent_buffer *leaf;
	struct btrfs_file_extent_item *extent;
	int type;
	int ret;
1042
	u64 ino = btrfs_ino(BTRFS_I(inode));
C
Chris Mason 已提交
1043 1044 1045 1046 1047

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

1048
	min_key.objectid = ino;
C
Chris Mason 已提交
1049 1050 1051
	min_key.type = BTRFS_EXTENT_DATA_KEY;
	min_key.offset = *off;

1052
	while (1) {
1053
		ret = btrfs_search_forward(root, &min_key, path, newer_than);
C
Chris Mason 已提交
1054 1055
		if (ret != 0)
			goto none;
1056
process_slot:
1057
		if (min_key.objectid != ino)
C
Chris Mason 已提交
1058 1059 1060 1061 1062 1063 1064 1065 1066 1067 1068 1069 1070 1071 1072 1073 1074
			goto none;
		if (min_key.type != BTRFS_EXTENT_DATA_KEY)
			goto none;

		leaf = path->nodes[0];
		extent = btrfs_item_ptr(leaf, path->slots[0],
					struct btrfs_file_extent_item);

		type = btrfs_file_extent_type(leaf, extent);
		if (type == BTRFS_FILE_EXTENT_REG &&
		    btrfs_file_extent_num_bytes(leaf, extent) < thresh &&
		    check_defrag_in_cache(inode, min_key.offset, thresh)) {
			*off = min_key.offset;
			btrfs_free_path(path);
			return 0;
		}

1075 1076 1077 1078 1079 1080
		path->slots[0]++;
		if (path->slots[0] < btrfs_header_nritems(leaf)) {
			btrfs_item_key_to_cpu(leaf, &min_key, path->slots[0]);
			goto process_slot;
		}

C
Chris Mason 已提交
1081 1082 1083 1084 1085 1086 1087 1088 1089 1090 1091
		if (min_key.offset == (u64)-1)
			goto none;

		min_key.offset++;
		btrfs_release_path(path);
	}
none:
	btrfs_free_path(path);
	return -ENOENT;
}

L
Li Zefan 已提交
1092
static struct extent_map *defrag_lookup_extent(struct inode *inode, u64 start)
1093 1094
{
	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
L
Li Zefan 已提交
1095 1096
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	struct extent_map *em;
1097
	u64 len = PAGE_SIZE;
1098

L
Li Zefan 已提交
1099 1100 1101 1102
	/*
	 * hopefully we have this extent in the tree already, try without
	 * the full extent lock
	 */
1103
	read_lock(&em_tree->lock);
L
Li Zefan 已提交
1104
	em = lookup_extent_mapping(em_tree, start, len);
1105 1106
	read_unlock(&em_tree->lock);

L
Li Zefan 已提交
1107
	if (!em) {
1108 1109 1110
		struct extent_state *cached = NULL;
		u64 end = start + len - 1;

L
Li Zefan 已提交
1111
		/* get the big lock and read metadata off disk */
1112
		lock_extent_bits(io_tree, start, end, &cached);
1113
		em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, start, len, 0);
1114
		unlock_extent_cached(io_tree, start, end, &cached);
L
Li Zefan 已提交
1115 1116 1117 1118 1119 1120 1121

		if (IS_ERR(em))
			return NULL;
	}

	return em;
}
1122

L
Li Zefan 已提交
1123 1124 1125 1126 1127 1128 1129 1130 1131 1132
static bool defrag_check_next_extent(struct inode *inode, struct extent_map *em)
{
	struct extent_map *next;
	bool ret = true;

	/* this is the last extent */
	if (em->start + em->len >= i_size_read(inode))
		return false;

	next = defrag_lookup_extent(inode, em->start + em->len);
1133 1134 1135
	if (!next || next->block_start >= EXTENT_MAP_LAST_BYTE)
		ret = false;
	else if ((em->block_start + em->block_len == next->block_start) &&
1136
		 (em->block_len > SZ_128K && next->block_len > SZ_128K))
L
Li Zefan 已提交
1137 1138 1139
		ret = false;

	free_extent_map(next);
1140 1141 1142
	return ret;
}

1143
static int should_defrag_range(struct inode *inode, u64 start, u32 thresh,
1144 1145
			       u64 *last_len, u64 *skip, u64 *defrag_end,
			       int compress)
1146
{
L
Li Zefan 已提交
1147
	struct extent_map *em;
1148
	int ret = 1;
L
Li Zefan 已提交
1149
	bool next_mergeable = true;
1150
	bool prev_mergeable = true;
1151 1152

	/*
1153
	 * make sure that once we start defragging an extent, we keep on
1154 1155 1156 1157 1158 1159 1160
	 * defragging it
	 */
	if (start < *defrag_end)
		return 1;

	*skip = 0;

L
Li Zefan 已提交
1161 1162 1163
	em = defrag_lookup_extent(inode, start);
	if (!em)
		return 0;
1164 1165

	/* this will cover holes, and inline extents */
1166
	if (em->block_start >= EXTENT_MAP_LAST_BYTE) {
1167
		ret = 0;
1168 1169 1170
		goto out;
	}

1171 1172 1173
	if (!*defrag_end)
		prev_mergeable = false;

L
Li Zefan 已提交
1174
	next_mergeable = defrag_check_next_extent(inode, em);
1175
	/*
L
Li Zefan 已提交
1176 1177
	 * we hit a real extent, if it is big or the next extent is not a
	 * real extent, don't bother defragging it
1178
	 */
1179
	if (!compress && (*last_len == 0 || *last_len >= thresh) &&
1180
	    (em->len >= thresh || (!next_mergeable && !prev_mergeable)))
1181
		ret = 0;
1182
out:
1183 1184 1185 1186 1187 1188 1189 1190 1191 1192 1193 1194 1195 1196 1197 1198 1199 1200 1201 1202
	/*
	 * last_len ends up being a counter of how many bytes we've defragged.
	 * every time we choose not to defrag an extent, we reset *last_len
	 * so that the next tiny extent will force a defrag.
	 *
	 * The end result of this is that tiny extents before a single big
	 * extent will force at least part of that big extent to be defragged.
	 */
	if (ret) {
		*defrag_end = extent_map_end(em);
	} else {
		*last_len = 0;
		*skip = extent_map_end(em);
		*defrag_end = 0;
	}

	free_extent_map(em);
	return ret;
}

C
Chris Mason 已提交
1203 1204 1205 1206 1207 1208 1209 1210 1211 1212 1213 1214 1215 1216 1217
/*
 * it doesn't do much good to defrag one or two pages
 * at a time.  This pulls in a nice chunk of pages
 * to COW and defrag.
 *
 * It also makes sure the delalloc code has enough
 * dirty data to avoid making new small extents as part
 * of the defrag
 *
 * It's a good idea to start RA on this range
 * before calling this.
 */
static int cluster_pages_for_defrag(struct inode *inode,
				    struct page **pages,
				    unsigned long start_index,
1218
				    unsigned long num_pages)
C
Christoph Hellwig 已提交
1219
{
C
Chris Mason 已提交
1220 1221 1222 1223
	unsigned long file_end;
	u64 isize = i_size_read(inode);
	u64 page_start;
	u64 page_end;
1224
	u64 page_cnt;
C
Chris Mason 已提交
1225 1226 1227
	int ret;
	int i;
	int i_done;
1228
	struct btrfs_ordered_extent *ordered;
C
Chris Mason 已提交
1229
	struct extent_state *cached_state = NULL;
1230
	struct extent_io_tree *tree;
1231
	struct extent_changeset *data_reserved = NULL;
1232
	gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping);
C
Chris Mason 已提交
1233

1234
	file_end = (isize - 1) >> PAGE_SHIFT;
1235 1236 1237 1238
	if (!isize || start_index > file_end)
		return 0;

	page_cnt = min_t(u64, (u64)num_pages, (u64)file_end - start_index + 1);
C
Chris Mason 已提交
1239

1240
	ret = btrfs_delalloc_reserve_space(inode, &data_reserved,
1241 1242
			start_index << PAGE_SHIFT,
			page_cnt << PAGE_SHIFT);
C
Chris Mason 已提交
1243 1244 1245
	if (ret)
		return ret;
	i_done = 0;
1246
	tree = &BTRFS_I(inode)->io_tree;
C
Chris Mason 已提交
1247 1248

	/* step one, lock all the pages */
1249
	for (i = 0; i < page_cnt; i++) {
C
Chris Mason 已提交
1250
		struct page *page;
1251
again:
1252
		page = find_or_create_page(inode->i_mapping,
1253
					   start_index + i, mask);
C
Chris Mason 已提交
1254 1255 1256
		if (!page)
			break;

1257
		page_start = page_offset(page);
1258
		page_end = page_start + PAGE_SIZE - 1;
1259
		while (1) {
1260
			lock_extent_bits(tree, page_start, page_end,
1261
					 &cached_state);
1262 1263
			ordered = btrfs_lookup_ordered_extent(inode,
							      page_start);
1264
			unlock_extent_cached(tree, page_start, page_end,
1265
					     &cached_state);
1266 1267 1268 1269 1270 1271 1272
			if (!ordered)
				break;

			unlock_page(page);
			btrfs_start_ordered_extent(inode, ordered, 1);
			btrfs_put_ordered_extent(ordered);
			lock_page(page);
1273 1274 1275 1276 1277 1278
			/*
			 * we unlocked the page above, so we need check if
			 * it was released or not.
			 */
			if (page->mapping != inode->i_mapping) {
				unlock_page(page);
1279
				put_page(page);
1280 1281
				goto again;
			}
1282 1283
		}

C
Chris Mason 已提交
1284 1285 1286 1287 1288
		if (!PageUptodate(page)) {
			btrfs_readpage(NULL, page);
			lock_page(page);
			if (!PageUptodate(page)) {
				unlock_page(page);
1289
				put_page(page);
C
Chris Mason 已提交
1290 1291 1292 1293
				ret = -EIO;
				break;
			}
		}
1294 1295 1296

		if (page->mapping != inode->i_mapping) {
			unlock_page(page);
1297
			put_page(page);
1298 1299 1300
			goto again;
		}

C
Chris Mason 已提交
1301 1302 1303 1304 1305 1306
		pages[i] = page;
		i_done++;
	}
	if (!i_done || ret)
		goto out;

1307
	if (!(inode->i_sb->s_flags & SB_ACTIVE))
C
Chris Mason 已提交
1308 1309 1310 1311 1312 1313 1314 1315 1316 1317
		goto out;

	/*
	 * so now we have a nice long stream of locked
	 * and up to date pages, lets wait on them
	 */
	for (i = 0; i < i_done; i++)
		wait_on_page_writeback(pages[i]);

	page_start = page_offset(pages[0]);
1318
	page_end = page_offset(pages[i_done - 1]) + PAGE_SIZE;
C
Chris Mason 已提交
1319 1320

	lock_extent_bits(&BTRFS_I(inode)->io_tree,
1321
			 page_start, page_end - 1, &cached_state);
C
Chris Mason 已提交
1322 1323
	clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start,
			  page_end - 1, EXTENT_DIRTY | EXTENT_DELALLOC |
1324
			  EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG, 0, 0,
1325
			  &cached_state);
C
Chris Mason 已提交
1326

1327
	if (i_done != page_cnt) {
1328
		spin_lock(&BTRFS_I(inode)->lock);
1329
		btrfs_mod_outstanding_extents(BTRFS_I(inode), 1);
1330
		spin_unlock(&BTRFS_I(inode)->lock);
1331
		btrfs_delalloc_release_space(inode, data_reserved,
1332
				start_index << PAGE_SHIFT,
1333
				(page_cnt - i_done) << PAGE_SHIFT, true);
C
Chris Mason 已提交
1334 1335 1336
	}


1337
	set_extent_defrag(&BTRFS_I(inode)->io_tree, page_start, page_end - 1,
1338
			  &cached_state);
C
Chris Mason 已提交
1339 1340

	unlock_extent_cached(&BTRFS_I(inode)->io_tree,
1341
			     page_start, page_end - 1, &cached_state);
C
Chris Mason 已提交
1342 1343 1344 1345 1346 1347 1348

	for (i = 0; i < i_done; i++) {
		clear_page_dirty_for_io(pages[i]);
		ClearPageChecked(pages[i]);
		set_page_extent_mapped(pages[i]);
		set_page_dirty(pages[i]);
		unlock_page(pages[i]);
1349
		put_page(pages[i]);
C
Chris Mason 已提交
1350
	}
1351 1352
	btrfs_delalloc_release_extents(BTRFS_I(inode), page_cnt << PAGE_SHIFT,
				       false);
1353
	extent_changeset_free(data_reserved);
C
Chris Mason 已提交
1354 1355 1356 1357
	return i_done;
out:
	for (i = 0; i < i_done; i++) {
		unlock_page(pages[i]);
1358
		put_page(pages[i]);
C
Chris Mason 已提交
1359
	}
1360
	btrfs_delalloc_release_space(inode, data_reserved,
1361
			start_index << PAGE_SHIFT,
1362 1363 1364
			page_cnt << PAGE_SHIFT, true);
	btrfs_delalloc_release_extents(BTRFS_I(inode), page_cnt << PAGE_SHIFT,
				       true);
1365
	extent_changeset_free(data_reserved);
C
Chris Mason 已提交
1366 1367 1368 1369 1370 1371 1372 1373
	return ret;

}

int btrfs_defrag_file(struct inode *inode, struct file *file,
		      struct btrfs_ioctl_defrag_range_args *range,
		      u64 newer_than, unsigned long max_to_defrag)
{
1374
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
C
Chris Mason 已提交
1375 1376
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct file_ra_state *ra = NULL;
C
Christoph Hellwig 已提交
1377
	unsigned long last_index;
1378
	u64 isize = i_size_read(inode);
1379 1380 1381
	u64 last_len = 0;
	u64 skip = 0;
	u64 defrag_end = 0;
C
Chris Mason 已提交
1382
	u64 newer_off = range->start;
C
Christoph Hellwig 已提交
1383
	unsigned long i;
1384
	unsigned long ra_index = 0;
C
Christoph Hellwig 已提交
1385
	int ret;
C
Chris Mason 已提交
1386
	int defrag_count = 0;
1387
	int compress_type = BTRFS_COMPRESS_ZLIB;
1388
	u32 extent_thresh = range->extent_thresh;
1389
	unsigned long max_cluster = SZ_256K >> PAGE_SHIFT;
1390
	unsigned long cluster = max_cluster;
1391
	u64 new_align = ~((u64)SZ_128K - 1);
C
Chris Mason 已提交
1392
	struct page **pages = NULL;
1393
	bool do_compress = range->flags & BTRFS_DEFRAG_RANGE_COMPRESS;
C
Chris Mason 已提交
1394

1395 1396 1397 1398 1399
	if (isize == 0)
		return 0;

	if (range->start >= isize)
		return -EINVAL;
1400

1401
	if (do_compress) {
1402 1403 1404 1405 1406
		if (range->compress_type > BTRFS_COMPRESS_TYPES)
			return -EINVAL;
		if (range->compress_type)
			compress_type = range->compress_type;
	}
C
Christoph Hellwig 已提交
1407

1408
	if (extent_thresh == 0)
1409
		extent_thresh = SZ_256K;
1410

C
Chris Mason 已提交
1411
	/*
1412 1413 1414
	 * If we were not given a file, allocate a readahead context. As
	 * readahead is just an optimization, defrag will work without it so
	 * we don't error out.
C
Chris Mason 已提交
1415 1416
	 */
	if (!file) {
1417
		ra = kzalloc(sizeof(*ra), GFP_KERNEL);
1418 1419
		if (ra)
			file_ra_state_init(ra, inode->i_mapping);
C
Chris Mason 已提交
1420 1421 1422 1423
	} else {
		ra = &file->f_ra;
	}

1424
	pages = kmalloc_array(max_cluster, sizeof(struct page *), GFP_KERNEL);
C
Chris Mason 已提交
1425 1426 1427 1428 1429 1430
	if (!pages) {
		ret = -ENOMEM;
		goto out_ra;
	}

	/* find the last page to defrag */
C
Chris Mason 已提交
1431
	if (range->start + range->len > range->start) {
1432
		last_index = min_t(u64, isize - 1,
1433
			 range->start + range->len - 1) >> PAGE_SHIFT;
C
Chris Mason 已提交
1434
	} else {
1435
		last_index = (isize - 1) >> PAGE_SHIFT;
C
Chris Mason 已提交
1436 1437
	}

C
Chris Mason 已提交
1438 1439
	if (newer_than) {
		ret = find_new_extents(root, inode, newer_than,
1440
				       &newer_off, SZ_64K);
C
Chris Mason 已提交
1441 1442 1443 1444 1445 1446
		if (!ret) {
			range->start = newer_off;
			/*
			 * we always align our defrag to help keep
			 * the extents in the file evenly spaced
			 */
1447
			i = (newer_off & new_align) >> PAGE_SHIFT;
C
Chris Mason 已提交
1448 1449 1450
		} else
			goto out_ra;
	} else {
1451
		i = range->start >> PAGE_SHIFT;
C
Chris Mason 已提交
1452 1453
	}
	if (!max_to_defrag)
1454
		max_to_defrag = last_index - i + 1;
C
Chris Mason 已提交
1455

L
Li Zefan 已提交
1456 1457 1458 1459 1460 1461 1462
	/*
	 * make writeback starts from i, so the defrag range can be
	 * written sequentially.
	 */
	if (i < inode->i_mapping->writeback_index)
		inode->i_mapping->writeback_index = i;

1463
	while (i <= last_index && defrag_count < max_to_defrag &&
1464
	       (i < DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE))) {
C
Chris Mason 已提交
1465 1466 1467 1468
		/*
		 * make sure we stop running if someone unmounts
		 * the FS
		 */
1469
		if (!(inode->i_sb->s_flags & SB_ACTIVE))
C
Chris Mason 已提交
1470 1471
			break;

1472 1473
		if (btrfs_defrag_cancelled(fs_info)) {
			btrfs_debug(fs_info, "defrag_file cancelled");
1474 1475 1476 1477
			ret = -EAGAIN;
			break;
		}

1478
		if (!should_defrag_range(inode, (u64)i << PAGE_SHIFT,
L
Li Zefan 已提交
1479
					 extent_thresh, &last_len, &skip,
1480
					 &defrag_end, do_compress)){
1481 1482 1483 1484 1485
			unsigned long next;
			/*
			 * the should_defrag function tells us how much to skip
			 * bump our counter by the suggested amount
			 */
1486
			next = DIV_ROUND_UP(skip, PAGE_SIZE);
1487 1488 1489
			i = max(i + 1, next);
			continue;
		}
1490 1491

		if (!newer_than) {
1492 1493
			cluster = (PAGE_ALIGN(defrag_end) >>
				   PAGE_SHIFT) - i;
1494 1495 1496 1497 1498 1499 1500
			cluster = min(cluster, max_cluster);
		} else {
			cluster = max_cluster;
		}

		if (i + cluster > ra_index) {
			ra_index = max(i, ra_index);
1501
			if (ra)
1502 1503
				page_cache_sync_readahead(inode->i_mapping, ra,
						file, ra_index, cluster);
1504
			ra_index += cluster;
1505
		}
1506

A
Al Viro 已提交
1507
		inode_lock(inode);
1508
		if (do_compress)
1509
			BTRFS_I(inode)->defrag_compress = compress_type;
1510
		ret = cluster_pages_for_defrag(inode, pages, i, cluster);
1511
		if (ret < 0) {
A
Al Viro 已提交
1512
			inode_unlock(inode);
C
Chris Mason 已提交
1513
			goto out_ra;
1514
		}
C
Chris Mason 已提交
1515 1516

		defrag_count += ret;
1517
		balance_dirty_pages_ratelimited(inode->i_mapping);
A
Al Viro 已提交
1518
		inode_unlock(inode);
C
Chris Mason 已提交
1519 1520 1521 1522 1523

		if (newer_than) {
			if (newer_off == (u64)-1)
				break;

1524 1525 1526
			if (ret > 0)
				i += ret;

C
Chris Mason 已提交
1527
			newer_off = max(newer_off + 1,
1528
					(u64)i << PAGE_SHIFT);
C
Chris Mason 已提交
1529

1530 1531
			ret = find_new_extents(root, inode, newer_than,
					       &newer_off, SZ_64K);
C
Chris Mason 已提交
1532 1533
			if (!ret) {
				range->start = newer_off;
1534
				i = (newer_off & new_align) >> PAGE_SHIFT;
C
Chris Mason 已提交
1535 1536
			} else {
				break;
C
Christoph Hellwig 已提交
1537
			}
C
Chris Mason 已提交
1538
		} else {
1539
			if (ret > 0) {
L
Li Zefan 已提交
1540
				i += ret;
1541
				last_len += ret << PAGE_SHIFT;
1542
			} else {
L
Li Zefan 已提交
1543
				i++;
1544 1545
				last_len = 0;
			}
C
Christoph Hellwig 已提交
1546 1547 1548
		}
	}

1549
	if ((range->flags & BTRFS_DEFRAG_RANGE_START_IO)) {
C
Chris Mason 已提交
1550
		filemap_flush(inode->i_mapping);
1551 1552 1553 1554
		if (test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
			     &BTRFS_I(inode)->runtime_flags))
			filemap_flush(inode->i_mapping);
	}
C
Chris Mason 已提交
1555

1556
	if (range->compress_type == BTRFS_COMPRESS_LZO) {
1557
		btrfs_set_fs_incompat(fs_info, COMPRESS_LZO);
N
Nick Terrell 已提交
1558 1559
	} else if (range->compress_type == BTRFS_COMPRESS_ZSTD) {
		btrfs_set_fs_incompat(fs_info, COMPRESS_ZSTD);
1560 1561
	}

1562
	ret = defrag_count;
1563

C
Chris Mason 已提交
1564
out_ra:
1565
	if (do_compress) {
A
Al Viro 已提交
1566
		inode_lock(inode);
1567
		BTRFS_I(inode)->defrag_compress = BTRFS_COMPRESS_NONE;
A
Al Viro 已提交
1568
		inode_unlock(inode);
1569
	}
C
Chris Mason 已提交
1570 1571 1572
	if (!file)
		kfree(ra);
	kfree(pages);
1573
	return ret;
C
Christoph Hellwig 已提交
1574 1575
}

1576
static noinline int btrfs_ioctl_resize(struct file *file,
1577
					void __user *arg)
C
Christoph Hellwig 已提交
1578
{
1579 1580
	struct inode *inode = file_inode(file);
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
C
Christoph Hellwig 已提交
1581 1582 1583
	u64 new_size;
	u64 old_size;
	u64 devid = 1;
1584
	struct btrfs_root *root = BTRFS_I(inode)->root;
C
Christoph Hellwig 已提交
1585 1586 1587 1588
	struct btrfs_ioctl_vol_args *vol_args;
	struct btrfs_trans_handle *trans;
	struct btrfs_device *device = NULL;
	char *sizestr;
1589
	char *retptr;
C
Christoph Hellwig 已提交
1590 1591 1592 1593
	char *devstr = NULL;
	int ret = 0;
	int mod = 0;

1594 1595 1596
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

1597 1598 1599 1600
	ret = mnt_want_write_file(file);
	if (ret)
		return ret;

1601
	if (test_and_set_bit(BTRFS_FS_EXCL_OP, &fs_info->flags)) {
1602
		mnt_drop_write_file(file);
1603
		return BTRFS_ERROR_DEV_EXCL_RUN_IN_PROGRESS;
1604 1605
	}

L
Li Zefan 已提交
1606
	vol_args = memdup_user(arg, sizeof(*vol_args));
1607 1608 1609 1610
	if (IS_ERR(vol_args)) {
		ret = PTR_ERR(vol_args);
		goto out;
	}
1611 1612

	vol_args->name[BTRFS_PATH_NAME_MAX] = '\0';
C
Christoph Hellwig 已提交
1613 1614 1615 1616 1617 1618 1619

	sizestr = vol_args->name;
	devstr = strchr(sizestr, ':');
	if (devstr) {
		sizestr = devstr + 1;
		*devstr = '\0';
		devstr = vol_args->name;
1620 1621 1622
		ret = kstrtoull(devstr, 10, &devid);
		if (ret)
			goto out_free;
1623 1624 1625 1626
		if (!devid) {
			ret = -EINVAL;
			goto out_free;
		}
1627
		btrfs_info(fs_info, "resizing devid %llu", devid);
C
Christoph Hellwig 已提交
1628
	}
M
Miao Xie 已提交
1629

1630
	device = btrfs_find_device(fs_info, devid, NULL, NULL);
C
Christoph Hellwig 已提交
1631
	if (!device) {
1632 1633
		btrfs_info(fs_info, "resizer unable to find device %llu",
			   devid);
1634
		ret = -ENODEV;
1635
		goto out_free;
C
Christoph Hellwig 已提交
1636
	}
M
Miao Xie 已提交
1637

1638
	if (!test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
1639
		btrfs_info(fs_info,
1640
			   "resizer unable to apply on readonly device %llu",
1641
		       devid);
1642
		ret = -EPERM;
L
Liu Bo 已提交
1643 1644 1645
		goto out_free;
	}

C
Christoph Hellwig 已提交
1646 1647 1648 1649 1650 1651 1652 1653 1654 1655
	if (!strcmp(sizestr, "max"))
		new_size = device->bdev->bd_inode->i_size;
	else {
		if (sizestr[0] == '-') {
			mod = -1;
			sizestr++;
		} else if (sizestr[0] == '+') {
			mod = 1;
			sizestr++;
		}
1656 1657
		new_size = memparse(sizestr, &retptr);
		if (*retptr != '\0' || new_size == 0) {
C
Christoph Hellwig 已提交
1658
			ret = -EINVAL;
1659
			goto out_free;
C
Christoph Hellwig 已提交
1660 1661 1662
		}
	}

1663
	if (test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) {
1664
		ret = -EPERM;
1665 1666 1667
		goto out_free;
	}

1668
	old_size = btrfs_device_get_total_bytes(device);
C
Christoph Hellwig 已提交
1669 1670 1671 1672

	if (mod < 0) {
		if (new_size > old_size) {
			ret = -EINVAL;
1673
			goto out_free;
C
Christoph Hellwig 已提交
1674 1675 1676
		}
		new_size = old_size - new_size;
	} else if (mod > 0) {
1677
		if (new_size > ULLONG_MAX - old_size) {
1678
			ret = -ERANGE;
1679 1680
			goto out_free;
		}
C
Christoph Hellwig 已提交
1681 1682 1683
		new_size = old_size + new_size;
	}

1684
	if (new_size < SZ_256M) {
C
Christoph Hellwig 已提交
1685
		ret = -EINVAL;
1686
		goto out_free;
C
Christoph Hellwig 已提交
1687 1688 1689
	}
	if (new_size > device->bdev->bd_inode->i_size) {
		ret = -EFBIG;
1690
		goto out_free;
C
Christoph Hellwig 已提交
1691 1692
	}

1693
	new_size = round_down(new_size, fs_info->sectorsize);
C
Christoph Hellwig 已提交
1694

1695 1696
	btrfs_info_in_rcu(fs_info, "new size for %s is %llu",
			  rcu_str_deref(device->name), new_size);
C
Christoph Hellwig 已提交
1697 1698

	if (new_size > old_size) {
1699
		trans = btrfs_start_transaction(root, 0);
1700 1701
		if (IS_ERR(trans)) {
			ret = PTR_ERR(trans);
1702
			goto out_free;
1703
		}
C
Christoph Hellwig 已提交
1704
		ret = btrfs_grow_device(trans, device, new_size);
1705
		btrfs_commit_transaction(trans);
1706
	} else if (new_size < old_size) {
C
Christoph Hellwig 已提交
1707
		ret = btrfs_shrink_device(device, new_size);
1708
	} /* equal, nothing need to do */
C
Christoph Hellwig 已提交
1709

1710
out_free:
C
Christoph Hellwig 已提交
1711
	kfree(vol_args);
1712
out:
1713
	clear_bit(BTRFS_FS_EXCL_OP, &fs_info->flags);
1714
	mnt_drop_write_file(file);
C
Christoph Hellwig 已提交
1715 1716 1717
	return ret;
}

S
Sage Weil 已提交
1718
static noinline int btrfs_ioctl_snap_create_transid(struct file *file,
1719
				const char *name, unsigned long fd, int subvol,
A
Arne Jansen 已提交
1720
				u64 *transid, bool readonly,
1721
				struct btrfs_qgroup_inherit *inherit)
C
Christoph Hellwig 已提交
1722 1723
{
	int namelen;
1724
	int ret = 0;
C
Christoph Hellwig 已提交
1725

1726 1727 1728
	if (!S_ISDIR(file_inode(file)->i_mode))
		return -ENOTDIR;

1729 1730 1731 1732
	ret = mnt_want_write_file(file);
	if (ret)
		goto out;

S
Sage Weil 已提交
1733 1734
	namelen = strlen(name);
	if (strchr(name, '/')) {
C
Christoph Hellwig 已提交
1735
		ret = -EINVAL;
1736
		goto out_drop_write;
C
Christoph Hellwig 已提交
1737 1738
	}

1739 1740 1741
	if (name[0] == '.' &&
	   (namelen == 1 || (name[1] == '.' && namelen == 2))) {
		ret = -EEXIST;
1742
		goto out_drop_write;
1743 1744
	}

1745
	if (subvol) {
S
Sage Weil 已提交
1746
		ret = btrfs_mksubvol(&file->f_path, name, namelen,
A
Arne Jansen 已提交
1747
				     NULL, transid, readonly, inherit);
1748
	} else {
1749
		struct fd src = fdget(fd);
1750
		struct inode *src_inode;
1751
		if (!src.file) {
1752
			ret = -EINVAL;
1753
			goto out_drop_write;
1754 1755
		}

A
Al Viro 已提交
1756 1757
		src_inode = file_inode(src.file);
		if (src_inode->i_sb != file_inode(file)->i_sb) {
J
Josef Bacik 已提交
1758
			btrfs_info(BTRFS_I(file_inode(file))->root->fs_info,
1759
				   "Snapshot src from another FS");
1760
			ret = -EXDEV;
1761 1762 1763 1764 1765 1766
		} else if (!inode_owner_or_capable(src_inode)) {
			/*
			 * Subvolume creation is not restricted, but snapshots
			 * are limited to own subvolumes only
			 */
			ret = -EPERM;
1767 1768 1769 1770
		} else {
			ret = btrfs_mksubvol(&file->f_path, name, namelen,
					     BTRFS_I(src_inode)->root,
					     transid, readonly, inherit);
1771
		}
1772
		fdput(src);
1773
	}
1774 1775
out_drop_write:
	mnt_drop_write_file(file);
C
Christoph Hellwig 已提交
1776
out:
S
Sage Weil 已提交
1777 1778 1779 1780
	return ret;
}

static noinline int btrfs_ioctl_snap_create(struct file *file,
					    void __user *arg, int subvol)
{
	struct btrfs_ioctl_vol_args *vol_args;
	int ret;

	if (!S_ISDIR(file_inode(file)->i_mode))
		return -ENOTDIR;

	vol_args = memdup_user(arg, sizeof(*vol_args));
	if (IS_ERR(vol_args))
		return PTR_ERR(vol_args);
	vol_args->name[BTRFS_PATH_NAME_MAX] = '\0';

	ret = btrfs_ioctl_snap_create_transid(file, vol_args->name,
					      vol_args->fd, subvol,
					      NULL, false, NULL);

	kfree(vol_args);
	return ret;
}

static noinline int btrfs_ioctl_snap_create_v2(struct file *file,
					       void __user *arg, int subvol)
{
	struct btrfs_ioctl_vol_args_v2 *vol_args;
	int ret;
	u64 transid = 0;
	u64 *ptr = NULL;
	bool readonly = false;
	struct btrfs_qgroup_inherit *inherit = NULL;

	if (!S_ISDIR(file_inode(file)->i_mode))
		return -ENOTDIR;

	vol_args = memdup_user(arg, sizeof(*vol_args));
	if (IS_ERR(vol_args))
		return PTR_ERR(vol_args);
	vol_args->name[BTRFS_SUBVOL_NAME_MAX] = '\0';

	if (vol_args->flags &
	    ~(BTRFS_SUBVOL_CREATE_ASYNC | BTRFS_SUBVOL_RDONLY |
	      BTRFS_SUBVOL_QGROUP_INHERIT)) {
		ret = -EOPNOTSUPP;
		goto free_args;
	}

	if (vol_args->flags & BTRFS_SUBVOL_CREATE_ASYNC)
		ptr = &transid;
	if (vol_args->flags & BTRFS_SUBVOL_RDONLY)
		readonly = true;
	if (vol_args->flags & BTRFS_SUBVOL_QGROUP_INHERIT) {
		if (vol_args->size > PAGE_SIZE) {
			ret = -EINVAL;
			goto free_args;
		}
		inherit = memdup_user(vol_args->qgroup_inherit, vol_args->size);
		if (IS_ERR(inherit)) {
			ret = PTR_ERR(inherit);
			goto free_args;
		}
	}

	ret = btrfs_ioctl_snap_create_transid(file, vol_args->name,
					      vol_args->fd, subvol, ptr,
					      readonly, inherit);
	if (ret)
		goto free_inherit;

	if (ptr && copy_to_user(arg +
				offsetof(struct btrfs_ioctl_vol_args_v2,
					transid),
				ptr, sizeof(*ptr)))
		ret = -EFAULT;

free_inherit:
	kfree(inherit);
free_args:
	kfree(vol_args);
	return ret;
}
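
/*
 * Illustrative userspace sketch (not part of this file): creating a
 * read-only snapshot through BTRFS_IOC_SNAP_CREATE_V2 roughly looks like
 * the following, where "dst_dir_fd" is an open fd of the directory that
 * will contain the snapshot and "src_fd" is an open fd of the source
 * subvolume (both names are made up for the example):
 *
 *	struct btrfs_ioctl_vol_args_v2 args = { 0 };
 *
 *	args.fd = src_fd;
 *	args.flags = BTRFS_SUBVOL_RDONLY;
 *	strncpy(args.name, "snap1", BTRFS_SUBVOL_NAME_MAX);
 *	ioctl(dst_dir_fd, BTRFS_IOC_SNAP_CREATE_V2, &args);
 *
 * The handler above then routes the request to btrfs_mksubvol() via
 * btrfs_ioctl_snap_create_transid().
 */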

static noinline int btrfs_ioctl_subvol_getflags(struct file *file,
						void __user *arg)
{
	struct inode *inode = file_inode(file);
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_root *root = BTRFS_I(inode)->root;
	int ret = 0;
	u64 flags = 0;

	if (btrfs_ino(BTRFS_I(inode)) != BTRFS_FIRST_FREE_OBJECTID)
		return -EINVAL;

	down_read(&fs_info->subvol_sem);
	if (btrfs_root_readonly(root))
		flags |= BTRFS_SUBVOL_RDONLY;
	up_read(&fs_info->subvol_sem);

	if (copy_to_user(arg, &flags, sizeof(flags)))
		ret = -EFAULT;

	return ret;
}

static noinline int btrfs_ioctl_subvol_setflags(struct file *file,
					      void __user *arg)
{
	struct inode *inode = file_inode(file);
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_trans_handle *trans;
	u64 root_flags;
	u64 flags;
	int ret = 0;

	if (!inode_owner_or_capable(inode))
		return -EPERM;

	ret = mnt_want_write_file(file);
	if (ret)
		goto out;

	if (btrfs_ino(BTRFS_I(inode)) != BTRFS_FIRST_FREE_OBJECTID) {
		ret = -EINVAL;
		goto out_drop_write;
	}

	if (copy_from_user(&flags, arg, sizeof(flags))) {
		ret = -EFAULT;
		goto out_drop_write;
	}

	if (flags & BTRFS_SUBVOL_CREATE_ASYNC) {
		ret = -EINVAL;
		goto out_drop_write;
	}

	if (flags & ~BTRFS_SUBVOL_RDONLY) {
		ret = -EOPNOTSUPP;
		goto out_drop_write;
	}

	down_write(&fs_info->subvol_sem);

	/* nothing to do */
	if (!!(flags & BTRFS_SUBVOL_RDONLY) == btrfs_root_readonly(root))
		goto out_drop_sem;

	root_flags = btrfs_root_flags(&root->root_item);
	if (flags & BTRFS_SUBVOL_RDONLY) {
		btrfs_set_root_flags(&root->root_item,
				     root_flags | BTRFS_ROOT_SUBVOL_RDONLY);
	} else {
		/*
		 * Block RO -> RW transition if this subvolume is involved in
		 * send
		 */
		spin_lock(&root->root_item_lock);
		if (root->send_in_progress == 0) {
			btrfs_set_root_flags(&root->root_item,
				     root_flags & ~BTRFS_ROOT_SUBVOL_RDONLY);
			spin_unlock(&root->root_item_lock);
		} else {
			spin_unlock(&root->root_item_lock);
			btrfs_warn(fs_info,
				   "Attempt to set subvolume %llu read-write during send",
				   root->root_key.objectid);
			ret = -EPERM;
			goto out_drop_sem;
		}
	}

	trans = btrfs_start_transaction(root, 1);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto out_reset;
	}

	ret = btrfs_update_root(trans, fs_info->tree_root,
				&root->root_key, &root->root_item);
	if (ret < 0) {
		btrfs_end_transaction(trans);
		goto out_reset;
	}

	ret = btrfs_commit_transaction(trans);

out_reset:
	if (ret)
		btrfs_set_root_flags(&root->root_item, root_flags);
out_drop_sem:
	up_write(&fs_info->subvol_sem);
out_drop_write:
	mnt_drop_write_file(file);
out:
	return ret;
}
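
/*
 * Illustrative userspace sketch (not part of this file): toggling the
 * read-only property of a subvolume uses a plain u64 flags word, e.g.:
 *
 *	__u64 flags;
 *
 *	ioctl(subvol_fd, BTRFS_IOC_SUBVOL_GETFLAGS, &flags);
 *	flags |= BTRFS_SUBVOL_RDONLY;
 *	ioctl(subvol_fd, BTRFS_IOC_SUBVOL_SETFLAGS, &flags);
 *
 * where "subvol_fd" (a made-up name) must refer to the root directory of
 * the subvolume, as enforced by the BTRFS_FIRST_FREE_OBJECTID check above.
 */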

static noinline int key_in_sk(struct btrfs_key *key,
			      struct btrfs_ioctl_search_key *sk)
{
	struct btrfs_key test;
	int ret;

	test.objectid = sk->min_objectid;
	test.type = sk->min_type;
	test.offset = sk->min_offset;

	ret = btrfs_comp_cpu_keys(key, &test);
	if (ret < 0)
		return 0;

	test.objectid = sk->max_objectid;
	test.type = sk->max_type;
	test.offset = sk->max_offset;

	ret = btrfs_comp_cpu_keys(key, &test);
	if (ret > 0)
		return 0;
	return 1;
}

static noinline int copy_to_sk(struct btrfs_path *path,
			       struct btrfs_key *key,
			       struct btrfs_ioctl_search_key *sk,
			       size_t *buf_size,
			       char __user *ubuf,
			       unsigned long *sk_offset,
			       int *num_found)
{
	u64 found_transid;
	struct extent_buffer *leaf;
	struct btrfs_ioctl_search_header sh;
	struct btrfs_key test;
	unsigned long item_off;
	unsigned long item_len;
	int nritems;
	int i;
	int slot;
	int ret = 0;

	leaf = path->nodes[0];
	slot = path->slots[0];
	nritems = btrfs_header_nritems(leaf);

	if (btrfs_header_generation(leaf) > sk->max_transid) {
		i = nritems;
		goto advance_key;
	}
	found_transid = btrfs_header_generation(leaf);

	for (i = slot; i < nritems; i++) {
		item_off = btrfs_item_ptr_offset(leaf, i);
		item_len = btrfs_item_size_nr(leaf, i);

		btrfs_item_key_to_cpu(leaf, key, i);
		if (!key_in_sk(key, sk))
			continue;

		if (sizeof(sh) + item_len > *buf_size) {
			if (*num_found) {
				ret = 1;
				goto out;
			}

			/*
			 * return one empty item back for v1, which does not
			 * handle -EOVERFLOW
			 */

			*buf_size = sizeof(sh) + item_len;
			item_len = 0;
			ret = -EOVERFLOW;
		}

		if (sizeof(sh) + item_len + *sk_offset > *buf_size) {
			ret = 1;
			goto out;
		}

		sh.objectid = key->objectid;
		sh.offset = key->offset;
		sh.type = key->type;
		sh.len = item_len;
		sh.transid = found_transid;

		/* copy search result header */
		if (copy_to_user(ubuf + *sk_offset, &sh, sizeof(sh))) {
			ret = -EFAULT;
			goto out;
		}

		*sk_offset += sizeof(sh);

		if (item_len) {
			char __user *up = ubuf + *sk_offset;
			/* copy the item */
			if (read_extent_buffer_to_user(leaf, up,
						       item_off, item_len)) {
				ret = -EFAULT;
				goto out;
			}

			*sk_offset += item_len;
		}
		(*num_found)++;

		if (ret) /* -EOVERFLOW from above */
			goto out;

		if (*num_found >= sk->nr_items) {
			ret = 1;
			goto out;
		}
	}
advance_key:
	ret = 0;
	test.objectid = sk->max_objectid;
	test.type = sk->max_type;
	test.offset = sk->max_offset;
	if (btrfs_comp_cpu_keys(key, &test) >= 0)
		ret = 1;
	else if (key->offset < (u64)-1)
		key->offset++;
	else if (key->type < (u8)-1) {
		key->offset = 0;
		key->type++;
	} else if (key->objectid < (u64)-1) {
		key->offset = 0;
		key->type = 0;
		key->objectid++;
	} else
		ret = 1;
out:
	/*
	 *  0: all items from this leaf copied, continue with next
	 *  1: * more items can be copied, but unused buffer is too small
	 *     * all items were found
	 *     Either way, it will stop the loop which iterates to the next
	 *     leaf
	 *  -EOVERFLOW: item was too large for buffer
	 *  -EFAULT: could not copy extent buffer back to userspace
	 */
	return ret;
}

static noinline int search_ioctl(struct inode *inode,
				 struct btrfs_ioctl_search_key *sk,
				 size_t *buf_size,
				 char __user *ubuf)
{
	struct btrfs_fs_info *info = btrfs_sb(inode->i_sb);
	struct btrfs_root *root;
	struct btrfs_key key;
	struct btrfs_path *path;
	int ret;
	int num_found = 0;
	unsigned long sk_offset = 0;

	if (*buf_size < sizeof(struct btrfs_ioctl_search_header)) {
		*buf_size = sizeof(struct btrfs_ioctl_search_header);
		return -EOVERFLOW;
	}

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	if (sk->tree_id == 0) {
		/* search the root of the inode that was passed */
		root = BTRFS_I(inode)->root;
	} else {
		key.objectid = sk->tree_id;
		key.type = BTRFS_ROOT_ITEM_KEY;
		key.offset = (u64)-1;
		root = btrfs_read_fs_root_no_name(info, &key);
		if (IS_ERR(root)) {
			btrfs_free_path(path);
			return PTR_ERR(root);
		}
	}

	key.objectid = sk->min_objectid;
	key.type = sk->min_type;
	key.offset = sk->min_offset;

	while (1) {
		ret = btrfs_search_forward(root, &key, path, sk->min_transid);
		if (ret != 0) {
			if (ret > 0)
				ret = 0;
			goto err;
		}
		ret = copy_to_sk(path, &key, sk, buf_size, ubuf,
				 &sk_offset, &num_found);
		btrfs_release_path(path);
		if (ret)
			break;
	}
	if (ret > 0)
		ret = 0;
err:
	sk->nr_items = num_found;
	btrfs_free_path(path);
	return ret;
}

static noinline int btrfs_ioctl_tree_search(struct file *file,
					   void __user *argp)
{
	struct btrfs_ioctl_search_args __user *uargs;
	struct btrfs_ioctl_search_key sk;
	struct inode *inode;
	int ret;
	size_t buf_size;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	uargs = (struct btrfs_ioctl_search_args __user *)argp;

	if (copy_from_user(&sk, &uargs->key, sizeof(sk)))
		return -EFAULT;

	buf_size = sizeof(uargs->buf);

	inode = file_inode(file);
	ret = search_ioctl(inode, &sk, &buf_size, uargs->buf);

	/*
	 * In the original implementation an overflow is handled by returning a
	 * search header with a len of zero, so reset ret.
	 */
	if (ret == -EOVERFLOW)
		ret = 0;

	if (ret == 0 && copy_to_user(&uargs->key, &sk, sizeof(sk)))
		ret = -EFAULT;
	return ret;
}

static noinline int btrfs_ioctl_tree_search_v2(struct file *file,
					       void __user *argp)
{
	struct btrfs_ioctl_search_args_v2 __user *uarg;
	struct btrfs_ioctl_search_args_v2 args;
	struct inode *inode;
	int ret;
	size_t buf_size;
	const size_t buf_limit = SZ_16M;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	/* copy search header and buffer size */
	uarg = (struct btrfs_ioctl_search_args_v2 __user *)argp;
	if (copy_from_user(&args, uarg, sizeof(args)))
		return -EFAULT;

	buf_size = args.buf_size;

	/* limit result size to 16MB */
	if (buf_size > buf_limit)
		buf_size = buf_limit;

	inode = file_inode(file);
	ret = search_ioctl(inode, &args.key, &buf_size,
			   (char __user *)(&uarg->buf[0]));
	if (ret == 0 && copy_to_user(&uarg->key, &args.key, sizeof(args.key)))
		ret = -EFAULT;
	else if (ret == -EOVERFLOW &&
		copy_to_user(&uarg->buf_size, &buf_size, sizeof(buf_size)))
		ret = -EFAULT;

	return ret;
}
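
/*
 * Illustrative userspace sketch (not part of this file): a minimal
 * TREE_SEARCH call that walks ROOT_ITEMs of the root tree could be set up
 * roughly as follows (error handling omitted, CAP_SYS_ADMIN required as
 * checked above):
 *
 *	struct btrfs_ioctl_search_args args = { 0 };
 *
 *	args.key.tree_id = BTRFS_ROOT_TREE_OBJECTID;
 *	args.key.min_type = args.key.max_type = BTRFS_ROOT_ITEM_KEY;
 *	args.key.max_objectid = (__u64)-1;
 *	args.key.max_offset = (__u64)-1;
 *	args.key.max_transid = (__u64)-1;
 *	args.key.nr_items = 4096;
 *	ioctl(fd, BTRFS_IOC_TREE_SEARCH, &args);
 *
 * The results are packed into args.buf as a sequence of
 * struct btrfs_ioctl_search_header entries followed by the raw items.
 */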

/*
 * Search INODE_REFs to identify the path name of the 'dirid' directory
 * in a 'tree_id' tree, and store the path name in 'name'.
 */
static noinline int btrfs_search_path_in_tree(struct btrfs_fs_info *info,
				u64 tree_id, u64 dirid, char *name)
{
	struct btrfs_root *root;
	struct btrfs_key key;
	char *ptr;
	int ret = -1;
	int slot;
	int len;
	int total_len = 0;
	struct btrfs_inode_ref *iref;
	struct extent_buffer *l;
	struct btrfs_path *path;

	if (dirid == BTRFS_FIRST_FREE_OBJECTID) {
		name[0] = '\0';
		return 0;
	}

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ptr = &name[BTRFS_INO_LOOKUP_PATH_MAX - 1];

	key.objectid = tree_id;
	key.type = BTRFS_ROOT_ITEM_KEY;
	key.offset = (u64)-1;
	root = btrfs_read_fs_root_no_name(info, &key);
	if (IS_ERR(root)) {
		ret = PTR_ERR(root);
		goto out;
	}

	key.objectid = dirid;
	key.type = BTRFS_INODE_REF_KEY;
	key.offset = (u64)-1;

	while (1) {
		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
		if (ret < 0)
			goto out;
		else if (ret > 0) {
			ret = btrfs_previous_item(root, path, dirid,
						  BTRFS_INODE_REF_KEY);
			if (ret < 0)
				goto out;
			else if (ret > 0) {
				ret = -ENOENT;
				goto out;
			}
		}

		l = path->nodes[0];
		slot = path->slots[0];
		btrfs_item_key_to_cpu(l, &key, slot);

		iref = btrfs_item_ptr(l, slot, struct btrfs_inode_ref);
		len = btrfs_inode_ref_name_len(l, iref);
		ptr -= len + 1;
		total_len += len + 1;
		if (ptr < name) {
			ret = -ENAMETOOLONG;
			goto out;
		}

		*(ptr + len) = '/';
		read_extent_buffer(l, ptr, (unsigned long)(iref + 1), len);

		if (key.offset == BTRFS_FIRST_FREE_OBJECTID)
			break;

		btrfs_release_path(path);
		key.objectid = key.offset;
		key.offset = (u64)-1;
		dirid = key.objectid;
	}
	memmove(name, ptr, total_len);
	name[total_len] = '\0';
	ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}

static int btrfs_search_path_in_tree_user(struct inode *inode,
				struct btrfs_ioctl_ino_lookup_user_args *args)
{
	struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
	struct super_block *sb = inode->i_sb;
	struct btrfs_key upper_limit = BTRFS_I(inode)->location;
	u64 treeid = BTRFS_I(inode)->root->root_key.objectid;
	u64 dirid = args->dirid;
	unsigned long item_off;
	unsigned long item_len;
	struct btrfs_inode_ref *iref;
	struct btrfs_root_ref *rref;
	struct btrfs_root *root;
	struct btrfs_path *path;
	struct btrfs_key key, key2;
	struct extent_buffer *leaf;
	struct inode *temp_inode;
	char *ptr;
	int slot;
	int len;
	int total_len = 0;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/*
	 * If the bottom subvolume does not exist directly under upper_limit,
	 * construct the path in from the bottom up.
	 */
	if (dirid != upper_limit.objectid) {
		ptr = &args->path[BTRFS_INO_LOOKUP_USER_PATH_MAX - 1];

		key.objectid = treeid;
		key.type = BTRFS_ROOT_ITEM_KEY;
		key.offset = (u64)-1;
		root = btrfs_read_fs_root_no_name(fs_info, &key);
		if (IS_ERR(root)) {
			ret = PTR_ERR(root);
			goto out;
		}

		key.objectid = dirid;
		key.type = BTRFS_INODE_REF_KEY;
		key.offset = (u64)-1;
		while (1) {
			ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
			if (ret < 0) {
				goto out;
			} else if (ret > 0) {
				ret = btrfs_previous_item(root, path, dirid,
							  BTRFS_INODE_REF_KEY);
				if (ret < 0) {
					goto out;
				} else if (ret > 0) {
					ret = -ENOENT;
					goto out;
				}
			}

			leaf = path->nodes[0];
			slot = path->slots[0];
			btrfs_item_key_to_cpu(leaf, &key, slot);

			iref = btrfs_item_ptr(leaf, slot, struct btrfs_inode_ref);
			len = btrfs_inode_ref_name_len(leaf, iref);
			ptr -= len + 1;
			total_len += len + 1;
			if (ptr < args->path) {
				ret = -ENAMETOOLONG;
				goto out;
			}

			*(ptr + len) = '/';
			read_extent_buffer(leaf, ptr,
					(unsigned long)(iref + 1), len);

			/* Check the read+exec permission of this directory */
			ret = btrfs_previous_item(root, path, dirid,
						  BTRFS_INODE_ITEM_KEY);
			if (ret < 0) {
				goto out;
			} else if (ret > 0) {
				ret = -ENOENT;
				goto out;
			}

			leaf = path->nodes[0];
			slot = path->slots[0];
			btrfs_item_key_to_cpu(leaf, &key2, slot);
			if (key2.objectid != dirid) {
				ret = -ENOENT;
				goto out;
			}

			temp_inode = btrfs_iget(sb, &key2, root, NULL);
			if (IS_ERR(temp_inode)) {
				ret = PTR_ERR(temp_inode);
				goto out;
			}
			ret = inode_permission(temp_inode, MAY_READ | MAY_EXEC);
			iput(temp_inode);
			if (ret) {
				ret = -EACCES;
				goto out;
			}

			if (key.offset == upper_limit.objectid)
				break;
			if (key.objectid == BTRFS_FIRST_FREE_OBJECTID) {
				ret = -EACCES;
				goto out;
			}

			btrfs_release_path(path);
			key.objectid = key.offset;
			key.offset = (u64)-1;
			dirid = key.objectid;
		}

		memmove(args->path, ptr, total_len);
		args->path[total_len] = '\0';
		btrfs_release_path(path);
	}

	/* Get the bottom subvolume's name from ROOT_REF */
	root = fs_info->tree_root;
	key.objectid = treeid;
	key.type = BTRFS_ROOT_REF_KEY;
	key.offset = args->treeid;
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0) {
		goto out;
	} else if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	leaf = path->nodes[0];
	slot = path->slots[0];
	btrfs_item_key_to_cpu(leaf, &key, slot);

	item_off = btrfs_item_ptr_offset(leaf, slot);
	item_len = btrfs_item_size_nr(leaf, slot);
	/* Check if dirid in ROOT_REF corresponds to passed dirid */
	rref = btrfs_item_ptr(leaf, slot, struct btrfs_root_ref);
	if (args->dirid != btrfs_root_ref_dirid(leaf, rref)) {
		ret = -EINVAL;
		goto out;
	}

	/* Copy subvolume's name */
	item_off += sizeof(struct btrfs_root_ref);
	item_len -= sizeof(struct btrfs_root_ref);
	read_extent_buffer(leaf, args->name, item_off, item_len);
	args->name[item_len] = 0;

out:
	btrfs_free_path(path);
	return ret;
}

static noinline int btrfs_ioctl_ino_lookup(struct file *file,
					   void __user *argp)
{
	struct btrfs_ioctl_ino_lookup_args *args;
	struct inode *inode;
	int ret = 0;

	args = memdup_user(argp, sizeof(*args));
	if (IS_ERR(args))
		return PTR_ERR(args);

	inode = file_inode(file);

	/*
	 * Unprivileged query to obtain the containing subvolume root id. The
	 * path is reset so it's consistent with btrfs_search_path_in_tree.
	 */
	if (args->treeid == 0)
		args->treeid = BTRFS_I(inode)->root->root_key.objectid;

	if (args->objectid == BTRFS_FIRST_FREE_OBJECTID) {
		args->name[0] = 0;
		goto out;
	}

	if (!capable(CAP_SYS_ADMIN)) {
		ret = -EPERM;
		goto out;
	}

	ret = btrfs_search_path_in_tree(BTRFS_I(inode)->root->fs_info,
					args->treeid, args->objectid,
					args->name);

out:
	if (ret == 0 && copy_to_user(argp, args, sizeof(*args)))
		ret = -EFAULT;

	kfree(args);
	return ret;
}
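
/*
 * Illustrative userspace sketch (not part of this file): the unprivileged
 * form of this ioctl is commonly used just to discover the id of the
 * subvolume containing a file, e.g.:
 *
 *	struct btrfs_ioctl_ino_lookup_args args = { 0 };
 *
 *	args.treeid = 0;				// "my containing subvolume"
 *	args.objectid = BTRFS_FIRST_FREE_OBJECTID;	// skip path construction
 *	ioctl(fd, BTRFS_IOC_INO_LOOKUP, &args);
 *	// args.treeid now holds the subvolume id of the tree that owns "fd"
 */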

/*
 * Version of ino_lookup ioctl (unprivileged)
 *
 * The main differences from ino_lookup ioctl are:
 *
 *   1. Read + Exec permission will be checked using inode_permission() during
 *      path construction. -EACCES will be returned in case of failure.
 *   2. Path construction will be stopped at the inode number which corresponds
 *      to the fd with which this ioctl is called. If constructed path does not
 *      exist under fd's inode, -EACCES will be returned.
 *   3. The name of bottom subvolume is also searched and filled.
 */
static int btrfs_ioctl_ino_lookup_user(struct file *file, void __user *argp)
{
	struct btrfs_ioctl_ino_lookup_user_args *args;
	struct inode *inode;
	int ret;

	args = memdup_user(argp, sizeof(*args));
	if (IS_ERR(args))
		return PTR_ERR(args);

	inode = file_inode(file);

	if (args->dirid == BTRFS_FIRST_FREE_OBJECTID &&
	    BTRFS_I(inode)->location.objectid != BTRFS_FIRST_FREE_OBJECTID) {
		/*
		 * The subvolume does not exist under fd with which this is
		 * called
		 */
		kfree(args);
		return -EACCES;
	}

	ret = btrfs_search_path_in_tree_user(inode, args);

	if (ret == 0 && copy_to_user(argp, args, sizeof(*args)))
		ret = -EFAULT;

	kfree(args);
	return ret;
}

/* Get the subvolume information in BTRFS_ROOT_ITEM and BTRFS_ROOT_BACKREF */
static int btrfs_ioctl_get_subvol_info(struct file *file, void __user *argp)
{
	struct btrfs_ioctl_get_subvol_info_args *subvol_info;
	struct btrfs_fs_info *fs_info;
	struct btrfs_root *root;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_root_item *root_item;
	struct btrfs_root_ref *rref;
	struct extent_buffer *leaf;
	unsigned long item_off;
	unsigned long item_len;
	struct inode *inode;
	int slot;
	int ret = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	subvol_info = kzalloc(sizeof(*subvol_info), GFP_KERNEL);
	if (!subvol_info) {
		btrfs_free_path(path);
		return -ENOMEM;
	}

	inode = file_inode(file);
	fs_info = BTRFS_I(inode)->root->fs_info;

	/* Get root_item of inode's subvolume */
	key.objectid = BTRFS_I(inode)->root->root_key.objectid;
	key.type = BTRFS_ROOT_ITEM_KEY;
	key.offset = (u64)-1;
	root = btrfs_read_fs_root_no_name(fs_info, &key);
	if (IS_ERR(root)) {
		ret = PTR_ERR(root);
		goto out;
	}
	root_item = &root->root_item;

	subvol_info->treeid = key.objectid;

	subvol_info->generation = btrfs_root_generation(root_item);
	subvol_info->flags = btrfs_root_flags(root_item);

	memcpy(subvol_info->uuid, root_item->uuid, BTRFS_UUID_SIZE);
	memcpy(subvol_info->parent_uuid, root_item->parent_uuid,
						    BTRFS_UUID_SIZE);
	memcpy(subvol_info->received_uuid, root_item->received_uuid,
						    BTRFS_UUID_SIZE);

	subvol_info->ctransid = btrfs_root_ctransid(root_item);
	subvol_info->ctime.sec = btrfs_stack_timespec_sec(&root_item->ctime);
	subvol_info->ctime.nsec = btrfs_stack_timespec_nsec(&root_item->ctime);

	subvol_info->otransid = btrfs_root_otransid(root_item);
	subvol_info->otime.sec = btrfs_stack_timespec_sec(&root_item->otime);
	subvol_info->otime.nsec = btrfs_stack_timespec_nsec(&root_item->otime);

	subvol_info->stransid = btrfs_root_stransid(root_item);
	subvol_info->stime.sec = btrfs_stack_timespec_sec(&root_item->stime);
	subvol_info->stime.nsec = btrfs_stack_timespec_nsec(&root_item->stime);

	subvol_info->rtransid = btrfs_root_rtransid(root_item);
	subvol_info->rtime.sec = btrfs_stack_timespec_sec(&root_item->rtime);
	subvol_info->rtime.nsec = btrfs_stack_timespec_nsec(&root_item->rtime);

	if (key.objectid != BTRFS_FS_TREE_OBJECTID) {
		/* Search root tree for ROOT_BACKREF of this subvolume */
		root = fs_info->tree_root;

		key.type = BTRFS_ROOT_BACKREF_KEY;
		key.offset = 0;
		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
		if (ret < 0) {
			goto out;
		} else if (path->slots[0] >=
			   btrfs_header_nritems(path->nodes[0])) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0) {
				goto out;
			} else if (ret > 0) {
				ret = -EUCLEAN;
				goto out;
			}
		}

		leaf = path->nodes[0];
		slot = path->slots[0];
		btrfs_item_key_to_cpu(leaf, &key, slot);
		if (key.objectid == subvol_info->treeid &&
		    key.type == BTRFS_ROOT_BACKREF_KEY) {
			subvol_info->parent_id = key.offset;

			rref = btrfs_item_ptr(leaf, slot, struct btrfs_root_ref);
			subvol_info->dirid = btrfs_root_ref_dirid(leaf, rref);

			item_off = btrfs_item_ptr_offset(leaf, slot)
					+ sizeof(struct btrfs_root_ref);
			item_len = btrfs_item_size_nr(leaf, slot)
					- sizeof(struct btrfs_root_ref);
			read_extent_buffer(leaf, subvol_info->name,
					   item_off, item_len);
		} else {
			ret = -ENOENT;
			goto out;
		}
	}

	if (copy_to_user(argp, subvol_info, sizeof(*subvol_info)))
		ret = -EFAULT;

out:
	btrfs_free_path(path);
	kzfree(subvol_info);
	return ret;
}

/*
 * Return ROOT_REF information of the subvolume containing this inode
 * except the subvolume name.
 */
static int btrfs_ioctl_get_subvol_rootref(struct file *file, void __user *argp)
{
	struct btrfs_ioctl_get_subvol_rootref_args *rootrefs;
	struct btrfs_root_ref *rref;
	struct btrfs_root *root;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct extent_buffer *leaf;
	struct inode *inode;
	u64 objectid;
	int slot;
	int ret;
	u8 found;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	rootrefs = memdup_user(argp, sizeof(*rootrefs));
	if (IS_ERR(rootrefs)) {
		btrfs_free_path(path);
		return PTR_ERR(rootrefs);
	}

	inode = file_inode(file);
	root = BTRFS_I(inode)->root->fs_info->tree_root;
	objectid = BTRFS_I(inode)->root->root_key.objectid;

	key.objectid = objectid;
	key.type = BTRFS_ROOT_REF_KEY;
	key.offset = rootrefs->min_treeid;
	found = 0;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0) {
		goto out;
	} else if (path->slots[0] >=
		   btrfs_header_nritems(path->nodes[0])) {
		ret = btrfs_next_leaf(root, path);
		if (ret < 0) {
			goto out;
		} else if (ret > 0) {
			ret = -EUCLEAN;
			goto out;
		}
	}
	while (1) {
		leaf = path->nodes[0];
		slot = path->slots[0];

		btrfs_item_key_to_cpu(leaf, &key, slot);
		if (key.objectid != objectid || key.type != BTRFS_ROOT_REF_KEY) {
			ret = 0;
			goto out;
		}

		if (found == BTRFS_MAX_ROOTREF_BUFFER_NUM) {
			ret = -EOVERFLOW;
			goto out;
		}

		rref = btrfs_item_ptr(leaf, slot, struct btrfs_root_ref);
		rootrefs->rootref[found].treeid = key.offset;
		rootrefs->rootref[found].dirid =
				  btrfs_root_ref_dirid(leaf, rref);
		found++;

		ret = btrfs_next_item(root, path);
		if (ret < 0) {
			goto out;
		} else if (ret > 0) {
			ret = -EUCLEAN;
			goto out;
		}
	}

out:
	if (!ret || ret == -EOVERFLOW) {
		rootrefs->num_items = found;
		/* update min_treeid for next search */
		if (found)
			rootrefs->min_treeid =
				rootrefs->rootref[found - 1].treeid + 1;
		if (copy_to_user(argp, rootrefs, sizeof(*rootrefs)))
			ret = -EFAULT;
	}

	kfree(rootrefs);
	btrfs_free_path(path);

	return ret;
}
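
/*
 * Illustrative userspace sketch (not part of this file): because the
 * rootref array is bounded by BTRFS_MAX_ROOTREF_BUFFER_NUM, callers are
 * expected to page through the results, resuming from the updated
 * min_treeid for as long as the ioctl keeps failing with EOVERFLOW:
 *
 *	struct btrfs_ioctl_get_subvol_rootref_args args = { 0 };
 *	int ret;
 *
 *	do {
 *		ret = ioctl(fd, BTRFS_IOC_GET_SUBVOL_ROOTREF, &args);
 *		// consume args.num_items entries of args.rootref[] here
 *	} while (ret < 0 && errno == EOVERFLOW);
 */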

static noinline int btrfs_ioctl_snap_destroy(struct file *file,
					     void __user *arg)
{
	struct dentry *parent = file->f_path.dentry;
	struct btrfs_fs_info *fs_info = btrfs_sb(parent->d_sb);
	struct dentry *dentry;
	struct inode *dir = d_inode(parent);
	struct inode *inode;
	struct btrfs_root *root = BTRFS_I(dir)->root;
	struct btrfs_root *dest = NULL;
	struct btrfs_ioctl_vol_args *vol_args;
	int namelen;
	int err = 0;

	if (!S_ISDIR(dir->i_mode))
		return -ENOTDIR;

	vol_args = memdup_user(arg, sizeof(*vol_args));
	if (IS_ERR(vol_args))
		return PTR_ERR(vol_args);

	vol_args->name[BTRFS_PATH_NAME_MAX] = '\0';
	namelen = strlen(vol_args->name);
	if (strchr(vol_args->name, '/') ||
	    strncmp(vol_args->name, "..", namelen) == 0) {
		err = -EINVAL;
		goto out;
	}

	err = mnt_want_write_file(file);
	if (err)
		goto out;

	err = down_write_killable_nested(&dir->i_rwsem, I_MUTEX_PARENT);
	if (err == -EINTR)
		goto out_drop_write;
	dentry = lookup_one_len(vol_args->name, parent, namelen);
	if (IS_ERR(dentry)) {
		err = PTR_ERR(dentry);
		goto out_unlock_dir;
	}

	if (d_really_is_negative(dentry)) {
		err = -ENOENT;
		goto out_dput;
	}

	inode = d_inode(dentry);
	dest = BTRFS_I(inode)->root;
	if (!capable(CAP_SYS_ADMIN)) {
		/*
		 * Regular user.  Only allow this with a special mount
		 * option, when the user has write+exec access to the
		 * subvol root, and when rmdir(2) would have been
		 * allowed.
		 *
		 * Note that this does _not_ check that the subvol is
		 * empty or doesn't contain data that we wouldn't
		 * otherwise be able to delete.
		 *
		 * Users who want to delete empty subvols should try
		 * rmdir(2).
		 */
		err = -EPERM;
		if (!btrfs_test_opt(fs_info, USER_SUBVOL_RM_ALLOWED))
			goto out_dput;

		/*
		 * Do not allow deletion if the parent dir is the same
		 * as the dir to be deleted.  That means the ioctl
		 * must be called on the dentry referencing the root
		 * of the subvol, not a random directory contained
		 * within it.
		 */
		err = -EINVAL;
		if (root == dest)
			goto out_dput;

		err = inode_permission(inode, MAY_WRITE | MAY_EXEC);
		if (err)
			goto out_dput;
	}

	/* check if subvolume may be deleted by a user */
	err = btrfs_may_delete(dir, dentry, 1);
	if (err)
		goto out_dput;

	if (btrfs_ino(BTRFS_I(inode)) != BTRFS_FIRST_FREE_OBJECTID) {
		err = -EINVAL;
		goto out_dput;
	}

	inode_lock(inode);
	err = btrfs_delete_subvolume(dir, dentry);
	inode_unlock(inode);
	if (!err)
		d_delete(dentry);

out_dput:
	dput(dentry);
out_unlock_dir:
	inode_unlock(dir);
out_drop_write:
	mnt_drop_write_file(file);
out:
	kfree(vol_args);
	return err;
}

static int btrfs_ioctl_defrag(struct file *file, void __user *argp)
{
	struct inode *inode = file_inode(file);
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_ioctl_defrag_range_args *range;
	int ret;

	ret = mnt_want_write_file(file);
	if (ret)
		return ret;

	if (btrfs_root_readonly(root)) {
		ret = -EROFS;
		goto out;
	}

	switch (inode->i_mode & S_IFMT) {
	case S_IFDIR:
		if (!capable(CAP_SYS_ADMIN)) {
			ret = -EPERM;
			goto out;
		}
		ret = btrfs_defrag_root(root);
		break;
	case S_IFREG:
		/*
		 * Note that this does not check the file descriptor for write
		 * access. This prevents defragmenting executables that are
		 * running and allows defrag on files open in read-only mode.
		 */
		if (!capable(CAP_SYS_ADMIN) &&
		    inode_permission(inode, MAY_WRITE)) {
			ret = -EPERM;
			goto out;
		}

		range = kzalloc(sizeof(*range), GFP_KERNEL);
		if (!range) {
			ret = -ENOMEM;
			goto out;
		}

		if (argp) {
			if (copy_from_user(range, argp,
					   sizeof(*range))) {
				ret = -EFAULT;
				kfree(range);
				goto out;
			}
			/* compression requires us to start the IO */
			if ((range->flags & BTRFS_DEFRAG_RANGE_COMPRESS)) {
				range->flags |= BTRFS_DEFRAG_RANGE_START_IO;
				range->extent_thresh = (u32)-1;
			}
		} else {
			/* the rest are all set to zero by kzalloc */
			range->len = (u64)-1;
		}
		ret = btrfs_defrag_file(file_inode(file), file,
					range, BTRFS_OLDEST_GENERATION, 0);
		if (ret > 0)
			ret = 0;
		kfree(range);
		break;
	default:
		ret = -EINVAL;
	}
out:
	mnt_drop_write_file(file);
	return ret;
}
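
/*
 * Illustrative userspace sketch (not part of this file): requesting
 * compression of a whole file during defrag roughly looks like:
 *
 *	struct btrfs_ioctl_defrag_range_args range = { 0 };
 *
 *	range.len = (__u64)-1;				// whole file
 *	range.flags = BTRFS_DEFRAG_RANGE_COMPRESS;
 *	range.compress_type = 1;			// zlib, leave 0 for default
 *	ioctl(fd, BTRFS_IOC_DEFRAG_RANGE, &range);
 *
 * As noted in the handler above, the COMPRESS flag forces START_IO and a
 * maximum extent threshold.
 */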

static long btrfs_ioctl_add_dev(struct btrfs_fs_info *fs_info, void __user *arg)
{
	struct btrfs_ioctl_vol_args *vol_args;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (test_and_set_bit(BTRFS_FS_EXCL_OP, &fs_info->flags))
		return BTRFS_ERROR_DEV_EXCL_RUN_IN_PROGRESS;

	vol_args = memdup_user(arg, sizeof(*vol_args));
	if (IS_ERR(vol_args)) {
		ret = PTR_ERR(vol_args);
		goto out;
	}

	vol_args->name[BTRFS_PATH_NAME_MAX] = '\0';
	ret = btrfs_init_new_device(fs_info, vol_args->name);

	if (!ret)
		btrfs_info(fs_info, "disk added %s", vol_args->name);

	kfree(vol_args);
out:
	clear_bit(BTRFS_FS_EXCL_OP, &fs_info->flags);
	return ret;
}

static long btrfs_ioctl_rm_dev_v2(struct file *file, void __user *arg)
{
	struct inode *inode = file_inode(file);
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_ioctl_vol_args_v2 *vol_args;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	ret = mnt_want_write_file(file);
	if (ret)
		return ret;

	vol_args = memdup_user(arg, sizeof(*vol_args));
	if (IS_ERR(vol_args)) {
		ret = PTR_ERR(vol_args);
		goto err_drop;
	}

	/* Check for compatibility: reject unknown flags */
	if (vol_args->flags & ~BTRFS_VOL_ARG_V2_FLAGS_SUPPORTED) {
		ret = -EOPNOTSUPP;
		goto out;
	}

	if (test_and_set_bit(BTRFS_FS_EXCL_OP, &fs_info->flags)) {
		ret = BTRFS_ERROR_DEV_EXCL_RUN_IN_PROGRESS;
		goto out;
	}

	if (vol_args->flags & BTRFS_DEVICE_SPEC_BY_ID) {
		ret = btrfs_rm_device(fs_info, NULL, vol_args->devid);
	} else {
		vol_args->name[BTRFS_SUBVOL_NAME_MAX] = '\0';
		ret = btrfs_rm_device(fs_info, vol_args->name, 0);
	}
	clear_bit(BTRFS_FS_EXCL_OP, &fs_info->flags);

	if (!ret) {
		if (vol_args->flags & BTRFS_DEVICE_SPEC_BY_ID)
			btrfs_info(fs_info, "device deleted: id %llu",
					vol_args->devid);
		else
			btrfs_info(fs_info, "device deleted: %s",
					vol_args->name);
	}
out:
	kfree(vol_args);
err_drop:
	mnt_drop_write_file(file);
	return ret;
}
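
/*
 * Illustrative userspace sketch (not part of this file): the v2 interface
 * allows removing a device by numeric id instead of by path, e.g.:
 *
 *	struct btrfs_ioctl_vol_args_v2 args = { 0 };
 *
 *	args.flags = BTRFS_DEVICE_SPEC_BY_ID;
 *	args.devid = 2;
 *	ioctl(mnt_fd, BTRFS_IOC_RM_DEV_V2, &args);
 *
 * where "mnt_fd" (a made-up name) is any open fd on the mounted filesystem.
 */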

static long btrfs_ioctl_rm_dev(struct file *file, void __user *arg)
{
	struct inode *inode = file_inode(file);
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_ioctl_vol_args *vol_args;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	ret = mnt_want_write_file(file);
	if (ret)
		return ret;

	if (test_and_set_bit(BTRFS_FS_EXCL_OP, &fs_info->flags)) {
		ret = BTRFS_ERROR_DEV_EXCL_RUN_IN_PROGRESS;
		goto out_drop_write;
	}

	vol_args = memdup_user(arg, sizeof(*vol_args));
	if (IS_ERR(vol_args)) {
		ret = PTR_ERR(vol_args);
		goto out;
	}

	vol_args->name[BTRFS_PATH_NAME_MAX] = '\0';
	ret = btrfs_rm_device(fs_info, vol_args->name, 0);

	if (!ret)
		btrfs_info(fs_info, "disk deleted %s", vol_args->name);
	kfree(vol_args);
out:
	clear_bit(BTRFS_FS_EXCL_OP, &fs_info->flags);
out_drop_write:
	mnt_drop_write_file(file);

	return ret;
}

static long btrfs_ioctl_fs_info(struct btrfs_fs_info *fs_info,
				void __user *arg)
{
	struct btrfs_ioctl_fs_info_args *fi_args;
	struct btrfs_device *device;
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	int ret = 0;

	fi_args = kzalloc(sizeof(*fi_args), GFP_KERNEL);
	if (!fi_args)
		return -ENOMEM;

	rcu_read_lock();
	fi_args->num_devices = fs_devices->num_devices;

	list_for_each_entry_rcu(device, &fs_devices->devices, dev_list) {
		if (device->devid > fi_args->max_id)
			fi_args->max_id = device->devid;
	}
	rcu_read_unlock();

	memcpy(&fi_args->fsid, fs_info->fsid, sizeof(fi_args->fsid));
	fi_args->nodesize = fs_info->nodesize;
	fi_args->sectorsize = fs_info->sectorsize;
	fi_args->clone_alignment = fs_info->sectorsize;

	if (copy_to_user(arg, fi_args, sizeof(*fi_args)))
		ret = -EFAULT;

	kfree(fi_args);
	return ret;
}

static long btrfs_ioctl_dev_info(struct btrfs_fs_info *fs_info,
				 void __user *arg)
{
	struct btrfs_ioctl_dev_info_args *di_args;
	struct btrfs_device *dev;
	int ret = 0;
	char *s_uuid = NULL;

	di_args = memdup_user(arg, sizeof(*di_args));
	if (IS_ERR(di_args))
		return PTR_ERR(di_args);

	if (!btrfs_is_empty_uuid(di_args->uuid))
		s_uuid = di_args->uuid;

	rcu_read_lock();
	dev = btrfs_find_device(fs_info, di_args->devid, s_uuid, NULL);

	if (!dev) {
		ret = -ENODEV;
		goto out;
	}

	di_args->devid = dev->devid;
	di_args->bytes_used = btrfs_device_get_bytes_used(dev);
	di_args->total_bytes = btrfs_device_get_total_bytes(dev);
	memcpy(di_args->uuid, dev->uuid, sizeof(di_args->uuid));
	if (dev->name) {
		strncpy(di_args->path, rcu_str_deref(dev->name),
				sizeof(di_args->path) - 1);
		di_args->path[sizeof(di_args->path) - 1] = 0;
	} else {
		di_args->path[0] = '\0';
	}

out:
	rcu_read_unlock();
	if (ret == 0 && copy_to_user(arg, di_args, sizeof(*di_args)))
		ret = -EFAULT;

	kfree(di_args);
	return ret;
}

static struct page *extent_same_get_page(struct inode *inode, pgoff_t index)
{
	struct page *page;

	page = grab_cache_page(inode->i_mapping, index);
	if (!page)
		return ERR_PTR(-ENOMEM);

	if (!PageUptodate(page)) {
		int ret;

		ret = btrfs_readpage(NULL, page);
		if (ret)
			return ERR_PTR(ret);
		lock_page(page);
		if (!PageUptodate(page)) {
			unlock_page(page);
			put_page(page);
			return ERR_PTR(-EIO);
		}
		if (page->mapping != inode->i_mapping) {
			unlock_page(page);
			put_page(page);
			return ERR_PTR(-EAGAIN);
		}
	}

	return page;
}

static int gather_extent_pages(struct inode *inode, struct page **pages,
			       int num_pages, u64 off)
{
	int i;
	pgoff_t index = off >> PAGE_SHIFT;

	for (i = 0; i < num_pages; i++) {
again:
		pages[i] = extent_same_get_page(inode, index + i);
		if (IS_ERR(pages[i])) {
			int err = PTR_ERR(pages[i]);

			if (err == -EAGAIN)
				goto again;
			pages[i] = NULL;
			return err;
		}
	}
	return 0;
}

static int lock_extent_range(struct inode *inode, u64 off, u64 len,
			     bool retry_range_locking)
{
	/*
	 * Do any pending delalloc/csum calculations on inode, one way or
	 * another, and lock file content.
	 * The locking order is:
	 *
	 *   1) pages
	 *   2) range in the inode's io tree
	 */
	while (1) {
		struct btrfs_ordered_extent *ordered;
		lock_extent(&BTRFS_I(inode)->io_tree, off, off + len - 1);
		ordered = btrfs_lookup_first_ordered_extent(inode,
							    off + len - 1);
		if ((!ordered ||
		     ordered->file_offset + ordered->len <= off ||
		     ordered->file_offset >= off + len) &&
		    !test_range_bit(&BTRFS_I(inode)->io_tree, off,
				    off + len - 1, EXTENT_DELALLOC, 0, NULL)) {
			if (ordered)
				btrfs_put_ordered_extent(ordered);
			break;
		}
		unlock_extent(&BTRFS_I(inode)->io_tree, off, off + len - 1);
		if (ordered)
			btrfs_put_ordered_extent(ordered);
		if (!retry_range_locking)
			return -EAGAIN;
		btrfs_wait_ordered_range(inode, off, len);
	}
	return 0;
}

static void btrfs_double_inode_unlock(struct inode *inode1, struct inode *inode2)
{
	inode_unlock(inode1);
	inode_unlock(inode2);
}

static void btrfs_double_inode_lock(struct inode *inode1, struct inode *inode2)
{
	if (inode1 < inode2)
		swap(inode1, inode2);

	inode_lock_nested(inode1, I_MUTEX_PARENT);
	inode_lock_nested(inode2, I_MUTEX_CHILD);
}

static void btrfs_double_extent_unlock(struct inode *inode1, u64 loff1,
				      struct inode *inode2, u64 loff2, u64 len)
{
	unlock_extent(&BTRFS_I(inode1)->io_tree, loff1, loff1 + len - 1);
	unlock_extent(&BTRFS_I(inode2)->io_tree, loff2, loff2 + len - 1);
}

static int btrfs_double_extent_lock(struct inode *inode1, u64 loff1,
				    struct inode *inode2, u64 loff2, u64 len,
				    bool retry_range_locking)
{
	int ret;

	if (inode1 < inode2) {
		swap(inode1, inode2);
		swap(loff1, loff2);
	}
	ret = lock_extent_range(inode1, loff1, len, retry_range_locking);
	if (ret)
		return ret;
	ret = lock_extent_range(inode2, loff2, len, retry_range_locking);
	if (ret)
		unlock_extent(&BTRFS_I(inode1)->io_tree, loff1,
			      loff1 + len - 1);
	return ret;
}

struct cmp_pages {
	int		num_pages;
	struct page	**src_pages;
	struct page	**dst_pages;
};

static void btrfs_cmp_data_free(struct cmp_pages *cmp)
{
	int i;
	struct page *pg;

	for (i = 0; i < cmp->num_pages; i++) {
		pg = cmp->src_pages[i];
		if (pg) {
			unlock_page(pg);
			put_page(pg);
			cmp->src_pages[i] = NULL;
		}
		pg = cmp->dst_pages[i];
		if (pg) {
			unlock_page(pg);
			put_page(pg);
			cmp->dst_pages[i] = NULL;
		}
	}
}

static int btrfs_cmp_data_prepare(struct inode *src, u64 loff,
				  struct inode *dst, u64 dst_loff,
				  u64 len, struct cmp_pages *cmp)
{
	int ret;
	int num_pages = PAGE_ALIGN(len) >> PAGE_SHIFT;

	cmp->num_pages = num_pages;

	ret = gather_extent_pages(src, cmp->src_pages, num_pages, loff);
	if (ret)
		goto out;

	ret = gather_extent_pages(dst, cmp->dst_pages, num_pages, dst_loff);

out:
	if (ret)
		btrfs_cmp_data_free(cmp);
	return ret;
}

static int btrfs_cmp_data(u64 len, struct cmp_pages *cmp)
{
	int ret = 0;
	int i;
	struct page *src_page, *dst_page;
	unsigned int cmp_len = PAGE_SIZE;
	void *addr, *dst_addr;

	i = 0;
	while (len) {
		if (len < PAGE_SIZE)
			cmp_len = len;

		BUG_ON(i >= cmp->num_pages);

		src_page = cmp->src_pages[i];
		dst_page = cmp->dst_pages[i];
		ASSERT(PageLocked(src_page));
		ASSERT(PageLocked(dst_page));

		addr = kmap_atomic(src_page);
		dst_addr = kmap_atomic(dst_page);

		flush_dcache_page(src_page);
		flush_dcache_page(dst_page);

		if (memcmp(addr, dst_addr, cmp_len))
			ret = -EBADE;

		kunmap_atomic(addr);
		kunmap_atomic(dst_addr);

		if (ret)
			break;

		len -= cmp_len;
		i++;
	}

	return ret;
}

static int extent_same_check_offsets(struct inode *inode, u64 off, u64 *plen,
				     u64 olen)
{
	u64 len = *plen;
	u64 bs = BTRFS_I(inode)->root->fs_info->sb->s_blocksize;

	if (off + olen > inode->i_size || off + olen < off)
		return -EINVAL;

	/* if we extend to eof, continue to block boundary */
	if (off + len == inode->i_size)
		*plen = len = ALIGN(inode->i_size, bs) - off;

	/* Check that we are block aligned - btrfs_clone() requires this */
	if (!IS_ALIGNED(off, bs) || !IS_ALIGNED(off + len, bs))
		return -EINVAL;

	return 0;
}

static int btrfs_extent_same_range(struct inode *src, u64 loff, u64 olen,
				   struct inode *dst, u64 dst_loff,
				   struct cmp_pages *cmp)
{
	int ret;
	u64 len = olen;
	bool same_inode = (src == dst);
	u64 same_lock_start = 0;
	u64 same_lock_len = 0;

	ret = extent_same_check_offsets(src, loff, &len, olen);
	if (ret)
		return ret;

	ret = extent_same_check_offsets(dst, dst_loff, &len, olen);
	if (ret)
		return ret;

	if (same_inode) {
		/*
		 * Single inode case wants the same checks, except we
		 * don't want our length pushed out past i_size as
		 * comparing that data range makes no sense.
		 *
		 * extent_same_check_offsets() will do this for an
		 * unaligned length at i_size, so catch it here and
		 * reject the request.
		 *
		 * This effectively means we require aligned extents
		 * for the single-inode case, whereas the other cases
		 * allow an unaligned length so long as it ends at
		 * i_size.
		 */
		if (len != olen)
			return -EINVAL;

		/* Check for overlapping ranges */
		if (dst_loff + len > loff && dst_loff < loff + len)
			return -EINVAL;

		same_lock_start = min_t(u64, loff, dst_loff);
		same_lock_len = max_t(u64, loff, dst_loff) + len - same_lock_start;
	} else {
		/*
		 * If the source and destination inodes are different, the
		 * source's range end offset matches the source's i_size, that
		 * i_size is not a multiple of the sector size, and the
		 * destination range does not go past the destination's i_size,
		 * we must round down the length to the nearest sector size
		 * multiple. If we don't do this adjustment we end up replacing
		 * with zeroes the bytes in the range that starts at the
		 * deduplication range's end offset and ends at the next sector
		 * size multiple.
		 */
		if (loff + olen == i_size_read(src) &&
		    dst_loff + len < i_size_read(dst)) {
			const u64 sz = BTRFS_I(src)->root->fs_info->sectorsize;

			len = round_down(i_size_read(src), sz) - loff;
			if (len == 0)
				return 0;
			olen = len;
		}
	}

again:
	ret = btrfs_cmp_data_prepare(src, loff, dst, dst_loff, olen, cmp);
	if (ret)
		return ret;

	if (same_inode)
		ret = lock_extent_range(src, same_lock_start, same_lock_len,
					false);
	else
		ret = btrfs_double_extent_lock(src, loff, dst, dst_loff, len,
					       false);
	/*
	 * If one of the inodes has dirty pages in the respective range or
	 * ordered extents, we need to flush delalloc and wait for all ordered
	 * extents in the range. We must unlock the pages and the ranges in the
	 * io trees to avoid deadlocks when flushing delalloc (requires locking
	 * pages) and when waiting for ordered extents to complete (they require
	 * range locking).
	 */
	if (ret == -EAGAIN) {
		/*
		 * Ranges in the io trees already unlocked. Now unlock all
		 * pages before waiting for all IO to complete.
		 */
		btrfs_cmp_data_free(cmp);
		if (same_inode) {
			btrfs_wait_ordered_range(src, same_lock_start,
						 same_lock_len);
		} else {
			btrfs_wait_ordered_range(src, loff, len);
			btrfs_wait_ordered_range(dst, dst_loff, len);
		}
		goto again;
	}
	ASSERT(ret == 0);
	if (WARN_ON(ret)) {
		/* ranges in the io trees already unlocked */
		btrfs_cmp_data_free(cmp);
		return ret;
	}

	/* pass original length for comparison so we stay within i_size */
	ret = btrfs_cmp_data(olen, cmp);
	if (ret == 0)
		ret = btrfs_clone(src, dst, loff, olen, len, dst_loff, 1);

	if (same_inode)
		unlock_extent(&BTRFS_I(src)->io_tree, same_lock_start,
			      same_lock_start + same_lock_len - 1);
	else
		btrfs_double_extent_unlock(src, loff, dst, dst_loff, len);

	btrfs_cmp_data_free(cmp);

	return ret;
}

#define BTRFS_MAX_DEDUPE_LEN	SZ_16M

static int btrfs_extent_same(struct inode *src, u64 loff, u64 olen,
			     struct inode *dst, u64 dst_loff)
{
	int ret;
	struct cmp_pages cmp;
	int num_pages = PAGE_ALIGN(BTRFS_MAX_DEDUPE_LEN) >> PAGE_SHIFT;
	bool same_inode = (src == dst);
	u64 i, tail_len, chunk_count;

	if (olen == 0)
		return 0;

	if (same_inode)
		inode_lock(src);
	else
		btrfs_double_inode_lock(src, dst);

	/* don't make the dst file partly checksummed */
	if ((BTRFS_I(src)->flags & BTRFS_INODE_NODATASUM) !=
	    (BTRFS_I(dst)->flags & BTRFS_INODE_NODATASUM)) {
		ret = -EINVAL;
		goto out_unlock;
	}

	tail_len = olen % BTRFS_MAX_DEDUPE_LEN;
	chunk_count = div_u64(olen, BTRFS_MAX_DEDUPE_LEN);
	if (chunk_count == 0)
		num_pages = PAGE_ALIGN(tail_len) >> PAGE_SHIFT;

	/*
	 * If deduping ranges in the same inode, locking rules make it
	 * mandatory to always lock pages in ascending order to avoid deadlocks
	 * with concurrent tasks (such as starting writeback/delalloc).
	 */
	if (same_inode && dst_loff < loff)
		swap(loff, dst_loff);

	/*
	 * We must gather up all the pages before we initiate our extent
	 * locking. We use an array for the page pointers. Size of the array is
	 * bounded by len, which is in turn bounded by BTRFS_MAX_DEDUPE_LEN.
	 */
	cmp.src_pages = kvmalloc_array(num_pages, sizeof(struct page *),
				       GFP_KERNEL | __GFP_ZERO);
	cmp.dst_pages = kvmalloc_array(num_pages, sizeof(struct page *),
				       GFP_KERNEL | __GFP_ZERO);
	if (!cmp.src_pages || !cmp.dst_pages) {
		ret = -ENOMEM;
		goto out_free;
	}

	for (i = 0; i < chunk_count; i++) {
		ret = btrfs_extent_same_range(src, loff, BTRFS_MAX_DEDUPE_LEN,
					      dst, dst_loff, &cmp);
		if (ret)
			goto out_free;

		loff += BTRFS_MAX_DEDUPE_LEN;
		dst_loff += BTRFS_MAX_DEDUPE_LEN;
	}

	if (tail_len > 0)
		ret = btrfs_extent_same_range(src, loff, tail_len, dst,
					      dst_loff, &cmp);

out_free:
	kvfree(cmp.src_pages);
	kvfree(cmp.dst_pages);

out_unlock:
	if (same_inode)
		inode_unlock(src);
	else
		btrfs_double_inode_unlock(src, dst);

	return ret;
}

int btrfs_dedupe_file_range(struct file *src_file, loff_t src_loff,
			    struct file *dst_file, loff_t dst_loff,
			    u64 olen)
{
	struct inode *src = file_inode(src_file);
	struct inode *dst = file_inode(dst_file);
	u64 bs = BTRFS_I(src)->root->fs_info->sb->s_blocksize;

	if (WARN_ON_ONCE(bs < PAGE_SIZE)) {
		/*
		 * Btrfs does not support blocksize < page_size. As a
		 * result, btrfs_cmp_data() won't correctly handle
		 * this situation without an update.
		 */
		return -EINVAL;
	}

	return btrfs_extent_same(src, src_loff, olen, dst, dst_loff);
}
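
/*
 * Note: this function is wired up as the btrfs backend for the generic
 * FIDEDUPERANGE ioctl (see ioctl_fideduperange(2)); the VFS validates the
 * request and calls down here per destination. A rough userspace sketch
 * (not part of this file, names made up): fill a struct file_dedupe_range
 * with one struct file_dedupe_range_info entry, set
 * src_offset/src_length/dest_fd/dest_offset, then issue
 * ioctl(src_fd, FIDEDUPERANGE, range) and check info[0].status and
 * info[0].bytes_deduped.
 */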

3654 3655 3656 3657
static int clone_finish_inode_update(struct btrfs_trans_handle *trans,
				     struct inode *inode,
				     u64 endoff,
				     const u64 destoff,
3658 3659
				     const u64 olen,
				     int no_time_update)
3660 3661 3662 3663 3664
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	int ret;

	inode_inc_iversion(inode);
3665
	if (!no_time_update)
3666
		inode->i_mtime = inode->i_ctime = current_time(inode);
	/*
	 * We round up to the block size at eof when determining which
	 * extents to clone above, but shouldn't round up the file size.
	 */
	if (endoff > destoff + olen)
		endoff = destoff + olen;
	if (endoff > inode->i_size)
		btrfs_i_size_write(BTRFS_I(inode), endoff);

	ret = btrfs_update_inode(trans, root, inode);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		btrfs_end_transaction(trans);
		goto out;
	}
	ret = btrfs_end_transaction(trans);
out:
	return ret;
}
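/*
 * Worked example for the endoff clamping above (illustrative, assuming a
 * 4 KiB sector size): cloning olen = 3000 bytes to destoff = 0 makes the
 * caller pass endoff = 4096 (the block-aligned end of the last extent),
 * which is clamped back to destoff + olen = 3000 before i_size is updated.
 */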

static void clone_update_extent_map(struct btrfs_inode *inode,
				    const struct btrfs_trans_handle *trans,
				    const struct btrfs_path *path,
				    const u64 hole_offset,
				    const u64 hole_len)
{
	struct extent_map_tree *em_tree = &inode->extent_tree;
	struct extent_map *em;
	int ret;

	em = alloc_extent_map();
	if (!em) {
		set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &inode->runtime_flags);
		return;
	}

	if (path) {
		struct btrfs_file_extent_item *fi;

		fi = btrfs_item_ptr(path->nodes[0], path->slots[0],
				    struct btrfs_file_extent_item);
		btrfs_extent_item_to_extent_map(inode, path, fi, false, em);
		em->generation = -1;
		if (btrfs_file_extent_type(path->nodes[0], fi) ==
		    BTRFS_FILE_EXTENT_INLINE)
			set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
					&inode->runtime_flags);
	} else {
		em->start = hole_offset;
		em->len = hole_len;
		em->ram_bytes = em->len;
		em->orig_start = hole_offset;
		em->block_start = EXTENT_MAP_HOLE;
		em->block_len = 0;
		em->orig_block_len = 0;
		em->compress_type = BTRFS_COMPRESS_NONE;
		em->generation = trans->transid;
	}

	while (1) {
		write_lock(&em_tree->lock);
		ret = add_extent_mapping(em_tree, em, 1);
		write_unlock(&em_tree->lock);
		if (ret != -EEXIST) {
			free_extent_map(em);
			break;
		}
		btrfs_drop_extent_cache(inode, em->start,
					em->start + em->len - 1, 0);
	}

	if (ret)
		set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &inode->runtime_flags);
}

/*
 * Make sure we do not end up inserting an inline extent into a file that has
 * already other (non-inline) extents. If a file has an inline extent it can
 * not have any other extents and the (single) inline extent must start at the
 * file offset 0. Failing to respect these rules will lead to file corruption,
 * resulting in EIO errors on read/write operations, hitting BUG_ON's in mm, etc
 *
 * We can have extents that have been already written to disk or we can have
 * dirty ranges still in delalloc, in which case the extent maps and items are
 * created only when we run delalloc, and the delalloc ranges might fall outside
 * the range we are currently locking in the inode's io tree. So we check the
 * inode's i_size because of that (i_size updates are done while holding the
 * i_mutex, which we are holding here).
 * We also check to see if the inode has a size not greater than "datal" but has
 * extents beyond it, due to a fallocate call with FALLOC_FL_KEEP_SIZE (and we are
 * protected against such concurrent fallocate calls by the i_mutex).
 *
 * If the file has no extents but a size greater than datal, do not allow the
 * copy because we would need to turn the inline extent into a non-inline one (even
 * with NO_HOLES enabled). If we find our destination inode only has one inline
 * extent, just overwrite it with the source inline extent if its size is less
 * than the source extent's size, or we could copy the source inline extent's
 * data into the destination inode's inline extent if the latter is greater than
 * the former.
 */
static int clone_copy_inline_extent(struct inode *dst,
				    struct btrfs_trans_handle *trans,
				    struct btrfs_path *path,
				    struct btrfs_key *new_key,
				    const u64 drop_start,
				    const u64 datal,
				    const u64 skip,
				    const u64 size,
				    char *inline_data)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(dst->i_sb);
	struct btrfs_root *root = BTRFS_I(dst)->root;
	const u64 aligned_end = ALIGN(new_key->offset + datal,
				      fs_info->sectorsize);
	int ret;
	struct btrfs_key key;

	if (new_key->offset > 0)
		return -EOPNOTSUPP;

	key.objectid = btrfs_ino(BTRFS_I(dst));
	key.type = BTRFS_EXTENT_DATA_KEY;
	key.offset = 0;
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0) {
		return ret;
	} else if (ret > 0) {
		if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				return ret;
			else if (ret > 0)
				goto copy_inline_extent;
		}
		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
		if (key.objectid == btrfs_ino(BTRFS_I(dst)) &&
		    key.type == BTRFS_EXTENT_DATA_KEY) {
			ASSERT(key.offset > 0);
			return -EOPNOTSUPP;
		}
	} else if (i_size_read(dst) <= datal) {
		struct btrfs_file_extent_item *ei;
		u64 ext_len;

		/*
		 * If the file size is <= datal, make sure there are no other
		 * extents following (can happen due to a fallocate call with
		 * the flag FALLOC_FL_KEEP_SIZE).
		 */
		ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
				    struct btrfs_file_extent_item);
		/*
		 * If it's an inline extent, it cannot have other extents
		 * following it.
		 */
		if (btrfs_file_extent_type(path->nodes[0], ei) ==
		    BTRFS_FILE_EXTENT_INLINE)
			goto copy_inline_extent;

		ext_len = btrfs_file_extent_num_bytes(path->nodes[0], ei);
		if (ext_len > aligned_end)
			return -EOPNOTSUPP;

		ret = btrfs_next_item(root, path);
		if (ret < 0) {
			return ret;
		} else if (ret == 0) {
			btrfs_item_key_to_cpu(path->nodes[0], &key,
					      path->slots[0]);
			if (key.objectid == btrfs_ino(BTRFS_I(dst)) &&
			    key.type == BTRFS_EXTENT_DATA_KEY)
				return -EOPNOTSUPP;
		}
	}

copy_inline_extent:
	/*
	 * We have no extent items, or we have an extent at offset 0 which may
	 * or may not be inlined. All these cases are dealt with in the same way.
	 */
	if (i_size_read(dst) > datal) {
		/*
		 * If the destination inode has an inline extent...
		 * This would require copying the data from the source inline
		 * extent into the beginning of the destination's inline extent.
		 * But this is really complex, both extents can be compressed
		 * or just one of them, which would require decompressing and
		 * re-compressing data (which could increase the new compressed
		 * size, not allowing the compressed data to fit anymore in an
		 * inline extent).
		 * So just don't support this case for now (it should be rare,
		 * we are not really saving space when cloning inline extents).
		 */
		return -EOPNOTSUPP;
	}

	btrfs_release_path(path);
	ret = btrfs_drop_extents(trans, root, dst, drop_start, aligned_end, 1);
	if (ret)
		return ret;
	ret = btrfs_insert_empty_item(trans, root, path, new_key, size);
	if (ret)
		return ret;

	if (skip) {
		const u32 start = btrfs_file_extent_calc_inline_size(0);

		memmove(inline_data + start, inline_data + start + skip, datal);
	}

	write_extent_buffer(path->nodes[0], inline_data,
			    btrfs_item_ptr_offset(path->nodes[0],
						  path->slots[0]),
			    size);
	inode_add_bytes(dst, datal);

	return 0;
}
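/*
 * Illustrative cases for the rules above (a sketch, not exhaustive):
 * cloning a 500 byte inline extent into a destination whose i_size is
 * already 8 KiB fails with -EOPNOTSUPP, since honouring it would mean
 * turning inline data into a regular extent; cloning it into an empty
 * destination simply drops whatever item was at offset 0 (if any) and
 * writes the source's inline data there.
 */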

/**
 * btrfs_clone() - clone a range from one file (inode) to another
 *
 * @src: Inode to clone from
 * @inode: Inode to clone to
 * @off: Offset within source to start clone from
 * @olen: Original length, passed by user, of range to clone
 * @olen_aligned: Block-aligned value of olen
 * @destoff: Offset within @inode to start clone
 * @no_time_update: Whether to update mtime/ctime on the target inode
 */
static int btrfs_clone(struct inode *src, struct inode *inode,
		       const u64 off, const u64 olen, const u64 olen_aligned,
		       const u64 destoff, int no_time_update)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_path *path = NULL;
	struct extent_buffer *leaf;
	struct btrfs_trans_handle *trans;
	char *buf = NULL;
	struct btrfs_key key;
	u32 nritems;
	int slot;
	int ret;
	const u64 len = olen_aligned;
	u64 last_dest_end = destoff;

	ret = -ENOMEM;
	buf = kvmalloc(fs_info->nodesize, GFP_KERNEL);
	if (!buf)
		return ret;

	path = btrfs_alloc_path();
	if (!path) {
		kvfree(buf);
		return ret;
	}

	path->reada = READA_FORWARD;
	/* clone data */
	key.objectid = btrfs_ino(BTRFS_I(src));
	key.type = BTRFS_EXTENT_DATA_KEY;
	key.offset = off;

	while (1) {
		u64 next_key_min_offset = key.offset + 1;

		/*
		 * note the key will change type as we walk through the
		 * tree.
		 */
		path->leave_spinning = 1;
		ret = btrfs_search_slot(NULL, BTRFS_I(src)->root, &key, path,
				0, 0);
		if (ret < 0)
			goto out;
		/*
		 * First search, if no extent item that starts at offset off was
		 * found but the previous item is an extent item, it's possible
		 * it might overlap our target range, therefore process it.
		 */
		if (key.offset == off && ret > 0 && path->slots[0] > 0) {
			btrfs_item_key_to_cpu(path->nodes[0], &key,
					      path->slots[0] - 1);
			if (key.type == BTRFS_EXTENT_DATA_KEY)
				path->slots[0]--;
		}

		nritems = btrfs_header_nritems(path->nodes[0]);
process_slot:
		if (path->slots[0] >= nritems) {
			ret = btrfs_next_leaf(BTRFS_I(src)->root, path);
			if (ret < 0)
				goto out;
			if (ret > 0)
				break;
			nritems = btrfs_header_nritems(path->nodes[0]);
		}
		leaf = path->nodes[0];
		slot = path->slots[0];

		btrfs_item_key_to_cpu(leaf, &key, slot);
		if (key.type > BTRFS_EXTENT_DATA_KEY ||
		    key.objectid != btrfs_ino(BTRFS_I(src)))
			break;

		if (key.type == BTRFS_EXTENT_DATA_KEY) {
			struct btrfs_file_extent_item *extent;
			int type;
			u32 size;
			struct btrfs_key new_key;
			u64 disko = 0, diskl = 0;
			u64 datao = 0, datal = 0;
			u8 comp;
			u64 drop_start;

			extent = btrfs_item_ptr(leaf, slot,
						struct btrfs_file_extent_item);
			comp = btrfs_file_extent_compression(leaf, extent);
			type = btrfs_file_extent_type(leaf, extent);
			if (type == BTRFS_FILE_EXTENT_REG ||
			    type == BTRFS_FILE_EXTENT_PREALLOC) {
				disko = btrfs_file_extent_disk_bytenr(leaf,
								      extent);
				diskl = btrfs_file_extent_disk_num_bytes(leaf,
								 extent);
				datao = btrfs_file_extent_offset(leaf, extent);
				datal = btrfs_file_extent_num_bytes(leaf,
								    extent);
			} else if (type == BTRFS_FILE_EXTENT_INLINE) {
				/* take upper bound, may be compressed */
				datal = btrfs_file_extent_ram_bytes(leaf,
								    extent);
			}

			/*
			 * The first search might have left us at an extent
			 * item that ends before our target range's start; this can
			 * happen if we have holes and the NO_HOLES feature enabled.
			 */
			if (key.offset + datal <= off) {
				path->slots[0]++;
				goto process_slot;
			} else if (key.offset >= off + len) {
				break;
			}
			next_key_min_offset = key.offset + datal;
			size = btrfs_item_size_nr(leaf, slot);
			read_extent_buffer(leaf, buf,
					   btrfs_item_ptr_offset(leaf, slot),
					   size);

			btrfs_release_path(path);
			path->leave_spinning = 0;

			memcpy(&new_key, &key, sizeof(new_key));
			new_key.objectid = btrfs_ino(BTRFS_I(inode));
			if (off <= key.offset)
				new_key.offset = key.offset + destoff - off;
			else
				new_key.offset = destoff;

			/*
			 * Deal with a hole that doesn't have an extent item
			 * that represents it (NO_HOLES feature enabled).
			 * This hole is either in the middle of the cloning
			 * range or at the beginning (fully overlaps it or
			 * partially overlaps it).
			 */
			if (new_key.offset != last_dest_end)
				drop_start = last_dest_end;
			else
				drop_start = new_key.offset;

			/*
			 * 1 - adjusting old extent (we may have to split it)
			 * 1 - add new extent
			 * 1 - inode update
			 */
			trans = btrfs_start_transaction(root, 3);
			if (IS_ERR(trans)) {
				ret = PTR_ERR(trans);
				goto out;
			}

			if (type == BTRFS_FILE_EXTENT_REG ||
			    type == BTRFS_FILE_EXTENT_PREALLOC) {
				/*
				 *    a  | --- range to clone ---|  b
				 * | ------------- extent ------------- |
				 */

				/* subtract range b */
				if (key.offset + datal > off + len)
					datal = off + len - key.offset;

				/* subtract range a */
				if (off > key.offset) {
					datao += off - key.offset;
					datal -= off - key.offset;
				}

				ret = btrfs_drop_extents(trans, root, inode,
							 drop_start,
							 new_key.offset + datal,
							 1);
				if (ret) {
					if (ret != -EOPNOTSUPP)
						btrfs_abort_transaction(trans,
									ret);
					btrfs_end_transaction(trans);
					goto out;
				}

				ret = btrfs_insert_empty_item(trans, root, path,
							      &new_key, size);
				if (ret) {
					btrfs_abort_transaction(trans, ret);
					btrfs_end_transaction(trans);
					goto out;
				}

				leaf = path->nodes[0];
				slot = path->slots[0];
				write_extent_buffer(leaf, buf,
					    btrfs_item_ptr_offset(leaf, slot),
					    size);

				extent = btrfs_item_ptr(leaf, slot,
						struct btrfs_file_extent_item);

				/* disko == 0 means it's a hole */
				if (!disko)
					datao = 0;

				btrfs_set_file_extent_offset(leaf, extent,
							     datao);
				btrfs_set_file_extent_num_bytes(leaf, extent,
								datal);

				if (disko) {
					inode_add_bytes(inode, datal);
					ret = btrfs_inc_extent_ref(trans,
							root,
							disko, diskl, 0,
							root->root_key.objectid,
							btrfs_ino(BTRFS_I(inode)),
							new_key.offset - datao);
					if (ret) {
						btrfs_abort_transaction(trans,
									ret);
						btrfs_end_transaction(trans);
						goto out;

					}
				}
			} else if (type == BTRFS_FILE_EXTENT_INLINE) {
				u64 skip = 0;
				u64 trim = 0;

				if (off > key.offset) {
					skip = off - key.offset;
					new_key.offset += skip;
				}

				if (key.offset + datal > off + len)
					trim = key.offset + datal - (off + len);

				if (comp && (skip || trim)) {
					ret = -EINVAL;
					btrfs_end_transaction(trans);
					goto out;
				}
				size -= skip + trim;
				datal -= skip + trim;

				ret = clone_copy_inline_extent(inode,
							       trans, path,
							       &new_key,
							       drop_start,
							       datal,
							       skip, size, buf);
				if (ret) {
					if (ret != -EOPNOTSUPP)
						btrfs_abort_transaction(trans,
									ret);
					btrfs_end_transaction(trans);
					goto out;
				}
				leaf = path->nodes[0];
				slot = path->slots[0];
			}

			/* If we have an implicit hole (NO_HOLES feature). */
			if (drop_start < new_key.offset)
				clone_update_extent_map(BTRFS_I(inode), trans,
						NULL, drop_start,
						new_key.offset - drop_start);

			clone_update_extent_map(BTRFS_I(inode), trans,
					path, 0, 0);

			btrfs_mark_buffer_dirty(leaf);
			btrfs_release_path(path);

			last_dest_end = ALIGN(new_key.offset + datal,
					      fs_info->sectorsize);
			ret = clone_finish_inode_update(trans, inode,
							last_dest_end,
							destoff, olen,
							no_time_update);
			if (ret)
				goto out;
			if (new_key.offset + datal >= destoff + len)
				break;
		}
		btrfs_release_path(path);
		key.offset = next_key_min_offset;

		if (fatal_signal_pending(current)) {
			ret = -EINTR;
			goto out;
		}
	}
	ret = 0;

	if (last_dest_end < destoff + len) {
		/*
		 * We have an implicit hole (NO_HOLES feature is enabled) that
		 * fully or partially overlaps our cloning range at its end.
		 */
		btrfs_release_path(path);

		/*
		 * 1 - remove extent(s)
		 * 1 - inode update
		 */
		trans = btrfs_start_transaction(root, 2);
		if (IS_ERR(trans)) {
			ret = PTR_ERR(trans);
			goto out;
		}
		ret = btrfs_drop_extents(trans, root, inode,
					 last_dest_end, destoff + len, 1);
		if (ret) {
			if (ret != -EOPNOTSUPP)
				btrfs_abort_transaction(trans, ret);
			btrfs_end_transaction(trans);
			goto out;
		}
		clone_update_extent_map(BTRFS_I(inode), trans, NULL,
				last_dest_end,
				destoff + len - last_dest_end);
		ret = clone_finish_inode_update(trans, inode, destoff + len,
						destoff, olen, no_time_update);
	}

out:
	btrfs_free_path(path);
	kvfree(buf);
	return ret;
}
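/*
 * Example of the trimming done in the REG/PREALLOC branch above
 * (illustrative numbers): cloning off = 64K, len = 64K out of a source
 * extent item at key.offset = 0 with datal = 1M first clips "range b"
 * (datal becomes 128K) and then "range a" (datao grows by 64K, datal
 * drops to 64K), so the inserted item references only the overlapping
 * 64K of the shared extent on disk.
 */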

static noinline int btrfs_clone_files(struct file *file, struct file *file_src,
					u64 off, u64 olen, u64 destoff)
{
	struct inode *inode = file_inode(file);
	struct inode *src = file_inode(file_src);
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_root *root = BTRFS_I(inode)->root;
	int ret;
	u64 len = olen;
	u64 bs = fs_info->sb->s_blocksize;
	int same_inode = src == inode;

	/*
	 * TODO:
	 * - split compressed inline extents.  annoying: we need to
	 *   decompress into destination's address_space (the file offset
	 *   may change, so source mapping won't do), then recompress (or
	 *   otherwise reinsert) a subrange.
	 *
	 * - split destination inode's inline extents.  The inline extents can
	 *   be either compressed or non-compressed.
	 */

	if (btrfs_root_readonly(root))
		return -EROFS;

	if (file_src->f_path.mnt != file->f_path.mnt ||
	    src->i_sb != inode->i_sb)
		return -EXDEV;

	if (S_ISDIR(src->i_mode) || S_ISDIR(inode->i_mode))
		return -EISDIR;

	if (!same_inode) {
		btrfs_double_inode_lock(src, inode);
	} else {
		inode_lock(src);
	}

	/* don't make the dst file partly checksummed */
	if ((BTRFS_I(src)->flags & BTRFS_INODE_NODATASUM) !=
	    (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) {
		ret = -EINVAL;
		goto out_unlock;
	}

	/* determine range to clone */
	ret = -EINVAL;
	if (off + len > src->i_size || off + len < off)
		goto out_unlock;
	if (len == 0)
		olen = len = src->i_size - off;
	/* if we extend to eof, continue to block boundary */
	if (off + len == src->i_size)
		len = ALIGN(src->i_size, bs) - off;

	if (len == 0) {
		ret = 0;
		goto out_unlock;
	}

	/* verify the end result is block aligned */
	if (!IS_ALIGNED(off, bs) || !IS_ALIGNED(off + len, bs) ||
	    !IS_ALIGNED(destoff, bs))
		goto out_unlock;

	/* verify if ranges are overlapped within the same file */
	if (same_inode) {
		if (destoff + len > off && destoff < off + len)
			goto out_unlock;
	}

	if (destoff > inode->i_size) {
		ret = btrfs_cont_expand(inode, inode->i_size, destoff);
		if (ret)
			goto out_unlock;
	}

	/*
	 * Lock the target range too. Right after we replace the file extent
	 * items in the fs tree (which now point to the cloned data), we might
	 * have a worker replace them with extent items relative to a write
	 * operation that was issued before this clone operation (i.e. confront
	 * with inode.c:btrfs_finish_ordered_io).
	 */
	if (same_inode) {
		u64 lock_start = min_t(u64, off, destoff);
		u64 lock_len = max_t(u64, off, destoff) + len - lock_start;

		ret = lock_extent_range(src, lock_start, lock_len, true);
	} else {
		ret = btrfs_double_extent_lock(src, off, inode, destoff, len,
					       true);
	}
	ASSERT(ret == 0);
	if (WARN_ON(ret)) {
		/* ranges in the io trees already unlocked */
		goto out_unlock;
	}

	ret = btrfs_clone(src, inode, off, olen, len, destoff, 0);

	if (same_inode) {
		u64 lock_start = min_t(u64, off, destoff);
		u64 lock_end = max_t(u64, off, destoff) + len - 1;

		unlock_extent(&BTRFS_I(src)->io_tree, lock_start, lock_end);
	} else {
		btrfs_double_extent_unlock(src, off, inode, destoff, len);
	}
	/*
	 * Truncate page cache pages so that future reads will see the cloned
	 * data immediately and not the previous data.
	 */
	truncate_inode_pages_range(&inode->i_data,
				round_down(destoff, PAGE_SIZE),
				round_up(destoff + len, PAGE_SIZE) - 1);
out_unlock:
	if (!same_inode)
		btrfs_double_inode_unlock(src, inode);
	else
		inode_unlock(src);
	return ret;
}

int btrfs_clone_file_range(struct file *src_file, loff_t off,
		struct file *dst_file, loff_t destoff, u64 len)
{
	return btrfs_clone_files(dst_file, src_file, off, len, destoff);
}
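/*
 * Usage sketch (userspace side, illustrative only): this is the
 * clone_file_range hook, reached via FICLONE/FICLONERANGE as well as the
 * older BTRFS_IOC_CLONE_RANGE ioctl.  A src_length of 0 means "clone up to
 * EOF", as handled in btrfs_clone_files() above.  Roughly:
 *
 *	struct btrfs_ioctl_clone_range_args args = {
 *		.src_fd = src_fd,
 *		.src_offset = 0,
 *		.src_length = 0,
 *		.dest_offset = 0,
 *	};
 *	ioctl(dst_fd, BTRFS_IOC_CLONE_RANGE, &args);
 */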

static long btrfs_ioctl_default_subvol(struct file *file, void __user *argp)
{
	struct inode *inode = file_inode(file);
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_root *new_root;
	struct btrfs_dir_item *di;
	struct btrfs_trans_handle *trans;
	struct btrfs_path *path;
	struct btrfs_key location;
	struct btrfs_disk_key disk_key;
	u64 objectid = 0;
	u64 dir_id;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	ret = mnt_want_write_file(file);
	if (ret)
		return ret;

	if (copy_from_user(&objectid, argp, sizeof(objectid))) {
		ret = -EFAULT;
		goto out;
	}

	if (!objectid)
		objectid = BTRFS_FS_TREE_OBJECTID;

	location.objectid = objectid;
	location.type = BTRFS_ROOT_ITEM_KEY;
	location.offset = (u64)-1;

	new_root = btrfs_read_fs_root_no_name(fs_info, &location);
	if (IS_ERR(new_root)) {
		ret = PTR_ERR(new_root);
		goto out;
	}
	if (!is_fstree(new_root->root_key.objectid)) {
		ret = -ENOENT;
		goto out;
	}

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}
	path->leave_spinning = 1;

	trans = btrfs_start_transaction(root, 1);
	if (IS_ERR(trans)) {
		btrfs_free_path(path);
		ret = PTR_ERR(trans);
		goto out;
	}

	dir_id = btrfs_super_root_dir(fs_info->super_copy);
	di = btrfs_lookup_dir_item(trans, fs_info->tree_root, path,
				   dir_id, "default", 7, 1);
	if (IS_ERR_OR_NULL(di)) {
		btrfs_free_path(path);
		btrfs_end_transaction(trans);
		btrfs_err(fs_info,
			  "Umm, you don't have the default diritem, this isn't going to work");
		ret = -ENOENT;
		goto out;
	}

	btrfs_cpu_key_to_disk(&disk_key, &new_root->root_key);
	btrfs_set_dir_item_key(path->nodes[0], di, &disk_key);
	btrfs_mark_buffer_dirty(path->nodes[0]);
	btrfs_free_path(path);

	btrfs_set_fs_incompat(fs_info, DEFAULT_SUBVOL);
	btrfs_end_transaction(trans);
out:
	mnt_drop_write_file(file);
	return ret;
}
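/*
 * Usage sketch (illustrative; this is roughly what "btrfs subvolume
 * set-default" ends up doing): pass the root objectid of the subvolume to
 * make default, or 0 to fall back to the top-level tree:
 *
 *	__u64 objectid = 256;	(hypothetical subvolume id)
 *	ioctl(fd, BTRFS_IOC_DEFAULT_SUBVOL, &objectid);
 */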

static void get_block_group_info(struct list_head *groups_list,
				 struct btrfs_ioctl_space_info *space)
{
	struct btrfs_block_group_cache *block_group;

	space->total_bytes = 0;
	space->used_bytes = 0;
	space->flags = 0;
	list_for_each_entry(block_group, groups_list, list) {
		space->flags = block_group->flags;
		space->total_bytes += block_group->key.offset;
		space->used_bytes +=
			btrfs_block_group_used(&block_group->item);
	}
}

static long btrfs_ioctl_space_info(struct btrfs_fs_info *fs_info,
				   void __user *arg)
{
	struct btrfs_ioctl_space_args space_args;
	struct btrfs_ioctl_space_info space;
	struct btrfs_ioctl_space_info *dest;
	struct btrfs_ioctl_space_info *dest_orig;
	struct btrfs_ioctl_space_info __user *user_dest;
	struct btrfs_space_info *info;
	static const u64 types[] = {
		BTRFS_BLOCK_GROUP_DATA,
		BTRFS_BLOCK_GROUP_SYSTEM,
		BTRFS_BLOCK_GROUP_METADATA,
		BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_METADATA
	};
	int num_types = 4;
	int alloc_size;
	int ret = 0;
	u64 slot_count = 0;
	int i, c;

	if (copy_from_user(&space_args,
			   (struct btrfs_ioctl_space_args __user *)arg,
			   sizeof(space_args)))
		return -EFAULT;

	for (i = 0; i < num_types; i++) {
		struct btrfs_space_info *tmp;

		info = NULL;
		rcu_read_lock();
		list_for_each_entry_rcu(tmp, &fs_info->space_info,
					list) {
			if (tmp->flags == types[i]) {
				info = tmp;
				break;
			}
		}
		rcu_read_unlock();

		if (!info)
			continue;

		down_read(&info->groups_sem);
		for (c = 0; c < BTRFS_NR_RAID_TYPES; c++) {
			if (!list_empty(&info->block_groups[c]))
				slot_count++;
		}
		up_read(&info->groups_sem);
	}

	/*
	 * Global block reserve, exported as a space_info
	 */
	slot_count++;

	/* space_slots == 0 means they are asking for a count */
	if (space_args.space_slots == 0) {
		space_args.total_spaces = slot_count;
		goto out;
	}

	slot_count = min_t(u64, space_args.space_slots, slot_count);

	alloc_size = sizeof(*dest) * slot_count;

	/* we generally have at most 6 or so space infos, one for each raid
	 * level.  So, a whole page should be more than enough for everyone
	 */
	if (alloc_size > PAGE_SIZE)
		return -ENOMEM;

	space_args.total_spaces = 0;
	dest = kmalloc(alloc_size, GFP_KERNEL);
	if (!dest)
		return -ENOMEM;
	dest_orig = dest;

	/* now we have a buffer to copy into */
	for (i = 0; i < num_types; i++) {
		struct btrfs_space_info *tmp;

		if (!slot_count)
			break;

		info = NULL;
		rcu_read_lock();
		list_for_each_entry_rcu(tmp, &fs_info->space_info,
					list) {
			if (tmp->flags == types[i]) {
				info = tmp;
				break;
			}
		}
		rcu_read_unlock();

		if (!info)
			continue;
		down_read(&info->groups_sem);
		for (c = 0; c < BTRFS_NR_RAID_TYPES; c++) {
			if (!list_empty(&info->block_groups[c])) {
				get_block_group_info(&info->block_groups[c],
						     &space);
				memcpy(dest, &space, sizeof(space));
				dest++;
				space_args.total_spaces++;
				slot_count--;
			}
			if (!slot_count)
				break;
		}
		up_read(&info->groups_sem);
	}

	/*
	 * Add global block reserve
	 */
	if (slot_count) {
		struct btrfs_block_rsv *block_rsv = &fs_info->global_block_rsv;

		spin_lock(&block_rsv->lock);
		space.total_bytes = block_rsv->size;
		space.used_bytes = block_rsv->size - block_rsv->reserved;
		spin_unlock(&block_rsv->lock);
		space.flags = BTRFS_SPACE_INFO_GLOBAL_RSV;
		memcpy(dest, &space, sizeof(space));
		space_args.total_spaces++;
	}

	user_dest = (struct btrfs_ioctl_space_info __user *)
		(arg + sizeof(struct btrfs_ioctl_space_args));

	if (copy_to_user(user_dest, dest_orig, alloc_size))
		ret = -EFAULT;

	kfree(dest_orig);
out:
	if (ret == 0 && copy_to_user(arg, &space_args, sizeof(space_args)))
		ret = -EFAULT;

	return ret;
}
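/*
 * Usage sketch for the two-step protocol above (illustrative only):
 * userspace first asks for a count with space_slots == 0, then sizes the
 * trailing buffer and repeats the call:
 *
 *	struct btrfs_ioctl_space_args probe = { .space_slots = 0 };
 *	ioctl(fd, BTRFS_IOC_SPACE_INFO, &probe);
 *	(allocate probe.total_spaces slots after the header, set space_slots
 *	 accordingly and issue BTRFS_IOC_SPACE_INFO again)
 */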

static noinline long btrfs_ioctl_start_sync(struct btrfs_root *root,
					    void __user *argp)
{
	struct btrfs_trans_handle *trans;
	u64 transid;
	int ret;

	trans = btrfs_attach_transaction_barrier(root);
	if (IS_ERR(trans)) {
		if (PTR_ERR(trans) != -ENOENT)
			return PTR_ERR(trans);

		/* No running transaction, don't bother */
		transid = root->fs_info->last_trans_committed;
		goto out;
	}
	transid = trans->transid;
	ret = btrfs_commit_transaction_async(trans, 0);
	if (ret) {
		btrfs_end_transaction(trans);
		return ret;
	}
out:
	if (argp)
		if (copy_to_user(argp, &transid, sizeof(transid)))
			return -EFAULT;
	return 0;
}

static noinline long btrfs_ioctl_wait_sync(struct btrfs_fs_info *fs_info,
					   void __user *argp)
{
	u64 transid;

	if (argp) {
		if (copy_from_user(&transid, argp, sizeof(transid)))
			return -EFAULT;
	} else {
		transid = 0;  /* current trans */
	}
	return btrfs_wait_for_commit(fs_info, transid);
}
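/*
 * Usage sketch (illustrative): START_SYNC kicks off an asynchronous commit
 * and reports its transaction id, which can then be handed to WAIT_SYNC to
 * block until that particular commit is fully on disk:
 *
 *	__u64 transid;
 *	ioctl(fd, BTRFS_IOC_START_SYNC, &transid);
 *	ioctl(fd, BTRFS_IOC_WAIT_SYNC, &transid);
 */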

static long btrfs_ioctl_scrub(struct file *file, void __user *arg)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(file_inode(file)->i_sb);
	struct btrfs_ioctl_scrub_args *sa;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	sa = memdup_user(arg, sizeof(*sa));
	if (IS_ERR(sa))
		return PTR_ERR(sa);

	if (!(sa->flags & BTRFS_SCRUB_READONLY)) {
		ret = mnt_want_write_file(file);
		if (ret)
			goto out;
	}

	ret = btrfs_scrub_dev(fs_info, sa->devid, sa->start, sa->end,
			      &sa->progress, sa->flags & BTRFS_SCRUB_READONLY,
			      0);

	if (copy_to_user(arg, sa, sizeof(*sa)))
		ret = -EFAULT;

	if (!(sa->flags & BTRFS_SCRUB_READONLY))
		mnt_drop_write_file(file);
out:
	kfree(sa);
	return ret;
}

static long btrfs_ioctl_scrub_cancel(struct btrfs_fs_info *fs_info)
{
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	return btrfs_scrub_cancel(fs_info);
}

static long btrfs_ioctl_scrub_progress(struct btrfs_fs_info *fs_info,
				       void __user *arg)
{
	struct btrfs_ioctl_scrub_args *sa;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	sa = memdup_user(arg, sizeof(*sa));
	if (IS_ERR(sa))
		return PTR_ERR(sa);

	ret = btrfs_scrub_progress(fs_info, sa->devid, &sa->progress);

	if (copy_to_user(arg, sa, sizeof(*sa)))
		ret = -EFAULT;

	kfree(sa);
	return ret;
}

static long btrfs_ioctl_get_dev_stats(struct btrfs_fs_info *fs_info,
				      void __user *arg)
{
	struct btrfs_ioctl_get_dev_stats *sa;
	int ret;

	sa = memdup_user(arg, sizeof(*sa));
	if (IS_ERR(sa))
		return PTR_ERR(sa);

	if ((sa->flags & BTRFS_DEV_STATS_RESET) && !capable(CAP_SYS_ADMIN)) {
		kfree(sa);
		return -EPERM;
	}

	ret = btrfs_get_dev_stats(fs_info, sa);

	if (copy_to_user(arg, sa, sizeof(*sa)))
		ret = -EFAULT;

	kfree(sa);
	return ret;
}

static long btrfs_ioctl_dev_replace(struct btrfs_fs_info *fs_info,
				    void __user *arg)
{
	struct btrfs_ioctl_dev_replace_args *p;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	p = memdup_user(arg, sizeof(*p));
	if (IS_ERR(p))
		return PTR_ERR(p);

	switch (p->cmd) {
	case BTRFS_IOCTL_DEV_REPLACE_CMD_START:
		if (sb_rdonly(fs_info->sb)) {
			ret = -EROFS;
			goto out;
		}
		if (test_and_set_bit(BTRFS_FS_EXCL_OP, &fs_info->flags)) {
			ret = BTRFS_ERROR_DEV_EXCL_RUN_IN_PROGRESS;
		} else {
			ret = btrfs_dev_replace_by_ioctl(fs_info, p);
			clear_bit(BTRFS_FS_EXCL_OP, &fs_info->flags);
		}
		break;
	case BTRFS_IOCTL_DEV_REPLACE_CMD_STATUS:
		btrfs_dev_replace_status(fs_info, p);
		ret = 0;
		break;
	case BTRFS_IOCTL_DEV_REPLACE_CMD_CANCEL:
		p->result = btrfs_dev_replace_cancel(fs_info);
		ret = 0;
		break;
	default:
		ret = -EINVAL;
		break;
	}

	if (copy_to_user(arg, p, sizeof(*p)))
		ret = -EFAULT;
out:
	kfree(p);
	return ret;
}

static long btrfs_ioctl_ino_to_path(struct btrfs_root *root, void __user *arg)
{
	int ret = 0;
	int i;
	u64 rel_ptr;
	int size;
	struct btrfs_ioctl_ino_path_args *ipa = NULL;
	struct inode_fs_paths *ipath = NULL;
	struct btrfs_path *path;

	if (!capable(CAP_DAC_READ_SEARCH))
		return -EPERM;

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}

	ipa = memdup_user(arg, sizeof(*ipa));
	if (IS_ERR(ipa)) {
		ret = PTR_ERR(ipa);
		ipa = NULL;
		goto out;
	}

	size = min_t(u32, ipa->size, 4096);
	ipath = init_ipath(size, root, path);
	if (IS_ERR(ipath)) {
		ret = PTR_ERR(ipath);
		ipath = NULL;
		goto out;
	}

	ret = paths_from_inode(ipa->inum, ipath);
	if (ret < 0)
		goto out;

	for (i = 0; i < ipath->fspath->elem_cnt; ++i) {
		rel_ptr = ipath->fspath->val[i] -
			  (u64)(unsigned long)ipath->fspath->val;
		ipath->fspath->val[i] = rel_ptr;
	}

	ret = copy_to_user((void __user *)(unsigned long)ipa->fspath,
			   ipath->fspath, size);
	if (ret) {
		ret = -EFAULT;
		goto out;
	}

out:
	btrfs_free_path(path);
	free_ipath(ipath);
	kfree(ipa);

	return ret;
}

static int build_ino_list(u64 inum, u64 offset, u64 root, void *ctx)
{
	struct btrfs_data_container *inodes = ctx;
	const size_t c = 3 * sizeof(u64);

	if (inodes->bytes_left >= c) {
		inodes->bytes_left -= c;
		inodes->val[inodes->elem_cnt] = inum;
		inodes->val[inodes->elem_cnt + 1] = offset;
		inodes->val[inodes->elem_cnt + 2] = root;
		inodes->elem_cnt += 3;
	} else {
		inodes->bytes_missing += c - inodes->bytes_left;
		inodes->bytes_left = 0;
		inodes->elem_missed += 3;
	}

	return 0;
}
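/*
 * Small worked example for the accounting above (illustrative): each hit
 * costs c = 24 bytes and is stored as the triplet (inum, offset, root), so
 * a container with bytes_left = 48 holds exactly two results; a third hit
 * only bumps bytes_missing by 24 and elem_missed by 3.
 */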

static long btrfs_ioctl_logical_to_ino(struct btrfs_fs_info *fs_info,
					void __user *arg, int version)
{
	int ret = 0;
	int size;
	struct btrfs_ioctl_logical_ino_args *loi;
	struct btrfs_data_container *inodes = NULL;
	struct btrfs_path *path = NULL;
	bool ignore_offset;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	loi = memdup_user(arg, sizeof(*loi));
	if (IS_ERR(loi))
		return PTR_ERR(loi);

	if (version == 1) {
		ignore_offset = false;
		size = min_t(u32, loi->size, SZ_64K);
	} else {
		/* All reserved bits must be 0 for now */
		if (memchr_inv(loi->reserved, 0, sizeof(loi->reserved))) {
			ret = -EINVAL;
			goto out_loi;
		}
		/* Only accept flags we have defined so far */
		if (loi->flags & ~(BTRFS_LOGICAL_INO_ARGS_IGNORE_OFFSET)) {
			ret = -EINVAL;
			goto out_loi;
		}
		ignore_offset = loi->flags & BTRFS_LOGICAL_INO_ARGS_IGNORE_OFFSET;
		size = min_t(u32, loi->size, SZ_16M);
	}

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}

	inodes = init_data_container(size);
	if (IS_ERR(inodes)) {
		ret = PTR_ERR(inodes);
		inodes = NULL;
		goto out;
	}

	ret = iterate_inodes_from_logical(loi->logical, fs_info, path,
					  build_ino_list, inodes, ignore_offset);
	if (ret == -EINVAL)
		ret = -ENOENT;
	if (ret < 0)
		goto out;

	ret = copy_to_user((void __user *)(unsigned long)loi->inodes, inodes,
			   size);
	if (ret)
		ret = -EFAULT;

out:
	btrfs_free_path(path);
	kvfree(inodes);
out_loi:
	kfree(loi);

	return ret;
}

void btrfs_update_ioctl_balance_args(struct btrfs_fs_info *fs_info,
			       struct btrfs_ioctl_balance_args *bargs)
{
	struct btrfs_balance_control *bctl = fs_info->balance_ctl;

	bargs->flags = bctl->flags;

	if (test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags))
		bargs->state |= BTRFS_BALANCE_STATE_RUNNING;
	if (atomic_read(&fs_info->balance_pause_req))
		bargs->state |= BTRFS_BALANCE_STATE_PAUSE_REQ;
	if (atomic_read(&fs_info->balance_cancel_req))
		bargs->state |= BTRFS_BALANCE_STATE_CANCEL_REQ;

	memcpy(&bargs->data, &bctl->data, sizeof(bargs->data));
	memcpy(&bargs->meta, &bctl->meta, sizeof(bargs->meta));
	memcpy(&bargs->sys, &bctl->sys, sizeof(bargs->sys));

	spin_lock(&fs_info->balance_lock);
	memcpy(&bargs->stat, &bctl->stat, sizeof(bargs->stat));
	spin_unlock(&fs_info->balance_lock);
}

static long btrfs_ioctl_balance(struct file *file, void __user *arg)
{
	struct btrfs_root *root = BTRFS_I(file_inode(file))->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_ioctl_balance_args *bargs;
	struct btrfs_balance_control *bctl;
	bool need_unlock; /* for mut. excl. ops lock */
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	ret = mnt_want_write_file(file);
	if (ret)
		return ret;

again:
	if (!test_and_set_bit(BTRFS_FS_EXCL_OP, &fs_info->flags)) {
		mutex_lock(&fs_info->balance_mutex);
		need_unlock = true;
		goto locked;
	}

	/*
	 * mut. excl. ops lock is locked.  Three possibilities:
	 *   (1) some other op is running
	 *   (2) balance is running
	 *   (3) balance is paused -- special case (think resume)
	 */
	mutex_lock(&fs_info->balance_mutex);
	if (fs_info->balance_ctl) {
		/* this is either (2) or (3) */
		if (!test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)) {
			mutex_unlock(&fs_info->balance_mutex);
			/*
			 * Lock released to allow other waiters to continue,
			 * we'll reexamine the status again.
			 */
			mutex_lock(&fs_info->balance_mutex);

			if (fs_info->balance_ctl &&
			    !test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)) {
				/* this is (3) */
				need_unlock = false;
				goto locked;
			}

			mutex_unlock(&fs_info->balance_mutex);
			goto again;
		} else {
			/* this is (2) */
			mutex_unlock(&fs_info->balance_mutex);
			ret = -EINPROGRESS;
			goto out;
		}
	} else {
		/* this is (1) */
		mutex_unlock(&fs_info->balance_mutex);
		ret = BTRFS_ERROR_DEV_EXCL_RUN_IN_PROGRESS;
		goto out;
	}

locked:
	BUG_ON(!test_bit(BTRFS_FS_EXCL_OP, &fs_info->flags));

	if (arg) {
		bargs = memdup_user(arg, sizeof(*bargs));
		if (IS_ERR(bargs)) {
			ret = PTR_ERR(bargs);
			goto out_unlock;
		}

		if (bargs->flags & BTRFS_BALANCE_RESUME) {
			if (!fs_info->balance_ctl) {
				ret = -ENOTCONN;
				goto out_bargs;
			}

			bctl = fs_info->balance_ctl;
			spin_lock(&fs_info->balance_lock);
			bctl->flags |= BTRFS_BALANCE_RESUME;
			spin_unlock(&fs_info->balance_lock);

			goto do_balance;
		}
	} else {
		bargs = NULL;
	}

	if (fs_info->balance_ctl) {
		ret = -EINPROGRESS;
		goto out_bargs;
	}

	bctl = kzalloc(sizeof(*bctl), GFP_KERNEL);
	if (!bctl) {
		ret = -ENOMEM;
		goto out_bargs;
	}

	if (arg) {
		memcpy(&bctl->data, &bargs->data, sizeof(bctl->data));
		memcpy(&bctl->meta, &bargs->meta, sizeof(bctl->meta));
		memcpy(&bctl->sys, &bargs->sys, sizeof(bctl->sys));

		bctl->flags = bargs->flags;
	} else {
		/* balance everything - no filters */
		bctl->flags |= BTRFS_BALANCE_TYPE_MASK;
	}

	if (bctl->flags & ~(BTRFS_BALANCE_ARGS_MASK | BTRFS_BALANCE_TYPE_MASK)) {
		ret = -EINVAL;
		goto out_bctl;
	}

do_balance:
	/*
	 * Ownership of bctl and filesystem flag BTRFS_FS_EXCL_OP goes to
	 * btrfs_balance.  bctl is freed in reset_balance_state, or, if
	 * restriper was paused all the way until unmount, in free_fs_info.
	 * The flag should be cleared after reset_balance_state.
	 */
	need_unlock = false;

	ret = btrfs_balance(fs_info, bctl, bargs);
	bctl = NULL;

	if (arg) {
		if (copy_to_user(arg, bargs, sizeof(*bargs)))
			ret = -EFAULT;
	}

out_bctl:
	kfree(bctl);
out_bargs:
	kfree(bargs);
out_unlock:
	mutex_unlock(&fs_info->balance_mutex);
	if (need_unlock)
		clear_bit(BTRFS_FS_EXCL_OP, &fs_info->flags);
out:
	mnt_drop_write_file(file);
	return ret;
}
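/*
 * Usage sketch for the resume path above (illustrative only): a paused
 * balance is restarted by passing only the resume flag, which requires an
 * existing balance_ctl and jumps straight to do_balance:
 *
 *	struct btrfs_ioctl_balance_args args = { .flags = BTRFS_BALANCE_RESUME };
 *	ioctl(fd, BTRFS_IOC_BALANCE_V2, &args);
 */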

static long btrfs_ioctl_balance_ctl(struct btrfs_fs_info *fs_info, int cmd)
{
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	switch (cmd) {
	case BTRFS_BALANCE_CTL_PAUSE:
		return btrfs_pause_balance(fs_info);
	case BTRFS_BALANCE_CTL_CANCEL:
		return btrfs_cancel_balance(fs_info);
	}

	return -EINVAL;
}

static long btrfs_ioctl_balance_progress(struct btrfs_fs_info *fs_info,
					 void __user *arg)
{
	struct btrfs_ioctl_balance_args *bargs;
	int ret = 0;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	mutex_lock(&fs_info->balance_mutex);
	if (!fs_info->balance_ctl) {
		ret = -ENOTCONN;
		goto out;
	}

	bargs = kzalloc(sizeof(*bargs), GFP_KERNEL);
	if (!bargs) {
		ret = -ENOMEM;
		goto out;
	}

	btrfs_update_ioctl_balance_args(fs_info, bargs);

	if (copy_to_user(arg, bargs, sizeof(*bargs)))
		ret = -EFAULT;

	kfree(bargs);
out:
	mutex_unlock(&fs_info->balance_mutex);
	return ret;
}

static long btrfs_ioctl_quota_ctl(struct file *file, void __user *arg)
{
	struct inode *inode = file_inode(file);
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_ioctl_quota_ctl_args *sa;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	ret = mnt_want_write_file(file);
	if (ret)
		return ret;

	sa = memdup_user(arg, sizeof(*sa));
	if (IS_ERR(sa)) {
		ret = PTR_ERR(sa);
		goto drop_write;
	}

	down_write(&fs_info->subvol_sem);

	switch (sa->cmd) {
	case BTRFS_QUOTA_CTL_ENABLE:
		ret = btrfs_quota_enable(fs_info);
		break;
	case BTRFS_QUOTA_CTL_DISABLE:
		ret = btrfs_quota_disable(fs_info);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	kfree(sa);
	up_write(&fs_info->subvol_sem);
drop_write:
	mnt_drop_write_file(file);
	return ret;
}

static long btrfs_ioctl_qgroup_assign(struct file *file, void __user *arg)
{
	struct inode *inode = file_inode(file);
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_ioctl_qgroup_assign_args *sa;
	struct btrfs_trans_handle *trans;
	int ret;
	int err;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	ret = mnt_want_write_file(file);
	if (ret)
		return ret;

	sa = memdup_user(arg, sizeof(*sa));
	if (IS_ERR(sa)) {
		ret = PTR_ERR(sa);
		goto drop_write;
	}

	trans = btrfs_join_transaction(root);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto out;
	}

	if (sa->assign) {
		ret = btrfs_add_qgroup_relation(trans, sa->src, sa->dst);
	} else {
		ret = btrfs_del_qgroup_relation(trans, sa->src, sa->dst);
	}

	/* update qgroup status and info */
	err = btrfs_run_qgroups(trans);
	if (err < 0)
		btrfs_handle_fs_error(fs_info, err,
				      "failed to update qgroup status and info");
	err = btrfs_end_transaction(trans);
	if (err && !ret)
		ret = err;

out:
	kfree(sa);
drop_write:
	mnt_drop_write_file(file);
	return ret;
}

static long btrfs_ioctl_qgroup_create(struct file *file, void __user *arg)
{
	struct inode *inode = file_inode(file);
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_ioctl_qgroup_create_args *sa;
	struct btrfs_trans_handle *trans;
	int ret;
	int err;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	ret = mnt_want_write_file(file);
	if (ret)
		return ret;

	sa = memdup_user(arg, sizeof(*sa));
	if (IS_ERR(sa)) {
		ret = PTR_ERR(sa);
		goto drop_write;
	}

	if (!sa->qgroupid) {
		ret = -EINVAL;
		goto out;
	}

	trans = btrfs_join_transaction(root);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto out;
	}

	if (sa->create) {
		ret = btrfs_create_qgroup(trans, sa->qgroupid);
	} else {
		ret = btrfs_remove_qgroup(trans, sa->qgroupid);
	}

	err = btrfs_end_transaction(trans);
	if (err && !ret)
		ret = err;

out:
	kfree(sa);
drop_write:
	mnt_drop_write_file(file);
	return ret;
}

static long btrfs_ioctl_qgroup_limit(struct file *file, void __user *arg)
{
	struct inode *inode = file_inode(file);
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_ioctl_qgroup_limit_args *sa;
	struct btrfs_trans_handle *trans;
	int ret;
	int err;
	u64 qgroupid;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	ret = mnt_want_write_file(file);
	if (ret)
		return ret;

	sa = memdup_user(arg, sizeof(*sa));
	if (IS_ERR(sa)) {
		ret = PTR_ERR(sa);
		goto drop_write;
	}

	trans = btrfs_join_transaction(root);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto out;
	}

	qgroupid = sa->qgroupid;
	if (!qgroupid) {
		/* take the current subvol as qgroup */
		qgroupid = root->root_key.objectid;
	}

	ret = btrfs_limit_qgroup(trans, qgroupid, &sa->lim);

	err = btrfs_end_transaction(trans);
	if (err && !ret)
		ret = err;

out:
	kfree(sa);
drop_write:
	mnt_drop_write_file(file);
	return ret;
}

static long btrfs_ioctl_quota_rescan(struct file *file, void __user *arg)
{
	struct inode *inode = file_inode(file);
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_ioctl_quota_rescan_args *qsa;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	ret = mnt_want_write_file(file);
	if (ret)
		return ret;

	qsa = memdup_user(arg, sizeof(*qsa));
	if (IS_ERR(qsa)) {
		ret = PTR_ERR(qsa);
		goto drop_write;
	}

	if (qsa->flags) {
		ret = -EINVAL;
		goto out;
	}

	ret = btrfs_qgroup_rescan(fs_info);

out:
	kfree(qsa);
drop_write:
	mnt_drop_write_file(file);
	return ret;
}

static long btrfs_ioctl_quota_rescan_status(struct file *file, void __user *arg)
{
	struct inode *inode = file_inode(file);
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_ioctl_quota_rescan_args *qsa;
	int ret = 0;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	qsa = kzalloc(sizeof(*qsa), GFP_KERNEL);
	if (!qsa)
		return -ENOMEM;

	if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN) {
		qsa->flags = 1;
		qsa->progress = fs_info->qgroup_rescan_progress.objectid;
	}

	if (copy_to_user(arg, qsa, sizeof(*qsa)))
		ret = -EFAULT;

	kfree(qsa);
	return ret;
}

static long btrfs_ioctl_quota_rescan_wait(struct file *file, void __user *arg)
{
	struct inode *inode = file_inode(file);
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	return btrfs_qgroup_wait_for_completion(fs_info, true);
}

static long _btrfs_ioctl_set_received_subvol(struct file *file,
					    struct btrfs_ioctl_received_subvol_args *sa)
{
	struct inode *inode = file_inode(file);
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_root_item *root_item = &root->root_item;
	struct btrfs_trans_handle *trans;
	struct timespec64 ct = current_time(inode);
	int ret = 0;
	int received_uuid_changed;

	if (!inode_owner_or_capable(inode))
		return -EPERM;

	ret = mnt_want_write_file(file);
	if (ret < 0)
		return ret;

	down_write(&fs_info->subvol_sem);

	if (btrfs_ino(BTRFS_I(inode)) != BTRFS_FIRST_FREE_OBJECTID) {
		ret = -EINVAL;
		goto out;
	}

	if (btrfs_root_readonly(root)) {
		ret = -EROFS;
		goto out;
	}

	/*
	 * 1 - root item
	 * 2 - uuid items (received uuid + subvol uuid)
	 */
	trans = btrfs_start_transaction(root, 3);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		trans = NULL;
		goto out;
	}

	sa->rtransid = trans->transid;
	sa->rtime.sec = ct.tv_sec;
	sa->rtime.nsec = ct.tv_nsec;

	received_uuid_changed = memcmp(root_item->received_uuid, sa->uuid,
				       BTRFS_UUID_SIZE);
	if (received_uuid_changed &&
	    !btrfs_is_empty_uuid(root_item->received_uuid)) {
		ret = btrfs_uuid_tree_remove(trans, root_item->received_uuid,
					  BTRFS_UUID_KEY_RECEIVED_SUBVOL,
					  root->root_key.objectid);
		if (ret && ret != -ENOENT) {
			btrfs_abort_transaction(trans, ret);
			btrfs_end_transaction(trans);
			goto out;
		}
	}
	memcpy(root_item->received_uuid, sa->uuid, BTRFS_UUID_SIZE);
	btrfs_set_root_stransid(root_item, sa->stransid);
	btrfs_set_root_rtransid(root_item, sa->rtransid);
	btrfs_set_stack_timespec_sec(&root_item->stime, sa->stime.sec);
	btrfs_set_stack_timespec_nsec(&root_item->stime, sa->stime.nsec);
	btrfs_set_stack_timespec_sec(&root_item->rtime, sa->rtime.sec);
	btrfs_set_stack_timespec_nsec(&root_item->rtime, sa->rtime.nsec);

	ret = btrfs_update_root(trans, fs_info->tree_root,
				&root->root_key, &root->root_item);
	if (ret < 0) {
		btrfs_end_transaction(trans);
		goto out;
	}
	if (received_uuid_changed && !btrfs_is_empty_uuid(sa->uuid)) {
		ret = btrfs_uuid_tree_add(trans, sa->uuid,
					  BTRFS_UUID_KEY_RECEIVED_SUBVOL,
					  root->root_key.objectid);
		if (ret < 0 && ret != -EEXIST) {
			btrfs_abort_transaction(trans, ret);
			btrfs_end_transaction(trans);
			goto out;
		}
	}
	ret = btrfs_commit_transaction(trans);
out:
	up_write(&fs_info->subvol_sem);
	mnt_drop_write_file(file);
	return ret;
}

#ifdef CONFIG_64BIT
static long btrfs_ioctl_set_received_subvol_32(struct file *file,
						void __user *arg)
{
	struct btrfs_ioctl_received_subvol_args_32 *args32 = NULL;
	struct btrfs_ioctl_received_subvol_args *args64 = NULL;
	int ret = 0;

	args32 = memdup_user(arg, sizeof(*args32));
	if (IS_ERR(args32))
		return PTR_ERR(args32);

	args64 = kmalloc(sizeof(*args64), GFP_KERNEL);
	if (!args64) {
		ret = -ENOMEM;
		goto out;
	}

	memcpy(args64->uuid, args32->uuid, BTRFS_UUID_SIZE);
	args64->stransid = args32->stransid;
	args64->rtransid = args32->rtransid;
	args64->stime.sec = args32->stime.sec;
	args64->stime.nsec = args32->stime.nsec;
	args64->rtime.sec = args32->rtime.sec;
	args64->rtime.nsec = args32->rtime.nsec;
	args64->flags = args32->flags;

	ret = _btrfs_ioctl_set_received_subvol(file, args64);
	if (ret)
		goto out;

	memcpy(args32->uuid, args64->uuid, BTRFS_UUID_SIZE);
	args32->stransid = args64->stransid;
	args32->rtransid = args64->rtransid;
	args32->stime.sec = args64->stime.sec;
	args32->stime.nsec = args64->stime.nsec;
	args32->rtime.sec = args64->rtime.sec;
	args32->rtime.nsec = args64->rtime.nsec;
	args32->flags = args64->flags;

	ret = copy_to_user(arg, args32, sizeof(*args32));
	if (ret)
		ret = -EFAULT;

out:
	kfree(args32);
	kfree(args64);
	return ret;
}
#endif

static long btrfs_ioctl_set_received_subvol(struct file *file,
					    void __user *arg)
{
	struct btrfs_ioctl_received_subvol_args *sa = NULL;
	int ret = 0;

	sa = memdup_user(arg, sizeof(*sa));
	if (IS_ERR(sa))
		return PTR_ERR(sa);

	ret = _btrfs_ioctl_set_received_subvol(file, sa);

	if (ret)
		goto out;

	ret = copy_to_user(arg, sa, sizeof(*sa));
	if (ret)
		ret = -EFAULT;

out:
	kfree(sa);
	return ret;
}

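/*
 * Copy the current filesystem label out to userspace, truncating it if the
 * on-disk label is not NUL-terminated.
 */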
static int btrfs_ioctl_get_fslabel(struct file *file, void __user *arg)
{
	struct inode *inode = file_inode(file);
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	size_t len;
	int ret;
	char label[BTRFS_LABEL_SIZE];

	spin_lock(&fs_info->super_lock);
	memcpy(label, fs_info->super_copy->label, BTRFS_LABEL_SIZE);
	spin_unlock(&fs_info->super_lock);

	len = strnlen(label, BTRFS_LABEL_SIZE);

	if (len == BTRFS_LABEL_SIZE) {
		btrfs_warn(fs_info,
			   "label is too long, return the first %zu bytes",
			   --len);
	}

	ret = copy_to_user(arg, label, len);

	return ret ? -EFAULT : 0;
}

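/*
 * Set a new filesystem label; requires CAP_SYS_ADMIN, a writable mount and a
 * label shorter than BTRFS_LABEL_SIZE.
 */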
static int btrfs_ioctl_set_fslabel(struct file *file, void __user *arg)
{
	struct inode *inode = file_inode(file);
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_super_block *super_block = fs_info->super_copy;
	struct btrfs_trans_handle *trans;
	char label[BTRFS_LABEL_SIZE];
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (copy_from_user(label, arg, sizeof(label)))
		return -EFAULT;

	if (strnlen(label, BTRFS_LABEL_SIZE) == BTRFS_LABEL_SIZE) {
		btrfs_err(fs_info,
			  "unable to set label with more than %d bytes",
			  BTRFS_LABEL_SIZE - 1);
		return -EINVAL;
	}

	ret = mnt_want_write_file(file);
	if (ret)
		return ret;

	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto out_unlock;
	}

	spin_lock(&fs_info->super_lock);
	strcpy(super_block->label, label);
	spin_unlock(&fs_info->super_lock);
	ret = btrfs_commit_transaction(trans);

out_unlock:
	mnt_drop_write_file(file);
	return ret;
}

#define INIT_FEATURE_FLAGS(suffix) \
	{ .compat_flags = BTRFS_FEATURE_COMPAT_##suffix, \
	  .compat_ro_flags = BTRFS_FEATURE_COMPAT_RO_##suffix, \
	  .incompat_flags = BTRFS_FEATURE_INCOMPAT_##suffix }

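/*
 * Report the feature bits this kernel supports, and which of them may be
 * safely set or cleared while the filesystem is mounted.
 */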
int btrfs_ioctl_get_supported_features(void __user *arg)
{
	static const struct btrfs_ioctl_feature_flags features[3] = {
		INIT_FEATURE_FLAGS(SUPP),
		INIT_FEATURE_FLAGS(SAFE_SET),
		INIT_FEATURE_FLAGS(SAFE_CLEAR)
	};

	if (copy_to_user(arg, &features, sizeof(features)))
		return -EFAULT;

	return 0;
}

static int btrfs_ioctl_get_features(struct file *file, void __user *arg)
{
	struct inode *inode = file_inode(file);
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_super_block *super_block = fs_info->super_copy;
	struct btrfs_ioctl_feature_flags features;

	features.compat_flags = btrfs_super_compat_flags(super_block);
	features.compat_ro_flags = btrfs_super_compat_ro_flags(super_block);
	features.incompat_flags = btrfs_super_incompat_flags(super_block);

	if (copy_to_user(arg, &features, sizeof(features)))
		return -EFAULT;

	return 0;
}

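/*
 * Validate a requested feature bit change against what this kernel supports
 * and what may safely be set or cleared on a mounted filesystem.
 */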
static int check_feature_bits(struct btrfs_fs_info *fs_info,
			      enum btrfs_feature_set set,
			      u64 change_mask, u64 flags, u64 supported_flags,
			      u64 safe_set, u64 safe_clear)
{
	const char *type = btrfs_feature_set_names[set];
	char *names;
	u64 disallowed, unsupported;
	u64 set_mask = flags & change_mask;
	u64 clear_mask = ~flags & change_mask;

	unsupported = set_mask & ~supported_flags;
	if (unsupported) {
		names = btrfs_printable_features(set, unsupported);
		if (names) {
			btrfs_warn(fs_info,
				   "this kernel does not support the %s feature bit%s",
				   names, strchr(names, ',') ? "s" : "");
			kfree(names);
		} else
			btrfs_warn(fs_info,
				   "this kernel does not support %s bits 0x%llx",
				   type, unsupported);
		return -EOPNOTSUPP;
	}

	disallowed = set_mask & ~safe_set;
	if (disallowed) {
		names = btrfs_printable_features(set, disallowed);
		if (names) {
			btrfs_warn(fs_info,
				   "can't set the %s feature bit%s while mounted",
				   names, strchr(names, ',') ? "s" : "");
			kfree(names);
		} else
			btrfs_warn(fs_info,
				   "can't set %s bits 0x%llx while mounted",
				   type, disallowed);
		return -EPERM;
	}

	disallowed = clear_mask & ~safe_clear;
	if (disallowed) {
		names = btrfs_printable_features(set, disallowed);
		if (names) {
			btrfs_warn(fs_info,
				   "can't clear the %s feature bit%s while mounted",
				   names, strchr(names, ',') ? "s" : "");
			kfree(names);
		} else
			btrfs_warn(fs_info,
				   "can't clear %s bits 0x%llx while mounted",
				   type, disallowed);
		return -EPERM;
	}

	return 0;
}

#define check_feature(fs_info, change_mask, flags, mask_base)	\
check_feature_bits(fs_info, FEAT_##mask_base, change_mask, flags,	\
		   BTRFS_FEATURE_ ## mask_base ## _SUPP,	\
		   BTRFS_FEATURE_ ## mask_base ## _SAFE_SET,	\
		   BTRFS_FEATURE_ ## mask_base ## _SAFE_CLEAR)

static int btrfs_ioctl_set_features(struct file *file, void __user *arg)
{
	struct inode *inode = file_inode(file);
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_super_block *super_block = fs_info->super_copy;
	struct btrfs_ioctl_feature_flags flags[2];
	struct btrfs_trans_handle *trans;
	u64 newflags;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (copy_from_user(flags, arg, sizeof(flags)))
		return -EFAULT;

	/* Nothing to do */
	if (!flags[0].compat_flags && !flags[0].compat_ro_flags &&
	    !flags[0].incompat_flags)
		return 0;

	ret = check_feature(fs_info, flags[0].compat_flags,
			    flags[1].compat_flags, COMPAT);
	if (ret)
		return ret;

	ret = check_feature(fs_info, flags[0].compat_ro_flags,
			    flags[1].compat_ro_flags, COMPAT_RO);
	if (ret)
		return ret;

	ret = check_feature(fs_info, flags[0].incompat_flags,
			    flags[1].incompat_flags, INCOMPAT);
	if (ret)
		return ret;

	ret = mnt_want_write_file(file);
	if (ret)
		return ret;

	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto out_drop_write;
	}

	spin_lock(&fs_info->super_lock);
	newflags = btrfs_super_compat_flags(super_block);
	newflags |= flags[0].compat_flags & flags[1].compat_flags;
	newflags &= ~(flags[0].compat_flags & ~flags[1].compat_flags);
	btrfs_set_super_compat_flags(super_block, newflags);

	newflags = btrfs_super_compat_ro_flags(super_block);
	newflags |= flags[0].compat_ro_flags & flags[1].compat_ro_flags;
	newflags &= ~(flags[0].compat_ro_flags & ~flags[1].compat_ro_flags);
	btrfs_set_super_compat_ro_flags(super_block, newflags);

	newflags = btrfs_super_incompat_flags(super_block);
	newflags |= flags[0].incompat_flags & flags[1].incompat_flags;
	newflags &= ~(flags[0].incompat_flags & ~flags[1].incompat_flags);
	btrfs_set_super_incompat_flags(super_block, newflags);
	spin_unlock(&fs_info->super_lock);

	ret = btrfs_commit_transaction(trans);
out_drop_write:
	mnt_drop_write_file(file);

	return ret;
}

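/*
 * Common entry for BTRFS_IOC_SEND and its 32-bit compat variant: normalize
 * the user arguments into struct btrfs_ioctl_send_args before calling
 * btrfs_ioctl_send().
 */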
static int _btrfs_ioctl_send(struct file *file, void __user *argp, bool compat)
{
	struct btrfs_ioctl_send_args *arg;
	int ret;

	if (compat) {
#if defined(CONFIG_64BIT) && defined(CONFIG_COMPAT)
		struct btrfs_ioctl_send_args_32 args32;

		ret = copy_from_user(&args32, argp, sizeof(args32));
		if (ret)
			return -EFAULT;
		arg = kzalloc(sizeof(*arg), GFP_KERNEL);
		if (!arg)
			return -ENOMEM;
		arg->send_fd = args32.send_fd;
		arg->clone_sources_count = args32.clone_sources_count;
		arg->clone_sources = compat_ptr(args32.clone_sources);
		arg->parent_root = args32.parent_root;
		arg->flags = args32.flags;
		memcpy(arg->reserved, args32.reserved,
		       sizeof(args32.reserved));
#else
		return -ENOTTY;
#endif
	} else {
		arg = memdup_user(argp, sizeof(*arg));
		if (IS_ERR(arg))
			return PTR_ERR(arg);
	}
	ret = btrfs_ioctl_send(file, arg);
	kfree(arg);
	return ret;
}

long btrfs_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct inode *inode = file_inode(file);
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_root *root = BTRFS_I(inode)->root;
	void __user *argp = (void __user *)arg;

	switch (cmd) {
	case FS_IOC_GETFLAGS:
		return btrfs_ioctl_getflags(file, argp);
	case FS_IOC_SETFLAGS:
		return btrfs_ioctl_setflags(file, argp);
	case FS_IOC_GETVERSION:
		return btrfs_ioctl_getversion(file, argp);
	case FITRIM:
		return btrfs_ioctl_fitrim(file, argp);
	case BTRFS_IOC_SNAP_CREATE:
		return btrfs_ioctl_snap_create(file, argp, 0);
	case BTRFS_IOC_SNAP_CREATE_V2:
		return btrfs_ioctl_snap_create_v2(file, argp, 0);
	case BTRFS_IOC_SUBVOL_CREATE:
		return btrfs_ioctl_snap_create(file, argp, 1);
	case BTRFS_IOC_SUBVOL_CREATE_V2:
		return btrfs_ioctl_snap_create_v2(file, argp, 1);
	case BTRFS_IOC_SNAP_DESTROY:
		return btrfs_ioctl_snap_destroy(file, argp);
	case BTRFS_IOC_SUBVOL_GETFLAGS:
		return btrfs_ioctl_subvol_getflags(file, argp);
	case BTRFS_IOC_SUBVOL_SETFLAGS:
		return btrfs_ioctl_subvol_setflags(file, argp);
	case BTRFS_IOC_DEFAULT_SUBVOL:
		return btrfs_ioctl_default_subvol(file, argp);
	case BTRFS_IOC_DEFRAG:
		return btrfs_ioctl_defrag(file, NULL);
	case BTRFS_IOC_DEFRAG_RANGE:
		return btrfs_ioctl_defrag(file, argp);
	case BTRFS_IOC_RESIZE:
		return btrfs_ioctl_resize(file, argp);
	case BTRFS_IOC_ADD_DEV:
		return btrfs_ioctl_add_dev(fs_info, argp);
	case BTRFS_IOC_RM_DEV:
		return btrfs_ioctl_rm_dev(file, argp);
	case BTRFS_IOC_RM_DEV_V2:
		return btrfs_ioctl_rm_dev_v2(file, argp);
	case BTRFS_IOC_FS_INFO:
		return btrfs_ioctl_fs_info(fs_info, argp);
	case BTRFS_IOC_DEV_INFO:
		return btrfs_ioctl_dev_info(fs_info, argp);
	case BTRFS_IOC_BALANCE:
		return btrfs_ioctl_balance(file, NULL);
	case BTRFS_IOC_TREE_SEARCH:
		return btrfs_ioctl_tree_search(file, argp);
	case BTRFS_IOC_TREE_SEARCH_V2:
		return btrfs_ioctl_tree_search_v2(file, argp);
	case BTRFS_IOC_INO_LOOKUP:
		return btrfs_ioctl_ino_lookup(file, argp);
	case BTRFS_IOC_INO_PATHS:
		return btrfs_ioctl_ino_to_path(root, argp);
	case BTRFS_IOC_LOGICAL_INO:
		return btrfs_ioctl_logical_to_ino(fs_info, argp, 1);
	case BTRFS_IOC_LOGICAL_INO_V2:
		return btrfs_ioctl_logical_to_ino(fs_info, argp, 2);
	case BTRFS_IOC_SPACE_INFO:
		return btrfs_ioctl_space_info(fs_info, argp);
	case BTRFS_IOC_SYNC: {
		int ret;

		ret = btrfs_start_delalloc_roots(fs_info, -1);
		if (ret)
			return ret;
		ret = btrfs_sync_fs(inode->i_sb, 1);
		/*
		 * The transaction thread may want to do more work,
		 * namely it pokes the cleaner kthread that will start
		 * processing uncleaned subvols.
		 */
		wake_up_process(fs_info->transaction_kthread);
		return ret;
	}
	case BTRFS_IOC_START_SYNC:
		return btrfs_ioctl_start_sync(root, argp);
	case BTRFS_IOC_WAIT_SYNC:
		return btrfs_ioctl_wait_sync(fs_info, argp);
	case BTRFS_IOC_SCRUB:
		return btrfs_ioctl_scrub(file, argp);
	case BTRFS_IOC_SCRUB_CANCEL:
		return btrfs_ioctl_scrub_cancel(fs_info);
	case BTRFS_IOC_SCRUB_PROGRESS:
		return btrfs_ioctl_scrub_progress(fs_info, argp);
	case BTRFS_IOC_BALANCE_V2:
		return btrfs_ioctl_balance(file, argp);
	case BTRFS_IOC_BALANCE_CTL:
		return btrfs_ioctl_balance_ctl(fs_info, arg);
	case BTRFS_IOC_BALANCE_PROGRESS:
		return btrfs_ioctl_balance_progress(fs_info, argp);
	case BTRFS_IOC_SET_RECEIVED_SUBVOL:
		return btrfs_ioctl_set_received_subvol(file, argp);
#ifdef CONFIG_64BIT
	case BTRFS_IOC_SET_RECEIVED_SUBVOL_32:
		return btrfs_ioctl_set_received_subvol_32(file, argp);
#endif
	case BTRFS_IOC_SEND:
		return _btrfs_ioctl_send(file, argp, false);
#if defined(CONFIG_64BIT) && defined(CONFIG_COMPAT)
	case BTRFS_IOC_SEND_32:
		return _btrfs_ioctl_send(file, argp, true);
#endif
	case BTRFS_IOC_GET_DEV_STATS:
		return btrfs_ioctl_get_dev_stats(fs_info, argp);
	case BTRFS_IOC_QUOTA_CTL:
		return btrfs_ioctl_quota_ctl(file, argp);
	case BTRFS_IOC_QGROUP_ASSIGN:
		return btrfs_ioctl_qgroup_assign(file, argp);
	case BTRFS_IOC_QGROUP_CREATE:
		return btrfs_ioctl_qgroup_create(file, argp);
	case BTRFS_IOC_QGROUP_LIMIT:
		return btrfs_ioctl_qgroup_limit(file, argp);
	case BTRFS_IOC_QUOTA_RESCAN:
		return btrfs_ioctl_quota_rescan(file, argp);
	case BTRFS_IOC_QUOTA_RESCAN_STATUS:
		return btrfs_ioctl_quota_rescan_status(file, argp);
	case BTRFS_IOC_QUOTA_RESCAN_WAIT:
		return btrfs_ioctl_quota_rescan_wait(file, argp);
	case BTRFS_IOC_DEV_REPLACE:
		return btrfs_ioctl_dev_replace(fs_info, argp);
	case BTRFS_IOC_GET_FSLABEL:
		return btrfs_ioctl_get_fslabel(file, argp);
	case BTRFS_IOC_SET_FSLABEL:
		return btrfs_ioctl_set_fslabel(file, argp);
	case BTRFS_IOC_GET_SUPPORTED_FEATURES:
		return btrfs_ioctl_get_supported_features(argp);
	case BTRFS_IOC_GET_FEATURES:
		return btrfs_ioctl_get_features(file, argp);
	case BTRFS_IOC_SET_FEATURES:
		return btrfs_ioctl_set_features(file, argp);
	case FS_IOC_FSGETXATTR:
		return btrfs_ioctl_fsgetxattr(file, argp);
	case FS_IOC_FSSETXATTR:
		return btrfs_ioctl_fssetxattr(file, argp);
	case BTRFS_IOC_GET_SUBVOL_INFO:
		return btrfs_ioctl_get_subvol_info(file, argp);
	case BTRFS_IOC_GET_SUBVOL_ROOTREF:
		return btrfs_ioctl_get_subvol_rootref(file, argp);
	case BTRFS_IOC_INO_LOOKUP_USER:
		return btrfs_ioctl_ino_lookup_user(file, argp);
	}

	return -ENOTTY;
}

#ifdef CONFIG_COMPAT
long btrfs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	/*
	 * These all access 32-bit values anyway so no further
	 * handling is necessary.
	 */
	switch (cmd) {
	case FS_IOC32_GETFLAGS:
		cmd = FS_IOC_GETFLAGS;
		break;
	case FS_IOC32_SETFLAGS:
		cmd = FS_IOC_SETFLAGS;
		break;
	case FS_IOC32_GETVERSION:
		cmd = FS_IOC_GETVERSION;
		break;
	}

	return btrfs_ioctl(file, cmd, (unsigned long) compat_ptr(arg));
}
#endif