ioctl.c 137.2 KB
Newer Older
C
Christoph Hellwig 已提交
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */

#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/buffer_head.h>
#include <linux/file.h>
#include <linux/fs.h>
24
#include <linux/fsnotify.h>
C
Christoph Hellwig 已提交
25 26 27 28 29 30
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/backing-dev.h>
31
#include <linux/mount.h>
C
Christoph Hellwig 已提交
32
#include <linux/mpage.h>
33
#include <linux/namei.h>
C
Christoph Hellwig 已提交
34 35 36 37
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/compat.h>
#include <linux/bit_spinlock.h>
38
#include <linux/security.h>
C
Christoph Hellwig 已提交
39
#include <linux/xattr.h>
40
#include <linux/vmalloc.h>
41
#include <linux/slab.h>
42
#include <linux/blkdev.h>
43
#include <linux/uuid.h>
44
#include <linux/btrfs.h>
M
Mark Fasheh 已提交
45
#include <linux/uaccess.h>
C
Christoph Hellwig 已提交
46 47 48 49 50 51
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "print-tree.h"
#include "volumes.h"
52
#include "locking.h"
53
#include "inode-map.h"
54
#include "backref.h"
55
#include "rcu-string.h"
56
#include "send.h"
57
#include "dev-replace.h"
58
#include "props.h"
59
#include "sysfs.h"
J
Josef Bacik 已提交
60
#include "qgroup.h"
61
#include "tree-log.h"
62
#include "compression.h"
C
Christoph Hellwig 已提交
63

64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89
#ifdef CONFIG_64BIT
/* If we have a 32-bit userspace and 64-bit kernel, then the UAPI
 * structures are incorrect, as the timespec structure from userspace
 * is 4 bytes too small. We define these alternatives here to teach
 * the kernel about the 32-bit struct packing.
 */
struct btrfs_ioctl_timespec_32 {
	__u64 sec;
	__u32 nsec;
} __attribute__ ((__packed__));

struct btrfs_ioctl_received_subvol_args_32 {
	char	uuid[BTRFS_UUID_SIZE];	/* in */
	__u64	stransid;		/* in */
	__u64	rtransid;		/* out */
	struct btrfs_ioctl_timespec_32 stime; /* in */
	struct btrfs_ioctl_timespec_32 rtime; /* out */
	__u64	flags;			/* in */
	__u64	reserved[16];		/* in */
} __attribute__ ((__packed__));

#define BTRFS_IOC_SET_RECEIVED_SUBVOL_32 _IOWR(BTRFS_IOCTL_MAGIC, 37, \
				struct btrfs_ioctl_received_subvol_args_32)
#endif


M
Mark Fasheh 已提交
90
static int btrfs_clone(struct inode *src, struct inode *inode,
91 92
		       u64 off, u64 olen, u64 olen_aligned, u64 destoff,
		       int no_time_update);
M
Mark Fasheh 已提交
93

94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123
/* Mask out flags that are inappropriate for the given type of inode. */
static inline __u32 btrfs_mask_flags(umode_t mode, __u32 flags)
{
	if (S_ISDIR(mode))
		return flags;
	else if (S_ISREG(mode))
		return flags & ~FS_DIRSYNC_FL;
	else
		return flags & (FS_NODUMP_FL | FS_NOATIME_FL);
}

/*
 * Export inode flags to the format expected by the FS_IOC_GETFLAGS ioctl.
 */
static unsigned int btrfs_flags_to_ioctl(unsigned int flags)
{
	unsigned int iflags = 0;

	if (flags & BTRFS_INODE_SYNC)
		iflags |= FS_SYNC_FL;
	if (flags & BTRFS_INODE_IMMUTABLE)
		iflags |= FS_IMMUTABLE_FL;
	if (flags & BTRFS_INODE_APPEND)
		iflags |= FS_APPEND_FL;
	if (flags & BTRFS_INODE_NODUMP)
		iflags |= FS_NODUMP_FL;
	if (flags & BTRFS_INODE_NOATIME)
		iflags |= FS_NOATIME_FL;
	if (flags & BTRFS_INODE_DIRSYNC)
		iflags |= FS_DIRSYNC_FL;
L
Li Zefan 已提交
124 125 126
	if (flags & BTRFS_INODE_NODATACOW)
		iflags |= FS_NOCOW_FL;

127
	if (flags & BTRFS_INODE_NOCOMPRESS)
L
Li Zefan 已提交
128
		iflags |= FS_NOCOMP_FL;
129 130
	else if (flags & BTRFS_INODE_COMPRESS)
		iflags |= FS_COMPR_FL;
131 132 133 134 135 136 137 138 139 140

	return iflags;
}

/*
 * Update inode->i_flags based on the btrfs internal flags.
 */
void btrfs_update_iflags(struct inode *inode)
{
	struct btrfs_inode *ip = BTRFS_I(inode);
141
	unsigned int new_fl = 0;
142 143

	if (ip->flags & BTRFS_INODE_SYNC)
144
		new_fl |= S_SYNC;
145
	if (ip->flags & BTRFS_INODE_IMMUTABLE)
146
		new_fl |= S_IMMUTABLE;
147
	if (ip->flags & BTRFS_INODE_APPEND)
148
		new_fl |= S_APPEND;
149
	if (ip->flags & BTRFS_INODE_NOATIME)
150
		new_fl |= S_NOATIME;
151
	if (ip->flags & BTRFS_INODE_DIRSYNC)
152 153 154 155 156
		new_fl |= S_DIRSYNC;

	set_mask_bits(&inode->i_flags,
		      S_SYNC | S_APPEND | S_IMMUTABLE | S_NOATIME | S_DIRSYNC,
		      new_fl);
157 158 159 160 161
}

/*
 * Inherit flags from the parent inode.
 *
162
 * Currently only the compression flags and the cow flags are inherited.
163 164 165
 */
void btrfs_inherit_iflags(struct inode *inode, struct inode *dir)
{
166 167 168 169 170 171
	unsigned int flags;

	if (!dir)
		return;

	flags = BTRFS_I(dir)->flags;
172

173 174 175 176 177 178 179 180
	if (flags & BTRFS_INODE_NOCOMPRESS) {
		BTRFS_I(inode)->flags &= ~BTRFS_INODE_COMPRESS;
		BTRFS_I(inode)->flags |= BTRFS_INODE_NOCOMPRESS;
	} else if (flags & BTRFS_INODE_COMPRESS) {
		BTRFS_I(inode)->flags &= ~BTRFS_INODE_NOCOMPRESS;
		BTRFS_I(inode)->flags |= BTRFS_INODE_COMPRESS;
	}

L
Liu Bo 已提交
181
	if (flags & BTRFS_INODE_NODATACOW) {
182
		BTRFS_I(inode)->flags |= BTRFS_INODE_NODATACOW;
L
Liu Bo 已提交
183 184 185
		if (S_ISREG(inode->i_mode))
			BTRFS_I(inode)->flags |= BTRFS_INODE_NODATASUM;
	}
186 187 188 189 190 191

	btrfs_update_iflags(inode);
}

static int btrfs_ioctl_getflags(struct file *file, void __user *arg)
{
A
Al Viro 已提交
192
	struct btrfs_inode *ip = BTRFS_I(file_inode(file));
193 194 195 196 197 198 199
	unsigned int flags = btrfs_flags_to_ioctl(ip->flags);

	if (copy_to_user(arg, &flags, sizeof(flags)))
		return -EFAULT;
	return 0;
}

200 201 202 203 204
static int check_flags(unsigned int flags)
{
	if (flags & ~(FS_IMMUTABLE_FL | FS_APPEND_FL | \
		      FS_NOATIME_FL | FS_NODUMP_FL | \
		      FS_SYNC_FL | FS_DIRSYNC_FL | \
L
Li Zefan 已提交
205 206
		      FS_NOCOMP_FL | FS_COMPR_FL |
		      FS_NOCOW_FL))
207 208 209 210 211 212 213 214
		return -EOPNOTSUPP;

	if ((flags & FS_NOCOMP_FL) && (flags & FS_COMPR_FL))
		return -EINVAL;

	return 0;
}

215 216
static int btrfs_ioctl_setflags(struct file *file, void __user *arg)
{
A
Al Viro 已提交
217
	struct inode *inode = file_inode(file);
218
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
219 220 221 222 223
	struct btrfs_inode *ip = BTRFS_I(inode);
	struct btrfs_root *root = ip->root;
	struct btrfs_trans_handle *trans;
	unsigned int flags, oldflags;
	int ret;
224 225
	u64 ip_oldflags;
	unsigned int i_oldflags;
226
	umode_t mode;
227

228 229 230
	if (!inode_owner_or_capable(inode))
		return -EPERM;

L
Li Zefan 已提交
231 232 233
	if (btrfs_root_readonly(root))
		return -EROFS;

234 235 236
	if (copy_from_user(&flags, arg, sizeof(flags)))
		return -EFAULT;

237 238 239
	ret = check_flags(flags);
	if (ret)
		return ret;
C
Christoph Hellwig 已提交
240

241 242 243 244
	ret = mnt_want_write_file(file);
	if (ret)
		return ret;

A
Al Viro 已提交
245
	inode_lock(inode);
246

247 248
	ip_oldflags = ip->flags;
	i_oldflags = inode->i_flags;
249
	mode = inode->i_mode;
250

251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283
	flags = btrfs_mask_flags(inode->i_mode, flags);
	oldflags = btrfs_flags_to_ioctl(ip->flags);
	if ((flags ^ oldflags) & (FS_APPEND_FL | FS_IMMUTABLE_FL)) {
		if (!capable(CAP_LINUX_IMMUTABLE)) {
			ret = -EPERM;
			goto out_unlock;
		}
	}

	if (flags & FS_SYNC_FL)
		ip->flags |= BTRFS_INODE_SYNC;
	else
		ip->flags &= ~BTRFS_INODE_SYNC;
	if (flags & FS_IMMUTABLE_FL)
		ip->flags |= BTRFS_INODE_IMMUTABLE;
	else
		ip->flags &= ~BTRFS_INODE_IMMUTABLE;
	if (flags & FS_APPEND_FL)
		ip->flags |= BTRFS_INODE_APPEND;
	else
		ip->flags &= ~BTRFS_INODE_APPEND;
	if (flags & FS_NODUMP_FL)
		ip->flags |= BTRFS_INODE_NODUMP;
	else
		ip->flags &= ~BTRFS_INODE_NODUMP;
	if (flags & FS_NOATIME_FL)
		ip->flags |= BTRFS_INODE_NOATIME;
	else
		ip->flags &= ~BTRFS_INODE_NOATIME;
	if (flags & FS_DIRSYNC_FL)
		ip->flags |= BTRFS_INODE_DIRSYNC;
	else
		ip->flags &= ~BTRFS_INODE_DIRSYNC;
284 285 286 287 288 289 290 291 292 293 294 295 296 297 298
	if (flags & FS_NOCOW_FL) {
		if (S_ISREG(mode)) {
			/*
			 * It's safe to turn csums off here, no extents exist.
			 * Otherwise we want the flag to reflect the real COW
			 * status of the file and will not set it.
			 */
			if (inode->i_size == 0)
				ip->flags |= BTRFS_INODE_NODATACOW
					   | BTRFS_INODE_NODATASUM;
		} else {
			ip->flags |= BTRFS_INODE_NODATACOW;
		}
	} else {
		/*
299
		 * Revert back under same assumptions as above
300 301 302 303 304 305 306 307 308
		 */
		if (S_ISREG(mode)) {
			if (inode->i_size == 0)
				ip->flags &= ~(BTRFS_INODE_NODATACOW
				             | BTRFS_INODE_NODATASUM);
		} else {
			ip->flags &= ~BTRFS_INODE_NODATACOW;
		}
	}
309

310 311 312 313 314 315 316 317
	/*
	 * The COMPRESS flag can only be changed by users, while the NOCOMPRESS
	 * flag may be changed automatically if compression code won't make
	 * things smaller.
	 */
	if (flags & FS_NOCOMP_FL) {
		ip->flags &= ~BTRFS_INODE_COMPRESS;
		ip->flags |= BTRFS_INODE_NOCOMPRESS;
318 319 320 321

		ret = btrfs_set_prop(inode, "btrfs.compression", NULL, 0, 0);
		if (ret && ret != -ENODATA)
			goto out_drop;
322
	} else if (flags & FS_COMPR_FL) {
323 324
		const char *comp;

325 326
		ip->flags |= BTRFS_INODE_COMPRESS;
		ip->flags &= ~BTRFS_INODE_NOCOMPRESS;
327

328
		if (fs_info->compress_type == BTRFS_COMPRESS_LZO)
329 330 331 332 333 334 335 336
			comp = "lzo";
		else
			comp = "zlib";
		ret = btrfs_set_prop(inode, "btrfs.compression",
				     comp, strlen(comp), 0);
		if (ret)
			goto out_drop;

L
Li Zefan 已提交
337
	} else {
338 339 340
		ret = btrfs_set_prop(inode, "btrfs.compression", NULL, 0, 0);
		if (ret && ret != -ENODATA)
			goto out_drop;
L
Li Zefan 已提交
341
		ip->flags &= ~(BTRFS_INODE_COMPRESS | BTRFS_INODE_NOCOMPRESS);
342
	}
343

344
	trans = btrfs_start_transaction(root, 1);
345 346 347 348
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto out_drop;
	}
349

350
	btrfs_update_iflags(inode);
351
	inode_inc_iversion(inode);
352
	inode->i_ctime = current_time(inode);
353 354
	ret = btrfs_update_inode(trans, root, inode);

355
	btrfs_end_transaction(trans);
356 357 358 359 360
 out_drop:
	if (ret) {
		ip->flags = ip_oldflags;
		inode->i_flags = i_oldflags;
	}
361 362

 out_unlock:
A
Al Viro 已提交
363
	inode_unlock(inode);
364
	mnt_drop_write_file(file);
365
	return ret;
366 367 368 369
}

static int btrfs_ioctl_getversion(struct file *file, int __user *arg)
{
A
Al Viro 已提交
370
	struct inode *inode = file_inode(file);
371 372 373

	return put_user(inode->i_generation, arg);
}
C
Christoph Hellwig 已提交
374

375 376
static noinline int btrfs_ioctl_fitrim(struct file *file, void __user *arg)
{
377 378
	struct inode *inode = file_inode(file);
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
379 380 381 382 383
	struct btrfs_device *device;
	struct request_queue *q;
	struct fstrim_range range;
	u64 minlen = ULLONG_MAX;
	u64 num_devices = 0;
384
	u64 total_bytes = btrfs_super_total_bytes(fs_info->super_copy);
385 386 387 388 389
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

390 391 392
	rcu_read_lock();
	list_for_each_entry_rcu(device, &fs_info->fs_devices->devices,
				dev_list) {
393 394 395 396 397 398 399 400 401
		if (!device->bdev)
			continue;
		q = bdev_get_queue(device->bdev);
		if (blk_queue_discard(q)) {
			num_devices++;
			minlen = min((u64)q->limits.discard_granularity,
				     minlen);
		}
	}
402
	rcu_read_unlock();
403

404 405 406 407
	if (!num_devices)
		return -EOPNOTSUPP;
	if (copy_from_user(&range, arg, sizeof(range)))
		return -EFAULT;
408 409
	if (range.start > total_bytes ||
	    range.len < fs_info->sb->s_blocksize)
410
		return -EINVAL;
411

412
	range.len = min(range.len, total_bytes - range.start);
413
	range.minlen = max(range.minlen, minlen);
414
	ret = btrfs_trim_fs(fs_info, &range);
415 416 417 418 419 420 421 422 423
	if (ret < 0)
		return ret;

	if (copy_to_user(arg, &range, sizeof(range)))
		return -EFAULT;

	return 0;
}

424 425
int btrfs_is_empty_uuid(u8 *uuid)
{
C
Chris Mason 已提交
426 427 428 429 430 431 432
	int i;

	for (i = 0; i < BTRFS_UUID_SIZE; i++) {
		if (uuid[i])
			return 0;
	}
	return 1;
433 434
}

435
static noinline int create_subvol(struct inode *dir,
436
				  struct dentry *dentry,
S
Sage Weil 已提交
437
				  char *name, int namelen,
A
Arne Jansen 已提交
438
				  u64 *async_transid,
439
				  struct btrfs_qgroup_inherit *inherit)
C
Christoph Hellwig 已提交
440
{
441
	struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb);
C
Christoph Hellwig 已提交
442 443
	struct btrfs_trans_handle *trans;
	struct btrfs_key key;
444
	struct btrfs_root_item *root_item;
C
Christoph Hellwig 已提交
445 446
	struct btrfs_inode_item *inode_item;
	struct extent_buffer *leaf;
447
	struct btrfs_root *root = BTRFS_I(dir)->root;
448
	struct btrfs_root *new_root;
449
	struct btrfs_block_rsv block_rsv;
450
	struct timespec cur_time = current_time(dir);
451
	struct inode *inode;
C
Christoph Hellwig 已提交
452 453 454 455
	int ret;
	int err;
	u64 objectid;
	u64 new_dirid = BTRFS_FIRST_FREE_OBJECTID;
456
	u64 index = 0;
457
	u64 qgroup_reserved;
458
	uuid_le new_uuid;
C
Christoph Hellwig 已提交
459

460 461 462 463
	root_item = kzalloc(sizeof(*root_item), GFP_KERNEL);
	if (!root_item)
		return -ENOMEM;

464
	ret = btrfs_find_free_objectid(fs_info->tree_root, &objectid);
465
	if (ret)
466
		goto fail_free;
467

468 469
	/*
	 * Don't create subvolume whose level is not zero. Or qgroup will be
470
	 * screwed up since it assumes subvolume qgroup's level to be 0.
471
	 */
472 473 474 475
	if (btrfs_qgroup_level(objectid)) {
		ret = -ENOSPC;
		goto fail_free;
	}
476

477
	btrfs_init_block_rsv(&block_rsv, BTRFS_BLOCK_RSV_TEMP);
J
Josef Bacik 已提交
478
	/*
479 480
	 * The same as the snapshot creation, please see the comment
	 * of create_snapshot().
J
Josef Bacik 已提交
481
	 */
482
	ret = btrfs_subvolume_reserve_metadata(root, &block_rsv,
483
					       8, &qgroup_reserved, false);
484
	if (ret)
485
		goto fail_free;
486 487 488 489

	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
490
		btrfs_subvolume_release_metadata(fs_info, &block_rsv,
491
						 qgroup_reserved);
492
		goto fail_free;
493 494 495
	}
	trans->block_rsv = &block_rsv;
	trans->bytes_reserved = block_rsv.size;
C
Christoph Hellwig 已提交
496

497
	ret = btrfs_qgroup_inherit(trans, fs_info, 0, objectid, inherit);
A
Arne Jansen 已提交
498 499 500
	if (ret)
		goto fail;

501
	leaf = btrfs_alloc_tree_block(trans, root, 0, objectid, NULL, 0, 0, 0);
502 503 504 505
	if (IS_ERR(leaf)) {
		ret = PTR_ERR(leaf);
		goto fail;
	}
C
Christoph Hellwig 已提交
506

507
	memzero_extent_buffer(leaf, 0, sizeof(struct btrfs_header));
C
Christoph Hellwig 已提交
508 509
	btrfs_set_header_bytenr(leaf, leaf->start);
	btrfs_set_header_generation(leaf, trans->transid);
510
	btrfs_set_header_backref_rev(leaf, BTRFS_MIXED_BACKREF_REV);
C
Christoph Hellwig 已提交
511 512
	btrfs_set_header_owner(leaf, objectid);

513 514
	write_extent_buffer_fsid(leaf, fs_info->fsid);
	write_extent_buffer_chunk_tree_uuid(leaf, fs_info->chunk_tree_uuid);
C
Christoph Hellwig 已提交
515 516
	btrfs_mark_buffer_dirty(leaf);

517
	inode_item = &root_item->inode;
518 519 520
	btrfs_set_stack_inode_generation(inode_item, 1);
	btrfs_set_stack_inode_size(inode_item, 3);
	btrfs_set_stack_inode_nlink(inode_item, 1);
521
	btrfs_set_stack_inode_nbytes(inode_item,
522
				     fs_info->nodesize);
523
	btrfs_set_stack_inode_mode(inode_item, S_IFDIR | 0755);
C
Christoph Hellwig 已提交
524

525 526
	btrfs_set_root_flags(root_item, 0);
	btrfs_set_root_limit(root_item, 0);
527
	btrfs_set_stack_inode_flags(inode_item, BTRFS_INODE_ROOT_ITEM_INIT);
528

529 530 531 532 533 534
	btrfs_set_root_bytenr(root_item, leaf->start);
	btrfs_set_root_generation(root_item, trans->transid);
	btrfs_set_root_level(root_item, 0);
	btrfs_set_root_refs(root_item, 1);
	btrfs_set_root_used(root_item, leaf->len);
	btrfs_set_root_last_snapshot(root_item, 0);
C
Christoph Hellwig 已提交
535

536 537
	btrfs_set_root_generation_v2(root_item,
			btrfs_root_generation(root_item));
538
	uuid_le_gen(&new_uuid);
539 540 541 542 543 544
	memcpy(root_item->uuid, new_uuid.b, BTRFS_UUID_SIZE);
	btrfs_set_stack_timespec_sec(&root_item->otime, cur_time.tv_sec);
	btrfs_set_stack_timespec_nsec(&root_item->otime, cur_time.tv_nsec);
	root_item->ctime = root_item->otime;
	btrfs_set_root_ctransid(root_item, trans->transid);
	btrfs_set_root_otransid(root_item, trans->transid);
C
Christoph Hellwig 已提交
545

546
	btrfs_tree_unlock(leaf);
C
Christoph Hellwig 已提交
547 548 549
	free_extent_buffer(leaf);
	leaf = NULL;

550
	btrfs_set_root_dirid(root_item, new_dirid);
C
Christoph Hellwig 已提交
551 552

	key.objectid = objectid;
553
	key.offset = 0;
554
	key.type = BTRFS_ROOT_ITEM_KEY;
555
	ret = btrfs_insert_root(trans, fs_info->tree_root, &key,
556
				root_item);
C
Christoph Hellwig 已提交
557 558 559
	if (ret)
		goto fail;

560
	key.offset = (u64)-1;
561
	new_root = btrfs_read_fs_root_no_name(fs_info, &key);
562 563
	if (IS_ERR(new_root)) {
		ret = PTR_ERR(new_root);
564
		btrfs_abort_transaction(trans, ret);
565 566
		goto fail;
	}
567 568 569

	btrfs_record_root_in_trans(trans, new_root);

570
	ret = btrfs_create_subvol_root(trans, new_root, root, new_dirid);
571 572
	if (ret) {
		/* We potentially lose an unused inode item here */
573
		btrfs_abort_transaction(trans, ret);
574 575 576
		goto fail;
	}

577 578 579 580
	mutex_lock(&new_root->objectid_mutex);
	new_root->highest_objectid = new_dirid;
	mutex_unlock(&new_root->objectid_mutex);

C
Christoph Hellwig 已提交
581 582 583
	/*
	 * insert the directory item
	 */
584
	ret = btrfs_set_inode_index(dir, &index);
585
	if (ret) {
586
		btrfs_abort_transaction(trans, ret);
587 588
		goto fail;
	}
589 590

	ret = btrfs_insert_dir_item(trans, root,
591
				    name, namelen, dir, &key,
592
				    BTRFS_FT_DIR, index);
593
	if (ret) {
594
		btrfs_abort_transaction(trans, ret);
C
Christoph Hellwig 已提交
595
		goto fail;
596
	}
597

598 599 600 601
	btrfs_i_size_write(dir, dir->i_size + namelen * 2);
	ret = btrfs_update_inode(trans, root, dir);
	BUG_ON(ret);

602
	ret = btrfs_add_root_ref(trans, fs_info,
603
				 objectid, root->root_key.objectid,
L
Li Zefan 已提交
604
				 btrfs_ino(dir), index, name, namelen);
605
	BUG_ON(ret);
C
Christoph Hellwig 已提交
606

607
	ret = btrfs_uuid_tree_add(trans, fs_info, root_item->uuid,
608
				  BTRFS_UUID_KEY_SUBVOL, objectid);
609
	if (ret)
610
		btrfs_abort_transaction(trans, ret);
611

C
Christoph Hellwig 已提交
612
fail:
613
	kfree(root_item);
614 615
	trans->block_rsv = NULL;
	trans->bytes_reserved = 0;
616
	btrfs_subvolume_release_metadata(fs_info, &block_rsv, qgroup_reserved);
617

S
Sage Weil 已提交
618 619
	if (async_transid) {
		*async_transid = trans->transid;
620
		err = btrfs_commit_transaction_async(trans, 1);
621
		if (err)
622
			err = btrfs_commit_transaction(trans);
S
Sage Weil 已提交
623
	} else {
624
		err = btrfs_commit_transaction(trans);
S
Sage Weil 已提交
625
	}
C
Christoph Hellwig 已提交
626 627
	if (err && !ret)
		ret = err;
628

629 630
	if (!ret) {
		inode = btrfs_lookup_dentry(dir, dentry);
631 632
		if (IS_ERR(inode))
			return PTR_ERR(inode);
633 634
		d_instantiate(dentry, inode);
	}
C
Christoph Hellwig 已提交
635
	return ret;
636 637 638 639

fail_free:
	kfree(root_item);
	return ret;
C
Christoph Hellwig 已提交
640 641
}

642
static void btrfs_wait_for_no_snapshoting_writes(struct btrfs_root *root)
643 644 645 646 647 648 649 650 651 652 653 654 655 656 657 658
{
	s64 writers;
	DEFINE_WAIT(wait);

	do {
		prepare_to_wait(&root->subv_writers->wait, &wait,
				TASK_UNINTERRUPTIBLE);

		writers = percpu_counter_sum(&root->subv_writers->counter);
		if (writers)
			schedule();

		finish_wait(&root->subv_writers->wait, &wait);
	} while (writers);
}

659 660 661 662
static int create_snapshot(struct btrfs_root *root, struct inode *dir,
			   struct dentry *dentry, char *name, int namelen,
			   u64 *async_transid, bool readonly,
			   struct btrfs_qgroup_inherit *inherit)
C
Christoph Hellwig 已提交
663
{
664
	struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb);
665
	struct inode *inode;
C
Christoph Hellwig 已提交
666 667
	struct btrfs_pending_snapshot *pending_snapshot;
	struct btrfs_trans_handle *trans;
668
	int ret;
C
Christoph Hellwig 已提交
669

670
	if (!test_bit(BTRFS_ROOT_REF_COWS, &root->state))
C
Christoph Hellwig 已提交
671 672
		return -EINVAL;

673 674 675 676
	pending_snapshot = kzalloc(sizeof(*pending_snapshot), GFP_NOFS);
	if (!pending_snapshot)
		return -ENOMEM;

677 678
	pending_snapshot->root_item = kzalloc(sizeof(struct btrfs_root_item),
			GFP_NOFS);
679 680
	pending_snapshot->path = btrfs_alloc_path();
	if (!pending_snapshot->root_item || !pending_snapshot->path) {
681 682 683 684
		ret = -ENOMEM;
		goto free_pending;
	}

685
	atomic_inc(&root->will_be_snapshoted);
686
	smp_mb__after_atomic();
687
	btrfs_wait_for_no_snapshoting_writes(root);
688

689 690
	ret = btrfs_start_delalloc_inodes(root, 0);
	if (ret)
691
		goto dec_and_free;
692

693
	btrfs_wait_ordered_extents(root, -1, 0, (u64)-1);
694

695 696
	btrfs_init_block_rsv(&pending_snapshot->block_rsv,
			     BTRFS_BLOCK_RSV_TEMP);
697 698 699 700 701 702
	/*
	 * 1 - parent dir inode
	 * 2 - dir entries
	 * 1 - root item
	 * 2 - root ref/backref
	 * 1 - root of snapshot
703
	 * 1 - UUID item
704 705
	 */
	ret = btrfs_subvolume_reserve_metadata(BTRFS_I(dir)->root,
706
					&pending_snapshot->block_rsv, 8,
707 708
					&pending_snapshot->qgroup_reserved,
					false);
709
	if (ret)
710
		goto dec_and_free;
711

712
	pending_snapshot->dentry = dentry;
C
Christoph Hellwig 已提交
713
	pending_snapshot->root = root;
L
Li Zefan 已提交
714
	pending_snapshot->readonly = readonly;
715
	pending_snapshot->dir = dir;
716
	pending_snapshot->inherit = inherit;
717

718
	trans = btrfs_start_transaction(root, 0);
719 720 721 722 723
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto fail;
	}

724
	spin_lock(&fs_info->trans_lock);
C
Christoph Hellwig 已提交
725 726
	list_add(&pending_snapshot->list,
		 &trans->transaction->pending_snapshots);
727
	spin_unlock(&fs_info->trans_lock);
S
Sage Weil 已提交
728 729
	if (async_transid) {
		*async_transid = trans->transid;
730
		ret = btrfs_commit_transaction_async(trans, 1);
731
		if (ret)
732
			ret = btrfs_commit_transaction(trans);
S
Sage Weil 已提交
733
	} else {
734
		ret = btrfs_commit_transaction(trans);
S
Sage Weil 已提交
735
	}
736
	if (ret)
737
		goto fail;
738 739 740 741 742

	ret = pending_snapshot->error;
	if (ret)
		goto fail;

743 744 745 746
	ret = btrfs_orphan_cleanup(pending_snapshot->snap);
	if (ret)
		goto fail;

747
	inode = btrfs_lookup_dentry(d_inode(dentry->d_parent), dentry);
748 749 750 751
	if (IS_ERR(inode)) {
		ret = PTR_ERR(inode);
		goto fail;
	}
752

753 754 755
	d_instantiate(dentry, inode);
	ret = 0;
fail:
756
	btrfs_subvolume_release_metadata(fs_info,
757 758
					 &pending_snapshot->block_rsv,
					 pending_snapshot->qgroup_reserved);
759
dec_and_free:
760 761
	if (atomic_dec_and_test(&root->will_be_snapshoted))
		wake_up_atomic_t(&root->will_be_snapshoted);
762 763
free_pending:
	kfree(pending_snapshot->root_item);
764
	btrfs_free_path(pending_snapshot->path);
765 766
	kfree(pending_snapshot);

C
Christoph Hellwig 已提交
767 768 769
	return ret;
}

770 771 772 773 774 775 776 777 778 779 780
/*  copy of may_delete in fs/namei.c()
 *	Check whether we can remove a link victim from directory dir, check
 *  whether the type of victim is right.
 *  1. We can't do it if dir is read-only (done in permission())
 *  2. We should have write and exec permissions on dir
 *  3. We can't remove anything from append-only dir
 *  4. We can't do anything with immutable dir (done in permission())
 *  5. If the sticky bit on dir is set we should either
 *	a. be owner of dir, or
 *	b. be owner of victim, or
 *	c. have CAP_FOWNER capability
781
 *  6. If the victim is append-only or immutable we can't do anything with
782 783 784 785 786 787 788 789
 *     links pointing to it.
 *  7. If we were asked to remove a directory and victim isn't one - ENOTDIR.
 *  8. If we were asked to remove a non-directory and victim isn't one - EISDIR.
 *  9. We can't remove a root or mountpoint.
 * 10. We don't allow removal of NFS sillyrenamed files; it's handled by
 *     nfs_async_unlink().
 */

790
static int btrfs_may_delete(struct inode *dir, struct dentry *victim, int isdir)
791 792 793
{
	int error;

794
	if (d_really_is_negative(victim))
795 796
		return -ENOENT;

797
	BUG_ON(d_inode(victim->d_parent) != dir);
798
	audit_inode_child(dir, victim, AUDIT_TYPE_CHILD_DELETE);
799 800 801 802 803 804

	error = inode_permission(dir, MAY_WRITE | MAY_EXEC);
	if (error)
		return error;
	if (IS_APPEND(dir))
		return -EPERM;
805 806
	if (check_sticky(dir, d_inode(victim)) || IS_APPEND(d_inode(victim)) ||
	    IS_IMMUTABLE(d_inode(victim)) || IS_SWAPFILE(d_inode(victim)))
807 808
		return -EPERM;
	if (isdir) {
809
		if (!d_is_dir(victim))
810 811 812
			return -ENOTDIR;
		if (IS_ROOT(victim))
			return -EBUSY;
813
	} else if (d_is_dir(victim))
814 815 816 817 818 819 820 821
		return -EISDIR;
	if (IS_DEADDIR(dir))
		return -ENOENT;
	if (victim->d_flags & DCACHE_NFSFS_RENAMED)
		return -EBUSY;
	return 0;
}

822 823 824
/* copy of may_create in fs/namei.c() */
static inline int btrfs_may_create(struct inode *dir, struct dentry *child)
{
825
	if (d_really_is_positive(child))
826 827 828 829 830 831 832 833 834 835 836
		return -EEXIST;
	if (IS_DEADDIR(dir))
		return -ENOENT;
	return inode_permission(dir, MAY_WRITE | MAY_EXEC);
}

/*
 * Create a new subvolume below @parent.  This is largely modeled after
 * sys_mkdirat and vfs_mkdir, but we only do a single component lookup
 * inside this filesystem so it's quite a bit simpler.
 */
A
Al Viro 已提交
837
static noinline int btrfs_mksubvol(const struct path *parent,
838
				   char *name, int namelen,
S
Sage Weil 已提交
839
				   struct btrfs_root *snap_src,
A
Arne Jansen 已提交
840
				   u64 *async_transid, bool readonly,
841
				   struct btrfs_qgroup_inherit *inherit)
842
{
843 844
	struct inode *dir = d_inode(parent->dentry);
	struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb);
845 846 847
	struct dentry *dentry;
	int error;

848 849 850
	error = down_write_killable_nested(&dir->i_rwsem, I_MUTEX_PARENT);
	if (error == -EINTR)
		return error;
851 852 853 854 855 856

	dentry = lookup_one_len(name, parent->dentry, namelen);
	error = PTR_ERR(dentry);
	if (IS_ERR(dentry))
		goto out_unlock;

857
	error = btrfs_may_create(dir, dentry);
858
	if (error)
859
		goto out_dput;
860

C
Chris Mason 已提交
861 862 863 864 865 866 867 868 869 870
	/*
	 * even if this name doesn't exist, we may get hash collisions.
	 * check for them now when we can safely fail
	 */
	error = btrfs_check_dir_item_collision(BTRFS_I(dir)->root,
					       dir->i_ino, name,
					       namelen);
	if (error)
		goto out_dput;

871
	down_read(&fs_info->subvol_sem);
872 873 874 875

	if (btrfs_root_refs(&BTRFS_I(dir)->root->root_item) == 0)
		goto out_up_read;

876
	if (snap_src) {
877
		error = create_snapshot(snap_src, dir, dentry, name, namelen,
A
Arne Jansen 已提交
878
					async_transid, readonly, inherit);
879
	} else {
880 881
		error = create_subvol(dir, dentry, name, namelen,
				      async_transid, inherit);
882
	}
883 884 885
	if (!error)
		fsnotify_mkdir(dir, dentry);
out_up_read:
886
	up_read(&fs_info->subvol_sem);
887 888 889
out_dput:
	dput(dentry);
out_unlock:
A
Al Viro 已提交
890
	inode_unlock(dir);
891 892 893
	return error;
}

C
Chris Mason 已提交
894 895 896 897 898 899 900
/*
 * When we're defragging a range, we don't want to kick it off again
 * if it is really just waiting for delalloc to send it down.
 * If we find a nice big extent or delalloc range for the bytes in the
 * file you want to defrag, we return 0 to let you know to skip this
 * part of the file
 */
901
static int check_defrag_in_cache(struct inode *inode, u64 offset, u32 thresh)
C
Chris Mason 已提交
902 903 904 905 906 907 908
{
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	struct extent_map *em = NULL;
	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
	u64 end;

	read_lock(&em_tree->lock);
909
	em = lookup_extent_mapping(em_tree, offset, PAGE_SIZE);
C
Chris Mason 已提交
910 911 912 913 914 915 916 917 918 919 920 921 922 923 924 925 926 927 928 929 930 931 932 933 934 935
	read_unlock(&em_tree->lock);

	if (em) {
		end = extent_map_end(em);
		free_extent_map(em);
		if (end - offset > thresh)
			return 0;
	}
	/* if we already have a nice delalloc here, just stop */
	thresh /= 2;
	end = count_range_bits(io_tree, &offset, offset + thresh,
			       thresh, EXTENT_DELALLOC, 1);
	if (end >= thresh)
		return 0;
	return 1;
}

/*
 * helper function to walk through a file and find extents
 * newer than a specific transid, and smaller than thresh.
 *
 * This is used by the defragging code to find new and small
 * extents
 */
static int find_new_extents(struct btrfs_root *root,
			    struct inode *inode, u64 newer_than,
936
			    u64 *off, u32 thresh)
C
Chris Mason 已提交
937 938 939 940 941 942 943
{
	struct btrfs_path *path;
	struct btrfs_key min_key;
	struct extent_buffer *leaf;
	struct btrfs_file_extent_item *extent;
	int type;
	int ret;
944
	u64 ino = btrfs_ino(inode);
C
Chris Mason 已提交
945 946 947 948 949

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

950
	min_key.objectid = ino;
C
Chris Mason 已提交
951 952 953
	min_key.type = BTRFS_EXTENT_DATA_KEY;
	min_key.offset = *off;

954
	while (1) {
955
		ret = btrfs_search_forward(root, &min_key, path, newer_than);
C
Chris Mason 已提交
956 957
		if (ret != 0)
			goto none;
958
process_slot:
959
		if (min_key.objectid != ino)
C
Chris Mason 已提交
960 961 962 963 964 965 966 967 968 969 970 971 972 973 974 975 976
			goto none;
		if (min_key.type != BTRFS_EXTENT_DATA_KEY)
			goto none;

		leaf = path->nodes[0];
		extent = btrfs_item_ptr(leaf, path->slots[0],
					struct btrfs_file_extent_item);

		type = btrfs_file_extent_type(leaf, extent);
		if (type == BTRFS_FILE_EXTENT_REG &&
		    btrfs_file_extent_num_bytes(leaf, extent) < thresh &&
		    check_defrag_in_cache(inode, min_key.offset, thresh)) {
			*off = min_key.offset;
			btrfs_free_path(path);
			return 0;
		}

977 978 979 980 981 982
		path->slots[0]++;
		if (path->slots[0] < btrfs_header_nritems(leaf)) {
			btrfs_item_key_to_cpu(leaf, &min_key, path->slots[0]);
			goto process_slot;
		}

C
Chris Mason 已提交
983 984 985 986 987 988 989 990 991 992 993
		if (min_key.offset == (u64)-1)
			goto none;

		min_key.offset++;
		btrfs_release_path(path);
	}
none:
	btrfs_free_path(path);
	return -ENOENT;
}

L
Li Zefan 已提交
994
static struct extent_map *defrag_lookup_extent(struct inode *inode, u64 start)
995 996
{
	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
L
Li Zefan 已提交
997 998
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	struct extent_map *em;
999
	u64 len = PAGE_SIZE;
1000

L
Li Zefan 已提交
1001 1002 1003 1004
	/*
	 * hopefully we have this extent in the tree already, try without
	 * the full extent lock
	 */
1005
	read_lock(&em_tree->lock);
L
Li Zefan 已提交
1006
	em = lookup_extent_mapping(em_tree, start, len);
1007 1008
	read_unlock(&em_tree->lock);

L
Li Zefan 已提交
1009
	if (!em) {
1010 1011 1012
		struct extent_state *cached = NULL;
		u64 end = start + len - 1;

L
Li Zefan 已提交
1013
		/* get the big lock and read metadata off disk */
1014
		lock_extent_bits(io_tree, start, end, &cached);
L
Li Zefan 已提交
1015
		em = btrfs_get_extent(inode, NULL, 0, start, len, 0);
1016
		unlock_extent_cached(io_tree, start, end, &cached, GFP_NOFS);
L
Li Zefan 已提交
1017 1018 1019 1020 1021 1022 1023

		if (IS_ERR(em))
			return NULL;
	}

	return em;
}
1024

L
Li Zefan 已提交
1025 1026 1027 1028 1029 1030 1031 1032 1033 1034
static bool defrag_check_next_extent(struct inode *inode, struct extent_map *em)
{
	struct extent_map *next;
	bool ret = true;

	/* this is the last extent */
	if (em->start + em->len >= i_size_read(inode))
		return false;

	next = defrag_lookup_extent(inode, em->start + em->len);
1035 1036 1037
	if (!next || next->block_start >= EXTENT_MAP_LAST_BYTE)
		ret = false;
	else if ((em->block_start + em->block_len == next->block_start) &&
1038
		 (em->block_len > SZ_128K && next->block_len > SZ_128K))
L
Li Zefan 已提交
1039 1040 1041
		ret = false;

	free_extent_map(next);
1042 1043 1044
	return ret;
}

1045
static int should_defrag_range(struct inode *inode, u64 start, u32 thresh,
1046 1047
			       u64 *last_len, u64 *skip, u64 *defrag_end,
			       int compress)
1048
{
L
Li Zefan 已提交
1049
	struct extent_map *em;
1050
	int ret = 1;
L
Li Zefan 已提交
1051
	bool next_mergeable = true;
1052
	bool prev_mergeable = true;
1053 1054

	/*
1055
	 * make sure that once we start defragging an extent, we keep on
1056 1057 1058 1059 1060 1061 1062
	 * defragging it
	 */
	if (start < *defrag_end)
		return 1;

	*skip = 0;

L
Li Zefan 已提交
1063 1064 1065
	em = defrag_lookup_extent(inode, start);
	if (!em)
		return 0;
1066 1067

	/* this will cover holes, and inline extents */
1068
	if (em->block_start >= EXTENT_MAP_LAST_BYTE) {
1069
		ret = 0;
1070 1071 1072
		goto out;
	}

1073 1074 1075
	if (!*defrag_end)
		prev_mergeable = false;

L
Li Zefan 已提交
1076
	next_mergeable = defrag_check_next_extent(inode, em);
1077
	/*
L
Li Zefan 已提交
1078 1079
	 * we hit a real extent, if it is big or the next extent is not a
	 * real extent, don't bother defragging it
1080
	 */
1081
	if (!compress && (*last_len == 0 || *last_len >= thresh) &&
1082
	    (em->len >= thresh || (!next_mergeable && !prev_mergeable)))
1083
		ret = 0;
1084
out:
1085 1086 1087 1088 1089 1090 1091 1092 1093 1094 1095 1096 1097 1098 1099 1100 1101 1102 1103 1104
	/*
	 * last_len ends up being a counter of how many bytes we've defragged.
	 * every time we choose not to defrag an extent, we reset *last_len
	 * so that the next tiny extent will force a defrag.
	 *
	 * The end result of this is that tiny extents before a single big
	 * extent will force at least part of that big extent to be defragged.
	 */
	if (ret) {
		*defrag_end = extent_map_end(em);
	} else {
		*last_len = 0;
		*skip = extent_map_end(em);
		*defrag_end = 0;
	}

	free_extent_map(em);
	return ret;
}

C
Chris Mason 已提交
1105 1106 1107 1108 1109 1110 1111 1112 1113 1114 1115 1116 1117 1118 1119
/*
 * it doesn't do much good to defrag one or two pages
 * at a time.  This pulls in a nice chunk of pages
 * to COW and defrag.
 *
 * It also makes sure the delalloc code has enough
 * dirty data to avoid making new small extents as part
 * of the defrag
 *
 * It's a good idea to start RA on this range
 * before calling this.
 */
static int cluster_pages_for_defrag(struct inode *inode,
				    struct page **pages,
				    unsigned long start_index,
1120
				    unsigned long num_pages)
C
Christoph Hellwig 已提交
1121
{
C
Chris Mason 已提交
1122 1123 1124 1125
	unsigned long file_end;
	u64 isize = i_size_read(inode);
	u64 page_start;
	u64 page_end;
1126
	u64 page_cnt;
C
Chris Mason 已提交
1127 1128 1129
	int ret;
	int i;
	int i_done;
1130
	struct btrfs_ordered_extent *ordered;
C
Chris Mason 已提交
1131
	struct extent_state *cached_state = NULL;
1132
	struct extent_io_tree *tree;
1133
	gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping);
C
Chris Mason 已提交
1134

1135
	file_end = (isize - 1) >> PAGE_SHIFT;
1136 1137 1138 1139
	if (!isize || start_index > file_end)
		return 0;

	page_cnt = min_t(u64, (u64)num_pages, (u64)file_end - start_index + 1);
C
Chris Mason 已提交
1140 1141

	ret = btrfs_delalloc_reserve_space(inode,
1142 1143
			start_index << PAGE_SHIFT,
			page_cnt << PAGE_SHIFT);
C
Chris Mason 已提交
1144 1145 1146
	if (ret)
		return ret;
	i_done = 0;
1147
	tree = &BTRFS_I(inode)->io_tree;
C
Chris Mason 已提交
1148 1149

	/* step one, lock all the pages */
1150
	for (i = 0; i < page_cnt; i++) {
C
Chris Mason 已提交
1151
		struct page *page;
1152
again:
1153
		page = find_or_create_page(inode->i_mapping,
1154
					   start_index + i, mask);
C
Chris Mason 已提交
1155 1156 1157
		if (!page)
			break;

1158
		page_start = page_offset(page);
1159
		page_end = page_start + PAGE_SIZE - 1;
1160
		while (1) {
1161
			lock_extent_bits(tree, page_start, page_end,
1162
					 &cached_state);
1163 1164
			ordered = btrfs_lookup_ordered_extent(inode,
							      page_start);
1165 1166
			unlock_extent_cached(tree, page_start, page_end,
					     &cached_state, GFP_NOFS);
1167 1168 1169 1170 1171 1172 1173
			if (!ordered)
				break;

			unlock_page(page);
			btrfs_start_ordered_extent(inode, ordered, 1);
			btrfs_put_ordered_extent(ordered);
			lock_page(page);
1174 1175 1176 1177 1178 1179
			/*
			 * we unlocked the page above, so we need check if
			 * it was released or not.
			 */
			if (page->mapping != inode->i_mapping) {
				unlock_page(page);
1180
				put_page(page);
1181 1182
				goto again;
			}
1183 1184
		}

C
Chris Mason 已提交
1185 1186 1187 1188 1189
		if (!PageUptodate(page)) {
			btrfs_readpage(NULL, page);
			lock_page(page);
			if (!PageUptodate(page)) {
				unlock_page(page);
1190
				put_page(page);
C
Chris Mason 已提交
1191 1192 1193 1194
				ret = -EIO;
				break;
			}
		}
1195 1196 1197

		if (page->mapping != inode->i_mapping) {
			unlock_page(page);
1198
			put_page(page);
1199 1200 1201
			goto again;
		}

C
Chris Mason 已提交
1202 1203 1204 1205 1206 1207 1208 1209 1210 1211 1212 1213 1214 1215 1216 1217 1218
		pages[i] = page;
		i_done++;
	}
	if (!i_done || ret)
		goto out;

	if (!(inode->i_sb->s_flags & MS_ACTIVE))
		goto out;

	/*
	 * so now we have a nice long stream of locked
	 * and up to date pages, lets wait on them
	 */
	for (i = 0; i < i_done; i++)
		wait_on_page_writeback(pages[i]);

	page_start = page_offset(pages[0]);
1219
	page_end = page_offset(pages[i_done - 1]) + PAGE_SIZE;
C
Chris Mason 已提交
1220 1221

	lock_extent_bits(&BTRFS_I(inode)->io_tree,
1222
			 page_start, page_end - 1, &cached_state);
C
Chris Mason 已提交
1223 1224
	clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start,
			  page_end - 1, EXTENT_DIRTY | EXTENT_DELALLOC |
1225 1226
			  EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG, 0, 0,
			  &cached_state, GFP_NOFS);
C
Chris Mason 已提交
1227

1228
	if (i_done != page_cnt) {
1229 1230 1231
		spin_lock(&BTRFS_I(inode)->lock);
		BTRFS_I(inode)->outstanding_extents++;
		spin_unlock(&BTRFS_I(inode)->lock);
C
Chris Mason 已提交
1232
		btrfs_delalloc_release_space(inode,
1233 1234
				start_index << PAGE_SHIFT,
				(page_cnt - i_done) << PAGE_SHIFT);
C
Chris Mason 已提交
1235 1236 1237
	}


1238
	set_extent_defrag(&BTRFS_I(inode)->io_tree, page_start, page_end - 1,
1239
			  &cached_state);
C
Chris Mason 已提交
1240 1241 1242 1243 1244 1245 1246 1247 1248 1249 1250

	unlock_extent_cached(&BTRFS_I(inode)->io_tree,
			     page_start, page_end - 1, &cached_state,
			     GFP_NOFS);

	for (i = 0; i < i_done; i++) {
		clear_page_dirty_for_io(pages[i]);
		ClearPageChecked(pages[i]);
		set_page_extent_mapped(pages[i]);
		set_page_dirty(pages[i]);
		unlock_page(pages[i]);
1251
		put_page(pages[i]);
C
Chris Mason 已提交
1252 1253 1254 1255 1256
	}
	return i_done;
out:
	for (i = 0; i < i_done; i++) {
		unlock_page(pages[i]);
1257
		put_page(pages[i]);
C
Chris Mason 已提交
1258
	}
1259
	btrfs_delalloc_release_space(inode,
1260 1261
			start_index << PAGE_SHIFT,
			page_cnt << PAGE_SHIFT);
C
Chris Mason 已提交
1262 1263 1264 1265 1266 1267 1268 1269
	return ret;

}

int btrfs_defrag_file(struct inode *inode, struct file *file,
		      struct btrfs_ioctl_defrag_range_args *range,
		      u64 newer_than, unsigned long max_to_defrag)
{
1270
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
C
Chris Mason 已提交
1271 1272
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct file_ra_state *ra = NULL;
C
Christoph Hellwig 已提交
1273
	unsigned long last_index;
1274
	u64 isize = i_size_read(inode);
1275 1276 1277
	u64 last_len = 0;
	u64 skip = 0;
	u64 defrag_end = 0;
C
Chris Mason 已提交
1278
	u64 newer_off = range->start;
C
Christoph Hellwig 已提交
1279
	unsigned long i;
1280
	unsigned long ra_index = 0;
C
Christoph Hellwig 已提交
1281
	int ret;
C
Chris Mason 已提交
1282
	int defrag_count = 0;
1283
	int compress_type = BTRFS_COMPRESS_ZLIB;
1284
	u32 extent_thresh = range->extent_thresh;
1285
	unsigned long max_cluster = SZ_256K >> PAGE_SHIFT;
1286
	unsigned long cluster = max_cluster;
1287
	u64 new_align = ~((u64)SZ_128K - 1);
C
Chris Mason 已提交
1288 1289
	struct page **pages = NULL;

1290 1291 1292 1293 1294
	if (isize == 0)
		return 0;

	if (range->start >= isize)
		return -EINVAL;
1295 1296 1297 1298 1299 1300 1301

	if (range->flags & BTRFS_DEFRAG_RANGE_COMPRESS) {
		if (range->compress_type > BTRFS_COMPRESS_TYPES)
			return -EINVAL;
		if (range->compress_type)
			compress_type = range->compress_type;
	}
C
Christoph Hellwig 已提交
1302

1303
	if (extent_thresh == 0)
1304
		extent_thresh = SZ_256K;
1305

C
Chris Mason 已提交
1306 1307 1308 1309 1310 1311 1312 1313 1314 1315 1316 1317 1318
	/*
	 * if we were not given a file, allocate a readahead
	 * context
	 */
	if (!file) {
		ra = kzalloc(sizeof(*ra), GFP_NOFS);
		if (!ra)
			return -ENOMEM;
		file_ra_state_init(ra, inode->i_mapping);
	} else {
		ra = &file->f_ra;
	}

1319
	pages = kmalloc_array(max_cluster, sizeof(struct page *),
C
Chris Mason 已提交
1320 1321 1322 1323 1324 1325 1326
			GFP_NOFS);
	if (!pages) {
		ret = -ENOMEM;
		goto out_ra;
	}

	/* find the last page to defrag */
C
Chris Mason 已提交
1327
	if (range->start + range->len > range->start) {
1328
		last_index = min_t(u64, isize - 1,
1329
			 range->start + range->len - 1) >> PAGE_SHIFT;
C
Chris Mason 已提交
1330
	} else {
1331
		last_index = (isize - 1) >> PAGE_SHIFT;
C
Chris Mason 已提交
1332 1333
	}

C
Chris Mason 已提交
1334 1335
	if (newer_than) {
		ret = find_new_extents(root, inode, newer_than,
1336
				       &newer_off, SZ_64K);
C
Chris Mason 已提交
1337 1338 1339 1340 1341 1342
		if (!ret) {
			range->start = newer_off;
			/*
			 * we always align our defrag to help keep
			 * the extents in the file evenly spaced
			 */
1343
			i = (newer_off & new_align) >> PAGE_SHIFT;
C
Chris Mason 已提交
1344 1345 1346
		} else
			goto out_ra;
	} else {
1347
		i = range->start >> PAGE_SHIFT;
C
Chris Mason 已提交
1348 1349
	}
	if (!max_to_defrag)
1350
		max_to_defrag = last_index - i + 1;
C
Chris Mason 已提交
1351

L
Li Zefan 已提交
1352 1353 1354 1355 1356 1357 1358
	/*
	 * make writeback starts from i, so the defrag range can be
	 * written sequentially.
	 */
	if (i < inode->i_mapping->writeback_index)
		inode->i_mapping->writeback_index = i;

1359
	while (i <= last_index && defrag_count < max_to_defrag &&
1360
	       (i < DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE))) {
C
Chris Mason 已提交
1361 1362 1363 1364 1365 1366 1367
		/*
		 * make sure we stop running if someone unmounts
		 * the FS
		 */
		if (!(inode->i_sb->s_flags & MS_ACTIVE))
			break;

1368 1369
		if (btrfs_defrag_cancelled(fs_info)) {
			btrfs_debug(fs_info, "defrag_file cancelled");
1370 1371 1372 1373
			ret = -EAGAIN;
			break;
		}

1374
		if (!should_defrag_range(inode, (u64)i << PAGE_SHIFT,
L
Li Zefan 已提交
1375
					 extent_thresh, &last_len, &skip,
1376 1377
					 &defrag_end, range->flags &
					 BTRFS_DEFRAG_RANGE_COMPRESS)) {
1378 1379 1380 1381 1382
			unsigned long next;
			/*
			 * the should_defrag function tells us how much to skip
			 * bump our counter by the suggested amount
			 */
1383
			next = DIV_ROUND_UP(skip, PAGE_SIZE);
1384 1385 1386
			i = max(i + 1, next);
			continue;
		}
1387 1388

		if (!newer_than) {
1389 1390
			cluster = (PAGE_ALIGN(defrag_end) >>
				   PAGE_SHIFT) - i;
1391 1392 1393 1394 1395 1396 1397 1398 1399
			cluster = min(cluster, max_cluster);
		} else {
			cluster = max_cluster;
		}

		if (i + cluster > ra_index) {
			ra_index = max(i, ra_index);
			btrfs_force_ra(inode->i_mapping, ra, file, ra_index,
				       cluster);
1400
			ra_index += cluster;
1401
		}
1402

A
Al Viro 已提交
1403
		inode_lock(inode);
1404 1405
		if (range->flags & BTRFS_DEFRAG_RANGE_COMPRESS)
			BTRFS_I(inode)->force_compress = compress_type;
1406
		ret = cluster_pages_for_defrag(inode, pages, i, cluster);
1407
		if (ret < 0) {
A
Al Viro 已提交
1408
			inode_unlock(inode);
C
Chris Mason 已提交
1409
			goto out_ra;
1410
		}
C
Chris Mason 已提交
1411 1412

		defrag_count += ret;
1413
		balance_dirty_pages_ratelimited(inode->i_mapping);
A
Al Viro 已提交
1414
		inode_unlock(inode);
C
Chris Mason 已提交
1415 1416 1417 1418 1419

		if (newer_than) {
			if (newer_off == (u64)-1)
				break;

1420 1421 1422
			if (ret > 0)
				i += ret;

C
Chris Mason 已提交
1423
			newer_off = max(newer_off + 1,
1424
					(u64)i << PAGE_SHIFT);
C
Chris Mason 已提交
1425

1426 1427
			ret = find_new_extents(root, inode, newer_than,
					       &newer_off, SZ_64K);
C
Chris Mason 已提交
1428 1429
			if (!ret) {
				range->start = newer_off;
1430
				i = (newer_off & new_align) >> PAGE_SHIFT;
C
Chris Mason 已提交
1431 1432
			} else {
				break;
C
Christoph Hellwig 已提交
1433
			}
C
Chris Mason 已提交
1434
		} else {
1435
			if (ret > 0) {
L
Li Zefan 已提交
1436
				i += ret;
1437
				last_len += ret << PAGE_SHIFT;
1438
			} else {
L
Li Zefan 已提交
1439
				i++;
1440 1441
				last_len = 0;
			}
C
Christoph Hellwig 已提交
1442 1443 1444
		}
	}

1445
	if ((range->flags & BTRFS_DEFRAG_RANGE_START_IO)) {
C
Chris Mason 已提交
1446
		filemap_flush(inode->i_mapping);
1447 1448 1449 1450
		if (test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
			     &BTRFS_I(inode)->runtime_flags))
			filemap_flush(inode->i_mapping);
	}
C
Chris Mason 已提交
1451 1452 1453 1454 1455 1456

	if ((range->flags & BTRFS_DEFRAG_RANGE_COMPRESS)) {
		/* the filemap_flush will queue IO into the worker threads, but
		 * we have to make sure the IO is actually started and that
		 * ordered extents get created before we return
		 */
1457 1458 1459 1460 1461 1462
		atomic_inc(&fs_info->async_submit_draining);
		while (atomic_read(&fs_info->nr_async_submits) ||
		       atomic_read(&fs_info->async_delalloc_pages)) {
			wait_event(fs_info->async_submit_wait,
				   (atomic_read(&fs_info->nr_async_submits) == 0 &&
				    atomic_read(&fs_info->async_delalloc_pages) == 0));
C
Chris Mason 已提交
1463
		}
1464
		atomic_dec(&fs_info->async_submit_draining);
C
Chris Mason 已提交
1465 1466
	}

1467
	if (range->compress_type == BTRFS_COMPRESS_LZO) {
1468
		btrfs_set_fs_incompat(fs_info, COMPRESS_LZO);
1469 1470
	}

1471
	ret = defrag_count;
1472

C
Chris Mason 已提交
1473
out_ra:
1474
	if (range->flags & BTRFS_DEFRAG_RANGE_COMPRESS) {
A
Al Viro 已提交
1475
		inode_lock(inode);
1476
		BTRFS_I(inode)->force_compress = BTRFS_COMPRESS_NONE;
A
Al Viro 已提交
1477
		inode_unlock(inode);
1478
	}
C
Chris Mason 已提交
1479 1480 1481
	if (!file)
		kfree(ra);
	kfree(pages);
1482
	return ret;
C
Christoph Hellwig 已提交
1483 1484
}

1485
static noinline int btrfs_ioctl_resize(struct file *file,
1486
					void __user *arg)
C
Christoph Hellwig 已提交
1487
{
1488 1489
	struct inode *inode = file_inode(file);
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
C
Christoph Hellwig 已提交
1490 1491 1492
	u64 new_size;
	u64 old_size;
	u64 devid = 1;
1493
	struct btrfs_root *root = BTRFS_I(inode)->root;
C
Christoph Hellwig 已提交
1494 1495 1496 1497
	struct btrfs_ioctl_vol_args *vol_args;
	struct btrfs_trans_handle *trans;
	struct btrfs_device *device = NULL;
	char *sizestr;
1498
	char *retptr;
C
Christoph Hellwig 已提交
1499 1500 1501 1502
	char *devstr = NULL;
	int ret = 0;
	int mod = 0;

1503 1504 1505
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

1506 1507 1508 1509
	ret = mnt_want_write_file(file);
	if (ret)
		return ret;

1510
	if (atomic_xchg(&fs_info->mutually_exclusive_operation_running, 1)) {
1511
		mnt_drop_write_file(file);
1512
		return BTRFS_ERROR_DEV_EXCL_RUN_IN_PROGRESS;
1513 1514
	}

1515
	mutex_lock(&fs_info->volume_mutex);
L
Li Zefan 已提交
1516
	vol_args = memdup_user(arg, sizeof(*vol_args));
1517 1518 1519 1520
	if (IS_ERR(vol_args)) {
		ret = PTR_ERR(vol_args);
		goto out;
	}
1521 1522

	vol_args->name[BTRFS_PATH_NAME_MAX] = '\0';
C
Christoph Hellwig 已提交
1523 1524 1525 1526 1527 1528 1529

	sizestr = vol_args->name;
	devstr = strchr(sizestr, ':');
	if (devstr) {
		sizestr = devstr + 1;
		*devstr = '\0';
		devstr = vol_args->name;
1530 1531 1532
		ret = kstrtoull(devstr, 10, &devid);
		if (ret)
			goto out_free;
1533 1534 1535 1536
		if (!devid) {
			ret = -EINVAL;
			goto out_free;
		}
1537
		btrfs_info(fs_info, "resizing devid %llu", devid);
C
Christoph Hellwig 已提交
1538
	}
M
Miao Xie 已提交
1539

1540
	device = btrfs_find_device(fs_info, devid, NULL, NULL);
C
Christoph Hellwig 已提交
1541
	if (!device) {
1542 1543
		btrfs_info(fs_info, "resizer unable to find device %llu",
			   devid);
1544
		ret = -ENODEV;
1545
		goto out_free;
C
Christoph Hellwig 已提交
1546
	}
M
Miao Xie 已提交
1547 1548

	if (!device->writeable) {
1549
		btrfs_info(fs_info,
1550
			   "resizer unable to apply on readonly device %llu",
1551
		       devid);
1552
		ret = -EPERM;
L
Liu Bo 已提交
1553 1554 1555
		goto out_free;
	}

C
Christoph Hellwig 已提交
1556 1557 1558 1559 1560 1561 1562 1563 1564 1565
	if (!strcmp(sizestr, "max"))
		new_size = device->bdev->bd_inode->i_size;
	else {
		if (sizestr[0] == '-') {
			mod = -1;
			sizestr++;
		} else if (sizestr[0] == '+') {
			mod = 1;
			sizestr++;
		}
1566 1567
		new_size = memparse(sizestr, &retptr);
		if (*retptr != '\0' || new_size == 0) {
C
Christoph Hellwig 已提交
1568
			ret = -EINVAL;
1569
			goto out_free;
C
Christoph Hellwig 已提交
1570 1571 1572
		}
	}

1573
	if (device->is_tgtdev_for_dev_replace) {
1574
		ret = -EPERM;
1575 1576 1577
		goto out_free;
	}

1578
	old_size = btrfs_device_get_total_bytes(device);
C
Christoph Hellwig 已提交
1579 1580 1581 1582

	if (mod < 0) {
		if (new_size > old_size) {
			ret = -EINVAL;
1583
			goto out_free;
C
Christoph Hellwig 已提交
1584 1585 1586
		}
		new_size = old_size - new_size;
	} else if (mod > 0) {
1587
		if (new_size > ULLONG_MAX - old_size) {
1588
			ret = -ERANGE;
1589 1590
			goto out_free;
		}
C
Christoph Hellwig 已提交
1591 1592 1593
		new_size = old_size + new_size;
	}

1594
	if (new_size < SZ_256M) {
C
Christoph Hellwig 已提交
1595
		ret = -EINVAL;
1596
		goto out_free;
C
Christoph Hellwig 已提交
1597 1598 1599
	}
	if (new_size > device->bdev->bd_inode->i_size) {
		ret = -EFBIG;
1600
		goto out_free;
C
Christoph Hellwig 已提交
1601 1602
	}

1603 1604
	new_size = div_u64(new_size, fs_info->sectorsize);
	new_size *= fs_info->sectorsize;
C
Christoph Hellwig 已提交
1605

1606 1607
	btrfs_info_in_rcu(fs_info, "new size for %s is %llu",
			  rcu_str_deref(device->name), new_size);
C
Christoph Hellwig 已提交
1608 1609

	if (new_size > old_size) {
1610
		trans = btrfs_start_transaction(root, 0);
1611 1612
		if (IS_ERR(trans)) {
			ret = PTR_ERR(trans);
1613
			goto out_free;
1614
		}
C
Christoph Hellwig 已提交
1615
		ret = btrfs_grow_device(trans, device, new_size);
1616
		btrfs_commit_transaction(trans);
1617
	} else if (new_size < old_size) {
C
Christoph Hellwig 已提交
1618
		ret = btrfs_shrink_device(device, new_size);
1619
	} /* equal, nothing need to do */
C
Christoph Hellwig 已提交
1620

1621
out_free:
C
Christoph Hellwig 已提交
1622
	kfree(vol_args);
1623
out:
1624 1625
	mutex_unlock(&fs_info->volume_mutex);
	atomic_set(&fs_info->mutually_exclusive_operation_running, 0);
1626
	mnt_drop_write_file(file);
C
Christoph Hellwig 已提交
1627 1628 1629
	return ret;
}

S
Sage Weil 已提交
1630
static noinline int btrfs_ioctl_snap_create_transid(struct file *file,
A
Arne Jansen 已提交
1631 1632
				char *name, unsigned long fd, int subvol,
				u64 *transid, bool readonly,
1633
				struct btrfs_qgroup_inherit *inherit)
C
Christoph Hellwig 已提交
1634 1635
{
	int namelen;
1636
	int ret = 0;
C
Christoph Hellwig 已提交
1637

1638 1639 1640
	if (!S_ISDIR(file_inode(file)->i_mode))
		return -ENOTDIR;

1641 1642 1643 1644
	ret = mnt_want_write_file(file);
	if (ret)
		goto out;

S
Sage Weil 已提交
1645 1646
	namelen = strlen(name);
	if (strchr(name, '/')) {
C
Christoph Hellwig 已提交
1647
		ret = -EINVAL;
1648
		goto out_drop_write;
C
Christoph Hellwig 已提交
1649 1650
	}

1651 1652 1653
	if (name[0] == '.' &&
	   (namelen == 1 || (name[1] == '.' && namelen == 2))) {
		ret = -EEXIST;
1654
		goto out_drop_write;
1655 1656
	}

1657
	if (subvol) {
S
Sage Weil 已提交
1658
		ret = btrfs_mksubvol(&file->f_path, name, namelen,
A
Arne Jansen 已提交
1659
				     NULL, transid, readonly, inherit);
1660
	} else {
1661
		struct fd src = fdget(fd);
1662
		struct inode *src_inode;
1663
		if (!src.file) {
1664
			ret = -EINVAL;
1665
			goto out_drop_write;
1666 1667
		}

A
Al Viro 已提交
1668 1669
		src_inode = file_inode(src.file);
		if (src_inode->i_sb != file_inode(file)->i_sb) {
J
Josef Bacik 已提交
1670
			btrfs_info(BTRFS_I(file_inode(file))->root->fs_info,
1671
				   "Snapshot src from another FS");
1672
			ret = -EXDEV;
1673 1674 1675 1676 1677 1678
		} else if (!inode_owner_or_capable(src_inode)) {
			/*
			 * Subvolume creation is not restricted, but snapshots
			 * are limited to own subvolumes only
			 */
			ret = -EPERM;
1679 1680 1681 1682
		} else {
			ret = btrfs_mksubvol(&file->f_path, name, namelen,
					     BTRFS_I(src_inode)->root,
					     transid, readonly, inherit);
1683
		}
1684
		fdput(src);
1685
	}
1686 1687
out_drop_write:
	mnt_drop_write_file(file);
C
Christoph Hellwig 已提交
1688
out:
S
Sage Weil 已提交
1689 1690 1691 1692
	return ret;
}

static noinline int btrfs_ioctl_snap_create(struct file *file,
1693
					    void __user *arg, int subvol)
S
Sage Weil 已提交
1694
{
1695
	struct btrfs_ioctl_vol_args *vol_args;
S
Sage Weil 已提交
1696 1697
	int ret;

1698 1699 1700
	if (!S_ISDIR(file_inode(file)->i_mode))
		return -ENOTDIR;

1701 1702 1703 1704
	vol_args = memdup_user(arg, sizeof(*vol_args));
	if (IS_ERR(vol_args))
		return PTR_ERR(vol_args);
	vol_args->name[BTRFS_PATH_NAME_MAX] = '\0';
S
Sage Weil 已提交
1705

1706
	ret = btrfs_ioctl_snap_create_transid(file, vol_args->name,
L
Li Zefan 已提交
1707
					      vol_args->fd, subvol,
A
Arne Jansen 已提交
1708
					      NULL, false, NULL);
1709

1710 1711 1712
	kfree(vol_args);
	return ret;
}
1713

1714 1715 1716 1717 1718 1719 1720
static noinline int btrfs_ioctl_snap_create_v2(struct file *file,
					       void __user *arg, int subvol)
{
	struct btrfs_ioctl_vol_args_v2 *vol_args;
	int ret;
	u64 transid = 0;
	u64 *ptr = NULL;
L
Li Zefan 已提交
1721
	bool readonly = false;
A
Arne Jansen 已提交
1722
	struct btrfs_qgroup_inherit *inherit = NULL;
1723

1724 1725 1726
	if (!S_ISDIR(file_inode(file)->i_mode))
		return -ENOTDIR;

1727 1728 1729 1730
	vol_args = memdup_user(arg, sizeof(*vol_args));
	if (IS_ERR(vol_args))
		return PTR_ERR(vol_args);
	vol_args->name[BTRFS_SUBVOL_NAME_MAX] = '\0';
1731

L
Li Zefan 已提交
1732
	if (vol_args->flags &
A
Arne Jansen 已提交
1733 1734
	    ~(BTRFS_SUBVOL_CREATE_ASYNC | BTRFS_SUBVOL_RDONLY |
	      BTRFS_SUBVOL_QGROUP_INHERIT)) {
L
Li Zefan 已提交
1735
		ret = -EOPNOTSUPP;
D
Dan Carpenter 已提交
1736
		goto free_args;
S
Sage Weil 已提交
1737
	}
1738 1739 1740

	if (vol_args->flags & BTRFS_SUBVOL_CREATE_ASYNC)
		ptr = &transid;
L
Li Zefan 已提交
1741 1742
	if (vol_args->flags & BTRFS_SUBVOL_RDONLY)
		readonly = true;
A
Arne Jansen 已提交
1743
	if (vol_args->flags & BTRFS_SUBVOL_QGROUP_INHERIT) {
1744
		if (vol_args->size > PAGE_SIZE) {
A
Arne Jansen 已提交
1745
			ret = -EINVAL;
D
Dan Carpenter 已提交
1746
			goto free_args;
A
Arne Jansen 已提交
1747 1748 1749 1750
		}
		inherit = memdup_user(vol_args->qgroup_inherit, vol_args->size);
		if (IS_ERR(inherit)) {
			ret = PTR_ERR(inherit);
D
Dan Carpenter 已提交
1751
			goto free_args;
A
Arne Jansen 已提交
1752 1753
		}
	}
1754 1755

	ret = btrfs_ioctl_snap_create_transid(file, vol_args->name,
A
Arne Jansen 已提交
1756
					      vol_args->fd, subvol, ptr,
1757
					      readonly, inherit);
D
Dan Carpenter 已提交
1758 1759
	if (ret)
		goto free_inherit;
1760

D
Dan Carpenter 已提交
1761 1762 1763 1764
	if (ptr && copy_to_user(arg +
				offsetof(struct btrfs_ioctl_vol_args_v2,
					transid),
				ptr, sizeof(*ptr)))
1765
		ret = -EFAULT;
D
Dan Carpenter 已提交
1766 1767

free_inherit:
A
Arne Jansen 已提交
1768
	kfree(inherit);
D
Dan Carpenter 已提交
1769 1770
free_args:
	kfree(vol_args);
C
Christoph Hellwig 已提交
1771 1772 1773
	return ret;
}

1774 1775 1776
static noinline int btrfs_ioctl_subvol_getflags(struct file *file,
						void __user *arg)
{
	struct inode *inode = file_inode(file);
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_root *root = BTRFS_I(inode)->root;
	int ret = 0;
	u64 flags = 0;

	if (btrfs_ino(inode) != BTRFS_FIRST_FREE_OBJECTID)
		return -EINVAL;

	down_read(&fs_info->subvol_sem);
	if (btrfs_root_readonly(root))
		flags |= BTRFS_SUBVOL_RDONLY;
	up_read(&fs_info->subvol_sem);

	if (copy_to_user(arg, &flags, sizeof(flags)))
		ret = -EFAULT;

	return ret;
}

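/*
 * Set flags on the subvolume rooted at this inode.  Only
 * BTRFS_SUBVOL_RDONLY can be changed here, and the RO -> RW transition is
 * refused while the subvolume is used by a running send operation.
 */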
static noinline int btrfs_ioctl_subvol_setflags(struct file *file,
					      void __user *arg)
{
	struct inode *inode = file_inode(file);
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_trans_handle *trans;
	u64 root_flags;
	u64 flags;
	int ret = 0;

	if (!inode_owner_or_capable(inode))
		return -EPERM;

	ret = mnt_want_write_file(file);
	if (ret)
		goto out;

	if (btrfs_ino(inode) != BTRFS_FIRST_FREE_OBJECTID) {
		ret = -EINVAL;
		goto out_drop_write;
	}

	if (copy_from_user(&flags, arg, sizeof(flags))) {
		ret = -EFAULT;
		goto out_drop_write;
	}

	if (flags & BTRFS_SUBVOL_CREATE_ASYNC) {
		ret = -EINVAL;
		goto out_drop_write;
	}

	if (flags & ~BTRFS_SUBVOL_RDONLY) {
		ret = -EOPNOTSUPP;
		goto out_drop_write;
	}

	down_write(&fs_info->subvol_sem);

	/* nothing to do */
	if (!!(flags & BTRFS_SUBVOL_RDONLY) == btrfs_root_readonly(root))
		goto out_drop_sem;

	root_flags = btrfs_root_flags(&root->root_item);
	if (flags & BTRFS_SUBVOL_RDONLY) {
		btrfs_set_root_flags(&root->root_item,
				     root_flags | BTRFS_ROOT_SUBVOL_RDONLY);
	} else {
		/*
		 * Block RO -> RW transition if this subvolume is involved in
		 * send
		 */
		spin_lock(&root->root_item_lock);
		if (root->send_in_progress == 0) {
			btrfs_set_root_flags(&root->root_item,
				     root_flags & ~BTRFS_ROOT_SUBVOL_RDONLY);
			spin_unlock(&root->root_item_lock);
		} else {
			spin_unlock(&root->root_item_lock);
			btrfs_warn(fs_info,
				   "Attempt to set subvolume %llu read-write during send",
				   root->root_key.objectid);
			ret = -EPERM;
			goto out_drop_sem;
		}
	}

	trans = btrfs_start_transaction(root, 1);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto out_reset;
	}

	ret = btrfs_update_root(trans, fs_info->tree_root,
				&root->root_key, &root->root_item);

	btrfs_commit_transaction(trans);
out_reset:
	if (ret)
		btrfs_set_root_flags(&root->root_item, root_flags);
out_drop_sem:
	up_write(&fs_info->subvol_sem);
out_drop_write:
	mnt_drop_write_file(file);
out:
	return ret;
}

/*
 * helper to check if the subvolume references other subvolumes
 */
static noinline int may_destroy_subvol(struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_path *path;
	struct btrfs_dir_item *di;
	struct btrfs_key key;
	u64 dir_id;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/* Make sure this root isn't set as the default subvol */
	dir_id = btrfs_super_root_dir(fs_info->super_copy);
	di = btrfs_lookup_dir_item(NULL, fs_info->tree_root, path,
				   dir_id, "default", 7, 0);
	if (di && !IS_ERR(di)) {
		btrfs_dir_item_key_to_cpu(path->nodes[0], di, &key);
		if (key.objectid == root->root_key.objectid) {
			ret = -EPERM;
			btrfs_err(fs_info,
				  "deleting default subvolume %llu is not allowed",
				  key.objectid);
			goto out;
		}
		btrfs_release_path(path);
	}

	key.objectid = root->root_key.objectid;
	key.type = BTRFS_ROOT_REF_KEY;
	key.offset = (u64)-1;

	ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	BUG_ON(ret == 0);

	ret = 0;
	if (path->slots[0] > 0) {
		path->slots[0]--;
		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
		if (key.objectid == root->root_key.objectid &&
		    key.type == BTRFS_ROOT_REF_KEY)
			ret = -ENOTEMPTY;
	}
out:
	btrfs_free_path(path);
	return ret;
}

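/*
 * Helpers for the TREE_SEARCH ioctls: key_in_sk() tests whether a key lies
 * within the [min, max] range described by the search key, and copy_to_sk()
 * copies the matching items of one leaf into the user buffer, each item
 * preceded by a struct btrfs_ioctl_search_header.
 */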
static noinline int key_in_sk(struct btrfs_key *key,
			      struct btrfs_ioctl_search_key *sk)
{
	struct btrfs_key test;
	int ret;

	test.objectid = sk->min_objectid;
	test.type = sk->min_type;
	test.offset = sk->min_offset;

	ret = btrfs_comp_cpu_keys(key, &test);
	if (ret < 0)
		return 0;

	test.objectid = sk->max_objectid;
	test.type = sk->max_type;
	test.offset = sk->max_offset;

	ret = btrfs_comp_cpu_keys(key, &test);
	if (ret > 0)
		return 0;
	return 1;
}

static noinline int copy_to_sk(struct btrfs_path *path,
			       struct btrfs_key *key,
			       struct btrfs_ioctl_search_key *sk,
			       size_t *buf_size,
			       char __user *ubuf,
			       unsigned long *sk_offset,
			       int *num_found)
{
	u64 found_transid;
	struct extent_buffer *leaf;
	struct btrfs_ioctl_search_header sh;
	struct btrfs_key test;
	unsigned long item_off;
	unsigned long item_len;
	int nritems;
	int i;
	int slot;
	int ret = 0;

	leaf = path->nodes[0];
	slot = path->slots[0];
	nritems = btrfs_header_nritems(leaf);

	if (btrfs_header_generation(leaf) > sk->max_transid) {
		i = nritems;
		goto advance_key;
	}
	found_transid = btrfs_header_generation(leaf);

	for (i = slot; i < nritems; i++) {
		item_off = btrfs_item_ptr_offset(leaf, i);
		item_len = btrfs_item_size_nr(leaf, i);

		btrfs_item_key_to_cpu(leaf, key, i);
		if (!key_in_sk(key, sk))
			continue;

		if (sizeof(sh) + item_len > *buf_size) {
			if (*num_found) {
				ret = 1;
				goto out;
			}

			/*
			 * return one empty item back for v1, which does not
			 * handle -EOVERFLOW
			 */

			*buf_size = sizeof(sh) + item_len;
			item_len = 0;
			ret = -EOVERFLOW;
		}

		if (sizeof(sh) + item_len + *sk_offset > *buf_size) {
			ret = 1;
			goto out;
		}

		sh.objectid = key->objectid;
		sh.offset = key->offset;
		sh.type = key->type;
		sh.len = item_len;
		sh.transid = found_transid;

		/* copy search result header */
		if (copy_to_user(ubuf + *sk_offset, &sh, sizeof(sh))) {
			ret = -EFAULT;
			goto out;
		}

		*sk_offset += sizeof(sh);

		if (item_len) {
			char __user *up = ubuf + *sk_offset;
			/* copy the item */
			if (read_extent_buffer_to_user(leaf, up,
						       item_off, item_len)) {
				ret = -EFAULT;
				goto out;
			}

			*sk_offset += item_len;
		}
		(*num_found)++;

		if (ret) /* -EOVERFLOW from above */
			goto out;

		if (*num_found >= sk->nr_items) {
			ret = 1;
			goto out;
		}
	}
advance_key:
	ret = 0;
	test.objectid = sk->max_objectid;
	test.type = sk->max_type;
	test.offset = sk->max_offset;
	if (btrfs_comp_cpu_keys(key, &test) >= 0)
		ret = 1;
	else if (key->offset < (u64)-1)
		key->offset++;
	else if (key->type < (u8)-1) {
		key->offset = 0;
		key->type++;
	} else if (key->objectid < (u64)-1) {
		key->offset = 0;
		key->type = 0;
		key->objectid++;
	} else
		ret = 1;
out:
	/*
	 *  0: all items from this leaf copied, continue with next
	 *  1: * more items can be copied, but unused buffer is too small
	 *     * all items were found
	 *     Either way, it stops the loop which iterates to the next
	 *     leaf
	 *  -EOVERFLOW: item was too large for buffer
	 *  -EFAULT: could not copy extent buffer back to userspace
	 */
	return ret;
}

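/*
 * Core of the TREE_SEARCH ioctls: walk forward from the minimum key with
 * btrfs_search_forward() (which also filters by min_transid) and let
 * copy_to_sk() fill the user buffer until it is full, the key range is
 * exhausted or the requested number of items has been found.
 */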
static noinline int search_ioctl(struct inode *inode,
				 struct btrfs_ioctl_search_key *sk,
				 size_t *buf_size,
				 char __user *ubuf)
{
	struct btrfs_fs_info *info = btrfs_sb(inode->i_sb);
	struct btrfs_root *root;
	struct btrfs_key key;
	struct btrfs_path *path;
	int ret;
	int num_found = 0;
	unsigned long sk_offset = 0;

	if (*buf_size < sizeof(struct btrfs_ioctl_search_header)) {
		*buf_size = sizeof(struct btrfs_ioctl_search_header);
		return -EOVERFLOW;
	}

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	if (sk->tree_id == 0) {
		/* search the root of the inode that was passed */
		root = BTRFS_I(inode)->root;
	} else {
		key.objectid = sk->tree_id;
		key.type = BTRFS_ROOT_ITEM_KEY;
		key.offset = (u64)-1;
		root = btrfs_read_fs_root_no_name(info, &key);
		if (IS_ERR(root)) {
			btrfs_free_path(path);
			return -ENOENT;
		}
	}

	key.objectid = sk->min_objectid;
	key.type = sk->min_type;
	key.offset = sk->min_offset;

	while (1) {
		ret = btrfs_search_forward(root, &key, path, sk->min_transid);
		if (ret != 0) {
			if (ret > 0)
				ret = 0;
			goto err;
		}
		ret = copy_to_sk(path, &key, sk, buf_size, ubuf,
				 &sk_offset, &num_found);
		btrfs_release_path(path);
		if (ret)
			break;

	}
	if (ret > 0)
		ret = 0;
err:
	sk->nr_items = num_found;
	btrfs_free_path(path);
	return ret;
}

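/*
 * A minimal user-space sketch of the v1 search ioctl (illustrative only,
 * using the UAPI definitions from linux/btrfs.h), dumping up to 4096 items
 * of the root tree:
 *
 *	struct btrfs_ioctl_search_args args = {};
 *	args.key.tree_id = BTRFS_ROOT_TREE_OBJECTID;
 *	args.key.max_objectid = (__u64)-1;
 *	args.key.max_type = (__u32)-1;
 *	args.key.max_offset = (__u64)-1;
 *	args.key.max_transid = (__u64)-1;
 *	args.key.nr_items = 4096;
 *	ioctl(fd, BTRFS_IOC_TREE_SEARCH, &args);
 *
 * On return args.key.nr_items holds the number of items copied into
 * args.buf; the caller needs CAP_SYS_ADMIN.
 */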
static noinline int btrfs_ioctl_tree_search(struct file *file,
					   void __user *argp)
{
	struct btrfs_ioctl_search_args __user *uargs;
	struct btrfs_ioctl_search_key sk;
	struct inode *inode;
	int ret;
	size_t buf_size;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	uargs = (struct btrfs_ioctl_search_args __user *)argp;

	if (copy_from_user(&sk, &uargs->key, sizeof(sk)))
		return -EFAULT;

	buf_size = sizeof(uargs->buf);

	inode = file_inode(file);
	ret = search_ioctl(inode, &sk, &buf_size, uargs->buf);

	/*
	 * In the original implementation an overflow is handled by returning a
	 * search header with a len of zero, so reset ret.
	 */
	if (ret == -EOVERFLOW)
		ret = 0;

	if (ret == 0 && copy_to_user(&uargs->key, &sk, sizeof(sk)))
		ret = -EFAULT;
	return ret;
}

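/*
 * V2 of the tree search ioctl: the caller provides its own buffer size
 * (capped to 16MiB here); if an item does not fit, the required size is
 * reported back through buf_size together with -EOVERFLOW.
 */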
static noinline int btrfs_ioctl_tree_search_v2(struct file *file,
					       void __user *argp)
{
	struct btrfs_ioctl_search_args_v2 __user *uarg;
	struct btrfs_ioctl_search_args_v2 args;
	struct inode *inode;
	int ret;
	size_t buf_size;
	const size_t buf_limit = SZ_16M;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	/* copy search header and buffer size */
	uarg = (struct btrfs_ioctl_search_args_v2 __user *)argp;
	if (copy_from_user(&args, uarg, sizeof(args)))
		return -EFAULT;

	buf_size = args.buf_size;

	if (buf_size < sizeof(struct btrfs_ioctl_search_header))
		return -EOVERFLOW;

	/* limit result size to 16MB */
	if (buf_size > buf_limit)
		buf_size = buf_limit;

	inode = file_inode(file);
	ret = search_ioctl(inode, &args.key, &buf_size,
			   (char *)(&uarg->buf[0]));
	if (ret == 0 && copy_to_user(&uarg->key, &args.key, sizeof(args.key)))
		ret = -EFAULT;
	else if (ret == -EOVERFLOW &&
		copy_to_user(&uarg->buf_size, &buf_size, sizeof(buf_size)))
		ret = -EFAULT;

	return ret;
}

/*
 * Search INODE_REFs to identify path name of 'dirid' directory
 * in a 'tree_id' tree, and sets the path name to 'name'.
 */
static noinline int btrfs_search_path_in_tree(struct btrfs_fs_info *info,
				u64 tree_id, u64 dirid, char *name)
{
	struct btrfs_root *root;
	struct btrfs_key key;
	char *ptr;
	int ret = -1;
	int slot;
	int len;
	int total_len = 0;
	struct btrfs_inode_ref *iref;
	struct extent_buffer *l;
	struct btrfs_path *path;

	if (dirid == BTRFS_FIRST_FREE_OBJECTID) {
		name[0]='\0';
		return 0;
	}

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ptr = &name[BTRFS_INO_LOOKUP_PATH_MAX];

	key.objectid = tree_id;
	key.type = BTRFS_ROOT_ITEM_KEY;
	key.offset = (u64)-1;
	root = btrfs_read_fs_root_no_name(info, &key);
	if (IS_ERR(root)) {
		btrfs_err(info, "could not find root %llu", tree_id);
		ret = -ENOENT;
		goto out;
	}

	key.objectid = dirid;
	key.type = BTRFS_INODE_REF_KEY;
	key.offset = (u64)-1;

	while (1) {
		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
		if (ret < 0)
			goto out;
		else if (ret > 0) {
			ret = btrfs_previous_item(root, path, dirid,
						  BTRFS_INODE_REF_KEY);
			if (ret < 0)
				goto out;
			else if (ret > 0) {
				ret = -ENOENT;
				goto out;
			}
		}

		l = path->nodes[0];
		slot = path->slots[0];
		btrfs_item_key_to_cpu(l, &key, slot);

		iref = btrfs_item_ptr(l, slot, struct btrfs_inode_ref);
		len = btrfs_inode_ref_name_len(l, iref);
		ptr -= len + 1;
		total_len += len + 1;
		if (ptr < name) {
			ret = -ENAMETOOLONG;
			goto out;
		}

		*(ptr + len) = '/';
		read_extent_buffer(l, ptr, (unsigned long)(iref + 1), len);

		if (key.offset == BTRFS_FIRST_FREE_OBJECTID)
			break;

		btrfs_release_path(path);
		key.objectid = key.offset;
		key.offset = (u64)-1;
		dirid = key.objectid;
	}
	memmove(name, ptr, total_len);
	name[total_len] = '\0';
	ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}

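/*
 * INO_LOOKUP ioctl: resolve the path of a directory inside a subvolume.
 * Passing treeid == 0 returns the id of the containing subvolume, the only
 * part of this interface usable without CAP_SYS_ADMIN; resolving an actual
 * path requires CAP_SYS_ADMIN.
 */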
static noinline int btrfs_ioctl_ino_lookup(struct file *file,
					   void __user *argp)
{
	 struct btrfs_ioctl_ino_lookup_args *args;
	 struct inode *inode;
	int ret = 0;

	args = memdup_user(argp, sizeof(*args));
	if (IS_ERR(args))
		return PTR_ERR(args);

	inode = file_inode(file);

	/*
	 * Unprivileged query to obtain the containing subvolume root id. The
	 * path is reset so it's consistent with btrfs_search_path_in_tree.
	 */
	if (args->treeid == 0)
		args->treeid = BTRFS_I(inode)->root->root_key.objectid;

	if (args->objectid == BTRFS_FIRST_FREE_OBJECTID) {
		args->name[0] = 0;
		goto out;
	}

	if (!capable(CAP_SYS_ADMIN)) {
		ret = -EPERM;
		goto out;
	}

	ret = btrfs_search_path_in_tree(BTRFS_I(inode)->root->fs_info,
					args->treeid, args->objectid,
					args->name);

out:
	if (ret == 0 && copy_to_user(argp, args, sizeof(*args)))
		ret = -EFAULT;

	kfree(args);
	return ret;
}

2355 2356 2357
static noinline int btrfs_ioctl_snap_destroy(struct file *file,
					     void __user *arg)
{
A
Al Viro 已提交
2358
	struct dentry *parent = file->f_path.dentry;
2359
	struct btrfs_fs_info *fs_info = btrfs_sb(parent->d_sb);
2360
	struct dentry *dentry;
2361
	struct inode *dir = d_inode(parent);
2362 2363 2364 2365 2366
	struct inode *inode;
	struct btrfs_root *root = BTRFS_I(dir)->root;
	struct btrfs_root *dest = NULL;
	struct btrfs_ioctl_vol_args *vol_args;
	struct btrfs_trans_handle *trans;
2367
	struct btrfs_block_rsv block_rsv;
2368
	u64 root_flags;
2369
	u64 qgroup_reserved;
2370 2371 2372 2373
	int namelen;
	int ret;
	int err = 0;

2374 2375 2376
	if (!S_ISDIR(dir->i_mode))
		return -ENOTDIR;

2377 2378 2379 2380 2381 2382 2383 2384 2385 2386 2387 2388
	vol_args = memdup_user(arg, sizeof(*vol_args));
	if (IS_ERR(vol_args))
		return PTR_ERR(vol_args);

	vol_args->name[BTRFS_PATH_NAME_MAX] = '\0';
	namelen = strlen(vol_args->name);
	if (strchr(vol_args->name, '/') ||
	    strncmp(vol_args->name, "..", namelen) == 0) {
		err = -EINVAL;
		goto out;
	}

2389
	err = mnt_want_write_file(file);
2390 2391 2392
	if (err)
		goto out;

2393

2394 2395 2396
	err = down_write_killable_nested(&dir->i_rwsem, I_MUTEX_PARENT);
	if (err == -EINTR)
		goto out_drop_write;
2397 2398 2399 2400 2401 2402
	dentry = lookup_one_len(vol_args->name, parent, namelen);
	if (IS_ERR(dentry)) {
		err = PTR_ERR(dentry);
		goto out_unlock_dir;
	}

2403
	if (d_really_is_negative(dentry)) {
2404 2405 2406 2407
		err = -ENOENT;
		goto out_dput;
	}

2408
	inode = d_inode(dentry);
2409
	dest = BTRFS_I(inode)->root;
2410
	if (!capable(CAP_SYS_ADMIN)) {
2411 2412 2413 2414 2415 2416 2417 2418 2419 2420 2421 2422 2423 2424
		/*
		 * Regular user.  Only allow this with a special mount
		 * option, when the user has write+exec access to the
		 * subvol root, and when rmdir(2) would have been
		 * allowed.
		 *
		 * Note that this does _not_ check that the subvol is
		 * empty or doesn't contain data that we wouldn't
		 * otherwise be able to delete.
		 *
		 * Users who want to delete empty subvols should try
		 * rmdir(2).
		 */
		err = -EPERM;
2425
		if (!btrfs_test_opt(fs_info, USER_SUBVOL_RM_ALLOWED))
2426 2427 2428 2429 2430 2431 2432 2433 2434 2435 2436 2437 2438 2439 2440 2441 2442 2443
			goto out_dput;

		/*
		 * Do not allow deletion if the parent dir is the same
		 * as the dir to be deleted.  That means the ioctl
		 * must be called on the dentry referencing the root
		 * of the subvol, not a random directory contained
		 * within it.
		 */
		err = -EINVAL;
		if (root == dest)
			goto out_dput;

		err = inode_permission(inode, MAY_WRITE | MAY_EXEC);
		if (err)
			goto out_dput;
	}

2444 2445 2446 2447 2448
	/* check if subvolume may be deleted by a user */
	err = btrfs_may_delete(dir, dentry, 1);
	if (err)
		goto out_dput;

L
Li Zefan 已提交
2449
	if (btrfs_ino(inode) != BTRFS_FIRST_FREE_OBJECTID) {
2450 2451 2452 2453
		err = -EINVAL;
		goto out_dput;
	}

A
Al Viro 已提交
2454
	inode_lock(inode);
2455 2456 2457 2458 2459 2460 2461

	/*
	 * Don't allow to delete a subvolume with send in progress. This is
	 * inside the i_mutex so the error handling that has to drop the bit
	 * again is not run concurrently.
	 */
	spin_lock(&dest->root_item_lock);
2462 2463 2464
	root_flags = btrfs_root_flags(&dest->root_item);
	if (dest->send_in_progress == 0) {
		btrfs_set_root_flags(&dest->root_item,
2465 2466 2467 2468
				root_flags | BTRFS_ROOT_SUBVOL_DEAD);
		spin_unlock(&dest->root_item_lock);
	} else {
		spin_unlock(&dest->root_item_lock);
2469 2470 2471
		btrfs_warn(fs_info,
			   "Attempt to delete subvolume %llu during send",
			   dest->root_key.objectid);
2472
		err = -EPERM;
2473
		goto out_unlock_inode;
2474 2475
	}

2476
	down_write(&fs_info->subvol_sem);
2477 2478 2479 2480 2481

	err = may_destroy_subvol(dest);
	if (err)
		goto out_up_write;

2482 2483 2484 2485 2486 2487
	btrfs_init_block_rsv(&block_rsv, BTRFS_BLOCK_RSV_TEMP);
	/*
	 * One for dir inode, two for dir entries, two for root
	 * ref/backref.
	 */
	err = btrfs_subvolume_reserve_metadata(root, &block_rsv,
2488
					       5, &qgroup_reserved, true);
2489 2490 2491
	if (err)
		goto out_up_write;

2492 2493 2494
	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		err = PTR_ERR(trans);
2495
		goto out_release;
2496
	}
2497 2498
	trans->block_rsv = &block_rsv;
	trans->bytes_reserved = block_rsv.size;
2499

2500 2501
	btrfs_record_snapshot_destroy(trans, dir);

2502 2503 2504 2505
	ret = btrfs_unlink_subvol(trans, root, dir,
				dest->root_key.objectid,
				dentry->d_name.name,
				dentry->d_name.len);
2506 2507
	if (ret) {
		err = ret;
2508
		btrfs_abort_transaction(trans, ret);
2509 2510
		goto out_end_trans;
	}
2511 2512 2513 2514 2515 2516 2517 2518

	btrfs_record_root_in_trans(trans, dest);

	memset(&dest->root_item.drop_progress, 0,
		sizeof(dest->root_item.drop_progress));
	dest->root_item.drop_level = 0;
	btrfs_set_root_refs(&dest->root_item, 0);

2519
	if (!test_and_set_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED, &dest->state)) {
2520
		ret = btrfs_insert_orphan_item(trans,
2521
					fs_info->tree_root,
2522
					dest->root_key.objectid);
2523
		if (ret) {
2524
			btrfs_abort_transaction(trans, ret);
2525 2526 2527
			err = ret;
			goto out_end_trans;
		}
2528
	}
2529

2530
	ret = btrfs_uuid_tree_rem(trans, fs_info, dest->root_item.uuid,
2531
				  BTRFS_UUID_KEY_SUBVOL,
2532 2533
				  dest->root_key.objectid);
	if (ret && ret != -ENOENT) {
2534
		btrfs_abort_transaction(trans, ret);
2535 2536 2537 2538
		err = ret;
		goto out_end_trans;
	}
	if (!btrfs_is_empty_uuid(dest->root_item.received_uuid)) {
2539
		ret = btrfs_uuid_tree_rem(trans, fs_info,
2540 2541 2542 2543
					  dest->root_item.received_uuid,
					  BTRFS_UUID_KEY_RECEIVED_SUBVOL,
					  dest->root_key.objectid);
		if (ret && ret != -ENOENT) {
2544
			btrfs_abort_transaction(trans, ret);
2545 2546 2547 2548 2549
			err = ret;
			goto out_end_trans;
		}
	}

2550
out_end_trans:
2551 2552
	trans->block_rsv = NULL;
	trans->bytes_reserved = 0;
2553
	ret = btrfs_end_transaction(trans);
2554 2555
	if (ret && !err)
		err = ret;
2556
	inode->i_flags |= S_DEAD;
2557
out_release:
2558
	btrfs_subvolume_release_metadata(fs_info, &block_rsv, qgroup_reserved);
2559
out_up_write:
2560
	up_write(&fs_info->subvol_sem);
2561 2562
	if (err) {
		spin_lock(&dest->root_item_lock);
2563 2564
		root_flags = btrfs_root_flags(&dest->root_item);
		btrfs_set_root_flags(&dest->root_item,
2565 2566 2567
				root_flags & ~BTRFS_ROOT_SUBVOL_DEAD);
		spin_unlock(&dest->root_item_lock);
	}
2568
out_unlock_inode:
A
Al Viro 已提交
2569
	inode_unlock(inode);
2570
	if (!err) {
2571
		d_invalidate(dentry);
2572 2573
		btrfs_invalidate_inodes(dest);
		d_delete(dentry);
2574
		ASSERT(dest->send_in_progress == 0);
2575 2576

		/* the last ref */
2577 2578 2579
		if (dest->ino_cache_inode) {
			iput(dest->ino_cache_inode);
			dest->ino_cache_inode = NULL;
2580
		}
2581 2582 2583 2584
	}
out_dput:
	dput(dentry);
out_unlock_dir:
A
Al Viro 已提交
2585
	inode_unlock(dir);
2586
out_drop_write:
A
Al Viro 已提交
2587
	mnt_drop_write_file(file);
2588 2589 2590 2591 2592
out:
	kfree(vol_args);
	return err;
}

C
Chris Mason 已提交
2593
static int btrfs_ioctl_defrag(struct file *file, void __user *argp)
C
Christoph Hellwig 已提交
2594
{
A
Al Viro 已提交
2595
	struct inode *inode = file_inode(file);
C
Christoph Hellwig 已提交
2596
	struct btrfs_root *root = BTRFS_I(inode)->root;
C
Chris Mason 已提交
2597
	struct btrfs_ioctl_defrag_range_args *range;
Y
Yan Zheng 已提交
2598 2599
	int ret;

2600 2601 2602
	ret = mnt_want_write_file(file);
	if (ret)
		return ret;
L
Li Zefan 已提交
2603

2604 2605 2606
	if (btrfs_root_readonly(root)) {
		ret = -EROFS;
		goto out;
2607
	}
C
Christoph Hellwig 已提交
2608 2609 2610

	switch (inode->i_mode & S_IFMT) {
	case S_IFDIR:
2611 2612 2613 2614
		if (!capable(CAP_SYS_ADMIN)) {
			ret = -EPERM;
			goto out;
		}
2615
		ret = btrfs_defrag_root(root);
2616 2617
		if (ret)
			goto out;
2618
		ret = btrfs_defrag_root(root->fs_info->extent_root);
C
Christoph Hellwig 已提交
2619 2620
		break;
	case S_IFREG:
2621 2622 2623 2624
		if (!(file->f_mode & FMODE_WRITE)) {
			ret = -EINVAL;
			goto out;
		}
C
Chris Mason 已提交
2625 2626 2627 2628 2629 2630 2631 2632 2633 2634 2635 2636

		range = kzalloc(sizeof(*range), GFP_KERNEL);
		if (!range) {
			ret = -ENOMEM;
			goto out;
		}

		if (argp) {
			if (copy_from_user(range, argp,
					   sizeof(*range))) {
				ret = -EFAULT;
				kfree(range);
2637
				goto out;
C
Chris Mason 已提交
2638 2639 2640 2641 2642 2643 2644 2645 2646 2647
			}
			/* compression requires us to start the IO */
			if ((range->flags & BTRFS_DEFRAG_RANGE_COMPRESS)) {
				range->flags |= BTRFS_DEFRAG_RANGE_START_IO;
				range->extent_thresh = (u32)-1;
			}
		} else {
			/* the rest are all set to zero by kzalloc */
			range->len = (u64)-1;
		}
A
Al Viro 已提交
2648
		ret = btrfs_defrag_file(file_inode(file), file,
C
Chris Mason 已提交
2649 2650 2651
					range, 0, 0);
		if (ret > 0)
			ret = 0;
C
Chris Mason 已提交
2652
		kfree(range);
C
Christoph Hellwig 已提交
2653
		break;
2654 2655
	default:
		ret = -EINVAL;
C
Christoph Hellwig 已提交
2656
	}
2657
out:
2658
	mnt_drop_write_file(file);
2659
	return ret;
C
Christoph Hellwig 已提交
2660 2661
}

2662
static long btrfs_ioctl_add_dev(struct btrfs_fs_info *fs_info, void __user *arg)
C
Christoph Hellwig 已提交
2663 2664 2665 2666
{
	struct btrfs_ioctl_vol_args *vol_args;
	int ret;

2667 2668 2669
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

2670
	if (atomic_xchg(&fs_info->mutually_exclusive_operation_running, 1))
2671
		return BTRFS_ERROR_DEV_EXCL_RUN_IN_PROGRESS;
2672

2673
	mutex_lock(&fs_info->volume_mutex);
L
Li Zefan 已提交
2674
	vol_args = memdup_user(arg, sizeof(*vol_args));
2675 2676 2677 2678
	if (IS_ERR(vol_args)) {
		ret = PTR_ERR(vol_args);
		goto out;
	}
C
Christoph Hellwig 已提交
2679

2680
	vol_args->name[BTRFS_PATH_NAME_MAX] = '\0';
2681
	ret = btrfs_init_new_device(fs_info, vol_args->name);
C
Christoph Hellwig 已提交
2682

A
Anand Jain 已提交
2683
	if (!ret)
2684
		btrfs_info(fs_info, "disk added %s", vol_args->name);
A
Anand Jain 已提交
2685

C
Christoph Hellwig 已提交
2686
	kfree(vol_args);
2687
out:
2688 2689
	mutex_unlock(&fs_info->volume_mutex);
	atomic_set(&fs_info->mutually_exclusive_operation_running, 0);
C
Christoph Hellwig 已提交
2690 2691 2692
	return ret;
}

2693
static long btrfs_ioctl_rm_dev_v2(struct file *file, void __user *arg)
C
Christoph Hellwig 已提交
2694
{
2695 2696
	struct inode *inode = file_inode(file);
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
2697
	struct btrfs_ioctl_vol_args_v2 *vol_args;
C
Christoph Hellwig 已提交
2698 2699
	int ret;

2700 2701 2702
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

2703 2704 2705
	ret = mnt_want_write_file(file);
	if (ret)
		return ret;
Y
Yan Zheng 已提交
2706

L
Li Zefan 已提交
2707
	vol_args = memdup_user(arg, sizeof(*vol_args));
2708 2709
	if (IS_ERR(vol_args)) {
		ret = PTR_ERR(vol_args);
D
Dan Carpenter 已提交
2710
		goto err_drop;
2711
	}
C
Christoph Hellwig 已提交
2712

2713
	/* Check for compatibility: reject unknown flags */
2714 2715
	if (vol_args->flags & ~BTRFS_VOL_ARG_V2_FLAGS_SUPPORTED)
		return -EOPNOTSUPP;
C
Christoph Hellwig 已提交
2716

2717
	if (atomic_xchg(&fs_info->mutually_exclusive_operation_running, 1)) {
2718 2719 2720 2721
		ret = BTRFS_ERROR_DEV_EXCL_RUN_IN_PROGRESS;
		goto out;
	}

2722
	mutex_lock(&fs_info->volume_mutex);
2723
	if (vol_args->flags & BTRFS_DEVICE_SPEC_BY_ID) {
2724
		ret = btrfs_rm_device(fs_info, NULL, vol_args->devid);
2725 2726
	} else {
		vol_args->name[BTRFS_SUBVOL_NAME_MAX] = '\0';
2727
		ret = btrfs_rm_device(fs_info, vol_args->name, 0);
2728
	}
2729 2730
	mutex_unlock(&fs_info->volume_mutex);
	atomic_set(&fs_info->mutually_exclusive_operation_running, 0);
2731

2732
	if (!ret) {
2733
		if (vol_args->flags & BTRFS_DEVICE_SPEC_BY_ID)
2734
			btrfs_info(fs_info, "device deleted: id %llu",
2735 2736
					vol_args->devid);
		else
2737
			btrfs_info(fs_info, "device deleted: %s",
2738 2739
					vol_args->name);
	}
2740 2741
out:
	kfree(vol_args);
D
Dan Carpenter 已提交
2742
err_drop:
2743
	mnt_drop_write_file(file);
C
Christoph Hellwig 已提交
2744 2745 2746
	return ret;
}

2747
static long btrfs_ioctl_rm_dev(struct file *file, void __user *arg)
C
Christoph Hellwig 已提交
2748
{
2749 2750
	struct inode *inode = file_inode(file);
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
C
Christoph Hellwig 已提交
2751 2752 2753
	struct btrfs_ioctl_vol_args *vol_args;
	int ret;

2754 2755 2756
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

2757 2758 2759
	ret = mnt_want_write_file(file);
	if (ret)
		return ret;
Y
Yan Zheng 已提交
2760

2761
	if (atomic_xchg(&fs_info->mutually_exclusive_operation_running, 1)) {
2762
		ret = BTRFS_ERROR_DEV_EXCL_RUN_IN_PROGRESS;
2763 2764 2765 2766 2767 2768
		goto out_drop_write;
	}

	vol_args = memdup_user(arg, sizeof(*vol_args));
	if (IS_ERR(vol_args)) {
		ret = PTR_ERR(vol_args);
2769 2770 2771
		goto out;
	}

2772
	vol_args->name[BTRFS_PATH_NAME_MAX] = '\0';
2773
	mutex_lock(&fs_info->volume_mutex);
2774
	ret = btrfs_rm_device(fs_info, vol_args->name, 0);
2775
	mutex_unlock(&fs_info->volume_mutex);
2776

2777
	if (!ret)
2778
		btrfs_info(fs_info, "disk deleted %s", vol_args->name);
2779
	kfree(vol_args);
2780
out:
2781
	atomic_set(&fs_info->mutually_exclusive_operation_running, 0);
2782
out_drop_write:
2783
	mnt_drop_write_file(file);
2784

C
Christoph Hellwig 已提交
2785 2786 2787
	return ret;
}

2788 2789
static long btrfs_ioctl_fs_info(struct btrfs_fs_info *fs_info,
				void __user *arg)
J
Jan Schmidt 已提交
2790
{
2791
	struct btrfs_ioctl_fs_info_args *fi_args;
J
Jan Schmidt 已提交
2792
	struct btrfs_device *device;
2793
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
2794
	int ret = 0;
J
Jan Schmidt 已提交
2795

2796 2797 2798 2799
	fi_args = kzalloc(sizeof(*fi_args), GFP_KERNEL);
	if (!fi_args)
		return -ENOMEM;

2800
	mutex_lock(&fs_devices->device_list_mutex);
2801
	fi_args->num_devices = fs_devices->num_devices;
2802
	memcpy(&fi_args->fsid, fs_info->fsid, sizeof(fi_args->fsid));
J
Jan Schmidt 已提交
2803

2804
	list_for_each_entry(device, &fs_devices->devices, dev_list) {
2805 2806
		if (device->devid > fi_args->max_id)
			fi_args->max_id = device->devid;
J
Jan Schmidt 已提交
2807 2808 2809
	}
	mutex_unlock(&fs_devices->device_list_mutex);

2810 2811 2812
	fi_args->nodesize = fs_info->super_copy->nodesize;
	fi_args->sectorsize = fs_info->super_copy->sectorsize;
	fi_args->clone_alignment = fs_info->super_copy->sectorsize;
2813

2814 2815
	if (copy_to_user(arg, fi_args, sizeof(*fi_args)))
		ret = -EFAULT;
J
Jan Schmidt 已提交
2816

2817 2818
	kfree(fi_args);
	return ret;
J
Jan Schmidt 已提交
2819 2820
}

2821 2822
static long btrfs_ioctl_dev_info(struct btrfs_fs_info *fs_info,
				 void __user *arg)
J
Jan Schmidt 已提交
2823 2824 2825
{
	struct btrfs_ioctl_dev_info_args *di_args;
	struct btrfs_device *dev;
2826
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
J
Jan Schmidt 已提交
2827 2828 2829 2830 2831 2832 2833
	int ret = 0;
	char *s_uuid = NULL;

	di_args = memdup_user(arg, sizeof(*di_args));
	if (IS_ERR(di_args))
		return PTR_ERR(di_args);

2834
	if (!btrfs_is_empty_uuid(di_args->uuid))
J
Jan Schmidt 已提交
2835 2836 2837
		s_uuid = di_args->uuid;

	mutex_lock(&fs_devices->device_list_mutex);
2838
	dev = btrfs_find_device(fs_info, di_args->devid, s_uuid, NULL);
J
Jan Schmidt 已提交
2839 2840 2841 2842 2843 2844 2845

	if (!dev) {
		ret = -ENODEV;
		goto out;
	}

	di_args->devid = dev->devid;
2846 2847
	di_args->bytes_used = btrfs_device_get_bytes_used(dev);
	di_args->total_bytes = btrfs_device_get_total_bytes(dev);
J
Jan Schmidt 已提交
2848
	memcpy(di_args->uuid, dev->uuid, sizeof(di_args->uuid));
2849
	if (dev->name) {
2850 2851 2852 2853 2854 2855
		struct rcu_string *name;

		rcu_read_lock();
		name = rcu_dereference(dev->name);
		strncpy(di_args->path, name->str, sizeof(di_args->path));
		rcu_read_unlock();
2856 2857
		di_args->path[sizeof(di_args->path) - 1] = 0;
	} else {
2858
		di_args->path[0] = '\0';
2859
	}
J
Jan Schmidt 已提交
2860 2861

out:
2862
	mutex_unlock(&fs_devices->device_list_mutex);
J
Jan Schmidt 已提交
2863 2864 2865 2866 2867 2868 2869
	if (ret == 0 && copy_to_user(arg, di_args, sizeof(*di_args)))
		ret = -EFAULT;

	kfree(di_args);
	return ret;
}

2870
static struct page *extent_same_get_page(struct inode *inode, pgoff_t index)
M
Mark Fasheh 已提交
2871 2872 2873 2874 2875
{
	struct page *page;

	page = grab_cache_page(inode->i_mapping, index);
	if (!page)
2876
		return ERR_PTR(-ENOMEM);
M
Mark Fasheh 已提交
2877 2878

	if (!PageUptodate(page)) {
2879 2880 2881 2882 2883
		int ret;

		ret = btrfs_readpage(NULL, page);
		if (ret)
			return ERR_PTR(ret);
M
Mark Fasheh 已提交
2884 2885 2886
		lock_page(page);
		if (!PageUptodate(page)) {
			unlock_page(page);
2887
			put_page(page);
2888 2889 2890 2891
			return ERR_PTR(-EIO);
		}
		if (page->mapping != inode->i_mapping) {
			unlock_page(page);
2892
			put_page(page);
2893
			return ERR_PTR(-EAGAIN);
M
Mark Fasheh 已提交
2894 2895 2896 2897 2898 2899
		}
	}

	return page;
}

2900 2901 2902 2903
static int gather_extent_pages(struct inode *inode, struct page **pages,
			       int num_pages, u64 off)
{
	int i;
2904
	pgoff_t index = off >> PAGE_SHIFT;
2905 2906

	for (i = 0; i < num_pages; i++) {
2907
again:
2908
		pages[i] = extent_same_get_page(inode, index + i);
2909 2910 2911 2912 2913 2914 2915 2916
		if (IS_ERR(pages[i])) {
			int err = PTR_ERR(pages[i]);

			if (err == -EAGAIN)
				goto again;
			pages[i] = NULL;
			return err;
		}
2917 2918 2919 2920
	}
	return 0;
}

2921 2922
static int lock_extent_range(struct inode *inode, u64 off, u64 len,
			     bool retry_range_locking)
2923
{
2924 2925 2926 2927 2928 2929 2930 2931
	/*
	 * Do any pending delalloc/csum calculations on inode, one way or
	 * another, and lock file content.
	 * The locking order is:
	 *
	 *   1) pages
	 *   2) range in the inode's io tree
	 */
2932 2933 2934 2935 2936
	while (1) {
		struct btrfs_ordered_extent *ordered;
		lock_extent(&BTRFS_I(inode)->io_tree, off, off + len - 1);
		ordered = btrfs_lookup_first_ordered_extent(inode,
							    off + len - 1);
2937 2938 2939
		if ((!ordered ||
		     ordered->file_offset + ordered->len <= off ||
		     ordered->file_offset >= off + len) &&
2940
		    !test_range_bit(&BTRFS_I(inode)->io_tree, off,
2941 2942 2943
				    off + len - 1, EXTENT_DELALLOC, 0, NULL)) {
			if (ordered)
				btrfs_put_ordered_extent(ordered);
2944
			break;
2945
		}
2946 2947 2948
		unlock_extent(&BTRFS_I(inode)->io_tree, off, off + len - 1);
		if (ordered)
			btrfs_put_ordered_extent(ordered);
2949 2950
		if (!retry_range_locking)
			return -EAGAIN;
2951 2952
		btrfs_wait_ordered_range(inode, off, len);
	}
2953
	return 0;
2954 2955
}

2956
static void btrfs_double_inode_unlock(struct inode *inode1, struct inode *inode2)
M
Mark Fasheh 已提交
2957
{
A
Al Viro 已提交
2958 2959
	inode_unlock(inode1);
	inode_unlock(inode2);
M
Mark Fasheh 已提交
2960 2961
}

2962 2963 2964 2965 2966
static void btrfs_double_inode_lock(struct inode *inode1, struct inode *inode2)
{
	if (inode1 < inode2)
		swap(inode1, inode2);

A
Al Viro 已提交
2967 2968
	inode_lock_nested(inode1, I_MUTEX_PARENT);
	inode_lock_nested(inode2, I_MUTEX_CHILD);
2969 2970 2971 2972 2973 2974 2975 2976 2977
}

static void btrfs_double_extent_unlock(struct inode *inode1, u64 loff1,
				      struct inode *inode2, u64 loff2, u64 len)
{
	unlock_extent(&BTRFS_I(inode1)->io_tree, loff1, loff1 + len - 1);
	unlock_extent(&BTRFS_I(inode2)->io_tree, loff2, loff2 + len - 1);
}

2978 2979 2980
static int btrfs_double_extent_lock(struct inode *inode1, u64 loff1,
				    struct inode *inode2, u64 loff2, u64 len,
				    bool retry_range_locking)
M
Mark Fasheh 已提交
2981
{
2982 2983
	int ret;

M
Mark Fasheh 已提交
2984 2985 2986 2987
	if (inode1 < inode2) {
		swap(inode1, inode2);
		swap(loff1, loff2);
	}
2988 2989 2990 2991 2992 2993 2994 2995
	ret = lock_extent_range(inode1, loff1, len, retry_range_locking);
	if (ret)
		return ret;
	ret = lock_extent_range(inode2, loff2, len, retry_range_locking);
	if (ret)
		unlock_extent(&BTRFS_I(inode1)->io_tree, loff1,
			      loff1 + len - 1);
	return ret;
2996 2997 2998 2999 3000 3001 3002 3003 3004 3005 3006 3007 3008 3009 3010
}

struct cmp_pages {
	int		num_pages;
	struct page	**src_pages;
	struct page	**dst_pages;
};

static void btrfs_cmp_data_free(struct cmp_pages *cmp)
{
	int i;
	struct page *pg;

	for (i = 0; i < cmp->num_pages; i++) {
		pg = cmp->src_pages[i];
3011 3012
		if (pg) {
			unlock_page(pg);
3013
			put_page(pg);
3014
		}
3015
		pg = cmp->dst_pages[i];
3016 3017
		if (pg) {
			unlock_page(pg);
3018
			put_page(pg);
3019
		}
3020 3021 3022 3023 3024 3025 3026 3027 3028 3029
	}
	kfree(cmp->src_pages);
	kfree(cmp->dst_pages);
}

static int btrfs_cmp_data_prepare(struct inode *src, u64 loff,
				  struct inode *dst, u64 dst_loff,
				  u64 len, struct cmp_pages *cmp)
{
	int ret;
3030
	int num_pages = PAGE_ALIGN(len) >> PAGE_SHIFT;
3031 3032 3033 3034 3035 3036 3037 3038
	struct page **src_pgarr, **dst_pgarr;

	/*
	 * We must gather up all the pages before we initiate our
	 * extent locking. We use an array for the page pointers. Size
	 * of the array is bounded by len, which is in turn bounded by
	 * BTRFS_MAX_DEDUPE_LEN.
	 */
3039 3040
	src_pgarr = kcalloc(num_pages, sizeof(struct page *), GFP_KERNEL);
	dst_pgarr = kcalloc(num_pages, sizeof(struct page *), GFP_KERNEL);
3041 3042 3043 3044
	if (!src_pgarr || !dst_pgarr) {
		kfree(src_pgarr);
		kfree(dst_pgarr);
		return -ENOMEM;
M
Mark Fasheh 已提交
3045
	}
3046 3047 3048 3049 3050 3051 3052 3053 3054 3055 3056 3057 3058 3059
	cmp->num_pages = num_pages;
	cmp->src_pages = src_pgarr;
	cmp->dst_pages = dst_pgarr;

	ret = gather_extent_pages(src, cmp->src_pages, cmp->num_pages, loff);
	if (ret)
		goto out;

	ret = gather_extent_pages(dst, cmp->dst_pages, cmp->num_pages, dst_loff);

out:
	if (ret)
		btrfs_cmp_data_free(cmp);
	return 0;
M
Mark Fasheh 已提交
3060 3061 3062
}

static int btrfs_cmp_data(struct inode *src, u64 loff, struct inode *dst,
3063
			  u64 dst_loff, u64 len, struct cmp_pages *cmp)
M
Mark Fasheh 已提交
3064 3065
{
	int ret = 0;
3066
	int i;
M
Mark Fasheh 已提交
3067
	struct page *src_page, *dst_page;
3068
	unsigned int cmp_len = PAGE_SIZE;
M
Mark Fasheh 已提交
3069 3070
	void *addr, *dst_addr;

3071
	i = 0;
M
Mark Fasheh 已提交
3072
	while (len) {
3073
		if (len < PAGE_SIZE)
M
Mark Fasheh 已提交
3074 3075
			cmp_len = len;

3076 3077 3078 3079
		BUG_ON(i >= cmp->num_pages);

		src_page = cmp->src_pages[i];
		dst_page = cmp->dst_pages[i];
3080 3081
		ASSERT(PageLocked(src_page));
		ASSERT(PageLocked(dst_page));
3082

M
Mark Fasheh 已提交
3083 3084 3085 3086 3087 3088 3089
		addr = kmap_atomic(src_page);
		dst_addr = kmap_atomic(dst_page);

		flush_dcache_page(src_page);
		flush_dcache_page(dst_page);

		if (memcmp(addr, dst_addr, cmp_len))
3090
			ret = -EBADE;
M
Mark Fasheh 已提交
3091 3092 3093 3094 3095 3096 3097 3098

		kunmap_atomic(addr);
		kunmap_atomic(dst_addr);

		if (ret)
			break;

		len -= cmp_len;
3099
		i++;
M
Mark Fasheh 已提交
3100 3101 3102 3103 3104
	}

	return ret;
}

3105 3106
static int extent_same_check_offsets(struct inode *inode, u64 off, u64 *plen,
				     u64 olen)
M
Mark Fasheh 已提交
3107
{
3108
	u64 len = *plen;
M
Mark Fasheh 已提交
3109 3110
	u64 bs = BTRFS_I(inode)->root->fs_info->sb->s_blocksize;

3111
	if (off + olen > inode->i_size || off + olen < off)
M
Mark Fasheh 已提交
3112
		return -EINVAL;
3113 3114 3115 3116 3117

	/* if we extend to eof, continue to block boundary */
	if (off + len == inode->i_size)
		*plen = len = ALIGN(inode->i_size, bs) - off;

M
Mark Fasheh 已提交
3118 3119 3120 3121 3122 3123 3124
	/* Check that we are block aligned - btrfs_clone() requires this */
	if (!IS_ALIGNED(off, bs) || !IS_ALIGNED(off + len, bs))
		return -EINVAL;

	return 0;
}

3125
static int btrfs_extent_same(struct inode *src, u64 loff, u64 olen,
M
Mark Fasheh 已提交
3126 3127 3128
			     struct inode *dst, u64 dst_loff)
{
	int ret;
3129
	u64 len = olen;
3130
	struct cmp_pages cmp;
M
Mark Fasheh 已提交
3131 3132 3133
	int same_inode = 0;
	u64 same_lock_start = 0;
	u64 same_lock_len = 0;
M
Mark Fasheh 已提交
3134 3135

	if (src == dst)
M
Mark Fasheh 已提交
3136
		same_inode = 1;
M
Mark Fasheh 已提交
3137

3138 3139 3140
	if (len == 0)
		return 0;

M
Mark Fasheh 已提交
3141
	if (same_inode) {
A
Al Viro 已提交
3142
		inode_lock(src);
M
Mark Fasheh 已提交
3143

M
Mark Fasheh 已提交
3144
		ret = extent_same_check_offsets(src, loff, &len, olen);
3145 3146 3147
		if (ret)
			goto out_unlock;
		ret = extent_same_check_offsets(src, dst_loff, &len, olen);
M
Mark Fasheh 已提交
3148 3149
		if (ret)
			goto out_unlock;
M
Mark Fasheh 已提交
3150

M
Mark Fasheh 已提交
3151 3152 3153 3154 3155 3156 3157 3158 3159 3160 3161 3162 3163 3164 3165 3166 3167 3168 3169 3170 3171 3172 3173 3174 3175 3176 3177 3178 3179 3180 3181 3182 3183 3184 3185 3186 3187 3188
		/*
		 * Single inode case wants the same checks, except we
		 * don't want our length pushed out past i_size as
		 * comparing that data range makes no sense.
		 *
		 * extent_same_check_offsets() will do this for an
		 * unaligned length at i_size, so catch it here and
		 * reject the request.
		 *
		 * This effectively means we require aligned extents
		 * for the single-inode case, whereas the other cases
		 * allow an unaligned length so long as it ends at
		 * i_size.
		 */
		if (len != olen) {
			ret = -EINVAL;
			goto out_unlock;
		}

		/* Check for overlapping ranges */
		if (dst_loff + len > loff && dst_loff < loff + len) {
			ret = -EINVAL;
			goto out_unlock;
		}

		same_lock_start = min_t(u64, loff, dst_loff);
		same_lock_len = max_t(u64, loff, dst_loff) + len - same_lock_start;
	} else {
		btrfs_double_inode_lock(src, dst);

		ret = extent_same_check_offsets(src, loff, &len, olen);
		if (ret)
			goto out_unlock;

		ret = extent_same_check_offsets(dst, dst_loff, &len, olen);
		if (ret)
			goto out_unlock;
	}
M
Mark Fasheh 已提交
3189 3190 3191 3192 3193 3194 3195 3196

	/* don't make the dst file partly checksummed */
	if ((BTRFS_I(src)->flags & BTRFS_INODE_NODATASUM) !=
	    (BTRFS_I(dst)->flags & BTRFS_INODE_NODATASUM)) {
		ret = -EINVAL;
		goto out_unlock;
	}

3197
again:
3198 3199 3200 3201
	ret = btrfs_cmp_data_prepare(src, loff, dst, dst_loff, olen, &cmp);
	if (ret)
		goto out_unlock;

M
Mark Fasheh 已提交
3202
	if (same_inode)
3203 3204
		ret = lock_extent_range(src, same_lock_start, same_lock_len,
					false);
M
Mark Fasheh 已提交
3205
	else
3206 3207 3208 3209 3210 3211 3212 3213 3214 3215 3216 3217 3218 3219 3220 3221 3222 3223 3224 3225 3226 3227 3228 3229 3230 3231 3232 3233 3234 3235 3236
		ret = btrfs_double_extent_lock(src, loff, dst, dst_loff, len,
					       false);
	/*
	 * If one of the inodes has dirty pages in the respective range or
	 * ordered extents, we need to flush delalloc and wait for all ordered
	 * extents in the range. We must unlock the pages and the ranges in the
	 * io trees to avoid deadlocks when flushing delalloc (requires locking
	 * pages) and when waiting for ordered extents to complete (they require
	 * range locking).
	 */
	if (ret == -EAGAIN) {
		/*
		 * Ranges in the io trees already unlocked. Now unlock all
		 * pages before waiting for all IO to complete.
		 */
		btrfs_cmp_data_free(&cmp);
		if (same_inode) {
			btrfs_wait_ordered_range(src, same_lock_start,
						 same_lock_len);
		} else {
			btrfs_wait_ordered_range(src, loff, len);
			btrfs_wait_ordered_range(dst, dst_loff, len);
		}
		goto again;
	}
	ASSERT(ret == 0);
	if (WARN_ON(ret)) {
		/* ranges in the io trees already unlocked */
		btrfs_cmp_data_free(&cmp);
		return ret;
	}
3237

3238
	/* pass original length for comparison so we stay within i_size */
3239
	ret = btrfs_cmp_data(src, loff, dst, dst_loff, olen, &cmp);
M
Mark Fasheh 已提交
3240
	if (ret == 0)
3241
		ret = btrfs_clone(src, dst, loff, olen, len, dst_loff, 1);
M
Mark Fasheh 已提交
3242

M
Mark Fasheh 已提交
3243 3244 3245 3246 3247
	if (same_inode)
		unlock_extent(&BTRFS_I(src)->io_tree, same_lock_start,
			      same_lock_start + same_lock_len - 1);
	else
		btrfs_double_extent_unlock(src, loff, dst, dst_loff, len);
3248 3249

	btrfs_cmp_data_free(&cmp);
M
Mark Fasheh 已提交
3250
out_unlock:
M
Mark Fasheh 已提交
3251
	if (same_inode)
A
Al Viro 已提交
3252
		inode_unlock(src);
M
Mark Fasheh 已提交
3253 3254
	else
		btrfs_double_inode_unlock(src, dst);
M
Mark Fasheh 已提交
3255 3256 3257 3258

	return ret;
}

3259
#define BTRFS_MAX_DEDUPE_LEN	SZ_16M
M
Mark Fasheh 已提交
3260

3261 3262
ssize_t btrfs_dedupe_file_range(struct file *src_file, u64 loff, u64 olen,
				struct file *dst_file, u64 dst_loff)
M
Mark Fasheh 已提交
3263
{
3264 3265
	struct inode *src = file_inode(src_file);
	struct inode *dst = file_inode(dst_file);
M
Mark Fasheh 已提交
3266
	u64 bs = BTRFS_I(src)->root->fs_info->sb->s_blocksize;
3267
	ssize_t res;
M
Mark Fasheh 已提交
3268

3269 3270
	if (olen > BTRFS_MAX_DEDUPE_LEN)
		olen = BTRFS_MAX_DEDUPE_LEN;
M
Mark Fasheh 已提交
3271

3272
	if (WARN_ON_ONCE(bs < PAGE_SIZE)) {
M
Mark Fasheh 已提交
3273 3274 3275 3276 3277
		/*
		 * Btrfs does not support blocksize < page_size. As a
		 * result, btrfs_cmp_data() won't correctly handle
		 * this situation without an update.
		 */
3278
		return -EINVAL;
M
Mark Fasheh 已提交
3279 3280
	}

3281 3282 3283 3284
	res = btrfs_extent_same(src, loff, olen, dst, dst_loff);
	if (res)
		return res;
	return olen;
M
Mark Fasheh 已提交
3285 3286
}

3287 3288 3289 3290
static int clone_finish_inode_update(struct btrfs_trans_handle *trans,
				     struct inode *inode,
				     u64 endoff,
				     const u64 destoff,
3291 3292
				     const u64 olen,
				     int no_time_update)
3293 3294 3295 3296 3297
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	int ret;

	inode_inc_iversion(inode);
3298
	if (!no_time_update)
3299
		inode->i_mtime = inode->i_ctime = current_time(inode);
3300 3301 3302 3303 3304 3305 3306 3307 3308 3309 3310
	/*
	 * We round up to the block size at eof when determining which
	 * extents to clone above, but shouldn't round up the file size.
	 */
	if (endoff > destoff + olen)
		endoff = destoff + olen;
	if (endoff > inode->i_size)
		btrfs_i_size_write(inode, endoff);

	ret = btrfs_update_inode(trans, root, inode);
	if (ret) {
3311
		btrfs_abort_transaction(trans, ret);
3312
		btrfs_end_transaction(trans);
3313 3314
		goto out;
	}
3315
	ret = btrfs_end_transaction(trans);
3316 3317 3318 3319
out:
	return ret;
}

3320 3321 3322 3323 3324 3325 3326 3327 3328 3329 3330 3331 3332 3333 3334 3335 3336
static void clone_update_extent_map(struct inode *inode,
				    const struct btrfs_trans_handle *trans,
				    const struct btrfs_path *path,
				    const u64 hole_offset,
				    const u64 hole_len)
{
	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
	struct extent_map *em;
	int ret;

	em = alloc_extent_map();
	if (!em) {
		set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
			&BTRFS_I(inode)->runtime_flags);
		return;
	}

3337 3338 3339 3340 3341
	if (path) {
		struct btrfs_file_extent_item *fi;

		fi = btrfs_item_ptr(path->nodes[0], path->slots[0],
				    struct btrfs_file_extent_item);
3342 3343 3344 3345 3346 3347 3348 3349 3350 3351 3352 3353 3354 3355 3356 3357 3358 3359 3360 3361 3362 3363 3364 3365 3366 3367 3368 3369 3370 3371
		btrfs_extent_item_to_extent_map(inode, path, fi, false, em);
		em->generation = -1;
		if (btrfs_file_extent_type(path->nodes[0], fi) ==
		    BTRFS_FILE_EXTENT_INLINE)
			set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
				&BTRFS_I(inode)->runtime_flags);
	} else {
		em->start = hole_offset;
		em->len = hole_len;
		em->ram_bytes = em->len;
		em->orig_start = hole_offset;
		em->block_start = EXTENT_MAP_HOLE;
		em->block_len = 0;
		em->orig_block_len = 0;
		em->compress_type = BTRFS_COMPRESS_NONE;
		em->generation = trans->transid;
	}

	while (1) {
		write_lock(&em_tree->lock);
		ret = add_extent_mapping(em_tree, em, 1);
		write_unlock(&em_tree->lock);
		if (ret != -EEXIST) {
			free_extent_map(em);
			break;
		}
		btrfs_drop_extent_cache(inode, em->start,
					em->start + em->len - 1, 0);
	}

3372
	if (ret)
3373 3374 3375 3376
		set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
			&BTRFS_I(inode)->runtime_flags);
}

3377 3378 3379 3380 3381 3382 3383 3384 3385 3386 3387 3388 3389 3390 3391 3392 3393 3394 3395 3396 3397 3398 3399 3400 3401 3402 3403 3404 3405 3406 3407 3408 3409 3410 3411 3412
/*
 * Make sure we do not end up inserting an inline extent into a file that has
 * already other (non-inline) extents. If a file has an inline extent it can
 * not have any other extents and the (single) inline extent must start at the
 * file offset 0. Failing to respect these rules will lead to file corruption,
 * resulting in EIO errors on read/write operations, hitting BUG_ON's in mm, etc
 *
 * We can have extents that have been already written to disk or we can have
 * dirty ranges still in delalloc, in which case the extent maps and items are
 * created only when we run delalloc, and the delalloc ranges might fall outside
 * the range we are currently locking in the inode's io tree. So we check the
 * inode's i_size because of that (i_size updates are done while holding the
 * i_mutex, which we are holding here).
 * We also check to see if the inode has a size not greater than "datal" but has
 * extents beyond it, due to an fallocate with FALLOC_FL_KEEP_SIZE (and we are
 * protected against such concurrent fallocate calls by the i_mutex).
 *
 * If the file has no extents but a size greater than datal, do not allow the
 * copy because we would need to turn the inline extent into a non-inline one (even
 * with NO_HOLES enabled). If we find our destination inode only has one inline
 * extent, just overwrite it with the source inline extent if its size is less
 * than the source extent's size, or we could copy the source inline extent's
 * data into the destination inode's inline extent if the latter is greater than
 * the former.
 */
static int clone_copy_inline_extent(struct inode *src,
				    struct inode *dst,
				    struct btrfs_trans_handle *trans,
				    struct btrfs_path *path,
				    struct btrfs_key *new_key,
				    const u64 drop_start,
				    const u64 datal,
				    const u64 skip,
				    const u64 size,
				    char *inline_data)
{
3413
	struct btrfs_fs_info *fs_info = btrfs_sb(dst->i_sb);
3414 3415
	struct btrfs_root *root = BTRFS_I(dst)->root;
	const u64 aligned_end = ALIGN(new_key->offset + datal,
3416
				      fs_info->sectorsize);
3417 3418 3419 3420 3421 3422 3423 3424 3425 3426 3427 3428 3429 3430 3431 3432 3433 3434 3435 3436 3437 3438 3439 3440 3441 3442 3443 3444 3445 3446 3447 3448 3449 3450 3451 3452 3453 3454 3455 3456 3457 3458 3459 3460 3461 3462 3463 3464 3465 3466 3467 3468 3469 3470 3471 3472 3473 3474 3475 3476 3477 3478 3479 3480 3481 3482 3483 3484 3485 3486 3487 3488 3489 3490 3491 3492 3493 3494 3495 3496 3497 3498 3499 3500 3501 3502 3503 3504 3505 3506 3507 3508 3509 3510 3511 3512 3513 3514 3515 3516 3517 3518 3519 3520 3521
	int ret;
	struct btrfs_key key;

	if (new_key->offset > 0)
		return -EOPNOTSUPP;

	key.objectid = btrfs_ino(dst);
	key.type = BTRFS_EXTENT_DATA_KEY;
	key.offset = 0;
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0) {
		return ret;
	} else if (ret > 0) {
		if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				return ret;
			else if (ret > 0)
				goto copy_inline_extent;
		}
		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
		if (key.objectid == btrfs_ino(dst) &&
		    key.type == BTRFS_EXTENT_DATA_KEY) {
			ASSERT(key.offset > 0);
			return -EOPNOTSUPP;
		}
	} else if (i_size_read(dst) <= datal) {
		struct btrfs_file_extent_item *ei;
		u64 ext_len;

		/*
		 * If the file size is <= datal, make sure there are no other
		 * extents following (can happen due to an fallocate call with
		 * the flag FALLOC_FL_KEEP_SIZE).
		 */
		ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
				    struct btrfs_file_extent_item);
		/*
		 * If it's an inline extent, it can not have other extents
		 * following it.
		 */
		if (btrfs_file_extent_type(path->nodes[0], ei) ==
		    BTRFS_FILE_EXTENT_INLINE)
			goto copy_inline_extent;

		ext_len = btrfs_file_extent_num_bytes(path->nodes[0], ei);
		if (ext_len > aligned_end)
			return -EOPNOTSUPP;

		ret = btrfs_next_item(root, path);
		if (ret < 0) {
			return ret;
		} else if (ret == 0) {
			btrfs_item_key_to_cpu(path->nodes[0], &key,
					      path->slots[0]);
			if (key.objectid == btrfs_ino(dst) &&
			    key.type == BTRFS_EXTENT_DATA_KEY)
				return -EOPNOTSUPP;
		}
	}

copy_inline_extent:
	/*
	 * We have no extent items, or we have an extent at offset 0 which may
	 * or may not be inlined. All these cases are dealt with the same way.
	 */
	if (i_size_read(dst) > datal) {
		/*
		 * If the destination inode has an inline extent...
		 * This would require copying the data from the source inline
		 * extent into the beginning of the destination's inline extent.
		 * But this is really complex, both extents can be compressed
		 * or just one of them, which would require decompressing and
		 * re-compressing data (which could increase the new compressed
		 * size, not allowing the compressed data to fit anymore in an
		 * inline extent).
		 * So just don't support this case for now (it should be rare,
		 * we are not really saving space when cloning inline extents).
		 */
		return -EOPNOTSUPP;
	}

	btrfs_release_path(path);
	ret = btrfs_drop_extents(trans, root, dst, drop_start, aligned_end, 1);
	if (ret)
		return ret;
	ret = btrfs_insert_empty_item(trans, root, path, new_key, size);
	if (ret)
		return ret;

	if (skip) {
		const u32 start = btrfs_file_extent_calc_inline_size(0);

		memmove(inline_data + start, inline_data + start + skip, datal);
	}

	write_extent_buffer(path->nodes[0], inline_data,
			    btrfs_item_ptr_offset(path->nodes[0],
						  path->slots[0]),
			    size);
	inode_add_bytes(dst, datal);

	return 0;
}

3522 3523 3524 3525 3526 3527 3528
/**
 * btrfs_clone() - clone a range from inode file to another
 *
 * @src: Inode to clone from
 * @inode: Inode to clone to
 * @off: Offset within source to start clone from
 * @olen: Original length, passed by user, of range to clone
3529
 * @olen_aligned: Block-aligned value of olen
3530
 * @destoff: Offset within @inode to start clone
3531
 * @no_time_update: Whether to update mtime/ctime on the target inode
3532 3533
 */
static int btrfs_clone(struct inode *src, struct inode *inode,
3534
		       const u64 off, const u64 olen, const u64 olen_aligned,
3535
		       const u64 destoff, int no_time_update)
C
Christoph Hellwig 已提交
3536
{
3537
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
C
Christoph Hellwig 已提交
3538
	struct btrfs_root *root = BTRFS_I(inode)->root;
3539
	struct btrfs_path *path = NULL;
C
Christoph Hellwig 已提交
3540
	struct extent_buffer *leaf;
3541 3542
	struct btrfs_trans_handle *trans;
	char *buf = NULL;
Y
Yan Zheng 已提交
3543
	struct btrfs_key key;
C
Christoph Hellwig 已提交
3544 3545
	u32 nritems;
	int slot;
Y
Yan Zheng 已提交
3546
	int ret;
3547 3548
	const u64 len = olen_aligned;
	u64 last_dest_end = destoff;
Y
Yan Zheng 已提交
3549 3550

	ret = -ENOMEM;
3551
	buf = kmalloc(fs_info->nodesize, GFP_KERNEL | __GFP_NOWARN);
3552
	if (!buf) {
3553
		buf = vmalloc(fs_info->nodesize);
3554 3555 3556
		if (!buf)
			return ret;
	}
Y
Yan Zheng 已提交
3557 3558 3559

	path = btrfs_alloc_path();
	if (!path) {
3560
		kvfree(buf);
3561
		return ret;
3562 3563
	}

3564
	path->reada = READA_FORWARD;
3565
	/* clone data */
L
Li Zefan 已提交
3566
	key.objectid = btrfs_ino(src);
Y
Yan Zheng 已提交
3567
	key.type = BTRFS_EXTENT_DATA_KEY;
3568
	key.offset = off;
C
Christoph Hellwig 已提交
3569 3570

	while (1) {
		u64 next_key_min_offset = key.offset + 1;

		/*
		 * note the key will change type as we walk through the
		 * tree.
		 */
		path->leave_spinning = 1;
		ret = btrfs_search_slot(NULL, BTRFS_I(src)->root, &key, path,
				0, 0);
		if (ret < 0)
			goto out;
		/*
		 * First search, if no extent item that starts at offset off was
		 * found but the previous item is an extent item, it's possible
		 * it might overlap our target range, therefore process it.
		 */
		if (key.offset == off && ret > 0 && path->slots[0] > 0) {
			btrfs_item_key_to_cpu(path->nodes[0], &key,
					      path->slots[0] - 1);
			if (key.type == BTRFS_EXTENT_DATA_KEY)
				path->slots[0]--;
		}

		nritems = btrfs_header_nritems(path->nodes[0]);
process_slot:
		if (path->slots[0] >= nritems) {
			ret = btrfs_next_leaf(BTRFS_I(src)->root, path);
			if (ret < 0)
				goto out;
			if (ret > 0)
				break;
			nritems = btrfs_header_nritems(path->nodes[0]);
		}
		leaf = path->nodes[0];
		slot = path->slots[0];

		btrfs_item_key_to_cpu(leaf, &key, slot);
		if (key.type > BTRFS_EXTENT_DATA_KEY ||
		    key.objectid != btrfs_ino(src))
			break;

		if (key.type == BTRFS_EXTENT_DATA_KEY) {
			struct btrfs_file_extent_item *extent;
			int type;
			u32 size;
			struct btrfs_key new_key;
			u64 disko = 0, diskl = 0;
			u64 datao = 0, datal = 0;
			u8 comp;
			u64 drop_start;

			extent = btrfs_item_ptr(leaf, slot,
						struct btrfs_file_extent_item);
			comp = btrfs_file_extent_compression(leaf, extent);
			type = btrfs_file_extent_type(leaf, extent);
			if (type == BTRFS_FILE_EXTENT_REG ||
			    type == BTRFS_FILE_EXTENT_PREALLOC) {
				disko = btrfs_file_extent_disk_bytenr(leaf,
								      extent);
				diskl = btrfs_file_extent_disk_num_bytes(leaf,
								 extent);
				datao = btrfs_file_extent_offset(leaf, extent);
				datal = btrfs_file_extent_num_bytes(leaf,
								    extent);
			} else if (type == BTRFS_FILE_EXTENT_INLINE) {
				/* take upper bound, may be compressed */
				datal = btrfs_file_extent_ram_bytes(leaf,
								    extent);
			}

			/*
			 * The first search might have left us at an extent
			 * item that ends before our target range's start, can
			 * happen if we have holes and NO_HOLES feature enabled.
			 */
			if (key.offset + datal <= off) {
				path->slots[0]++;
				goto process_slot;
			} else if (key.offset >= off + len) {
				break;
			}
			next_key_min_offset = key.offset + datal;
			size = btrfs_item_size_nr(leaf, slot);
			read_extent_buffer(leaf, buf,
					   btrfs_item_ptr_offset(leaf, slot),
					   size);

			btrfs_release_path(path);
			path->leave_spinning = 0;

			memcpy(&new_key, &key, sizeof(new_key));
			new_key.objectid = btrfs_ino(inode);
			if (off <= key.offset)
				new_key.offset = key.offset + destoff - off;
			else
				new_key.offset = destoff;

			/*
			 * Deal with a hole that doesn't have an extent item
			 * that represents it (NO_HOLES feature enabled).
			 * This hole is either in the middle of the cloning
			 * range or at the beginning (fully overlaps it or
			 * partially overlaps it).
			 */
			if (new_key.offset != last_dest_end)
				drop_start = last_dest_end;
			else
				drop_start = new_key.offset;

			/*
			 * 1 - adjusting old extent (we may have to split it)
			 * 1 - add new extent
			 * 1 - inode update
			 */
			trans = btrfs_start_transaction(root, 3);
			if (IS_ERR(trans)) {
				ret = PTR_ERR(trans);
				goto out;
			}

			if (type == BTRFS_FILE_EXTENT_REG ||
			    type == BTRFS_FILE_EXTENT_PREALLOC) {
				/*
				 *    a  | --- range to clone ---|  b
				 * | ------------- extent ------------- |
				 */

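				/*
				 * Example with hypothetical numbers: for an
				 * extent item covering file range [0, 1M) and
				 * a clone request with off = 256K and
				 * len = 512K, the "b" clamp below shrinks
				 * datal so the copy stops at off + len, and
				 * the "a" clamp advances datao/datal by 256K
				 * so it starts at off.  The new item then
				 * references exactly [256K, 768K) of the
				 * shared on-disk extent.
				 */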
				/* subtract range b */
				if (key.offset + datal > off + len)
					datal = off + len - key.offset;

				/* subtract range a */
				if (off > key.offset) {
					datao += off - key.offset;
					datal -= off - key.offset;
				}

				ret = btrfs_drop_extents(trans, root, inode,
							 drop_start,
							 new_key.offset + datal,
							 1);
				if (ret) {
					if (ret != -EOPNOTSUPP)
						btrfs_abort_transaction(trans,
									ret);
					btrfs_end_transaction(trans);
					goto out;
				}

				ret = btrfs_insert_empty_item(trans, root, path,
							      &new_key, size);
				if (ret) {
					btrfs_abort_transaction(trans, ret);
					btrfs_end_transaction(trans);
					goto out;
				}

				leaf = path->nodes[0];
				slot = path->slots[0];
				write_extent_buffer(leaf, buf,
					    btrfs_item_ptr_offset(leaf, slot),
					    size);

				extent = btrfs_item_ptr(leaf, slot,
						struct btrfs_file_extent_item);

				/* disko == 0 means it's a hole */
				if (!disko)
					datao = 0;

				btrfs_set_file_extent_offset(leaf, extent,
							     datao);
				btrfs_set_file_extent_num_bytes(leaf, extent,
								datal);

				if (disko) {
					inode_add_bytes(inode, datal);
					ret = btrfs_inc_extent_ref(trans,
							fs_info,
							disko, diskl, 0,
							root->root_key.objectid,
							btrfs_ino(inode),
							new_key.offset - datao);
					if (ret) {
						btrfs_abort_transaction(trans,
									ret);
						btrfs_end_transaction(trans);
						goto out;

					}
				}
			} else if (type == BTRFS_FILE_EXTENT_INLINE) {
				u64 skip = 0;
				u64 trim = 0;

				if (off > key.offset) {
					skip = off - key.offset;
					new_key.offset += skip;
				}

				if (key.offset + datal > off + len)
					trim = key.offset + datal - (off + len);

				if (comp && (skip || trim)) {
					ret = -EINVAL;
					btrfs_end_transaction(trans);
					goto out;
				}
				size -= skip + trim;
				datal -= skip + trim;

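				/*
				 * skip/trim are the bytes of inline data to
				 * drop from the front and tail of the copy;
				 * compressed inline data cannot be split,
				 * which is why that combination was rejected
				 * above with -EINVAL.
				 */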
				ret = clone_copy_inline_extent(src, inode,
							       trans, path,
							       &new_key,
							       drop_start,
							       datal,
							       skip, size, buf);
				if (ret) {
					if (ret != -EOPNOTSUPP)
						btrfs_abort_transaction(trans,
									ret);
					btrfs_end_transaction(trans);
					goto out;
				}
				leaf = path->nodes[0];
				slot = path->slots[0];
			}

			/* If we have an implicit hole (NO_HOLES feature). */
			if (drop_start < new_key.offset)
				clone_update_extent_map(inode, trans,
						NULL, drop_start,
						new_key.offset - drop_start);

			clone_update_extent_map(inode, trans, path, 0, 0);

			btrfs_mark_buffer_dirty(leaf);
			btrfs_release_path(path);

			last_dest_end = ALIGN(new_key.offset + datal,
					      fs_info->sectorsize);
			ret = clone_finish_inode_update(trans, inode,
							last_dest_end,
							destoff, olen,
							no_time_update);
			if (ret)
				goto out;
			if (new_key.offset + datal >= destoff + len)
				break;
		}
		btrfs_release_path(path);
		key.offset = next_key_min_offset;

		if (fatal_signal_pending(current)) {
			ret = -EINTR;
			goto out;
		}
	}
	ret = 0;

	if (last_dest_end < destoff + len) {
		/*
		 * We have an implicit hole (NO_HOLES feature is enabled) that
		 * fully or partially overlaps our cloning range at its end.
		 */
		btrfs_release_path(path);

		/*
		 * 1 - remove extent(s)
		 * 1 - inode update
		 */
		trans = btrfs_start_transaction(root, 2);
		if (IS_ERR(trans)) {
			ret = PTR_ERR(trans);
			goto out;
		}
		ret = btrfs_drop_extents(trans, root, inode,
					 last_dest_end, destoff + len, 1);
		if (ret) {
			if (ret != -EOPNOTSUPP)
				btrfs_abort_transaction(trans, ret);
			btrfs_end_transaction(trans);
			goto out;
		}
		clone_update_extent_map(inode, trans, NULL, last_dest_end,
					destoff + len - last_dest_end);
		ret = clone_finish_inode_update(trans, inode, destoff + len,
						destoff, olen, no_time_update);
	}

out:
	btrfs_free_path(path);
	kvfree(buf);
	return ret;
}

static noinline int btrfs_clone_files(struct file *file, struct file *file_src,
					u64 off, u64 olen, u64 destoff)
{
	struct inode *inode = file_inode(file);
	struct inode *src = file_inode(file_src);
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_root *root = BTRFS_I(inode)->root;
	int ret;
	u64 len = olen;
	u64 bs = fs_info->sb->s_blocksize;
	int same_inode = src == inode;

	/*
	 * TODO:
	 * - split compressed inline extents.  annoying: we need to
	 *   decompress into destination's address_space (the file offset
	 *   may change, so source mapping won't do), then recompress (or
	 *   otherwise reinsert) a subrange.
	 *
	 * - split destination inode's inline extents.  The inline extents can
	 *   be either compressed or non-compressed.
	 */

	if (btrfs_root_readonly(root))
		return -EROFS;

	if (file_src->f_path.mnt != file->f_path.mnt ||
	    src->i_sb != inode->i_sb)
		return -EXDEV;

	/* don't make the dst file partly checksummed */
	if ((BTRFS_I(src)->flags & BTRFS_INODE_NODATASUM) !=
	    (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM))
		return -EINVAL;

	if (S_ISDIR(src->i_mode) || S_ISDIR(inode->i_mode))
		return -EISDIR;

	if (!same_inode) {
		btrfs_double_inode_lock(src, inode);
	} else {
		inode_lock(src);
	}

	/* determine range to clone */
	ret = -EINVAL;
	if (off + len > src->i_size || off + len < off)
		goto out_unlock;
	if (len == 0)
		olen = len = src->i_size - off;
	/* if we extend to eof, continue to block boundary */
	if (off + len == src->i_size)
		len = ALIGN(src->i_size, bs) - off;

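	/*
	 * Example with hypothetical numbers: with a 4K block size and a 10K
	 * source file, cloning the whole file rounds len up to 12K so the
	 * final, partially used block is covered as well; the destination
	 * i_size is still clamped to destoff + olen in
	 * clone_finish_inode_update(), not extended to the rounded length.
	 */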
	if (len == 0) {
		ret = 0;
		goto out_unlock;
	}

	/* verify the end result is block aligned */
	if (!IS_ALIGNED(off, bs) || !IS_ALIGNED(off + len, bs) ||
	    !IS_ALIGNED(destoff, bs))
		goto out_unlock;

	/* verify if ranges are overlapped within the same file */
	if (same_inode) {
		if (destoff + len > off && destoff < off + len)
			goto out_unlock;
	}

	if (destoff > inode->i_size) {
		ret = btrfs_cont_expand(inode, inode->i_size, destoff);
		if (ret)
			goto out_unlock;
	}

	/*
	 * Lock the target range too. Right after we replace the file extent
	 * items in the fs tree (which now point to the cloned data), we might
	 * have a worker replace them with extent items relative to a write
	 * operation that was issued before this clone operation (i.e. confront
	 * with inode.c:btrfs_finish_ordered_io).
	 */
	if (same_inode) {
		u64 lock_start = min_t(u64, off, destoff);
		u64 lock_len = max_t(u64, off, destoff) + len - lock_start;

		ret = lock_extent_range(src, lock_start, lock_len, true);
	} else {
		ret = btrfs_double_extent_lock(src, off, inode, destoff, len,
					       true);
	}
	ASSERT(ret == 0);
	if (WARN_ON(ret)) {
		/* ranges in the io trees already unlocked */
		goto out_unlock;
	}

	ret = btrfs_clone(src, inode, off, olen, len, destoff, 0);

	if (same_inode) {
		u64 lock_start = min_t(u64, off, destoff);
		u64 lock_end = max_t(u64, off, destoff) + len - 1;

		unlock_extent(&BTRFS_I(src)->io_tree, lock_start, lock_end);
	} else {
		btrfs_double_extent_unlock(src, off, inode, destoff, len);
	}
	/*
	 * Truncate page cache pages so that future reads will see the cloned
	 * data immediately and not the previous data.
	 */
	truncate_inode_pages_range(&inode->i_data,
				round_down(destoff, PAGE_SIZE),
				round_up(destoff + len, PAGE_SIZE) - 1);
out_unlock:
	if (!same_inode)
		btrfs_double_inode_unlock(src, inode);
	else
		inode_unlock(src);
	return ret;
}

int btrfs_clone_file_range(struct file *src_file, loff_t off,
		struct file *dst_file, loff_t destoff, u64 len)
{
	return btrfs_clone_files(dst_file, src_file, off, len, destoff);
}

C
Christoph Hellwig 已提交
3996 3997 3998 3999 4000 4001
/*
 * there are many ways the trans_start and trans_end ioctls can lead
 * to deadlocks.  They should only be used by applications that
 * basically own the machine, and have a very in depth understanding
 * of all the possible deadlocks and enospc problems.
 */
4002
static long btrfs_ioctl_trans_start(struct file *file)
C
Christoph Hellwig 已提交
4003
{
A
Al Viro 已提交
4004
	struct inode *inode = file_inode(file);
4005
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
C
Christoph Hellwig 已提交
4006 4007
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_trans_handle *trans;
4008
	int ret;
C
Christoph Hellwig 已提交
4009

4010
	ret = -EPERM;
4011
	if (!capable(CAP_SYS_ADMIN))
4012
		goto out;
4013

4014 4015
	ret = -EINPROGRESS;
	if (file->private_data)
C
Christoph Hellwig 已提交
4016
		goto out;
4017

L
Li Zefan 已提交
4018 4019 4020 4021
	ret = -EROFS;
	if (btrfs_root_readonly(root))
		goto out;

4022
	ret = mnt_want_write_file(file);
Y
Yan Zheng 已提交
4023 4024 4025
	if (ret)
		goto out;

4026
	atomic_inc(&fs_info->open_ioctl_trans);
4027

4028
	ret = -ENOMEM;
4029
	trans = btrfs_start_ioctl_transaction(root);
4030
	if (IS_ERR(trans))
4031 4032 4033 4034 4035 4036
		goto out_drop;

	file->private_data = trans;
	return 0;

out_drop:
4037
	atomic_dec(&fs_info->open_ioctl_trans);
A
Al Viro 已提交
4038
	mnt_drop_write_file(file);
C
Christoph Hellwig 已提交
4039 4040 4041 4042
out:
	return ret;
}

4043 4044
static long btrfs_ioctl_default_subvol(struct file *file, void __user *argp)
{
A
Al Viro 已提交
4045
	struct inode *inode = file_inode(file);
4046
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
4047 4048 4049 4050 4051 4052 4053 4054 4055
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_root *new_root;
	struct btrfs_dir_item *di;
	struct btrfs_trans_handle *trans;
	struct btrfs_path *path;
	struct btrfs_key location;
	struct btrfs_disk_key disk_key;
	u64 objectid = 0;
	u64 dir_id;
4056
	int ret;
4057 4058 4059 4060

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

4061 4062 4063 4064 4065 4066 4067 4068
	ret = mnt_want_write_file(file);
	if (ret)
		return ret;

	if (copy_from_user(&objectid, argp, sizeof(objectid))) {
		ret = -EFAULT;
		goto out;
	}
4069 4070

	if (!objectid)
4071
		objectid = BTRFS_FS_TREE_OBJECTID;
4072 4073 4074 4075 4076

	location.objectid = objectid;
	location.type = BTRFS_ROOT_ITEM_KEY;
	location.offset = (u64)-1;

4077
	new_root = btrfs_read_fs_root_no_name(fs_info, &location);
4078 4079 4080 4081
	if (IS_ERR(new_root)) {
		ret = PTR_ERR(new_root);
		goto out;
	}
4082 4083

	path = btrfs_alloc_path();
4084 4085 4086 4087
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}
4088 4089 4090
	path->leave_spinning = 1;

	trans = btrfs_start_transaction(root, 1);
4091
	if (IS_ERR(trans)) {
4092
		btrfs_free_path(path);
4093 4094
		ret = PTR_ERR(trans);
		goto out;
4095 4096
	}

4097 4098
	dir_id = btrfs_super_root_dir(fs_info->super_copy);
	di = btrfs_lookup_dir_item(trans, fs_info->tree_root, path,
4099
				   dir_id, "default", 7, 1);
4100
	if (IS_ERR_OR_NULL(di)) {
4101
		btrfs_free_path(path);
4102
		btrfs_end_transaction(trans);
4103
		btrfs_err(fs_info,
J
Jeff Mahoney 已提交
4104
			  "Umm, you don't have the default diritem, this isn't going to work");
4105 4106
		ret = -ENOENT;
		goto out;
4107 4108 4109 4110 4111 4112 4113
	}

	btrfs_cpu_key_to_disk(&disk_key, &new_root->root_key);
	btrfs_set_dir_item_key(path->nodes[0], di, &disk_key);
	btrfs_mark_buffer_dirty(path->nodes[0]);
	btrfs_free_path(path);

4114
	btrfs_set_fs_incompat(fs_info, DEFAULT_SUBVOL);
4115
	btrfs_end_transaction(trans);
4116 4117 4118
out:
	mnt_drop_write_file(file);
	return ret;
4119 4120
}

4121 4122
void btrfs_get_block_group_info(struct list_head *groups_list,
				struct btrfs_ioctl_space_info *space)
4123 4124 4125 4126 4127 4128 4129 4130 4131 4132 4133 4134 4135 4136
{
	struct btrfs_block_group_cache *block_group;

	space->total_bytes = 0;
	space->used_bytes = 0;
	space->flags = 0;
	list_for_each_entry(block_group, groups_list, list) {
		space->flags = block_group->flags;
		space->total_bytes += block_group->key.offset;
		space->used_bytes +=
			btrfs_block_group_used(&block_group->item);
	}
}

4137 4138
static long btrfs_ioctl_space_info(struct btrfs_fs_info *fs_info,
				   void __user *arg)
J
Josef Bacik 已提交
4139 4140 4141 4142
{
	struct btrfs_ioctl_space_args space_args;
	struct btrfs_ioctl_space_info space;
	struct btrfs_ioctl_space_info *dest;
4143
	struct btrfs_ioctl_space_info *dest_orig;
4144
	struct btrfs_ioctl_space_info __user *user_dest;
J
Josef Bacik 已提交
4145
	struct btrfs_space_info *info;
4146 4147 4148 4149 4150
	u64 types[] = {BTRFS_BLOCK_GROUP_DATA,
		       BTRFS_BLOCK_GROUP_SYSTEM,
		       BTRFS_BLOCK_GROUP_METADATA,
		       BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_METADATA};
	int num_types = 4;
4151
	int alloc_size;
J
Josef Bacik 已提交
4152
	int ret = 0;
4153
	u64 slot_count = 0;
4154
	int i, c;
J
Josef Bacik 已提交
4155 4156 4157 4158 4159 4160

	if (copy_from_user(&space_args,
			   (struct btrfs_ioctl_space_args __user *)arg,
			   sizeof(space_args)))
		return -EFAULT;

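	/*
	 * Two passes over the space infos: first count how many slots are
	 * needed so userspace can size its buffer (space_slots == 0 asks for
	 * the count), then fill in at most space_slots entries below,
	 * including one extra pseudo entry for the global block reserve.
	 */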
	for (i = 0; i < num_types; i++) {
		struct btrfs_space_info *tmp;

		info = NULL;
		rcu_read_lock();
4166
		list_for_each_entry_rcu(tmp, &fs_info->space_info,
4167 4168 4169 4170 4171 4172 4173 4174 4175 4176 4177 4178 4179 4180 4181 4182 4183 4184
					list) {
			if (tmp->flags == types[i]) {
				info = tmp;
				break;
			}
		}
		rcu_read_unlock();

		if (!info)
			continue;

		down_read(&info->groups_sem);
		for (c = 0; c < BTRFS_NR_RAID_TYPES; c++) {
			if (!list_empty(&info->block_groups[c]))
				slot_count++;
		}
		up_read(&info->groups_sem);
	}
4185

4186 4187 4188 4189 4190
	/*
	 * Global block reserve, exported as a space_info
	 */
	slot_count++;

4191 4192 4193 4194 4195
	/* space_slots == 0 means they are asking for a count */
	if (space_args.space_slots == 0) {
		space_args.total_spaces = slot_count;
		goto out;
	}
4196

4197
	slot_count = min_t(u64, space_args.space_slots, slot_count);
4198

4199
	alloc_size = sizeof(*dest) * slot_count;
4200

4201 4202 4203
	/* we generally have at most 6 or so space infos, one for each raid
	 * level.  So, a whole page should be more than enough for everyone
	 */
4204
	if (alloc_size > PAGE_SIZE)
4205 4206
		return -ENOMEM;

J
Josef Bacik 已提交
4207
	space_args.total_spaces = 0;
4208
	dest = kmalloc(alloc_size, GFP_KERNEL);
4209 4210 4211
	if (!dest)
		return -ENOMEM;
	dest_orig = dest;
J
Josef Bacik 已提交
4212

4213
	/* now we have a buffer to copy into */
4214 4215 4216
	for (i = 0; i < num_types; i++) {
		struct btrfs_space_info *tmp;

4217 4218 4219
		if (!slot_count)
			break;

4220 4221
		info = NULL;
		rcu_read_lock();
4222
		list_for_each_entry_rcu(tmp, &fs_info->space_info,
4223 4224 4225 4226 4227 4228 4229
					list) {
			if (tmp->flags == types[i]) {
				info = tmp;
				break;
			}
		}
		rcu_read_unlock();
4230

4231 4232 4233 4234 4235
		if (!info)
			continue;
		down_read(&info->groups_sem);
		for (c = 0; c < BTRFS_NR_RAID_TYPES; c++) {
			if (!list_empty(&info->block_groups[c])) {
4236 4237
				btrfs_get_block_group_info(
					&info->block_groups[c], &space);
4238 4239 4240
				memcpy(dest, &space, sizeof(space));
				dest++;
				space_args.total_spaces++;
4241
				slot_count--;
4242
			}
4243 4244
			if (!slot_count)
				break;
4245 4246
		}
		up_read(&info->groups_sem);
J
Josef Bacik 已提交
4247 4248
	}

4249 4250 4251 4252
	/*
	 * Add global block reserve
	 */
	if (slot_count) {
4253
		struct btrfs_block_rsv *block_rsv = &fs_info->global_block_rsv;
4254 4255 4256 4257 4258 4259 4260 4261 4262 4263

		spin_lock(&block_rsv->lock);
		space.total_bytes = block_rsv->size;
		space.used_bytes = block_rsv->size - block_rsv->reserved;
		spin_unlock(&block_rsv->lock);
		space.flags = BTRFS_SPACE_INFO_GLOBAL_RSV;
		memcpy(dest, &space, sizeof(space));
		space_args.total_spaces++;
	}

D
Daniel J Blueman 已提交
4264
	user_dest = (struct btrfs_ioctl_space_info __user *)
4265 4266 4267 4268 4269 4270 4271 4272
		(arg + sizeof(struct btrfs_ioctl_space_args));

	if (copy_to_user(user_dest, dest_orig, alloc_size))
		ret = -EFAULT;

	kfree(dest_orig);
out:
	if (ret == 0 && copy_to_user(arg, &space_args, sizeof(space_args)))
J
Josef Bacik 已提交
4273 4274 4275 4276 4277
		ret = -EFAULT;

	return ret;
}

C
Christoph Hellwig 已提交
4278 4279 4280 4281 4282 4283 4284 4285
/*
 * there are many ways the trans_start and trans_end ioctls can lead
 * to deadlocks.  They should only be used by applications that
 * basically own the machine, and have a very in depth understanding
 * of all the possible deadlocks and enospc problems.
 */
long btrfs_ioctl_trans_end(struct file *file)
{
A
Al Viro 已提交
4286
	struct inode *inode = file_inode(file);
C
Christoph Hellwig 已提交
4287 4288 4289 4290
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_trans_handle *trans;

	trans = file->private_data;
4291 4292
	if (!trans)
		return -EINVAL;
C
Christoph Hellwig 已提交
4293
	file->private_data = NULL;
4294

4295
	btrfs_end_transaction(trans);
4296

J
Josef Bacik 已提交
4297
	atomic_dec(&root->fs_info->open_ioctl_trans);
4298

A
Al Viro 已提交
4299
	mnt_drop_write_file(file);
4300
	return 0;
C
Christoph Hellwig 已提交
4301 4302
}

4303 4304
static noinline long btrfs_ioctl_start_sync(struct btrfs_root *root,
					    void __user *argp)
4305 4306 4307
{
	struct btrfs_trans_handle *trans;
	u64 transid;
T
Tsutomu Itoh 已提交
4308
	int ret;
4309

M
Miao Xie 已提交
4310
	trans = btrfs_attach_transaction_barrier(root);
4311 4312 4313 4314 4315 4316 4317 4318
	if (IS_ERR(trans)) {
		if (PTR_ERR(trans) != -ENOENT)
			return PTR_ERR(trans);

		/* No running transaction, don't bother */
		transid = root->fs_info->last_trans_committed;
		goto out;
	}
4319
	transid = trans->transid;
4320
	ret = btrfs_commit_transaction_async(trans, 0);
4321
	if (ret) {
4322
		btrfs_end_transaction(trans);
T
Tsutomu Itoh 已提交
4323
		return ret;
4324
	}
4325
out:
4326 4327 4328 4329 4330 4331
	if (argp)
		if (copy_to_user(argp, &transid, sizeof(transid)))
			return -EFAULT;
	return 0;
}

4332
static noinline long btrfs_ioctl_wait_sync(struct btrfs_fs_info *fs_info,
4333
					   void __user *argp)
4334 4335 4336 4337 4338 4339 4340 4341 4342
{
	u64 transid;

	if (argp) {
		if (copy_from_user(&transid, argp, sizeof(transid)))
			return -EFAULT;
	} else {
		transid = 0;  /* current trans */
	}
4343
	return btrfs_wait_for_commit(fs_info, transid);
4344 4345
}

M
Miao Xie 已提交
4346
static long btrfs_ioctl_scrub(struct file *file, void __user *arg)
J
Jan Schmidt 已提交
4347
{
4348
	struct btrfs_fs_info *fs_info = btrfs_sb(file_inode(file)->i_sb);
J
Jan Schmidt 已提交
4349
	struct btrfs_ioctl_scrub_args *sa;
M
Miao Xie 已提交
4350
	int ret;
J
Jan Schmidt 已提交
4351 4352 4353 4354 4355 4356 4357 4358

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	sa = memdup_user(arg, sizeof(*sa));
	if (IS_ERR(sa))
		return PTR_ERR(sa);

M
Miao Xie 已提交
4359 4360 4361 4362 4363 4364
	if (!(sa->flags & BTRFS_SCRUB_READONLY)) {
		ret = mnt_want_write_file(file);
		if (ret)
			goto out;
	}

4365
	ret = btrfs_scrub_dev(fs_info, sa->devid, sa->start, sa->end,
4366 4367
			      &sa->progress, sa->flags & BTRFS_SCRUB_READONLY,
			      0);
J
Jan Schmidt 已提交
4368 4369 4370 4371

	if (copy_to_user(arg, sa, sizeof(*sa)))
		ret = -EFAULT;

M
Miao Xie 已提交
4372 4373 4374
	if (!(sa->flags & BTRFS_SCRUB_READONLY))
		mnt_drop_write_file(file);
out:
J
Jan Schmidt 已提交
4375 4376 4377 4378
	kfree(sa);
	return ret;
}

4379
static long btrfs_ioctl_scrub_cancel(struct btrfs_fs_info *fs_info)
J
Jan Schmidt 已提交
4380 4381 4382 4383
{
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

4384
	return btrfs_scrub_cancel(fs_info);
J
Jan Schmidt 已提交
4385 4386
}

4387
static long btrfs_ioctl_scrub_progress(struct btrfs_fs_info *fs_info,
J
Jan Schmidt 已提交
4388 4389 4390 4391 4392 4393 4394 4395 4396 4397 4398 4399
				       void __user *arg)
{
	struct btrfs_ioctl_scrub_args *sa;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	sa = memdup_user(arg, sizeof(*sa));
	if (IS_ERR(sa))
		return PTR_ERR(sa);

4400
	ret = btrfs_scrub_progress(fs_info, sa->devid, &sa->progress);
J
Jan Schmidt 已提交
4401 4402 4403 4404 4405 4406 4407 4408

	if (copy_to_user(arg, sa, sizeof(*sa)))
		ret = -EFAULT;

	kfree(sa);
	return ret;
}

4409
static long btrfs_ioctl_get_dev_stats(struct btrfs_fs_info *fs_info,
4410
				      void __user *arg)
4411 4412 4413 4414 4415 4416 4417 4418
{
	struct btrfs_ioctl_get_dev_stats *sa;
	int ret;

	sa = memdup_user(arg, sizeof(*sa));
	if (IS_ERR(sa))
		return PTR_ERR(sa);

4419 4420 4421 4422 4423
	if ((sa->flags & BTRFS_DEV_STATS_RESET) && !capable(CAP_SYS_ADMIN)) {
		kfree(sa);
		return -EPERM;
	}

4424
	ret = btrfs_get_dev_stats(fs_info, sa);
4425 4426 4427 4428 4429 4430 4431 4432

	if (copy_to_user(arg, sa, sizeof(*sa)))
		ret = -EFAULT;

	kfree(sa);
	return ret;
}

4433 4434
static long btrfs_ioctl_dev_replace(struct btrfs_fs_info *fs_info,
				    void __user *arg)
4435 4436 4437 4438 4439 4440 4441 4442 4443 4444 4445 4446 4447
{
	struct btrfs_ioctl_dev_replace_args *p;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	p = memdup_user(arg, sizeof(*p));
	if (IS_ERR(p))
		return PTR_ERR(p);

	switch (p->cmd) {
	case BTRFS_IOCTL_DEV_REPLACE_CMD_START:
4448
		if (fs_info->sb->s_flags & MS_RDONLY) {
4449 4450 4451
			ret = -EROFS;
			goto out;
		}
4452
		if (atomic_xchg(
4453
			&fs_info->mutually_exclusive_operation_running, 1)) {
4454
			ret = BTRFS_ERROR_DEV_EXCL_RUN_IN_PROGRESS;
4455
		} else {
4456
			ret = btrfs_dev_replace_by_ioctl(fs_info, p);
4457
			atomic_set(
4458
			 &fs_info->mutually_exclusive_operation_running, 0);
4459 4460 4461
		}
		break;
	case BTRFS_IOCTL_DEV_REPLACE_CMD_STATUS:
4462
		btrfs_dev_replace_status(fs_info, p);
4463 4464 4465
		ret = 0;
		break;
	case BTRFS_IOCTL_DEV_REPLACE_CMD_CANCEL:
4466
		ret = btrfs_dev_replace_cancel(fs_info, p);
4467 4468 4469 4470 4471 4472 4473 4474
		break;
	default:
		ret = -EINVAL;
		break;
	}

	if (copy_to_user(arg, p, sizeof(*p)))
		ret = -EFAULT;
4475
out:
4476 4477 4478 4479
	kfree(p);
	return ret;
}

4480 4481 4482 4483
static long btrfs_ioctl_ino_to_path(struct btrfs_root *root, void __user *arg)
{
	int ret = 0;
	int i;
4484
	u64 rel_ptr;
4485
	int size;
4486
	struct btrfs_ioctl_ino_path_args *ipa = NULL;
4487 4488 4489
	struct inode_fs_paths *ipath = NULL;
	struct btrfs_path *path;

4490
	if (!capable(CAP_DAC_READ_SEARCH))
4491 4492 4493 4494 4495 4496 4497 4498 4499 4500 4501 4502 4503 4504 4505 4506 4507 4508 4509 4510 4511 4512 4513 4514 4515 4516 4517 4518
		return -EPERM;

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}

	ipa = memdup_user(arg, sizeof(*ipa));
	if (IS_ERR(ipa)) {
		ret = PTR_ERR(ipa);
		ipa = NULL;
		goto out;
	}

	size = min_t(u32, ipa->size, 4096);
	ipath = init_ipath(size, root, path);
	if (IS_ERR(ipath)) {
		ret = PTR_ERR(ipath);
		ipath = NULL;
		goto out;
	}

	ret = paths_from_inode(ipa->inum, ipath);
	if (ret < 0)
		goto out;

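	/*
	 * The fspath values are kernel pointers into the ipath buffer;
	 * convert them to offsets relative to the start of the buffer so
	 * they remain meaningful after being copied to userspace.
	 */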
	for (i = 0; i < ipath->fspath->elem_cnt; ++i) {
		rel_ptr = ipath->fspath->val[i] -
			  (u64)(unsigned long)ipath->fspath->val;
		ipath->fspath->val[i] = rel_ptr;
	}

	ret = copy_to_user((void *)(unsigned long)ipa->fspath,
			   (void *)(unsigned long)ipath->fspath, size);
4526 4527 4528 4529 4530 4531 4532 4533 4534 4535 4536 4537 4538 4539 4540 4541 4542 4543 4544 4545 4546 4547 4548 4549 4550 4551 4552 4553 4554 4555 4556 4557 4558
	if (ret) {
		ret = -EFAULT;
		goto out;
	}

out:
	btrfs_free_path(path);
	free_ipath(ipath);
	kfree(ipa);

	return ret;
}

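/*
 * Callback for iterate_inodes_from_logical(): append an (inum, offset, root)
 * triple to the user supplied data container, or account the bytes that did
 * not fit so userspace can retry with a larger buffer.
 */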
static int build_ino_list(u64 inum, u64 offset, u64 root, void *ctx)
{
	struct btrfs_data_container *inodes = ctx;
	const size_t c = 3 * sizeof(u64);

	if (inodes->bytes_left >= c) {
		inodes->bytes_left -= c;
		inodes->val[inodes->elem_cnt] = inum;
		inodes->val[inodes->elem_cnt + 1] = offset;
		inodes->val[inodes->elem_cnt + 2] = root;
		inodes->elem_cnt += 3;
	} else {
		inodes->bytes_missing += c - inodes->bytes_left;
		inodes->bytes_left = 0;
		inodes->elem_missed += 3;
	}

	return 0;
}

4559
static long btrfs_ioctl_logical_to_ino(struct btrfs_fs_info *fs_info,
4560 4561 4562 4563 4564 4565 4566 4567 4568 4569 4570 4571
					void __user *arg)
{
	int ret = 0;
	int size;
	struct btrfs_ioctl_logical_ino_args *loi;
	struct btrfs_data_container *inodes = NULL;
	struct btrfs_path *path = NULL;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	loi = memdup_user(arg, sizeof(*loi));
4572 4573
	if (IS_ERR(loi))
		return PTR_ERR(loi);
4574 4575 4576 4577 4578 4579 4580

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}

4581
	size = min_t(u32, loi->size, SZ_64K);
4582 4583 4584 4585 4586 4587 4588
	inodes = init_data_container(size);
	if (IS_ERR(inodes)) {
		ret = PTR_ERR(inodes);
		inodes = NULL;
		goto out;
	}

4589
	ret = iterate_inodes_from_logical(loi->logical, fs_info, path,
L
Liu Bo 已提交
4590 4591
					  build_ino_list, inodes);
	if (ret == -EINVAL)
4592 4593 4594 4595
		ret = -ENOENT;
	if (ret < 0)
		goto out;

4596 4597
	ret = copy_to_user((void *)(unsigned long)loi->inodes,
			   (void *)(unsigned long)inodes, size);
4598 4599 4600 4601 4602
	if (ret)
		ret = -EFAULT;

out:
	btrfs_free_path(path);
4603
	vfree(inodes);
4604 4605 4606 4607 4608
	kfree(loi);

	return ret;
}

4609
void update_ioctl_balance_args(struct btrfs_fs_info *fs_info, int lock,
4610 4611 4612 4613 4614 4615
			       struct btrfs_ioctl_balance_args *bargs)
{
	struct btrfs_balance_control *bctl = fs_info->balance_ctl;

	bargs->flags = bctl->flags;

4616 4617 4618 4619
	if (atomic_read(&fs_info->balance_running))
		bargs->state |= BTRFS_BALANCE_STATE_RUNNING;
	if (atomic_read(&fs_info->balance_pause_req))
		bargs->state |= BTRFS_BALANCE_STATE_PAUSE_REQ;
4620 4621
	if (atomic_read(&fs_info->balance_cancel_req))
		bargs->state |= BTRFS_BALANCE_STATE_CANCEL_REQ;
4622

4623 4624 4625
	memcpy(&bargs->data, &bctl->data, sizeof(bargs->data));
	memcpy(&bargs->meta, &bctl->meta, sizeof(bargs->meta));
	memcpy(&bargs->sys, &bctl->sys, sizeof(bargs->sys));
4626 4627 4628 4629 4630 4631 4632 4633

	if (lock) {
		spin_lock(&fs_info->balance_lock);
		memcpy(&bargs->stat, &bctl->stat, sizeof(bargs->stat));
		spin_unlock(&fs_info->balance_lock);
	} else {
		memcpy(&bargs->stat, &bctl->stat, sizeof(bargs->stat));
	}
4634 4635
}

4636
static long btrfs_ioctl_balance(struct file *file, void __user *arg)
4637
{
A
Al Viro 已提交
4638
	struct btrfs_root *root = BTRFS_I(file_inode(file))->root;
4639 4640 4641
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_ioctl_balance_args *bargs;
	struct btrfs_balance_control *bctl;
4642
	bool need_unlock; /* for mut. excl. ops lock */
4643 4644 4645 4646 4647
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

4648
	ret = mnt_want_write_file(file);
4649 4650 4651
	if (ret)
		return ret;

4652 4653 4654 4655 4656 4657 4658 4659 4660
again:
	if (!atomic_xchg(&fs_info->mutually_exclusive_operation_running, 1)) {
		mutex_lock(&fs_info->volume_mutex);
		mutex_lock(&fs_info->balance_mutex);
		need_unlock = true;
		goto locked;
	}

	/*
4661
	 * mut. excl. ops lock is locked.  Three possibilities:
4662 4663 4664 4665
	 *   (1) some other op is running
	 *   (2) balance is running
	 *   (3) balance is paused -- special case (think resume)
	 */
4666
	mutex_lock(&fs_info->balance_mutex);
4667 4668 4669 4670 4671 4672 4673 4674 4675 4676 4677 4678 4679 4680 4681 4682 4683 4684 4685 4686 4687 4688 4689 4690 4691 4692 4693
	if (fs_info->balance_ctl) {
		/* this is either (2) or (3) */
		if (!atomic_read(&fs_info->balance_running)) {
			mutex_unlock(&fs_info->balance_mutex);
			if (!mutex_trylock(&fs_info->volume_mutex))
				goto again;
			mutex_lock(&fs_info->balance_mutex);

			if (fs_info->balance_ctl &&
			    !atomic_read(&fs_info->balance_running)) {
				/* this is (3) */
				need_unlock = false;
				goto locked;
			}

			mutex_unlock(&fs_info->balance_mutex);
			mutex_unlock(&fs_info->volume_mutex);
			goto again;
		} else {
			/* this is (2) */
			mutex_unlock(&fs_info->balance_mutex);
			ret = -EINPROGRESS;
			goto out;
		}
	} else {
		/* this is (1) */
		mutex_unlock(&fs_info->balance_mutex);
4694
		ret = BTRFS_ERROR_DEV_EXCL_RUN_IN_PROGRESS;
4695 4696 4697 4698 4699
		goto out;
	}

locked:
	BUG_ON(!atomic_read(&fs_info->mutually_exclusive_operation_running));
4700 4701 4702 4703 4704

	if (arg) {
		bargs = memdup_user(arg, sizeof(*bargs));
		if (IS_ERR(bargs)) {
			ret = PTR_ERR(bargs);
4705
			goto out_unlock;
4706
		}
4707 4708 4709 4710 4711 4712 4713 4714 4715 4716 4717 4718 4719 4720

		if (bargs->flags & BTRFS_BALANCE_RESUME) {
			if (!fs_info->balance_ctl) {
				ret = -ENOTCONN;
				goto out_bargs;
			}

			bctl = fs_info->balance_ctl;
			spin_lock(&fs_info->balance_lock);
			bctl->flags |= BTRFS_BALANCE_RESUME;
			spin_unlock(&fs_info->balance_lock);

			goto do_balance;
		}
4721 4722 4723 4724
	} else {
		bargs = NULL;
	}

4725
	if (fs_info->balance_ctl) {
4726 4727 4728 4729
		ret = -EINPROGRESS;
		goto out_bargs;
	}

4730
	bctl = kzalloc(sizeof(*bctl), GFP_KERNEL);
4731 4732 4733 4734 4735 4736 4737 4738 4739 4740 4741 4742
	if (!bctl) {
		ret = -ENOMEM;
		goto out_bargs;
	}

	bctl->fs_info = fs_info;
	if (arg) {
		memcpy(&bctl->data, &bargs->data, sizeof(bctl->data));
		memcpy(&bctl->meta, &bargs->meta, sizeof(bctl->meta));
		memcpy(&bctl->sys, &bargs->sys, sizeof(bctl->sys));

		bctl->flags = bargs->flags;
4743 4744 4745
	} else {
		/* balance everything - no filters */
		bctl->flags |= BTRFS_BALANCE_TYPE_MASK;
4746 4747
	}

4748 4749
	if (bctl->flags & ~(BTRFS_BALANCE_ARGS_MASK | BTRFS_BALANCE_TYPE_MASK)) {
		ret = -EINVAL;
4750
		goto out_bctl;
4751 4752
	}

4753
do_balance:
4754
	/*
4755 4756 4757 4758 4759
	 * Ownership of bctl and mutually_exclusive_operation_running
	 * goes to to btrfs_balance.  bctl is freed in __cancel_balance,
	 * or, if restriper was paused all the way until unmount, in
	 * free_fs_info.  mutually_exclusive_operation_running is
	 * cleared in __cancel_balance.
4760
	 */
4761 4762 4763
	need_unlock = false;

	ret = btrfs_balance(bctl, bargs);
4764
	bctl = NULL;
4765

4766 4767 4768 4769 4770
	if (arg) {
		if (copy_to_user(arg, bargs, sizeof(*bargs)))
			ret = -EFAULT;
	}

4771 4772
out_bctl:
	kfree(bctl);
4773 4774
out_bargs:
	kfree(bargs);
4775
out_unlock:
4776 4777
	mutex_unlock(&fs_info->balance_mutex);
	mutex_unlock(&fs_info->volume_mutex);
4778 4779 4780
	if (need_unlock)
		atomic_set(&fs_info->mutually_exclusive_operation_running, 0);
out:
4781
	mnt_drop_write_file(file);
4782 4783 4784
	return ret;
}

4785
static long btrfs_ioctl_balance_ctl(struct btrfs_fs_info *fs_info, int cmd)
4786 4787 4788 4789 4790 4791
{
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	switch (cmd) {
	case BTRFS_BALANCE_CTL_PAUSE:
4792
		return btrfs_pause_balance(fs_info);
4793
	case BTRFS_BALANCE_CTL_CANCEL:
4794
		return btrfs_cancel_balance(fs_info);
4795 4796 4797 4798 4799
	}

	return -EINVAL;
}

4800
static long btrfs_ioctl_balance_progress(struct btrfs_fs_info *fs_info,
4801 4802 4803 4804 4805 4806 4807 4808 4809 4810 4811 4812 4813 4814
					 void __user *arg)
{
	struct btrfs_ioctl_balance_args *bargs;
	int ret = 0;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	mutex_lock(&fs_info->balance_mutex);
	if (!fs_info->balance_ctl) {
		ret = -ENOTCONN;
		goto out;
	}

4815
	bargs = kzalloc(sizeof(*bargs), GFP_KERNEL);
4816 4817 4818 4819 4820 4821 4822 4823 4824 4825 4826 4827 4828 4829 4830 4831
	if (!bargs) {
		ret = -ENOMEM;
		goto out;
	}

	update_ioctl_balance_args(fs_info, 1, bargs);

	if (copy_to_user(arg, bargs, sizeof(*bargs)))
		ret = -EFAULT;

	kfree(bargs);
out:
	mutex_unlock(&fs_info->balance_mutex);
	return ret;
}

4832
static long btrfs_ioctl_quota_ctl(struct file *file, void __user *arg)
A
Arne Jansen 已提交
4833
{
4834 4835
	struct inode *inode = file_inode(file);
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
A
Arne Jansen 已提交
4836 4837 4838 4839 4840 4841 4842 4843
	struct btrfs_ioctl_quota_ctl_args *sa;
	struct btrfs_trans_handle *trans = NULL;
	int ret;
	int err;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

4844 4845 4846
	ret = mnt_want_write_file(file);
	if (ret)
		return ret;
A
Arne Jansen 已提交
4847 4848

	sa = memdup_user(arg, sizeof(*sa));
4849 4850 4851 4852
	if (IS_ERR(sa)) {
		ret = PTR_ERR(sa);
		goto drop_write;
	}
A
Arne Jansen 已提交
4853

4854 4855
	down_write(&fs_info->subvol_sem);
	trans = btrfs_start_transaction(fs_info->tree_root, 2);
J
Jan Schmidt 已提交
4856 4857 4858
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto out;
A
Arne Jansen 已提交
4859 4860 4861 4862
	}

	switch (sa->cmd) {
	case BTRFS_QUOTA_CTL_ENABLE:
4863
		ret = btrfs_quota_enable(trans, fs_info);
A
Arne Jansen 已提交
4864 4865
		break;
	case BTRFS_QUOTA_CTL_DISABLE:
4866
		ret = btrfs_quota_disable(trans, fs_info);
A
Arne Jansen 已提交
4867 4868 4869 4870 4871 4872
		break;
	default:
		ret = -EINVAL;
		break;
	}

4873
	err = btrfs_commit_transaction(trans);
J
Jan Schmidt 已提交
4874 4875
	if (err && !ret)
		ret = err;
A
Arne Jansen 已提交
4876 4877
out:
	kfree(sa);
4878
	up_write(&fs_info->subvol_sem);
4879 4880
drop_write:
	mnt_drop_write_file(file);
A
Arne Jansen 已提交
4881 4882 4883
	return ret;
}

4884
static long btrfs_ioctl_qgroup_assign(struct file *file, void __user *arg)
A
Arne Jansen 已提交
4885
{
4886 4887 4888
	struct inode *inode = file_inode(file);
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_root *root = BTRFS_I(inode)->root;
A
Arne Jansen 已提交
4889 4890 4891 4892 4893 4894 4895 4896
	struct btrfs_ioctl_qgroup_assign_args *sa;
	struct btrfs_trans_handle *trans;
	int ret;
	int err;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

4897 4898 4899
	ret = mnt_want_write_file(file);
	if (ret)
		return ret;
A
Arne Jansen 已提交
4900 4901

	sa = memdup_user(arg, sizeof(*sa));
4902 4903 4904 4905
	if (IS_ERR(sa)) {
		ret = PTR_ERR(sa);
		goto drop_write;
	}
A
Arne Jansen 已提交
4906 4907 4908 4909 4910 4911 4912 4913 4914

	trans = btrfs_join_transaction(root);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto out;
	}

	/* FIXME: check if the IDs really exist */
	if (sa->assign) {
4915
		ret = btrfs_add_qgroup_relation(trans, fs_info,
A
Arne Jansen 已提交
4916 4917
						sa->src, sa->dst);
	} else {
4918
		ret = btrfs_del_qgroup_relation(trans, fs_info,
A
Arne Jansen 已提交
4919 4920 4921
						sa->src, sa->dst);
	}

4922
	/* update qgroup status and info */
4923
	err = btrfs_run_qgroups(trans, fs_info);
4924
	if (err < 0)
4925 4926
		btrfs_handle_fs_error(fs_info, err,
				      "failed to update qgroup status and info");
4927
	err = btrfs_end_transaction(trans);
A
Arne Jansen 已提交
4928 4929 4930 4931 4932
	if (err && !ret)
		ret = err;

out:
	kfree(sa);
4933 4934
drop_write:
	mnt_drop_write_file(file);
A
Arne Jansen 已提交
4935 4936 4937
	return ret;
}

4938
static long btrfs_ioctl_qgroup_create(struct file *file, void __user *arg)
A
Arne Jansen 已提交
4939
{
4940 4941 4942
	struct inode *inode = file_inode(file);
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_root *root = BTRFS_I(inode)->root;
A
Arne Jansen 已提交
4943 4944 4945 4946 4947 4948 4949 4950
	struct btrfs_ioctl_qgroup_create_args *sa;
	struct btrfs_trans_handle *trans;
	int ret;
	int err;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

4951 4952 4953
	ret = mnt_want_write_file(file);
	if (ret)
		return ret;
A
Arne Jansen 已提交
4954 4955

	sa = memdup_user(arg, sizeof(*sa));
4956 4957 4958 4959
	if (IS_ERR(sa)) {
		ret = PTR_ERR(sa);
		goto drop_write;
	}
A
Arne Jansen 已提交
4960

M
Miao Xie 已提交
4961 4962 4963 4964 4965
	if (!sa->qgroupid) {
		ret = -EINVAL;
		goto out;
	}

A
Arne Jansen 已提交
4966 4967 4968 4969 4970 4971 4972 4973
	trans = btrfs_join_transaction(root);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto out;
	}

	/* FIXME: check if the IDs really exist */
	if (sa->create) {
4974
		ret = btrfs_create_qgroup(trans, fs_info, sa->qgroupid);
A
Arne Jansen 已提交
4975
	} else {
4976
		ret = btrfs_remove_qgroup(trans, fs_info, sa->qgroupid);
A
Arne Jansen 已提交
4977 4978
	}

4979
	err = btrfs_end_transaction(trans);
A
Arne Jansen 已提交
4980 4981 4982 4983 4984
	if (err && !ret)
		ret = err;

out:
	kfree(sa);
4985 4986
drop_write:
	mnt_drop_write_file(file);
A
Arne Jansen 已提交
4987 4988 4989
	return ret;
}

4990
static long btrfs_ioctl_qgroup_limit(struct file *file, void __user *arg)
A
Arne Jansen 已提交
4991
{
4992 4993 4994
	struct inode *inode = file_inode(file);
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_root *root = BTRFS_I(inode)->root;
A
Arne Jansen 已提交
4995 4996 4997 4998 4999 5000 5001 5002 5003
	struct btrfs_ioctl_qgroup_limit_args *sa;
	struct btrfs_trans_handle *trans;
	int ret;
	int err;
	u64 qgroupid;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

5004 5005 5006
	ret = mnt_want_write_file(file);
	if (ret)
		return ret;
A
Arne Jansen 已提交
5007 5008

	sa = memdup_user(arg, sizeof(*sa));
5009 5010 5011 5012
	if (IS_ERR(sa)) {
		ret = PTR_ERR(sa);
		goto drop_write;
	}
A
Arne Jansen 已提交
5013 5014 5015 5016 5017 5018 5019 5020 5021 5022 5023 5024 5025 5026

	trans = btrfs_join_transaction(root);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto out;
	}

	qgroupid = sa->qgroupid;
	if (!qgroupid) {
		/* take the current subvol as qgroup */
		qgroupid = root->root_key.objectid;
	}

	/* FIXME: check if the IDs really exist */
5027
	ret = btrfs_limit_qgroup(trans, fs_info, qgroupid, &sa->lim);
A
Arne Jansen 已提交
5028

5029
	err = btrfs_end_transaction(trans);
A
Arne Jansen 已提交
5030 5031 5032 5033 5034
	if (err && !ret)
		ret = err;

out:
	kfree(sa);
5035 5036
drop_write:
	mnt_drop_write_file(file);
A
Arne Jansen 已提交
5037 5038 5039
	return ret;
}

J
Jan Schmidt 已提交
5040 5041
static long btrfs_ioctl_quota_rescan(struct file *file, void __user *arg)
{
5042 5043
	struct inode *inode = file_inode(file);
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
J
Jan Schmidt 已提交
5044 5045 5046 5047 5048 5049 5050 5051 5052 5053 5054 5055 5056 5057 5058 5059 5060 5061 5062 5063 5064
	struct btrfs_ioctl_quota_rescan_args *qsa;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	ret = mnt_want_write_file(file);
	if (ret)
		return ret;

	qsa = memdup_user(arg, sizeof(*qsa));
	if (IS_ERR(qsa)) {
		ret = PTR_ERR(qsa);
		goto drop_write;
	}

	if (qsa->flags) {
		ret = -EINVAL;
		goto out;
	}

5065
	ret = btrfs_qgroup_rescan(fs_info);
J
Jan Schmidt 已提交
5066 5067 5068 5069 5070 5071 5072 5073 5074 5075

out:
	kfree(qsa);
drop_write:
	mnt_drop_write_file(file);
	return ret;
}

static long btrfs_ioctl_quota_rescan_status(struct file *file, void __user *arg)
{
5076 5077
	struct inode *inode = file_inode(file);
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
J
Jan Schmidt 已提交
5078 5079 5080 5081 5082 5083
	struct btrfs_ioctl_quota_rescan_args *qsa;
	int ret = 0;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

5084
	qsa = kzalloc(sizeof(*qsa), GFP_KERNEL);
J
Jan Schmidt 已提交
5085 5086 5087
	if (!qsa)
		return -ENOMEM;

5088
	if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN) {
J
Jan Schmidt 已提交
5089
		qsa->flags = 1;
5090
		qsa->progress = fs_info->qgroup_rescan_progress.objectid;
J
Jan Schmidt 已提交
5091 5092 5093 5094 5095 5096 5097 5098 5099
	}

	if (copy_to_user(arg, qsa, sizeof(*qsa)))
		ret = -EFAULT;

	kfree(qsa);
	return ret;
}

5100 5101
static long btrfs_ioctl_quota_rescan_wait(struct file *file, void __user *arg)
{
5102 5103
	struct inode *inode = file_inode(file);
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
5104 5105 5106 5107

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

5108
	return btrfs_qgroup_wait_for_completion(fs_info, true);
5109 5110
}

5111 5112
static long _btrfs_ioctl_set_received_subvol(struct file *file,
					    struct btrfs_ioctl_received_subvol_args *sa)
5113
{
A
Al Viro 已提交
5114
	struct inode *inode = file_inode(file);
5115
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
5116 5117 5118
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_root_item *root_item = &root->root_item;
	struct btrfs_trans_handle *trans;
5119
	struct timespec ct = current_time(inode);
5120
	int ret = 0;
5121
	int received_uuid_changed;
5122

5123 5124 5125
	if (!inode_owner_or_capable(inode))
		return -EPERM;

5126 5127 5128 5129
	ret = mnt_want_write_file(file);
	if (ret < 0)
		return ret;

5130
	down_write(&fs_info->subvol_sem);
5131 5132 5133 5134 5135 5136 5137 5138 5139 5140 5141

	if (btrfs_ino(inode) != BTRFS_FIRST_FREE_OBJECTID) {
		ret = -EINVAL;
		goto out;
	}

	if (btrfs_root_readonly(root)) {
		ret = -EROFS;
		goto out;
	}

5142 5143 5144 5145 5146
	/*
	 * 1 - root item
	 * 2 - uuid items (received uuid + subvol uuid)
	 */
	trans = btrfs_start_transaction(root, 3);
5147 5148 5149 5150 5151 5152 5153 5154 5155 5156
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		trans = NULL;
		goto out;
	}

	sa->rtransid = trans->transid;
	sa->rtime.sec = ct.tv_sec;
	sa->rtime.nsec = ct.tv_nsec;

5157 5158 5159 5160
	received_uuid_changed = memcmp(root_item->received_uuid, sa->uuid,
				       BTRFS_UUID_SIZE);
	if (received_uuid_changed &&
	    !btrfs_is_empty_uuid(root_item->received_uuid))
5161
		btrfs_uuid_tree_rem(trans, fs_info, root_item->received_uuid,
5162 5163
				    BTRFS_UUID_KEY_RECEIVED_SUBVOL,
				    root->root_key.objectid);
5164 5165 5166
	memcpy(root_item->received_uuid, sa->uuid, BTRFS_UUID_SIZE);
	btrfs_set_root_stransid(root_item, sa->stransid);
	btrfs_set_root_rtransid(root_item, sa->rtransid);
5167 5168 5169 5170
	btrfs_set_stack_timespec_sec(&root_item->stime, sa->stime.sec);
	btrfs_set_stack_timespec_nsec(&root_item->stime, sa->stime.nsec);
	btrfs_set_stack_timespec_sec(&root_item->rtime, sa->rtime.sec);
	btrfs_set_stack_timespec_nsec(&root_item->rtime, sa->rtime.nsec);
5171

5172
	ret = btrfs_update_root(trans, fs_info->tree_root,
5173 5174
				&root->root_key, &root->root_item);
	if (ret < 0) {
5175
		btrfs_end_transaction(trans);
5176
		goto out;
5177 5178
	}
	if (received_uuid_changed && !btrfs_is_empty_uuid(sa->uuid)) {
5179
		ret = btrfs_uuid_tree_add(trans, fs_info, sa->uuid,
5180 5181 5182
					  BTRFS_UUID_KEY_RECEIVED_SUBVOL,
					  root->root_key.objectid);
		if (ret < 0 && ret != -EEXIST) {
5183
			btrfs_abort_transaction(trans, ret);
5184
			goto out;
5185 5186
		}
	}
5187
	ret = btrfs_commit_transaction(trans);
5188
	if (ret < 0) {
5189
		btrfs_abort_transaction(trans, ret);
5190
		goto out;
5191 5192
	}

5193
out:
5194
	up_write(&fs_info->subvol_sem);
5195 5196 5197 5198 5199 5200 5201 5202 5203 5204 5205 5206 5207
	mnt_drop_write_file(file);
	return ret;
}

#ifdef CONFIG_64BIT
static long btrfs_ioctl_set_received_subvol_32(struct file *file,
						void __user *arg)
{
	struct btrfs_ioctl_received_subvol_args_32 *args32 = NULL;
	struct btrfs_ioctl_received_subvol_args *args64 = NULL;
	int ret = 0;

	args32 = memdup_user(arg, sizeof(*args32));
5208 5209
	if (IS_ERR(args32))
		return PTR_ERR(args32);
5210

5211
	args64 = kmalloc(sizeof(*args64), GFP_KERNEL);
5212 5213
	if (!args64) {
		ret = -ENOMEM;
5214 5215 5216 5217 5218 5219 5220 5221 5222 5223 5224 5225 5226 5227 5228 5229 5230 5231 5232 5233 5234 5235 5236 5237 5238 5239 5240 5241 5242 5243 5244 5245 5246 5247 5248 5249 5250 5251 5252 5253 5254 5255 5256
		goto out;
	}

	memcpy(args64->uuid, args32->uuid, BTRFS_UUID_SIZE);
	args64->stransid = args32->stransid;
	args64->rtransid = args32->rtransid;
	args64->stime.sec = args32->stime.sec;
	args64->stime.nsec = args32->stime.nsec;
	args64->rtime.sec = args32->rtime.sec;
	args64->rtime.nsec = args32->rtime.nsec;
	args64->flags = args32->flags;

	ret = _btrfs_ioctl_set_received_subvol(file, args64);
	if (ret)
		goto out;

	memcpy(args32->uuid, args64->uuid, BTRFS_UUID_SIZE);
	args32->stransid = args64->stransid;
	args32->rtransid = args64->rtransid;
	args32->stime.sec = args64->stime.sec;
	args32->stime.nsec = args64->stime.nsec;
	args32->rtime.sec = args64->rtime.sec;
	args32->rtime.nsec = args64->rtime.nsec;
	args32->flags = args64->flags;

	ret = copy_to_user(arg, args32, sizeof(*args32));
	if (ret)
		ret = -EFAULT;

out:
	kfree(args32);
	kfree(args64);
	return ret;
}
#endif

static long btrfs_ioctl_set_received_subvol(struct file *file,
					    void __user *arg)
{
	struct btrfs_ioctl_received_subvol_args *sa = NULL;
	int ret = 0;

	sa = memdup_user(arg, sizeof(*sa));
5257 5258
	if (IS_ERR(sa))
		return PTR_ERR(sa);
5259 5260 5261 5262 5263 5264

	ret = _btrfs_ioctl_set_received_subvol(file, sa);

	if (ret)
		goto out;

5265 5266 5267 5268 5269 5270 5271 5272 5273
	ret = copy_to_user(arg, sa, sizeof(*sa));
	if (ret)
		ret = -EFAULT;

out:
	kfree(sa);
	return ret;
}

5274 5275
static int btrfs_ioctl_get_fslabel(struct file *file, void __user *arg)
{
5276 5277
	struct inode *inode = file_inode(file);
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
5278
	size_t len;
5279
	int ret;
5280 5281
	char label[BTRFS_LABEL_SIZE];

5282 5283 5284
	spin_lock(&fs_info->super_lock);
	memcpy(label, fs_info->super_copy->label, BTRFS_LABEL_SIZE);
	spin_unlock(&fs_info->super_lock);
5285 5286

	len = strnlen(label, BTRFS_LABEL_SIZE);
5287 5288

	if (len == BTRFS_LABEL_SIZE) {
5289 5290 5291
		btrfs_warn(fs_info,
			   "label is too long, return the first %zu bytes",
			   --len);
5292 5293 5294 5295 5296 5297 5298
	}

	ret = copy_to_user(arg, label, len);

	return ret ? -EFAULT : 0;
}

5299 5300
static int btrfs_ioctl_set_fslabel(struct file *file, void __user *arg)
{
5301 5302 5303 5304
	struct inode *inode = file_inode(file);
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_super_block *super_block = fs_info->super_copy;
5305 5306 5307 5308 5309 5310 5311 5312 5313 5314 5315
	struct btrfs_trans_handle *trans;
	char label[BTRFS_LABEL_SIZE];
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (copy_from_user(label, arg, sizeof(label)))
		return -EFAULT;

	if (strnlen(label, BTRFS_LABEL_SIZE) == BTRFS_LABEL_SIZE) {
5316
		btrfs_err(fs_info,
J
Jeff Mahoney 已提交
5317 5318
			  "unable to set label with more than %d bytes",
			  BTRFS_LABEL_SIZE - 1);
5319 5320 5321 5322 5323 5324 5325 5326 5327 5328 5329 5330 5331
		return -EINVAL;
	}

	ret = mnt_want_write_file(file);
	if (ret)
		return ret;

	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto out_unlock;
	}

5332
	spin_lock(&fs_info->super_lock);
5333
	strcpy(super_block->label, label);
5334
	spin_unlock(&fs_info->super_lock);
5335
	ret = btrfs_commit_transaction(trans);
5336 5337 5338 5339 5340 5341

out_unlock:
	mnt_drop_write_file(file);
	return ret;
}

5342 5343 5344 5345 5346
#define INIT_FEATURE_FLAGS(suffix) \
	{ .compat_flags = BTRFS_FEATURE_COMPAT_##suffix, \
	  .compat_ro_flags = BTRFS_FEATURE_COMPAT_RO_##suffix, \
	  .incompat_flags = BTRFS_FEATURE_INCOMPAT_##suffix }

5347
int btrfs_ioctl_get_supported_features(void __user *arg)
5348
{
D
David Sterba 已提交
5349
	static const struct btrfs_ioctl_feature_flags features[3] = {
5350 5351 5352 5353 5354 5355 5356 5357 5358 5359 5360 5361 5362
		INIT_FEATURE_FLAGS(SUPP),
		INIT_FEATURE_FLAGS(SAFE_SET),
		INIT_FEATURE_FLAGS(SAFE_CLEAR)
	};

	if (copy_to_user(arg, &features, sizeof(features)))
		return -EFAULT;

	return 0;
}

static int btrfs_ioctl_get_features(struct file *file, void __user *arg)
{
5363 5364 5365
	struct inode *inode = file_inode(file);
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_super_block *super_block = fs_info->super_copy;
5366 5367 5368 5369 5370 5371 5372 5373 5374 5375 5376 5377
	struct btrfs_ioctl_feature_flags features;

	features.compat_flags = btrfs_super_compat_flags(super_block);
	features.compat_ro_flags = btrfs_super_compat_ro_flags(super_block);
	features.incompat_flags = btrfs_super_incompat_flags(super_block);

	if (copy_to_user(arg, &features, sizeof(features)))
		return -EFAULT;

	return 0;
}

5378
static int check_feature_bits(struct btrfs_fs_info *fs_info,
5379
			      enum btrfs_feature_set set,
5380 5381 5382
			      u64 change_mask, u64 flags, u64 supported_flags,
			      u64 safe_set, u64 safe_clear)
{
5383 5384
	const char *type = btrfs_feature_set_names[set];
	char *names;
5385 5386 5387 5388 5389 5390
	u64 disallowed, unsupported;
	u64 set_mask = flags & change_mask;
	u64 clear_mask = ~flags & change_mask;

	unsupported = set_mask & ~supported_flags;
	if (unsupported) {
5391 5392
		names = btrfs_printable_features(set, unsupported);
		if (names) {
5393 5394 5395
			btrfs_warn(fs_info,
				   "this kernel does not support the %s feature bit%s",
				   names, strchr(names, ',') ? "s" : "");
5396 5397
			kfree(names);
		} else
5398 5399 5400
			btrfs_warn(fs_info,
				   "this kernel does not support %s bits 0x%llx",
				   type, unsupported);
5401 5402 5403 5404 5405
		return -EOPNOTSUPP;
	}

	disallowed = set_mask & ~safe_set;
	if (disallowed) {
5406 5407
		names = btrfs_printable_features(set, disallowed);
		if (names) {
5408 5409 5410
			btrfs_warn(fs_info,
				   "can't set the %s feature bit%s while mounted",
				   names, strchr(names, ',') ? "s" : "");
5411 5412
			kfree(names);
		} else
5413 5414 5415
			btrfs_warn(fs_info,
				   "can't set %s bits 0x%llx while mounted",
				   type, disallowed);
5416 5417 5418 5419 5420
		return -EPERM;
	}

	disallowed = clear_mask & ~safe_clear;
	if (disallowed) {
5421 5422
		names = btrfs_printable_features(set, disallowed);
		if (names) {
5423 5424 5425
			btrfs_warn(fs_info,
				   "can't clear the %s feature bit%s while mounted",
				   names, strchr(names, ',') ? "s" : "");
5426 5427
			kfree(names);
		} else
5428 5429 5430
			btrfs_warn(fs_info,
				   "can't clear %s bits 0x%llx while mounted",
				   type, disallowed);
5431 5432 5433 5434 5435 5436
		return -EPERM;
	}

	return 0;
}

#define check_feature(fs_info, change_mask, flags, mask_base)	\
check_feature_bits(fs_info, FEAT_##mask_base, change_mask, flags,	\
		   BTRFS_FEATURE_ ## mask_base ## _SUPP,	\
		   BTRFS_FEATURE_ ## mask_base ## _SAFE_SET,	\
		   BTRFS_FEATURE_ ## mask_base ## _SAFE_CLEAR)

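/*
 * For example, check_feature(fs_info, mask, flags, COMPAT_RO) expands to a
 * check_feature_bits() call that validates the requested changes against the
 * BTRFS_FEATURE_COMPAT_RO_SUPP / _SAFE_SET / _SAFE_CLEAR masks.
 */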
static int btrfs_ioctl_set_features(struct file *file, void __user *arg)
{
5445 5446 5447 5448
	struct inode *inode = file_inode(file);
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_super_block *super_block = fs_info->super_copy;
5449 5450 5451 5452 5453 5454 5455 5456 5457 5458 5459 5460 5461 5462 5463 5464
	struct btrfs_ioctl_feature_flags flags[2];
	struct btrfs_trans_handle *trans;
	u64 newflags;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (copy_from_user(flags, arg, sizeof(flags)))
		return -EFAULT;

	/* Nothing to do */
	if (!flags[0].compat_flags && !flags[0].compat_ro_flags &&
	    !flags[0].incompat_flags)
		return 0;

5465
	ret = check_feature(fs_info, flags[0].compat_flags,
5466 5467 5468 5469
			    flags[1].compat_flags, COMPAT);
	if (ret)
		return ret;

5470
	ret = check_feature(fs_info, flags[0].compat_ro_flags,
5471 5472 5473 5474
			    flags[1].compat_ro_flags, COMPAT_RO);
	if (ret)
		return ret;

5475
	ret = check_feature(fs_info, flags[0].incompat_flags,
5476 5477 5478 5479
			    flags[1].incompat_flags, INCOMPAT);
	if (ret)
		return ret;

5480 5481 5482 5483
	ret = mnt_want_write_file(file);
	if (ret)
		return ret;

5484
	trans = btrfs_start_transaction(root, 0);
5485 5486 5487 5488
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto out_drop_write;
	}
5489

5490
	spin_lock(&fs_info->super_lock);
	newflags = btrfs_super_compat_flags(super_block);
	newflags |= flags[0].compat_flags & flags[1].compat_flags;
	newflags &= ~(flags[0].compat_flags & ~flags[1].compat_flags);
	btrfs_set_super_compat_flags(super_block, newflags);

	newflags = btrfs_super_compat_ro_flags(super_block);
	newflags |= flags[0].compat_ro_flags & flags[1].compat_ro_flags;
	newflags &= ~(flags[0].compat_ro_flags & ~flags[1].compat_ro_flags);
	btrfs_set_super_compat_ro_flags(super_block, newflags);

	newflags = btrfs_super_incompat_flags(super_block);
	newflags |= flags[0].incompat_flags & flags[1].incompat_flags;
	newflags &= ~(flags[0].incompat_flags & ~flags[1].incompat_flags);
	btrfs_set_super_incompat_flags(super_block, newflags);
	spin_unlock(&fs_info->super_lock);

	ret = btrfs_commit_transaction(trans);
out_drop_write:
	mnt_drop_write_file(file);

	return ret;
}

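/*
 * Main ioctl dispatcher: decode the command and hand it to the matching
 * helper above; anything unrecognized falls through to -ENOTTY.
 */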
long btrfs_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct inode *inode = file_inode(file);
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_root *root = BTRFS_I(inode)->root;
	void __user *argp = (void __user *)arg;

	switch (cmd) {
	case FS_IOC_GETFLAGS:
		return btrfs_ioctl_getflags(file, argp);
	case FS_IOC_SETFLAGS:
		return btrfs_ioctl_setflags(file, argp);
	case FS_IOC_GETVERSION:
		return btrfs_ioctl_getversion(file, argp);
	case FITRIM:
		return btrfs_ioctl_fitrim(file, argp);
	case BTRFS_IOC_SNAP_CREATE:
		return btrfs_ioctl_snap_create(file, argp, 0);
	case BTRFS_IOC_SNAP_CREATE_V2:
		return btrfs_ioctl_snap_create_v2(file, argp, 0);
	case BTRFS_IOC_SUBVOL_CREATE:
		return btrfs_ioctl_snap_create(file, argp, 1);
	case BTRFS_IOC_SUBVOL_CREATE_V2:
		return btrfs_ioctl_snap_create_v2(file, argp, 1);
	case BTRFS_IOC_SNAP_DESTROY:
		return btrfs_ioctl_snap_destroy(file, argp);
	case BTRFS_IOC_SUBVOL_GETFLAGS:
		return btrfs_ioctl_subvol_getflags(file, argp);
	case BTRFS_IOC_SUBVOL_SETFLAGS:
		return btrfs_ioctl_subvol_setflags(file, argp);
	case BTRFS_IOC_DEFAULT_SUBVOL:
		return btrfs_ioctl_default_subvol(file, argp);
	case BTRFS_IOC_DEFRAG:
		return btrfs_ioctl_defrag(file, NULL);
	case BTRFS_IOC_DEFRAG_RANGE:
		return btrfs_ioctl_defrag(file, argp);
	case BTRFS_IOC_RESIZE:
		return btrfs_ioctl_resize(file, argp);
	case BTRFS_IOC_ADD_DEV:
		return btrfs_ioctl_add_dev(fs_info, argp);
	case BTRFS_IOC_RM_DEV:
		return btrfs_ioctl_rm_dev(file, argp);
	case BTRFS_IOC_RM_DEV_V2:
		return btrfs_ioctl_rm_dev_v2(file, argp);
	case BTRFS_IOC_FS_INFO:
		return btrfs_ioctl_fs_info(fs_info, argp);
	case BTRFS_IOC_DEV_INFO:
		return btrfs_ioctl_dev_info(fs_info, argp);
	case BTRFS_IOC_BALANCE:
		return btrfs_ioctl_balance(file, NULL);
	case BTRFS_IOC_TRANS_START:
		return btrfs_ioctl_trans_start(file);
	case BTRFS_IOC_TRANS_END:
		return btrfs_ioctl_trans_end(file);
	case BTRFS_IOC_TREE_SEARCH:
		return btrfs_ioctl_tree_search(file, argp);
	case BTRFS_IOC_TREE_SEARCH_V2:
		return btrfs_ioctl_tree_search_v2(file, argp);
	case BTRFS_IOC_INO_LOOKUP:
		return btrfs_ioctl_ino_lookup(file, argp);
	case BTRFS_IOC_INO_PATHS:
		return btrfs_ioctl_ino_to_path(root, argp);
	case BTRFS_IOC_LOGICAL_INO:
		return btrfs_ioctl_logical_to_ino(fs_info, argp);
	case BTRFS_IOC_SPACE_INFO:
		return btrfs_ioctl_space_info(fs_info, argp);
	case BTRFS_IOC_SYNC: {
		int ret;

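		/*
		 * Kick off delalloc writeback on all roots first so the
		 * commit below covers buffered data, not just metadata.
		 */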
		ret = btrfs_start_delalloc_roots(fs_info, 0, -1);
		if (ret)
			return ret;
		ret = btrfs_sync_fs(inode->i_sb, 1);
		/*
		 * The transaction thread may want to do more work,
		 * namely it pokes the cleaner kthread that will start
		 * processing uncleaned subvols.
		 */
		wake_up_process(fs_info->transaction_kthread);
		return ret;
	}
	case BTRFS_IOC_START_SYNC:
		return btrfs_ioctl_start_sync(root, argp);
	case BTRFS_IOC_WAIT_SYNC:
		return btrfs_ioctl_wait_sync(fs_info, argp);
	case BTRFS_IOC_SCRUB:
		return btrfs_ioctl_scrub(file, argp);
	case BTRFS_IOC_SCRUB_CANCEL:
		return btrfs_ioctl_scrub_cancel(fs_info);
	case BTRFS_IOC_SCRUB_PROGRESS:
		return btrfs_ioctl_scrub_progress(fs_info, argp);
	case BTRFS_IOC_BALANCE_V2:
		return btrfs_ioctl_balance(file, argp);
	case BTRFS_IOC_BALANCE_CTL:
		return btrfs_ioctl_balance_ctl(fs_info, arg);
	case BTRFS_IOC_BALANCE_PROGRESS:
		return btrfs_ioctl_balance_progress(fs_info, argp);
	case BTRFS_IOC_SET_RECEIVED_SUBVOL:
		return btrfs_ioctl_set_received_subvol(file, argp);
#ifdef CONFIG_64BIT
	case BTRFS_IOC_SET_RECEIVED_SUBVOL_32:
		return btrfs_ioctl_set_received_subvol_32(file, argp);
#endif
	case BTRFS_IOC_SEND:
		return btrfs_ioctl_send(file, argp);
	case BTRFS_IOC_GET_DEV_STATS:
		return btrfs_ioctl_get_dev_stats(fs_info, argp);
	case BTRFS_IOC_QUOTA_CTL:
		return btrfs_ioctl_quota_ctl(file, argp);
	case BTRFS_IOC_QGROUP_ASSIGN:
		return btrfs_ioctl_qgroup_assign(file, argp);
	case BTRFS_IOC_QGROUP_CREATE:
		return btrfs_ioctl_qgroup_create(file, argp);
	case BTRFS_IOC_QGROUP_LIMIT:
		return btrfs_ioctl_qgroup_limit(file, argp);
	case BTRFS_IOC_QUOTA_RESCAN:
		return btrfs_ioctl_quota_rescan(file, argp);
	case BTRFS_IOC_QUOTA_RESCAN_STATUS:
		return btrfs_ioctl_quota_rescan_status(file, argp);
	case BTRFS_IOC_QUOTA_RESCAN_WAIT:
		return btrfs_ioctl_quota_rescan_wait(file, argp);
	case BTRFS_IOC_DEV_REPLACE:
		return btrfs_ioctl_dev_replace(fs_info, argp);
	case BTRFS_IOC_GET_FSLABEL:
		return btrfs_ioctl_get_fslabel(file, argp);
	case BTRFS_IOC_SET_FSLABEL:
		return btrfs_ioctl_set_fslabel(file, argp);
	case BTRFS_IOC_GET_SUPPORTED_FEATURES:
		return btrfs_ioctl_get_supported_features(argp);
	case BTRFS_IOC_GET_FEATURES:
		return btrfs_ioctl_get_features(file, argp);
	case BTRFS_IOC_SET_FEATURES:
		return btrfs_ioctl_set_features(file, argp);
	}

	return -ENOTTY;
}

#ifdef CONFIG_COMPAT
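/*
 * 32-bit compat entry point: remap the legacy FS_IOC32_* flag ioctls to
 * their native numbers and pass everything through to btrfs_ioctl().
 */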
long btrfs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	/*
	 * These all access 32-bit values anyway so no further
	 * handling is necessary.
	 */
	switch (cmd) {
	case FS_IOC32_GETFLAGS:
		cmd = FS_IOC_GETFLAGS;
		break;
	case FS_IOC32_SETFLAGS:
		cmd = FS_IOC_SETFLAGS;
		break;
	case FS_IOC32_GETVERSION:
		cmd = FS_IOC_GETVERSION;
		break;
	}

	return btrfs_ioctl(file, cmd, (unsigned long) compat_ptr(arg));
}
#endif