// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2016 Oracle.  All Rights Reserved.
 * Author: Darrick J. Wong <darrick.wong@oracle.com>
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_da_format.h"
#include "xfs_da_btree.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_inode_item.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_error.h"
#include "xfs_dir2.h"
#include "xfs_dir2_priv.h"
#include "xfs_ioctl.h"
#include "xfs_trace.h"
#include "xfs_log.h"
#include "xfs_icache.h"
#include "xfs_pnfs.h"
#include "xfs_btree.h"
#include "xfs_refcount_btree.h"
#include "xfs_refcount.h"
#include "xfs_bmap_btree.h"
#include "xfs_trans_space.h"
#include "xfs_bit.h"
#include "xfs_alloc.h"
#include "xfs_quota_defs.h"
#include "xfs_quota.h"
#include "xfs_reflink.h"
#include "xfs_iomap.h"
#include "xfs_rmap_btree.h"
#include "xfs_sb.h"
#include "xfs_ag_resv.h"

/*
 * Copy on Write of Shared Blocks
 *
 * XFS must preserve "the usual" file semantics even when two files share
 * the same physical blocks.  This means that a write to one file must not
 * alter the blocks in a different file; the way that we'll do that is
 * through the use of a copy-on-write mechanism.  At a high level, that
 * means that when we want to write to a shared block, we allocate a new
 * block, write the data to the new block, and if that succeeds we map the
 * new block into the file.
 *
 * XFS provides a "delayed allocation" mechanism that defers the allocation
 * of disk blocks to dirty-but-not-yet-mapped file blocks as long as
 * possible.  This reduces fragmentation by enabling the filesystem to ask
 * for bigger chunks less often, which is exactly what we want for CoW.
 *
 * The delalloc mechanism begins when the kernel wants to make a block
 * writable (write_begin or page_mkwrite).  If the offset is not mapped, we
 * create a delalloc mapping, which is a regular in-core extent, but without
 * a real startblock.  (For delalloc mappings, the startblock encodes both
 * a flag that this is a delalloc mapping, and a worst-case estimate of how
 * many blocks might be required to put the mapping into the BMBT.)  delalloc
 * mappings are a reservation against the free space in the filesystem;
 * adjacent mappings can also be combined into fewer larger mappings.
 *
 * As an optimization, the CoW extent size hint (cowextsz) creates
 * outsized aligned delalloc reservations in the hope of landing nearby
 * out-of-order CoW writes in a single extent on disk, thereby reducing
 * fragmentation and improving future performance.
 *
 * D: --RRRRRRSSSRRRRRRRR--- (data fork)
 * C: ------DDDDDDD--------- (CoW fork)
 *
 * When dirty pages are being written out (typically in writepage), the
 * delalloc reservations are converted into unwritten mappings by
 * allocating blocks and replacing the delalloc mapping with real ones.
 * A delalloc mapping can be replaced by several unwritten ones if the
 * free space is fragmented.
 *
 * D: --RRRRRRSSSRRRRRRRR---
 * C: ------UUUUUUU---------
 *
 * We want to adapt the delalloc mechanism for copy-on-write, since the
 * write paths are similar.  The first two steps (creating the reservation
 * and allocating the blocks) are exactly the same as delalloc except that
 * the mappings must be stored in a separate CoW fork because we do not want
 * to disturb the mapping in the data fork until we're sure that the write
 * succeeded.  IO completion in this case is the process of removing the old
 * mapping from the data fork and moving the new mapping from the CoW fork to
 * the data fork.  This will be discussed shortly.
 *
 * For now, unaligned directio writes will be bounced back to the page cache.
 * Block-aligned directio writes will use the same mechanism as buffered
 * writes.
 *
 * Just prior to submitting the actual disk write requests, we convert
 * the extents representing the range of the file actually being written
 * (as opposed to extra pieces created for the cowextsize hint) to real
 * extents.  This will become important in the next step:
 *
 * D: --RRRRRRSSSRRRRRRRR---
 * C: ------UUrrUUU---------
 *
 * CoW remapping must be done after the data block write completes,
 * because we don't want to destroy the old data fork map until we're sure
 * the new block has been written.  Since the new mappings are kept in a
 * separate fork, we can simply iterate these mappings to find the ones
 * that cover the file blocks that we just CoW'd.  For each extent, simply
 * unmap the corresponding range in the data fork, map the new range into
 * the data fork, and remove the extent from the CoW fork.  Because of
 * the presence of the cowextsize hint, however, we must be careful
 * only to remap the blocks that we've actually written out --  we must
 * never remap delalloc reservations nor CoW staging blocks that have
 * yet to be written.  This corresponds exactly to the real extents in
 * the CoW fork:
 *
 * D: --RRRRRRrrSRRRRRRRR---
 * C: ------UU--UUU---------
 *
 * Since the remapping operation can be applied to an arbitrary file
 * range, we record the need for the remap step as a flag in the ioend
 * instead of declaring a new IO type.  This is required for direct io
 * because we only have ioend for the whole dio, and we have to be able to
 * remember the presence of unwritten blocks and CoW blocks with a single
 * ioend structure.  Better yet, the more ground we can cover with one
 * ioend, the better.
 */
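
/*
 * Illustrative userspace sketch (not part of the kernel code in this file):
 * the CoW machinery described above is typically exercised by cloning a file
 * with the FICLONE ioctl and then overwriting part of one copy.  The write to
 * the shared range is staged in the CoW fork and remapped into the data fork
 * once the new blocks are on disk.  Error handling is omitted and a
 * reflink-enabled XFS filesystem is assumed.
 *
 *	#include <sys/ioctl.h>
 *	#include <linux/fs.h>
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int src = open("a.dat", O_RDONLY);
 *	int dst = open("b.dat", O_RDWR | O_CREAT, 0644);
 *	char buf[4096] = { 0 };
 *
 *	ioctl(dst, FICLONE, src);		(b.dat now shares a.dat's blocks)
 *	pwrite(dst, buf, sizeof(buf), 0);	(dirty a shared block; CoW is used)
 *	fsync(dst);				(writeback allocates and remaps)
 */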

/*
 * Given an AG extent, find the lowest-numbered run of shared blocks
 * within that range and return the range in fbno/flen.  If
 * find_end_of_shared is true, return the longest contiguous extent of
 * shared blocks.  If there are no shared extents, fbno and flen will
 * be set to NULLAGBLOCK and 0, respectively.
 */
int
xfs_reflink_find_shared(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	xfs_agnumber_t		agno,
	xfs_agblock_t		agbno,
	xfs_extlen_t		aglen,
	xfs_agblock_t		*fbno,
	xfs_extlen_t		*flen,
	bool			find_end_of_shared)
{
	struct xfs_buf		*agbp;
	struct xfs_btree_cur	*cur;
	int			error;

	error = xfs_alloc_read_agf(mp, tp, agno, 0, &agbp);
	if (error)
		return error;
	if (!agbp)
		return -ENOMEM;

	cur = xfs_refcountbt_init_cursor(mp, tp, agbp, agno, NULL);

	error = xfs_refcount_find_shared(cur, agbno, aglen, fbno, flen,
			find_end_of_shared);

	xfs_btree_del_cursor(cur, error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);

	xfs_trans_brelse(tp, agbp);
	return error;
}

/*
 * Trim the mapping to the next block where there's a change in the
 * shared/unshared status.  More specifically, this means that we
 * find the lowest-numbered extent of shared blocks that coincides with
 * the given block mapping.  If the shared extent overlaps the start of
 * the mapping, trim the mapping to the end of the shared extent.  If
 * the shared region intersects the mapping, trim the mapping to the
 * start of the shared extent.  If there are no shared regions that
 * overlap, just return the original extent.
 */
int
xfs_reflink_trim_around_shared(
	struct xfs_inode	*ip,
	struct xfs_bmbt_irec	*irec,
	bool			*shared,
	bool			*trimmed)
{
	xfs_agnumber_t		agno;
	xfs_agblock_t		agbno;
	xfs_extlen_t		aglen;
	xfs_agblock_t		fbno;
	xfs_extlen_t		flen;
	int			error = 0;

	/* Holes, unwritten, and delalloc extents cannot be shared */
	if (!xfs_is_reflink_inode(ip) || !xfs_bmap_is_real_extent(irec)) {
		*shared = false;
		return 0;
	}

	trace_xfs_reflink_trim_around_shared(ip, irec);

	agno = XFS_FSB_TO_AGNO(ip->i_mount, irec->br_startblock);
	agbno = XFS_FSB_TO_AGBNO(ip->i_mount, irec->br_startblock);
	aglen = irec->br_blockcount;

	error = xfs_reflink_find_shared(ip->i_mount, NULL, agno, agbno,
			aglen, &fbno, &flen, true);
	if (error)
		return error;

	*shared = *trimmed = false;
	if (fbno == NULLAGBLOCK) {
		/* No shared blocks at all. */
		return 0;
	} else if (fbno == agbno) {
		/*
		 * The start of this extent is shared.  Truncate the
		 * mapping at the end of the shared region so that a
		 * subsequent iteration starts at the start of the
		 * unshared region.
		 */
		irec->br_blockcount = flen;
		*shared = true;
		if (flen != aglen)
			*trimmed = true;
		return 0;
	} else {
		/*
		 * There's a shared extent midway through this extent.
		 * Truncate the mapping at the start of the shared
		 * extent so that a subsequent iteration starts at the
		 * start of the shared region.
		 */
		irec->br_blockcount = fbno - agbno;
		*trimmed = true;
		return 0;
	}
}

/*
 * Trim the passed-in imap to the next shared/unshared extent boundary, and
 * if imap->br_startoff points to a shared extent, reserve space for it in the
 * COW fork.  In this case *shared is set to true, else to false.
 *
 * Note that imap will always contain the block numbers for the existing blocks
 * in the data fork, as the upper layers need them for read-modify-write
 * operations.
 */
int
xfs_reflink_reserve_cow(
	struct xfs_inode	*ip,
	struct xfs_bmbt_irec	*imap,
	bool			*shared)
{
	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, XFS_COW_FORK);
	struct xfs_bmbt_irec	got;
	int			error = 0;
	bool			eof = false, trimmed;
	struct xfs_iext_cursor	icur;

	/*
	 * Search the COW fork extent list first.  This serves two purposes:
	 * first this implements the speculative preallocation using cowextsize,
	 * so that we also unshare blocks adjacent to shared blocks instead
	 * of just the shared blocks themselves.  Second, the lookup in the
	 * extent list is generally faster than going out to the shared extent
	 * tree.
	 */

	if (!xfs_iext_lookup_extent(ip, ifp, imap->br_startoff, &icur, &got))
		eof = true;
	if (!eof && got.br_startoff <= imap->br_startoff) {
		trace_xfs_reflink_cow_found(ip, imap);
		xfs_trim_extent(imap, got.br_startoff, got.br_blockcount);

		*shared = true;
		return 0;
	}

	/* Trim the mapping to the nearest shared extent boundary. */
	error = xfs_reflink_trim_around_shared(ip, imap, shared, &trimmed);
	if (error)
		return error;

	/* Not shared?  Just report the (potentially capped) extent. */
	if (!*shared)
		return 0;

	/*
	 * Fork all the shared blocks from our write offset until the end of
	 * the extent.
	 */
	error = xfs_qm_dqattach_locked(ip, false);
	if (error)
		return error;

	error = xfs_bmapi_reserve_delalloc(ip, XFS_COW_FORK, imap->br_startoff,
			imap->br_blockcount, 0, &got, &icur, eof);
	if (error == -ENOSPC || error == -EDQUOT)
		trace_xfs_reflink_cow_enospc(ip, imap);
	if (error)
		return error;

	trace_xfs_reflink_cow_alloc(ip, &got);
	return 0;
}

/* Convert part of an unwritten CoW extent to a real one. */
STATIC int
xfs_reflink_convert_cow_extent(
	struct xfs_inode		*ip,
	struct xfs_bmbt_irec		*imap,
	xfs_fileoff_t			offset_fsb,
	xfs_filblks_t			count_fsb,
	struct xfs_defer_ops		*dfops)
{
	xfs_fsblock_t			first_block = NULLFSBLOCK;
	int				nimaps = 1;

	if (imap->br_state == XFS_EXT_NORM)
		return 0;

	xfs_trim_extent(imap, offset_fsb, count_fsb);
	trace_xfs_reflink_convert_cow(ip, imap);
	if (imap->br_blockcount == 0)
		return 0;
	return xfs_bmapi_write(NULL, ip, imap->br_startoff, imap->br_blockcount,
			XFS_BMAPI_COWFORK | XFS_BMAPI_CONVERT, &first_block,
			0, imap, &nimaps, dfops);
}

/* Convert all of the unwritten CoW extents in a file's range to real ones. */
int
xfs_reflink_convert_cow(
	struct xfs_inode	*ip,
	xfs_off_t		offset,
	xfs_off_t		count)
{
	struct xfs_mount	*mp = ip->i_mount;
	xfs_fileoff_t		offset_fsb = XFS_B_TO_FSBT(mp, offset);
	xfs_fileoff_t		end_fsb = XFS_B_TO_FSB(mp, offset + count);
	xfs_filblks_t		count_fsb = end_fsb - offset_fsb;
	struct xfs_bmbt_irec	imap;
	struct xfs_defer_ops	dfops;
	xfs_fsblock_t		first_block = NULLFSBLOCK;
	int			nimaps = 1, error = 0;

	ASSERT(count != 0);

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	error = xfs_bmapi_write(NULL, ip, offset_fsb, count_fsb,
			XFS_BMAPI_COWFORK | XFS_BMAPI_CONVERT |
			XFS_BMAPI_CONVERT_ONLY, &first_block, 0, &imap, &nimaps,
			&dfops);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;
}

/* Allocate all CoW reservations covering a range of blocks in a file. */
int
xfs_reflink_allocate_cow(
	struct xfs_inode	*ip,
	struct xfs_bmbt_irec	*imap,
	bool			*shared,
	uint			*lockmode)
{
	struct xfs_mount	*mp = ip->i_mount;
	xfs_fileoff_t		offset_fsb = imap->br_startoff;
	xfs_filblks_t		count_fsb = imap->br_blockcount;
	struct xfs_bmbt_irec	got;
	struct xfs_defer_ops	dfops;
	struct xfs_trans	*tp = NULL;
	xfs_fsblock_t		first_block;
	int			nimaps, error = 0;
	bool			trimmed;
	xfs_filblks_t		resaligned;
	xfs_extlen_t		resblks = 0;
	struct xfs_iext_cursor	icur;

retry:
	ASSERT(xfs_is_reflink_inode(ip));
	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));

	/*
	 * Even if the extent is not shared we might have a preallocation for
	 * it in the COW fork.  If so use it.
	 */
	if (xfs_iext_lookup_extent(ip, ip->i_cowfp, offset_fsb, &icur, &got) &&
	    got.br_startoff <= offset_fsb) {
		*shared = true;

		/* If we have a real allocation in the COW fork we're done. */
		if (!isnullstartblock(got.br_startblock)) {
			xfs_trim_extent(&got, offset_fsb, count_fsb);
			*imap = got;
			goto convert;
		}

		xfs_trim_extent(imap, got.br_startoff, got.br_blockcount);
	} else {
		error = xfs_reflink_trim_around_shared(ip, imap, shared, &trimmed);
		if (error || !*shared)
			goto out;
	}

	if (!tp) {
		resaligned = xfs_aligned_fsb_count(imap->br_startoff,
			imap->br_blockcount, xfs_get_cowextsz_hint(ip));
		resblks = XFS_DIOSTRAT_SPACE_RES(mp, resaligned);

		xfs_iunlock(ip, *lockmode);
		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks, 0, 0, &tp);
		*lockmode = XFS_ILOCK_EXCL;
		xfs_ilock(ip, *lockmode);

		if (error)
			return error;

		error = xfs_qm_dqattach_locked(ip, false);
		if (error)
			goto out;
		goto retry;
	}

	error = xfs_trans_reserve_quota_nblks(tp, ip, resblks, 0,
			XFS_QMOPT_RES_REGBLKS);
	if (error)
		goto out;

	xfs_trans_ijoin(tp, ip, 0);

	xfs_defer_init(&dfops, &first_block);
	nimaps = 1;

	/* Allocate the entire reservation as unwritten blocks. */
	error = xfs_bmapi_write(tp, ip, imap->br_startoff, imap->br_blockcount,
			XFS_BMAPI_COWFORK | XFS_BMAPI_PREALLOC, &first_block,
			resblks, imap, &nimaps, &dfops);
	if (error)
		goto out_bmap_cancel;

	xfs_inode_set_cowblocks_tag(ip);

	/* Finish up. */
	error = xfs_defer_finish(&tp, &dfops);
	if (error)
		goto out_bmap_cancel;

	error = xfs_trans_commit(tp);
	if (error)
		return error;

	/*
	 * Allocation succeeded but the requested range was not even partially
	 * satisfied?  Bail out!
	 */
	if (nimaps == 0)
		return -ENOSPC;
convert:
	return xfs_reflink_convert_cow_extent(ip, imap, offset_fsb, count_fsb,
			&dfops);
out_bmap_cancel:
	xfs_defer_cancel(&dfops);
	xfs_trans_unreserve_quota_nblks(tp, ip, (long)resblks, 0,
			XFS_QMOPT_RES_REGBLKS);
out:
	if (tp)
		xfs_trans_cancel(tp);
	return error;
}

/*
 * Find the CoW reservation for a given byte offset of a file.
 */
bool
xfs_reflink_find_cow_mapping(
	struct xfs_inode		*ip,
	xfs_off_t			offset,
	struct xfs_bmbt_irec		*imap)
{
	struct xfs_ifork		*ifp = XFS_IFORK_PTR(ip, XFS_COW_FORK);
	xfs_fileoff_t			offset_fsb;
	struct xfs_bmbt_irec		got;
	struct xfs_iext_cursor		icur;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL | XFS_ILOCK_SHARED));

	if (!xfs_is_reflink_inode(ip))
		return false;
	offset_fsb = XFS_B_TO_FSBT(ip->i_mount, offset);
	if (!xfs_iext_lookup_extent(ip, ifp, offset_fsb, &icur, &got))
		return false;
	if (got.br_startoff > offset_fsb)
		return false;

	trace_xfs_reflink_find_cow_mapping(ip, offset, 1, XFS_IO_OVERWRITE,
			&got);
	*imap = got;
	return true;
}

/*
 * Trim an extent to end at the next CoW reservation past offset_fsb.
 */
void
xfs_reflink_trim_irec_to_next_cow(
	struct xfs_inode		*ip,
	xfs_fileoff_t			offset_fsb,
	struct xfs_bmbt_irec		*imap)
{
	struct xfs_ifork		*ifp = XFS_IFORK_PTR(ip, XFS_COW_FORK);
	struct xfs_bmbt_irec		got;
	struct xfs_iext_cursor		icur;

	if (!xfs_is_reflink_inode(ip))
		return;

	/* Find the extent in the CoW fork. */
	if (!xfs_iext_lookup_extent(ip, ifp, offset_fsb, &icur, &got))
		return;

	/* This is the extent before; try sliding up one. */
	if (got.br_startoff < offset_fsb) {
		if (!xfs_iext_next_extent(ifp, &icur, &got))
			return;
	}

	if (got.br_startoff >= imap->br_startoff + imap->br_blockcount)
		return;

	imap->br_blockcount = got.br_startoff - imap->br_startoff;
	trace_xfs_reflink_trim_irec(ip, imap);
}

/*
 * Cancel CoW reservations for some block range of an inode.
 *
 * If cancel_real is true this function cancels all COW fork extents for the
 * inode; if cancel_real is false, real extents are not cleared.
 *
 * Caller must have already joined the inode to the current transaction. The
 * inode will be joined to the transaction returned to the caller.
 */
int
xfs_reflink_cancel_cow_blocks(
	struct xfs_inode		*ip,
	struct xfs_trans		**tpp,
	xfs_fileoff_t			offset_fsb,
	xfs_fileoff_t			end_fsb,
	bool				cancel_real)
{
	struct xfs_ifork		*ifp = XFS_IFORK_PTR(ip, XFS_COW_FORK);
	struct xfs_bmbt_irec		got, del;
	struct xfs_iext_cursor		icur;
	xfs_fsblock_t			firstfsb;
	struct xfs_defer_ops		dfops;
	int				error = 0;

	if (!xfs_is_reflink_inode(ip))
		return 0;
	if (!xfs_iext_lookup_extent_before(ip, ifp, &end_fsb, &icur, &got))
		return 0;

	/* Walk backwards until we're out of the I/O range... */
	while (got.br_startoff + got.br_blockcount > offset_fsb) {
		del = got;
		xfs_trim_extent(&del, offset_fsb, end_fsb - offset_fsb);

		/* Extent delete may have bumped ext forward */
		if (!del.br_blockcount) {
			xfs_iext_prev(ifp, &icur);
			goto next_extent;
		}

		trace_xfs_reflink_cancel_cow(ip, &del);

		if (isnullstartblock(del.br_startblock)) {
			error = xfs_bmap_del_extent_delay(ip, XFS_COW_FORK,
					&icur, &got, &del);
			if (error)
				break;
		} else if (del.br_state == XFS_EXT_UNWRITTEN || cancel_real) {
			xfs_defer_init(&dfops, &firstfsb);

			/* Free the CoW orphan record. */
			error = xfs_refcount_free_cow_extent(ip->i_mount,
					&dfops, del.br_startblock,
					del.br_blockcount);
			if (error)
				break;

			xfs_bmap_add_free(ip->i_mount, &dfops,
					del.br_startblock, del.br_blockcount,
					NULL);

			/* Roll the transaction */
			xfs_defer_ijoin(&dfops, ip);
			error = xfs_defer_finish(tpp, &dfops);
			if (error) {
				xfs_defer_cancel(&dfops);
				break;
			}

			/* Remove the mapping from the CoW fork. */
			xfs_bmap_del_extent_cow(ip, &icur, &got, &del);

			/* Remove the quota reservation */
			error = xfs_trans_reserve_quota_nblks(NULL, ip,
					-(long)del.br_blockcount, 0,
					XFS_QMOPT_RES_REGBLKS);
			if (error)
				break;
		} else {
			/* Didn't do anything, push cursor back. */
			xfs_iext_prev(ifp, &icur);
		}
next_extent:
		if (!xfs_iext_get_extent(ifp, &icur, &got))
			break;
	}

	/* clear tag if cow fork is emptied */
	if (!ifp->if_bytes)
		xfs_inode_clear_cowblocks_tag(ip);

	return error;
}

/*
 * Cancel CoW reservations for some byte range of an inode.
 *
 * If cancel_real is true this function cancels all COW fork extents for the
 * inode; if cancel_real is false, real extents are not cleared.
 */
int
xfs_reflink_cancel_cow_range(
	struct xfs_inode	*ip,
	xfs_off_t		offset,
	xfs_off_t		count,
	bool			cancel_real)
{
	struct xfs_trans	*tp;
	xfs_fileoff_t		offset_fsb;
	xfs_fileoff_t		end_fsb;
	int			error;

	trace_xfs_reflink_cancel_cow_range(ip, offset, count);
	ASSERT(xfs_is_reflink_inode(ip));

	offset_fsb = XFS_B_TO_FSBT(ip->i_mount, offset);
	if (count == NULLFILEOFF)
		end_fsb = NULLFILEOFF;
	else
		end_fsb = XFS_B_TO_FSB(ip->i_mount, offset + count);

	/* Start a rolling transaction to remove the mappings */
	error = xfs_trans_alloc(ip->i_mount, &M_RES(ip->i_mount)->tr_write,
			0, 0, XFS_TRANS_NOFS, &tp);
	if (error)
		goto out;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, 0);

	/* Scrape out the old CoW reservations */
	error = xfs_reflink_cancel_cow_blocks(ip, &tp, offset_fsb, end_fsb,
			cancel_real);
	if (error)
		goto out_cancel;

	error = xfs_trans_commit(tp);

	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;

out_cancel:
	xfs_trans_cancel(tp);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
out:
	trace_xfs_reflink_cancel_cow_range_error(ip, error, _RET_IP_);
	return error;
}

/*
 * Remap parts of a file's data fork after a successful CoW.
 */
int
xfs_reflink_end_cow(
	struct xfs_inode		*ip,
	xfs_off_t			offset,
	xfs_off_t			count)
{
	struct xfs_ifork		*ifp = XFS_IFORK_PTR(ip, XFS_COW_FORK);
	struct xfs_bmbt_irec		got, del;
	struct xfs_trans		*tp;
	xfs_fileoff_t			offset_fsb;
	xfs_fileoff_t			end_fsb;
	xfs_fsblock_t			firstfsb;
	struct xfs_defer_ops		dfops;
	int				error;
	unsigned int			resblks;
	xfs_filblks_t			rlen;
	struct xfs_iext_cursor		icur;

	trace_xfs_reflink_end_cow(ip, offset, count);

	/* No COW extents?  That's easy! */
	if (ifp->if_bytes == 0)
		return 0;

	offset_fsb = XFS_B_TO_FSBT(ip->i_mount, offset);
	end_fsb = XFS_B_TO_FSB(ip->i_mount, offset + count);

	/*
	 * Start a rolling transaction to switch the mappings.  We're
	 * unlikely ever to have to remap 16T worth of single-block
	 * extents, so just cap the worst case extent count to 2^32-1.
	 * Stick a warning in just in case, and avoid 64-bit division.
	 */
	BUILD_BUG_ON(MAX_RW_COUNT > UINT_MAX);
	if (end_fsb - offset_fsb > UINT_MAX) {
		error = -EFSCORRUPTED;
		xfs_force_shutdown(ip->i_mount, SHUTDOWN_CORRUPT_INCORE);
		ASSERT(0);
		goto out;
	}
	resblks = XFS_NEXTENTADD_SPACE_RES(ip->i_mount,
			(unsigned int)(end_fsb - offset_fsb),
			XFS_DATA_FORK);
	error = xfs_trans_alloc(ip->i_mount, &M_RES(ip->i_mount)->tr_write,
			resblks, 0, XFS_TRANS_RESERVE | XFS_TRANS_NOFS, &tp);
	if (error)
		goto out;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, 0);

	/*
	 * In case of racing, overlapping AIO writes, no COW extents might be
	 * left by the time I/O completes for the loser of the race.  In that
	 * case we are done.
	 */
	if (!xfs_iext_lookup_extent_before(ip, ifp, &end_fsb, &icur, &got))
		goto out_cancel;

	/* Walk backwards until we're out of the I/O range... */
	while (got.br_startoff + got.br_blockcount > offset_fsb) {
		del = got;
		xfs_trim_extent(&del, offset_fsb, end_fsb - offset_fsb);

		/* Extent delete may have bumped ext forward */
		if (!del.br_blockcount)
			goto prev_extent;

		ASSERT(!isnullstartblock(got.br_startblock));

		/*
		 * Don't remap unwritten extents; these are
		 * speculatively preallocated CoW extents that have been
		 * allocated but have not yet been involved in a write.
		 */
		if (got.br_state == XFS_EXT_UNWRITTEN)
			goto prev_extent;

		/* Unmap the old blocks in the data fork. */
		xfs_defer_init(&dfops, &firstfsb);
		rlen = del.br_blockcount;
		error = __xfs_bunmapi(tp, ip, del.br_startoff, &rlen, 0, 1,
				&firstfsb, &dfops);
		if (error)
			goto out_defer;

		/* Trim the extent to whatever got unmapped. */
		if (rlen) {
			xfs_trim_extent(&del, del.br_startoff + rlen,
				del.br_blockcount - rlen);
		}
		trace_xfs_reflink_cow_remap(ip, &del);

		/* Free the CoW orphan record. */
		error = xfs_refcount_free_cow_extent(tp->t_mountp, &dfops,
				del.br_startblock, del.br_blockcount);
		if (error)
			goto out_defer;

		/* Map the new blocks into the data fork. */
		error = xfs_bmap_map_extent(tp->t_mountp, &dfops, ip, &del);
		if (error)
			goto out_defer;

		/* Charge this new data fork mapping to the on-disk quota. */
		xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_DELBCOUNT,
				(long)del.br_blockcount);

		/* Remove the mapping from the CoW fork. */
		xfs_bmap_del_extent_cow(ip, &icur, &got, &del);

		xfs_defer_ijoin(&dfops, ip);
		error = xfs_defer_finish(&tp, &dfops);
		if (error)
			goto out_defer;
		if (!xfs_iext_get_extent(ifp, &icur, &got))
			break;
		continue;
prev_extent:
		if (!xfs_iext_prev_extent(ifp, &icur, &got))
			break;
	}

	error = xfs_trans_commit(tp);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	if (error)
		goto out;
	return 0;

out_defer:
	xfs_defer_cancel(&dfops);
out_cancel:
	xfs_trans_cancel(tp);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
out:
	trace_xfs_reflink_end_cow_error(ip, error, _RET_IP_);
	return error;
}

/*
 * Free leftover CoW reservations that didn't get cleaned out.
 */
int
xfs_reflink_recover_cow(
	struct xfs_mount	*mp)
{
	xfs_agnumber_t		agno;
	int			error = 0;

	if (!xfs_sb_version_hasreflink(&mp->m_sb))
		return 0;

	for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
		error = xfs_refcount_recover_cow_leftovers(mp, agno);
		if (error)
			break;
	}

	return error;
}

/*
 * Reflinking (Block) Ranges of Two Files Together
 *
 * First, ensure that the reflink flag is set on both inodes.  The flag is an
 * optimization to avoid unnecessary refcount btree lookups in the write path.
 *
 * Now we can iteratively remap the range of extents (and holes) in src to the
 * corresponding ranges in dest.  Let drange and srange denote the ranges of
 * logical blocks in dest and src touched by the reflink operation.
 *
 * While the length of drange is greater than zero,
 *    - Read src's bmbt at the start of srange ("imap")
 *    - If imap doesn't exist, make imap appear to start at the end of srange
 *      with zero length.
 *    - If imap starts before srange, advance imap to start at srange.
 *    - If imap goes beyond srange, truncate imap to end at the end of srange.
 *    - Punch (imap start - srange start + imap len) blocks from dest at
 *      offset (drange start).
 *    - If imap points to a real range of pblks,
 *         > Increase the refcount of the imap's pblks
 *         > Map imap's pblks into dest at the offset
 *           (drange start + imap start - srange start)
 *    - Advance drange and srange by (imap start - srange start + imap len)
 *
 * Finally, if the reflink made dest longer, update both the in-core and
 * on-disk file sizes.
 *
 * ASCII Art Demonstration:
 *
 * Let's say we want to reflink this source file:
 *
 * ----SSSSSSS-SSSSS----SSSSSS (src file)
 *   <-------------------->
 *
 * into this destination file:
 *
 * --DDDDDDDDDDDDDDDDDDD--DDD (dest file)
 *        <-------------------->
 * '-' means a hole, and 'S' and 'D' are written blocks in the src and dest.
 * Observe that the range has different logical offsets in either file.
 *
 * Consider that the first extent in the source file doesn't line up with our
 * reflink range.  Unmapping and remapping are separate operations, so we can
 * unmap more blocks from the destination file than we remap.
 *
 * ----SSSSSSS-SSSSS----SSSSSS
 *   <------->
 * --DDDDD---------DDDDD--DDD
 *        <------->
 *
 * Now remap the source extent into the destination file:
 *
 * ----SSSSSSS-SSSSS----SSSSSS
 *   <------->
 * --DDDDD--SSSSSSSDDDDD--DDD
 *        <------->
 *
 * Do likewise with the second hole and extent in our range.  Holes in the
 * unmap range don't affect our operation.
 *
 * ----SSSSSSS-SSSSS----SSSSSS
 *            <---->
 * --DDDDD--SSSSSSS-SSSSS-DDD
 *                 <---->
 *
 * Finally, unmap and remap part of the third extent.  This will increase the
 * size of the destination file.
 *
 * ----SSSSSSS-SSSSS----SSSSSS
 *                  <----->
 * --DDDDD--SSSSSSS-SSSSS----SSS
 *                       <----->
 *
 * Once we update the destination file's i_size, we're done.
 */
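
/*
 * Illustrative userspace sketch (not part of the kernel code in this file):
 * the remap described above is normally driven through the FICLONERANGE
 * ioctl (or FICLONE for whole files), which reaches xfs_reflink_remap_range()
 * via the VFS clone_file_range path.  Offsets and length must be block
 * aligned, and error handling is omitted.
 *
 *	#include <sys/ioctl.h>
 *	#include <linux/fs.h>
 *
 *	struct file_clone_range fcr = {
 *		.src_fd		= src_fd,
 *		.src_offset	= 0,
 *		.src_length	= 1048576,	(0 means "to the end of src")
 *		.dest_offset	= 0,
 *	};
 *	ioctl(dest_fd, FICLONERANGE, &fcr);
 */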

/*
 * Ensure the reflink bit is set in both inodes.
 */
STATIC int
xfs_reflink_set_inode_flag(
	struct xfs_inode	*src,
	struct xfs_inode	*dest)
{
	struct xfs_mount	*mp = src->i_mount;
	int			error;
	struct xfs_trans	*tp;

	if (xfs_is_reflink_inode(src) && xfs_is_reflink_inode(dest))
		return 0;

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ichange, 0, 0, 0, &tp);
	if (error)
		goto out_error;

	/* Lock both files against IO */
	if (src->i_ino == dest->i_ino)
		xfs_ilock(src, XFS_ILOCK_EXCL);
	else
		xfs_lock_two_inodes(src, XFS_ILOCK_EXCL, dest, XFS_ILOCK_EXCL);

	if (!xfs_is_reflink_inode(src)) {
		trace_xfs_reflink_set_inode_flag(src);
		xfs_trans_ijoin(tp, src, XFS_ILOCK_EXCL);
		src->i_d.di_flags2 |= XFS_DIFLAG2_REFLINK;
		xfs_trans_log_inode(tp, src, XFS_ILOG_CORE);
		xfs_ifork_init_cow(src);
	} else
		xfs_iunlock(src, XFS_ILOCK_EXCL);

	if (src->i_ino == dest->i_ino)
		goto commit_flags;

	if (!xfs_is_reflink_inode(dest)) {
		trace_xfs_reflink_set_inode_flag(dest);
		xfs_trans_ijoin(tp, dest, XFS_ILOCK_EXCL);
		dest->i_d.di_flags2 |= XFS_DIFLAG2_REFLINK;
		xfs_trans_log_inode(tp, dest, XFS_ILOG_CORE);
		xfs_ifork_init_cow(dest);
	} else
		xfs_iunlock(dest, XFS_ILOCK_EXCL);

commit_flags:
	error = xfs_trans_commit(tp);
	if (error)
		goto out_error;
	return error;

out_error:
	trace_xfs_reflink_set_inode_flag_error(dest, error, _RET_IP_);
	return error;
}

/*
 * Update destination inode size & cowextsize hint, if necessary.
 */
STATIC int
xfs_reflink_update_dest(
	struct xfs_inode	*dest,
	xfs_off_t		newlen,
	xfs_extlen_t		cowextsize,
	bool			is_dedupe)
{
	struct xfs_mount	*mp = dest->i_mount;
	struct xfs_trans	*tp;
	int			error;

	if (is_dedupe && newlen <= i_size_read(VFS_I(dest)) && cowextsize == 0)
		return 0;

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ichange, 0, 0, 0, &tp);
	if (error)
		goto out_error;

	xfs_ilock(dest, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, dest, XFS_ILOCK_EXCL);

	if (newlen > i_size_read(VFS_I(dest))) {
		trace_xfs_reflink_update_inode_size(dest, newlen);
		i_size_write(VFS_I(dest), newlen);
		dest->i_d.di_size = newlen;
	}

	if (cowextsize) {
		dest->i_d.di_cowextsize = cowextsize;
		dest->i_d.di_flags2 |= XFS_DIFLAG2_COWEXTSIZE;
	}

	if (!is_dedupe) {
		xfs_trans_ichgtime(tp, dest,
				   XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
	}
	xfs_trans_log_inode(tp, dest, XFS_ILOG_CORE);

	error = xfs_trans_commit(tp);
	if (error)
		goto out_error;
	return error;

out_error:
	trace_xfs_reflink_update_inode_size_error(dest, error, _RET_IP_);
	return error;
}

/*
 * Do we have enough reserve in this AG to handle a reflink?  The refcount
 * btree already reserved all the space it needs, but the rmap btree can grow
 * infinitely, so we won't allow more reflinks when the AG is down to the
 * btree reserves.
 */
static int
xfs_reflink_ag_has_free_space(
	struct xfs_mount	*mp,
	xfs_agnumber_t		agno)
{
	struct xfs_perag	*pag;
	int			error = 0;

	if (!xfs_sb_version_hasrmapbt(&mp->m_sb))
		return 0;

	pag = xfs_perag_get(mp, agno);
	if (xfs_ag_resv_critical(pag, XFS_AG_RESV_RMAPBT) ||
	    xfs_ag_resv_critical(pag, XFS_AG_RESV_METADATA))
		error = -ENOSPC;
	xfs_perag_put(pag);
	return error;
}

/*
 * Unmap a range of blocks from a file, then map other blocks into the hole.
 * The range to unmap is (destoff : destoff + srcioff + irec->br_blockcount).
 * The extent irec is mapped into dest at irec->br_startoff.
 */
STATIC int
xfs_reflink_remap_extent(
	struct xfs_inode	*ip,
	struct xfs_bmbt_irec	*irec,
	xfs_fileoff_t		destoff,
	xfs_off_t		new_isize)
{
	struct xfs_mount	*mp = ip->i_mount;
	bool			real_extent = xfs_bmap_is_real_extent(irec);
	struct xfs_trans	*tp;
	xfs_fsblock_t		firstfsb;
	unsigned int		resblks;
	struct xfs_defer_ops	dfops;
	struct xfs_bmbt_irec	uirec;
	xfs_filblks_t		rlen;
	xfs_filblks_t		unmap_len;
	xfs_off_t		newlen;
	int			error;

	unmap_len = irec->br_startoff + irec->br_blockcount - destoff;
	trace_xfs_reflink_punch_range(ip, destoff, unmap_len);

	/* No reflinking if we're low on space */
	if (real_extent) {
		error = xfs_reflink_ag_has_free_space(mp,
				XFS_FSB_TO_AGNO(mp, irec->br_startblock));
		if (error)
			goto out;
	}

	/* Start a rolling transaction to switch the mappings */
	resblks = XFS_EXTENTADD_SPACE_RES(ip->i_mount, XFS_DATA_FORK);
	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks, 0, 0, &tp);
	if (error)
		goto out;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, 0);

	/* If we're not just clearing space, then do we have enough quota? */
	if (real_extent) {
		error = xfs_trans_reserve_quota_nblks(tp, ip,
				irec->br_blockcount, 0, XFS_QMOPT_RES_REGBLKS);
		if (error)
			goto out_cancel;
	}

	trace_xfs_reflink_remap(ip, irec->br_startoff,
				irec->br_blockcount, irec->br_startblock);

	/* Unmap the old blocks in the data fork. */
	rlen = unmap_len;
	while (rlen) {
		xfs_defer_init(&dfops, &firstfsb);
		error = __xfs_bunmapi(tp, ip, destoff, &rlen, 0, 1,
				&firstfsb, &dfops);
		if (error)
			goto out_defer;

		/*
		 * Trim the extent to whatever got unmapped.
		 * Remember, bunmapi works backwards.
		 */
		uirec.br_startblock = irec->br_startblock + rlen;
		uirec.br_startoff = irec->br_startoff + rlen;
		uirec.br_blockcount = unmap_len - rlen;
		unmap_len = rlen;

		/* If this isn't a real mapping, we're done. */
		if (!real_extent || uirec.br_blockcount == 0)
			goto next_extent;

		trace_xfs_reflink_remap(ip, uirec.br_startoff,
				uirec.br_blockcount, uirec.br_startblock);

		/* Update the refcount tree */
		error = xfs_refcount_increase_extent(mp, &dfops, &uirec);
		if (error)
			goto out_defer;

		/* Map the new blocks into the data fork. */
		error = xfs_bmap_map_extent(mp, &dfops, ip, &uirec);
		if (error)
			goto out_defer;

		/* Update quota accounting. */
		xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT,
				uirec.br_blockcount);

		/* Update dest isize if needed. */
		newlen = XFS_FSB_TO_B(mp,
				uirec.br_startoff + uirec.br_blockcount);
		newlen = min_t(xfs_off_t, newlen, new_isize);
		if (newlen > i_size_read(VFS_I(ip))) {
			trace_xfs_reflink_update_inode_size(ip, newlen);
			i_size_write(VFS_I(ip), newlen);
			ip->i_d.di_size = newlen;
			xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
		}

next_extent:
		/* Process all the deferred stuff. */
		xfs_defer_ijoin(&dfops, ip);
		error = xfs_defer_finish(&tp, &dfops);
		if (error)
			goto out_defer;
	}

	error = xfs_trans_commit(tp);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	if (error)
		goto out;
	return 0;

out_defer:
	xfs_defer_cancel(&dfops);
out_cancel:
	xfs_trans_cancel(tp);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
out:
	trace_xfs_reflink_remap_extent_error(ip, error, _RET_IP_);
	return error;
}

/*
 * Iteratively remap one file's extents (and holes) to another's.
 */
STATIC int
xfs_reflink_remap_blocks(
	struct xfs_inode	*src,
	xfs_fileoff_t		srcoff,
	struct xfs_inode	*dest,
	xfs_fileoff_t		destoff,
	xfs_filblks_t		len,
	xfs_off_t		new_isize)
{
	struct xfs_bmbt_irec	imap;
	int			nimaps;
	int			error = 0;
	xfs_filblks_t		range_len;

	/* drange = (destoff, destoff + len); srange = (srcoff, srcoff + len) */
	while (len) {
		uint		lock_mode;

		trace_xfs_reflink_remap_blocks_loop(src, srcoff, len,
				dest, destoff);

		/* Read extent from the source file */
		nimaps = 1;
		lock_mode = xfs_ilock_data_map_shared(src);
		error = xfs_bmapi_read(src, srcoff, len, &imap, &nimaps, 0);
		xfs_iunlock(src, lock_mode);
		if (error)
			goto err;
		ASSERT(nimaps == 1);

		trace_xfs_reflink_remap_imap(src, srcoff, len, XFS_IO_OVERWRITE,
				&imap);

		/* Translate imap into the destination file. */
		range_len = imap.br_startoff + imap.br_blockcount - srcoff;
		imap.br_startoff += destoff - srcoff;

		/* Clear dest from destoff to the end of imap and map it in. */
		error = xfs_reflink_remap_extent(dest, &imap, destoff,
				new_isize);
		if (error)
			goto err;

		if (fatal_signal_pending(current)) {
			error = -EINTR;
			goto err;
		}

		/* Advance drange/srange */
		srcoff += range_len;
		destoff += range_len;
		len -= range_len;
	}

	return 0;

err:
	trace_xfs_reflink_remap_blocks_error(dest, error, _RET_IP_);
	return error;
}

/*
 * Grab the exclusive iolock for a data copy from src to dest, making
 * sure to abide by the vfs locking order (lowest pointer value goes first) and
 * breaking the pnfs layout leases on dest before proceeding.  The loop
 * is needed because we cannot call the blocking break_layout() with the
 * src iolock held, and therefore have to back out both locks.
 */
static int
xfs_iolock_two_inodes_and_break_layout(
	struct inode		*src,
	struct inode		*dest)
{
	int			error;

retry:
	if (src < dest) {
		inode_lock_shared(src);
		inode_lock_nested(dest, I_MUTEX_NONDIR2);
	} else {
		/* src >= dest */
		inode_lock(dest);
	}

	error = break_layout(dest, false);
	if (error == -EWOULDBLOCK) {
		inode_unlock(dest);
		if (src < dest)
			inode_unlock_shared(src);
		error = break_layout(dest, true);
		if (error)
			return error;
		goto retry;
	}
	if (error) {
		inode_unlock(dest);
		if (src < dest)
			inode_unlock_shared(src);
		return error;
	}
	if (src > dest)
		inode_lock_shared_nested(src, I_MUTEX_NONDIR2);
	return 0;
}

/*
 * Link a range of blocks from one file to another.
 */
int
xfs_reflink_remap_range(
	struct file		*file_in,
	loff_t			pos_in,
	struct file		*file_out,
	loff_t			pos_out,
	u64			len,
	bool			is_dedupe)
{
	struct inode		*inode_in = file_inode(file_in);
	struct xfs_inode	*src = XFS_I(inode_in);
	struct inode		*inode_out = file_inode(file_out);
	struct xfs_inode	*dest = XFS_I(inode_out);
	struct xfs_mount	*mp = src->i_mount;
	bool			same_inode = (inode_in == inode_out);
	xfs_fileoff_t		sfsbno, dfsbno;
	xfs_filblks_t		fsblen;
	xfs_extlen_t		cowextsize;
	ssize_t			ret;

	if (!xfs_sb_version_hasreflink(&mp->m_sb))
		return -EOPNOTSUPP;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	/* Lock both files against IO */
	ret = xfs_iolock_two_inodes_and_break_layout(inode_in, inode_out);
	if (ret)
		return ret;
	if (same_inode)
		xfs_ilock(src, XFS_MMAPLOCK_EXCL);
	else
		xfs_lock_two_inodes(src, XFS_MMAPLOCK_SHARED, dest,
				XFS_MMAPLOCK_EXCL);

	/* Check file eligibility and prepare for block sharing. */
	ret = -EINVAL;
	/* Don't reflink realtime inodes */
	if (XFS_IS_REALTIME_INODE(src) || XFS_IS_REALTIME_INODE(dest))
		goto out_unlock;

	/* Don't share DAX file data for now. */
	if (IS_DAX(inode_in) || IS_DAX(inode_out))
		goto out_unlock;

	ret = vfs_clone_file_prep_inodes(inode_in, pos_in, inode_out, pos_out,
			&len, is_dedupe);
	if (ret <= 0)
		goto out_unlock;

	/* Attach dquots to dest inode before changing block map */
	ret = xfs_qm_dqattach(dest);
	if (ret)
		goto out_unlock;

	trace_xfs_reflink_remap_range(src, pos_in, len, dest, pos_out);

	/*
	 * Clear out post-eof preallocations because we don't have page cache
	 * backing the delayed allocations and they'll never get freed on
	 * their own.
	 */
	if (xfs_can_free_eofblocks(dest, true)) {
		ret = xfs_free_eofblocks(dest);
		if (ret)
			goto out_unlock;
	}

	/* Set flags and remap blocks. */
	ret = xfs_reflink_set_inode_flag(src, dest);
	if (ret)
		goto out_unlock;

	dfsbno = XFS_B_TO_FSBT(mp, pos_out);
	sfsbno = XFS_B_TO_FSBT(mp, pos_in);
	fsblen = XFS_B_TO_FSB(mp, len);
	ret = xfs_reflink_remap_blocks(src, sfsbno, dest, dfsbno, fsblen,
			pos_out + len);
	if (ret)
		goto out_unlock;

	/* Zap any page cache for the destination file's range. */
	truncate_inode_pages_range(&inode_out->i_data, pos_out,
				   PAGE_ALIGN(pos_out + len) - 1);

	/*
	 * Carry the cowextsize hint from src to dest if we're sharing the
	 * entire source file to the entire destination file, the source file
	 * has a cowextsize hint, and the destination file does not.
	 */
	cowextsize = 0;
	if (pos_in == 0 && len == i_size_read(inode_in) &&
	    (src->i_d.di_flags2 & XFS_DIFLAG2_COWEXTSIZE) &&
	    pos_out == 0 && len >= i_size_read(inode_out) &&
	    !(dest->i_d.di_flags2 & XFS_DIFLAG2_COWEXTSIZE))
		cowextsize = src->i_d.di_cowextsize;

	ret = xfs_reflink_update_dest(dest, pos_out + len, cowextsize,
			is_dedupe);

out_unlock:
	xfs_iunlock(dest, XFS_MMAPLOCK_EXCL);
	if (!same_inode)
		xfs_iunlock(src, XFS_MMAPLOCK_SHARED);
	inode_unlock(inode_out);
	if (!same_inode)
		inode_unlock_shared(inode_in);
	if (ret)
		trace_xfs_reflink_remap_range_error(dest, ret, _RET_IP_);
	return ret;
}

/*
 * The user wants to preemptively CoW all shared blocks in this file,
 * which enables us to turn off the reflink flag.  Iterate all
 * extents which are not prealloc/delalloc to see which ranges are
 * mentioned in the refcount tree, then read those blocks into the
 * pagecache, dirty them, fsync them back out, and then we can update
 * the inode flag.  What happens if we run out of memory? :)
 */
STATIC int
xfs_reflink_dirty_extents(
	struct xfs_inode	*ip,
	xfs_fileoff_t		fbno,
	xfs_filblks_t		end,
	xfs_off_t		isize)
{
	struct xfs_mount	*mp = ip->i_mount;
	xfs_agnumber_t		agno;
	xfs_agblock_t		agbno;
	xfs_extlen_t		aglen;
	xfs_agblock_t		rbno;
	xfs_extlen_t		rlen;
	xfs_off_t		fpos;
	xfs_off_t		flen;
	struct xfs_bmbt_irec	map[2];
	int			nmaps;
	int			error = 0;

	while (end - fbno > 0) {
		nmaps = 1;
		/*
		 * Look for extents in the file.  Skip holes, delalloc, or
		 * unwritten extents; they can't be reflinked.
		 */
		error = xfs_bmapi_read(ip, fbno, end - fbno, map, &nmaps, 0);
		if (error)
			goto out;
		if (nmaps == 0)
			break;
		if (!xfs_bmap_is_real_extent(&map[0]))
			goto next;

		map[1] = map[0];
		while (map[1].br_blockcount) {
			agno = XFS_FSB_TO_AGNO(mp, map[1].br_startblock);
			agbno = XFS_FSB_TO_AGBNO(mp, map[1].br_startblock);
			aglen = map[1].br_blockcount;

			error = xfs_reflink_find_shared(mp, NULL, agno, agbno,
					aglen, &rbno, &rlen, true);
			if (error)
				goto out;
			if (rbno == NULLAGBLOCK)
				break;

			/* Dirty the pages */
			xfs_iunlock(ip, XFS_ILOCK_EXCL);
			fpos = XFS_FSB_TO_B(mp, map[1].br_startoff +
					(rbno - agbno));
			flen = XFS_FSB_TO_B(mp, rlen);
			if (fpos + flen > isize)
				flen = isize - fpos;
			error = iomap_file_dirty(VFS_I(ip), fpos, flen,
					&xfs_iomap_ops);
			xfs_ilock(ip, XFS_ILOCK_EXCL);
			if (error)
				goto out;

			map[1].br_blockcount -= (rbno - agbno + rlen);
			map[1].br_startoff += (rbno - agbno + rlen);
			map[1].br_startblock += (rbno - agbno + rlen);
		}

next:
		fbno = map[0].br_startoff + map[0].br_blockcount;
	}
out:
	return error;
}

/* Does this inode need the reflink flag? */
int
xfs_reflink_inode_has_shared_extents(
	struct xfs_trans		*tp,
	struct xfs_inode		*ip,
	bool				*has_shared)
{
	struct xfs_bmbt_irec		got;
	struct xfs_mount		*mp = ip->i_mount;
	struct xfs_ifork		*ifp;
	xfs_agnumber_t			agno;
	xfs_agblock_t			agbno;
	xfs_extlen_t			aglen;
	xfs_agblock_t			rbno;
	xfs_extlen_t			rlen;
	struct xfs_iext_cursor		icur;
	bool				found;
	int				error;

	ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
	if (!(ifp->if_flags & XFS_IFEXTENTS)) {
		error = xfs_iread_extents(tp, ip, XFS_DATA_FORK);
		if (error)
			return error;
	}

	*has_shared = false;
	found = xfs_iext_lookup_extent(ip, ifp, 0, &icur, &got);
	while (found) {
		if (isnullstartblock(got.br_startblock) ||
		    got.br_state != XFS_EXT_NORM)
			goto next;
		agno = XFS_FSB_TO_AGNO(mp, got.br_startblock);
		agbno = XFS_FSB_TO_AGBNO(mp, got.br_startblock);
		aglen = got.br_blockcount;

		error = xfs_reflink_find_shared(mp, tp, agno, agbno, aglen,
				&rbno, &rlen, false);
		if (error)
			return error;
		/* Is there still a shared block here? */
		if (rbno != NULLAGBLOCK) {
			*has_shared = true;
			return 0;
		}
next:
		found = xfs_iext_next_extent(ifp, &icur, &got);
	}

	return 0;
}

/*
 * Clear the inode reflink flag if there are no shared extents.
 *
 * The caller is responsible for joining the inode to the transaction passed in.
 * The inode will be joined to the transaction that is returned to the caller.
 */
int
xfs_reflink_clear_inode_flag(
	struct xfs_inode	*ip,
	struct xfs_trans	**tpp)
{
	bool			needs_flag;
	int			error = 0;

	ASSERT(xfs_is_reflink_inode(ip));

	error = xfs_reflink_inode_has_shared_extents(*tpp, ip, &needs_flag);
	if (error || needs_flag)
		return error;

	/*
	 * We didn't find any shared blocks so turn off the reflink flag.
	 * First, get rid of any leftover CoW mappings.
	 */
	error = xfs_reflink_cancel_cow_blocks(ip, tpp, 0, NULLFILEOFF, true);
	if (error)
		return error;

	/* Clear the inode flag. */
	trace_xfs_reflink_unset_inode_flag(ip);
	ip->i_d.di_flags2 &= ~XFS_DIFLAG2_REFLINK;
	xfs_inode_clear_cowblocks_tag(ip);
	xfs_trans_log_inode(*tpp, ip, XFS_ILOG_CORE);

	return error;
}

/*
 * Clear the inode reflink flag if there are no shared extents and the size
 * hasn't changed.
 */
STATIC int
xfs_reflink_try_clear_inode_flag(
	struct xfs_inode	*ip)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp;
	int			error = 0;

	/* Start a rolling transaction to remove the mappings */
	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, 0, 0, 0, &tp);
	if (error)
		return error;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, 0);

	error = xfs_reflink_clear_inode_flag(ip, &tp);
	if (error)
		goto cancel;

	error = xfs_trans_commit(tp);
	if (error)
		goto out;

	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return 0;
cancel:
	xfs_trans_cancel(tp);
out:
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;
}

/*
 * Pre-COW all shared blocks within a given byte range of a file and turn off
 * the reflink flag if we unshare all of the file's blocks.
 */
int
xfs_reflink_unshare(
	struct xfs_inode	*ip,
	xfs_off_t		offset,
	xfs_off_t		len)
{
	struct xfs_mount	*mp = ip->i_mount;
	xfs_fileoff_t		fbno;
	xfs_filblks_t		end;
	xfs_off_t		isize;
	int			error;

	if (!xfs_is_reflink_inode(ip))
		return 0;

	trace_xfs_reflink_unshare(ip, offset, len);

	inode_dio_wait(VFS_I(ip));

	/* Try to CoW the selected ranges */
	xfs_ilock(ip, XFS_ILOCK_EXCL);
	fbno = XFS_B_TO_FSBT(mp, offset);
	isize = i_size_read(VFS_I(ip));
	end = XFS_B_TO_FSB(mp, offset + len);
	error = xfs_reflink_dirty_extents(ip, fbno, end, isize);
	if (error)
		goto out_unlock;
	xfs_iunlock(ip, XFS_ILOCK_EXCL);

	/* Wait for the IO to finish */
	error = filemap_write_and_wait(VFS_I(ip)->i_mapping);
	if (error)
		goto out;

	/* Turn off the reflink flag if possible. */
	error = xfs_reflink_try_clear_inode_flag(ip);
	if (error)
		goto out;

	return 0;

out_unlock:
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
out:
	trace_xfs_reflink_unshare_error(ip, error, _RET_IP_);
	return error;
}
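
/*
 * Illustrative userspace sketch (not part of the kernel code in this file):
 * xfs_reflink_unshare() above is normally reached through fallocate() with
 * FALLOC_FL_UNSHARE_RANGE, which asks the filesystem to break block sharing
 * on the given byte range ahead of any future writes.  Error handling is
 * omitted; "length" stands for however many bytes the caller wants unshared.
 *
 *	#define _GNU_SOURCE
 *	#include <fcntl.h>
 *	#include <linux/falloc.h>
 *
 *	fallocate(fd, FALLOC_FL_UNSHARE_RANGE, 0, length);
 */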