// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * Copyright (c) 2016-2018 Christoph Hellwig.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_bmap_btree.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_errortag.h"
#include "xfs_error.h"
#include "xfs_trans.h"
#include "xfs_trans_space.h"
#include "xfs_inode_item.h"
#include "xfs_iomap.h"
#include "xfs_trace.h"
#include "xfs_quota.h"
#include "xfs_dquot_item.h"
#include "xfs_dquot.h"
#include "xfs_reflink.h"


#define XFS_WRITEIO_ALIGN(mp,off)	(((off) >> mp->m_writeio_log) \
						<< mp->m_writeio_log)

static int
xfs_alert_fsblock_zero(
	xfs_inode_t	*ip,
	xfs_bmbt_irec_t	*imap)
{
	xfs_alert_tag(ip->i_mount, XFS_PTAG_FSBLOCK_ZERO,
			"Access to block zero in inode %llu "
			"start_block: %llx start_off: %llx "
			"blkcnt: %llx extent-state: %x",
		(unsigned long long)ip->i_ino,
		(unsigned long long)imap->br_startblock,
		(unsigned long long)imap->br_startoff,
		(unsigned long long)imap->br_blockcount,
		imap->br_state);
	return -EFSCORRUPTED;
}

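/*
 * Translate a raw xfs_bmbt_irec block mapping into the generic struct iomap
 * that the VFS iomap infrastructure consumes.
 */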
int
xfs_bmbt_to_iomap(
	struct xfs_inode	*ip,
	struct iomap		*iomap,
	struct xfs_bmbt_irec	*imap,
	u16			flags)
{
	struct xfs_mount	*mp = ip->i_mount;

	if (unlikely(!xfs_valid_startblock(ip, imap->br_startblock)))
		return xfs_alert_fsblock_zero(ip, imap);

	if (imap->br_startblock == HOLESTARTBLOCK) {
		iomap->addr = IOMAP_NULL_ADDR;
		iomap->type = IOMAP_HOLE;
	} else if (imap->br_startblock == DELAYSTARTBLOCK ||
		   isnullstartblock(imap->br_startblock)) {
		iomap->addr = IOMAP_NULL_ADDR;
		iomap->type = IOMAP_DELALLOC;
	} else {
		iomap->addr = BBTOB(xfs_fsb_to_db(ip, imap->br_startblock));
		if (imap->br_state == XFS_EXT_UNWRITTEN)
			iomap->type = IOMAP_UNWRITTEN;
		else
			iomap->type = IOMAP_MAPPED;
	}
	iomap->offset = XFS_FSB_TO_B(mp, imap->br_startoff);
	iomap->length = XFS_FSB_TO_B(mp, imap->br_blockcount);
	iomap->bdev = xfs_find_bdev_for_inode(VFS_I(ip));
	iomap->dax_dev = xfs_find_daxdev_for_inode(VFS_I(ip));
	iomap->flags = flags;

	if (xfs_ipincount(ip) &&
	    (ip->i_itemp->ili_fsync_fields & ~XFS_ILOG_TIMESTAMP))
		iomap->flags |= IOMAP_F_DIRTY;
	return 0;
}

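/* Describe a pure hole covering the given file block range as an iomap. */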
static void
xfs_hole_to_iomap(
	struct xfs_inode	*ip,
	struct iomap		*iomap,
	xfs_fileoff_t		offset_fsb,
	xfs_fileoff_t		end_fsb)
{
	iomap->addr = IOMAP_NULL_ADDR;
	iomap->type = IOMAP_HOLE;
	iomap->offset = XFS_FSB_TO_B(ip->i_mount, offset_fsb);
	iomap->length = XFS_FSB_TO_B(ip->i_mount, end_fsb - offset_fsb);
	iomap->bdev = xfs_find_bdev_for_inode(VFS_I(ip));
	iomap->dax_dev = xfs_find_daxdev_for_inode(VFS_I(ip));
}

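/*
 * Work out the block alignment to use for a new allocation at EOF, based on
 * the stripe geometry and the extent size hint passed in.
 */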
xfs_extlen_t
xfs_eof_alignment(
	struct xfs_inode	*ip,
	xfs_extlen_t		extsize)
{
	struct xfs_mount	*mp = ip->i_mount;
	xfs_extlen_t		align = 0;

	if (!XFS_IS_REALTIME_INODE(ip)) {
		/*
		 * Round up the allocation request to a stripe unit
		 * (m_dalign) boundary if the file size is >= stripe unit
		 * size, and we are allocating past the allocation eof.
		 *
		 * If mounted with the "-o swalloc" option the alignment is
		 * increased from the stripe unit size to the stripe width.
		 */
		if (mp->m_swidth && (mp->m_flags & XFS_MOUNT_SWALLOC))
			align = mp->m_swidth;
		else if (mp->m_dalign)
			align = mp->m_dalign;

		if (align && XFS_ISIZE(ip) < XFS_FSB_TO_B(mp, align))
			align = 0;
	}

	/*
	 * Always round up the allocation request to an extent boundary
	 * (when the file is on a real-time subvolume or has a di_extsize
	 * hint).
	 */
	if (extsize) {
		if (align)
			align = roundup_64(align, extsize);
		else
			align = extsize;
	}

	return align;
}

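/*
 * Round *last_fsb up to the EOF allocation alignment, but only keep the
 * rounded value if it still points past the last allocated extent.
 */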
STATIC int
xfs_iomap_eof_align_last_fsb(
	struct xfs_inode	*ip,
	xfs_extlen_t		extsize,
	xfs_fileoff_t		*last_fsb)
{
	xfs_extlen_t		align = xfs_eof_alignment(ip, extsize);

	if (align) {
		xfs_fileoff_t	new_last_fsb = roundup_64(*last_fsb, align);
		int		eof, error;

		error = xfs_bmap_eof(ip, new_last_fsb, XFS_DATA_FORK, &eof);
		if (error)
			return error;
		if (eof)
			*last_fsb = new_last_fsb;
	}
	return 0;
}

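/*
 * Allocate blocks for a direct I/O write.  Blocks are reserved against quota
 * and the log up front; for DAX the allocation is zeroed and converted to
 * written instead of being left unwritten.
 */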
int
xfs_iomap_write_direct(
	xfs_inode_t	*ip,
	xfs_off_t	offset,
	size_t		count,
	xfs_bmbt_irec_t *imap,
	int		nmaps)
{
	xfs_mount_t	*mp = ip->i_mount;
	xfs_fileoff_t	offset_fsb;
	xfs_fileoff_t	last_fsb;
	xfs_filblks_t	count_fsb, resaligned;
	xfs_extlen_t	extsz;
	int		nimaps;
	int		quota_flag;
	int		rt;
	xfs_trans_t	*tp;
	uint		qblocks, resblks, resrtextents;
	int		error;
	int		lockmode;
	int		bmapi_flags = XFS_BMAPI_PREALLOC;
	uint		tflags = 0;

	rt = XFS_IS_REALTIME_INODE(ip);
	extsz = xfs_get_extsz_hint(ip);
	lockmode = XFS_ILOCK_SHARED;	/* locked by caller */

	ASSERT(xfs_isilocked(ip, lockmode));

	offset_fsb = XFS_B_TO_FSBT(mp, offset);
	last_fsb = XFS_B_TO_FSB(mp, ((xfs_ufsize_t)(offset + count)));
	if ((offset + count) > XFS_ISIZE(ip)) {
		/*
		 * Assert that the in-core extent list is present since this can
		 * call xfs_iread_extents() and we only have the ilock shared.
		 * This should be safe because the lock was held around a bmapi
		 * call in the caller and we only need it to access the in-core
		 * list.
		 */
		ASSERT(XFS_IFORK_PTR(ip, XFS_DATA_FORK)->if_flags &
								XFS_IFEXTENTS);
		error = xfs_iomap_eof_align_last_fsb(ip, extsz, &last_fsb);
		if (error)
			goto out_unlock;
	} else {
		if (nmaps && (imap->br_startblock == HOLESTARTBLOCK))
			last_fsb = min(last_fsb, (xfs_fileoff_t)
					imap->br_blockcount +
					imap->br_startoff);
	}
	count_fsb = last_fsb - offset_fsb;
	ASSERT(count_fsb > 0);
	resaligned = xfs_aligned_fsb_count(offset_fsb, count_fsb, extsz);

	if (unlikely(rt)) {
		resrtextents = qblocks = resaligned;
		resrtextents /= mp->m_sb.sb_rextsize;
		resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
		quota_flag = XFS_QMOPT_RES_RTBLKS;
	} else {
		resrtextents = 0;
		resblks = qblocks = XFS_DIOSTRAT_SPACE_RES(mp, resaligned);
		quota_flag = XFS_QMOPT_RES_REGBLKS;
	}

	/*
	 * Drop the shared lock acquired by the caller, attach the dquot if
	 * necessary and move on to transaction setup.
	 */
	xfs_iunlock(ip, lockmode);
	error = xfs_qm_dqattach(ip);
	if (error)
		return error;

	/*
	 * For DAX, we do not allocate unwritten extents, but instead we zero
	 * the block before we commit the transaction.  Ideally we'd like to do
	 * this outside the transaction context, but if we commit and then crash
	 * we may not have zeroed the blocks and this will be exposed on
	 * recovery of the allocation. Hence we must zero before commit.
	 *
	 * Further, if we are mapping unwritten extents here, we need to zero
	 * and convert them to written so that we don't need an unwritten extent
	 * callback for DAX. This also means that we need to be able to dip into
	 * the reserve block pool for bmbt block allocation if there is no space
	 * left but we need to do unwritten extent conversion.
	 */
	if (IS_DAX(VFS_I(ip))) {
		bmapi_flags = XFS_BMAPI_CONVERT | XFS_BMAPI_ZERO;
		if (imap->br_state == XFS_EXT_UNWRITTEN) {
			tflags |= XFS_TRANS_RESERVE;
			resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0) << 1;
		}
	}
	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks, resrtextents,
			tflags, &tp);
	if (error)
		return error;

	lockmode = XFS_ILOCK_EXCL;
	xfs_ilock(ip, lockmode);

	error = xfs_trans_reserve_quota_nblks(tp, ip, qblocks, 0, quota_flag);
	if (error)
		goto out_trans_cancel;

	xfs_trans_ijoin(tp, ip, 0);

	/*
	 * From this point onwards we overwrite the imap pointer that the
	 * caller gave to us.
	 */
	nimaps = 1;
	error = xfs_bmapi_write(tp, ip, offset_fsb, count_fsb,
				bmapi_flags, resblks, imap, &nimaps);
	if (error)
		goto out_res_cancel;

	/*
	 * Complete the transaction
	 */
	error = xfs_trans_commit(tp);
	if (error)
		goto out_unlock;

	/*
	 * Copy any maps to caller's array and return any error.
	 */
	if (nimaps == 0) {
		error = -ENOSPC;
		goto out_unlock;
	}

	if (unlikely(!xfs_valid_startblock(ip, imap->br_startblock)))
		error = xfs_alert_fsblock_zero(ip, imap);

out_unlock:
	xfs_iunlock(ip, lockmode);
	return error;

out_res_cancel:
	xfs_trans_unreserve_quota_nblks(tp, ip, (long)qblocks, 0, quota_flag);
out_trans_cancel:
	xfs_trans_cancel(tp);
	goto out_unlock;
}

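/*
 * Decide whether speculative preallocation needs to be throttled against the
 * given quota type: only once the reservation would put the dquot at or
 * above its low preallocation watermark.
 */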
STATIC bool
xfs_quota_need_throttle(
	struct xfs_inode *ip,
	int type,
	xfs_fsblock_t alloc_blocks)
{
	struct xfs_dquot *dq = xfs_inode_dquot(ip, type);

	if (!dq || !xfs_this_quota_on(ip->i_mount, type))
		return false;

	/* no hi watermark, no throttle */
	if (!dq->q_prealloc_hi_wmark)
		return false;

	/* under the lo watermark, no throttle */
	if (dq->q_res_bcount + alloc_blocks < dq->q_prealloc_lo_wmark)
		return false;

	return true;
}

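/*
 * Scale the preallocation back against a single dquot: squash it entirely
 * above the high watermark, otherwise derive a throttle shift from the
 * quota's low space thresholds and clamp *qfreesp to the quota free space.
 */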
STATIC void
xfs_quota_calc_throttle(
	struct xfs_inode *ip,
	int type,
	xfs_fsblock_t *qblocks,
	int *qshift,
	int64_t	*qfreesp)
{
	int64_t freesp;
	int shift = 0;
	struct xfs_dquot *dq = xfs_inode_dquot(ip, type);

	/* no dq, or over hi wmark, squash the prealloc completely */
	if (!dq || dq->q_res_bcount >= dq->q_prealloc_hi_wmark) {
		*qblocks = 0;
		*qfreesp = 0;
		return;
	}

	freesp = dq->q_prealloc_hi_wmark - dq->q_res_bcount;
	if (freesp < dq->q_low_space[XFS_QLOWSP_5_PCNT]) {
		shift = 2;
		if (freesp < dq->q_low_space[XFS_QLOWSP_3_PCNT])
			shift += 2;
		if (freesp < dq->q_low_space[XFS_QLOWSP_1_PCNT])
			shift += 2;
	}

	if (freesp < *qfreesp)
		*qfreesp = freesp;

	/* only overwrite the throttle values if we are more aggressive */
	if ((freesp >> shift) < (*qblocks >> *qshift)) {
		*qblocks = freesp;
		*qshift = shift;
	}
}

/*
 * If we are doing a write at the end of the file and there are no allocations
 * past this one, then extend the allocation out to the file system's write
 * iosize.
 *
 * If we don't have a user specified preallocation size, dynamically increase
 * the preallocation size as the size of the file grows.  Cap the maximum size
 * at a single extent or less if the filesystem is near full. The closer the
 * filesystem is to full, the smaller the maximum preallocation.
 *
 * As an exception we don't do any preallocation at all if the file is smaller
 * than the minimum preallocation and we are using the default dynamic
 * preallocation scheme, as it is likely this is the only write to the file that
 * is going to be done.
 *
 * We clean up any extra space left over when the file is closed in
 * xfs_inactive().
 */
STATIC xfs_fsblock_t
xfs_iomap_prealloc_size(
	struct xfs_inode	*ip,
	int			whichfork,
	loff_t			offset,
	loff_t			count,
	struct xfs_iext_cursor	*icur)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, whichfork);
	xfs_fileoff_t		offset_fsb = XFS_B_TO_FSBT(mp, offset);
	struct xfs_bmbt_irec	prev;
	int			shift = 0;
	int64_t			freesp;
	xfs_fsblock_t		qblocks;
	int			qshift = 0;
	xfs_fsblock_t		alloc_blocks = 0;

	if (offset + count <= XFS_ISIZE(ip))
		return 0;

	if (!(mp->m_flags & XFS_MOUNT_DFLT_IOSIZE) &&
	    (XFS_ISIZE(ip) < XFS_FSB_TO_B(mp, mp->m_writeio_blocks)))
		return 0;

	/*
	 * If an explicit allocsize is set, the file is small, or we
	 * are writing behind a hole, then use the minimum prealloc:
	 */
	if ((mp->m_flags & XFS_MOUNT_DFLT_IOSIZE) ||
	    XFS_ISIZE(ip) < XFS_FSB_TO_B(mp, mp->m_dalign) ||
	    !xfs_iext_peek_prev_extent(ifp, icur, &prev) ||
	    prev.br_startoff + prev.br_blockcount < offset_fsb)
		return mp->m_writeio_blocks;

	/*
	 * Determine the initial size of the preallocation. We are beyond the
	 * current EOF here, but we need to take into account whether this is
	 * a sparse write or an extending write when determining the
	 * preallocation size.  Hence we need to look up the extent that ends
	 * at the current write offset and use the result to determine the
	 * preallocation size.
	 *
	 * If the extent is a hole, then preallocation is essentially disabled.
	 * Otherwise we take the size of the preceding data extent as the basis
	 * for the preallocation size. If the size of the extent is greater than
	 * half the maximum extent length, then use the current offset as the
	 * basis. This ensures that for large files the preallocation size
	 * always extends to MAXEXTLEN rather than falling short due to things
	 * like stripe unit/width alignment of real extents.
	 */
	if (prev.br_blockcount <= (MAXEXTLEN >> 1))
		alloc_blocks = prev.br_blockcount << 1;
	else
		alloc_blocks = XFS_B_TO_FSB(mp, offset);
	if (!alloc_blocks)
		goto check_writeio;
	qblocks = alloc_blocks;

	/*
	 * MAXEXTLEN is not a power of two value but we round the prealloc down
	 * to the nearest power of two value after throttling. To prevent the
	 * round down from unconditionally reducing the maximum supported prealloc
	 * size, we round up first, apply appropriate throttling, round down and
	 * cap the value to MAXEXTLEN.
	 */
	alloc_blocks = XFS_FILEOFF_MIN(roundup_pow_of_two(MAXEXTLEN),
				       alloc_blocks);

	freesp = percpu_counter_read_positive(&mp->m_fdblocks);
	if (freesp < mp->m_low_space[XFS_LOWSP_5_PCNT]) {
		shift = 2;
		if (freesp < mp->m_low_space[XFS_LOWSP_4_PCNT])
			shift++;
		if (freesp < mp->m_low_space[XFS_LOWSP_3_PCNT])
			shift++;
		if (freesp < mp->m_low_space[XFS_LOWSP_2_PCNT])
			shift++;
		if (freesp < mp->m_low_space[XFS_LOWSP_1_PCNT])
			shift++;
	}

	/*
	 * Check each quota to cap the prealloc size, provide a shift value to
	 * throttle with and adjust amount of available space.
	 */
	if (xfs_quota_need_throttle(ip, XFS_DQ_USER, alloc_blocks))
		xfs_quota_calc_throttle(ip, XFS_DQ_USER, &qblocks, &qshift,
					&freesp);
	if (xfs_quota_need_throttle(ip, XFS_DQ_GROUP, alloc_blocks))
		xfs_quota_calc_throttle(ip, XFS_DQ_GROUP, &qblocks, &qshift,
					&freesp);
	if (xfs_quota_need_throttle(ip, XFS_DQ_PROJ, alloc_blocks))
		xfs_quota_calc_throttle(ip, XFS_DQ_PROJ, &qblocks, &qshift,
					&freesp);

	/*
	 * The final prealloc size is set to the minimum of free space available
	 * in each of the quotas and the overall filesystem.
	 *
	 * The shift throttle value is set to the maximum value as determined by
	 * the global low free space values and per-quota low free space values.
	 */
	alloc_blocks = min(alloc_blocks, qblocks);
	shift = max(shift, qshift);

	if (shift)
		alloc_blocks >>= shift;
	/*
	 * rounddown_pow_of_two() returns an undefined result if we pass in
	 * alloc_blocks = 0.
	 */
	if (alloc_blocks)
		alloc_blocks = rounddown_pow_of_two(alloc_blocks);
	if (alloc_blocks > MAXEXTLEN)
		alloc_blocks = MAXEXTLEN;

	/*
	 * If we are still trying to allocate more space than is
	 * available, squash the prealloc hard. This can happen if we
	 * have a large file on a small filesystem and the above
	 * lowspace thresholds are smaller than MAXEXTLEN.
	 */
	while (alloc_blocks && alloc_blocks >= freesp)
		alloc_blocks >>= 4;
check_writeio:
	if (alloc_blocks < mp->m_writeio_blocks)
		alloc_blocks = mp->m_writeio_blocks;
	trace_xfs_iomap_prealloc_size(ip, alloc_blocks, shift,
				      mp->m_writeio_blocks);
	return alloc_blocks;
}

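/*
 * iomap_begin handler for buffered writes: reserve delalloc blocks in the
 * data or COW fork as needed, including any speculative EOF preallocation.
 */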
static int
xfs_file_iomap_begin_delay(
	struct inode		*inode,
	loff_t			offset,
	loff_t			count,
	unsigned		flags,
	struct iomap		*iomap)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	xfs_fileoff_t		offset_fsb = XFS_B_TO_FSBT(mp, offset);
	xfs_fileoff_t		maxbytes_fsb =
		XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);
	xfs_fileoff_t		end_fsb;
	struct xfs_bmbt_irec	imap, cmap;
	struct xfs_iext_cursor	icur, ccur;
	xfs_fsblock_t		prealloc_blocks = 0;
	bool			eof = false, cow_eof = false, shared = false;
	int			whichfork = XFS_DATA_FORK;
	int			error = 0;

	ASSERT(!XFS_IS_REALTIME_INODE(ip));
	ASSERT(!xfs_get_extsz_hint(ip));

	xfs_ilock(ip, XFS_ILOCK_EXCL);

	if (unlikely(XFS_TEST_ERROR(
	    (XFS_IFORK_FORMAT(ip, XFS_DATA_FORK) != XFS_DINODE_FMT_EXTENTS &&
	     XFS_IFORK_FORMAT(ip, XFS_DATA_FORK) != XFS_DINODE_FMT_BTREE),
	     mp, XFS_ERRTAG_BMAPIFORMAT))) {
		XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, mp);
		error = -EFSCORRUPTED;
		goto out_unlock;
	}

	XFS_STATS_INC(mp, xs_blk_mapw);

	if (!(ip->i_df.if_flags & XFS_IFEXTENTS)) {
		error = xfs_iread_extents(NULL, ip, XFS_DATA_FORK);
		if (error)
			goto out_unlock;
	}

	end_fsb = min(XFS_B_TO_FSB(mp, offset + count), maxbytes_fsb);

	/*
	 * Search the data fork first to look up our source mapping.  We
	 * always need the data fork map, as we have to return it to the
	 * iomap code so that the higher level write code can read data in to
	 * perform read-modify-write cycles for unaligned writes.
	 */
	eof = !xfs_iext_lookup_extent(ip, &ip->i_df, offset_fsb, &icur, &imap);
	if (eof)
		imap.br_startoff = end_fsb; /* fake hole until the end */

	/* We never need to allocate blocks for zeroing a hole. */
	if ((flags & IOMAP_ZERO) && imap.br_startoff > offset_fsb) {
		xfs_hole_to_iomap(ip, iomap, offset_fsb, imap.br_startoff);
		goto out_unlock;
	}

	/*
	 * Search the COW fork extent list even if we did not find a data fork
	 * extent.  This serves two purposes: first this implements the
	 * speculative preallocation using cowextsize, so that we also unshare
	 * blocks adjacent to shared blocks instead of just the shared blocks
	 * themselves.  Second the lookup in the extent list is generally faster
	 * than going out to the shared extent tree.
	 */
	if (xfs_is_cow_inode(ip)) {
		if (!ip->i_cowfp) {
			ASSERT(!xfs_is_reflink_inode(ip));
			xfs_ifork_init_cow(ip);
		}
		cow_eof = !xfs_iext_lookup_extent(ip, ip->i_cowfp, offset_fsb,
				&ccur, &cmap);
		if (!cow_eof && cmap.br_startoff <= offset_fsb) {
			trace_xfs_reflink_cow_found(ip, &cmap);
			goto found_cow;
		}
	}

	if (imap.br_startoff <= offset_fsb) {
		/*
		 * For reflink files we may need a delalloc reservation when
		 * overwriting shared extents.   This includes zeroing of
		 * existing extents that contain data.
		 */
		if (!xfs_is_cow_inode(ip) ||
		    ((flags & IOMAP_ZERO) && imap.br_state != XFS_EXT_NORM)) {
			trace_xfs_iomap_found(ip, offset, count, XFS_DATA_FORK,
					&imap);
			goto found_imap;
		}

		xfs_trim_extent(&imap, offset_fsb, end_fsb - offset_fsb);

		/* Trim the mapping to the nearest shared extent boundary. */
		error = xfs_inode_need_cow(ip, &imap, &shared);
		if (error)
			goto out_unlock;

		/* Not shared?  Just report the (potentially capped) extent. */
		if (!shared) {
			trace_xfs_iomap_found(ip, offset, count, XFS_DATA_FORK,
					&imap);
			goto found_imap;
		}

		/*
		 * Fork all the shared blocks from our write offset until the
		 * end of the extent.
		 */
		whichfork = XFS_COW_FORK;
		end_fsb = imap.br_startoff + imap.br_blockcount;
	} else {
		/*
		 * We cap the maximum length we map here to MAX_WRITEBACK_PAGES
		 * pages to keep the chunks of work done somewhat symmetric
		 * with the work writeback does.  This is a completely
		 * arbitrary number pulled out of thin air.
		 *
		 * Note that the value needs to be less than 32-bits wide until
		 * the lower level functions are updated.
		 */
		count = min_t(loff_t, count, 1024 * PAGE_SIZE);
		end_fsb = min(XFS_B_TO_FSB(mp, offset + count), maxbytes_fsb);

		if (xfs_is_always_cow_inode(ip))
			whichfork = XFS_COW_FORK;
	}

	error = xfs_qm_dqattach_locked(ip, false);
	if (error)
		goto out_unlock;

	if (eof) {
		prealloc_blocks = xfs_iomap_prealloc_size(ip, whichfork, offset,
				count, &icur);
		if (prealloc_blocks) {
			xfs_extlen_t	align;
			xfs_off_t	end_offset;
			xfs_fileoff_t	p_end_fsb;

			end_offset = XFS_WRITEIO_ALIGN(mp, offset + count - 1);
			p_end_fsb = XFS_B_TO_FSBT(mp, end_offset) +
					prealloc_blocks;

			align = xfs_eof_alignment(ip, 0);
			if (align)
				p_end_fsb = roundup_64(p_end_fsb, align);

			p_end_fsb = min(p_end_fsb, maxbytes_fsb);
			ASSERT(p_end_fsb > offset_fsb);
			prealloc_blocks = p_end_fsb - end_fsb;
		}
	}

retry:
	error = xfs_bmapi_reserve_delalloc(ip, whichfork, offset_fsb,
			end_fsb - offset_fsb, prealloc_blocks,
			whichfork == XFS_DATA_FORK ? &imap : &cmap,
			whichfork == XFS_DATA_FORK ? &icur : &ccur,
			whichfork == XFS_DATA_FORK ? eof : cow_eof);
	switch (error) {
	case 0:
		break;
	case -ENOSPC:
	case -EDQUOT:
		/* retry without any preallocation */
		trace_xfs_delalloc_enospc(ip, offset, count);
		if (prealloc_blocks) {
			prealloc_blocks = 0;
			goto retry;
		}
		/*FALLTHRU*/
	default:
		goto out_unlock;
	}

	if (whichfork == XFS_COW_FORK) {
		trace_xfs_iomap_alloc(ip, offset, count, whichfork, &cmap);
		goto found_cow;
	}

	/*
	 * Flag newly allocated delalloc blocks with IOMAP_F_NEW so we punch
	 * them out if the write happens to fail.
	 */
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	trace_xfs_iomap_alloc(ip, offset, count, whichfork, &imap);
	return xfs_bmbt_to_iomap(ip, iomap, &imap, IOMAP_F_NEW);

found_imap:
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return xfs_bmbt_to_iomap(ip, iomap, &imap, 0);

found_cow:
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	if (imap.br_startoff <= offset_fsb) {
		/* ensure we only report blocks we have a reservation for */
		xfs_trim_extent(&imap, cmap.br_startoff, cmap.br_blockcount);
		return xfs_bmbt_to_iomap(ip, iomap, &imap, IOMAP_F_SHARED);
	}
	xfs_trim_extent(&cmap, offset_fsb, imap.br_startoff - offset_fsb);
	return xfs_bmbt_to_iomap(ip, iomap, &cmap, IOMAP_F_SHARED);

out_unlock:
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;
}

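/*
 * Convert the unwritten extents backing a completed write to written state,
 * looping over allocation transactions until the whole range is converted.
 */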
int
xfs_iomap_write_unwritten(
	xfs_inode_t	*ip,
	xfs_off_t	offset,
	xfs_off_t	count,
	bool		update_isize)
{
	xfs_mount_t	*mp = ip->i_mount;
	xfs_fileoff_t	offset_fsb;
	xfs_filblks_t	count_fsb;
	xfs_filblks_t	numblks_fsb;
	int		nimaps;
	xfs_trans_t	*tp;
	xfs_bmbt_irec_t imap;
	struct inode	*inode = VFS_I(ip);
	xfs_fsize_t	i_size;
	uint		resblks;
	int		error;

	trace_xfs_unwritten_convert(ip, offset, count);

	offset_fsb = XFS_B_TO_FSBT(mp, offset);
	count_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + count);
	count_fsb = (xfs_filblks_t)(count_fsb - offset_fsb);

	/*
	 * Reserve enough blocks in this transaction for two complete extent
	 * btree splits.  We may be converting the middle part of an unwritten
	 * extent and in this case we will insert two new extents in the btree
	 * each of which could cause a full split.
	 *
	 * This reservation amount will be used in the first call to
	 * xfs_bmbt_split() to select an AG with enough space to satisfy the
	 * rest of the operation.
	 */
	resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0) << 1;

	do {
		/*
		 * Set up a transaction to convert the range of extents
		 * from unwritten to real. Do allocations in a loop until
		 * we have covered the range passed in.
		 *
		 * Note that we can't risk recursing back into the filesystem
		 * here as we might be asked to write out the same inode that we
		 * complete here and might deadlock on the iolock.
		 */
		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks, 0,
				XFS_TRANS_RESERVE, &tp);
		if (error)
			return error;

		xfs_ilock(ip, XFS_ILOCK_EXCL);
		xfs_trans_ijoin(tp, ip, 0);

		/*
		 * Modify the unwritten extent state of the buffer.
		 */
		nimaps = 1;
		error = xfs_bmapi_write(tp, ip, offset_fsb, count_fsb,
					XFS_BMAPI_CONVERT, resblks, &imap,
					&nimaps);
		if (error)
			goto error_on_bmapi_transaction;

		/*
		 * Log the updated inode size as we go.  We have to be careful
		 * to only log it up to the actual write offset if it is
		 * halfway into a block.
		 */
		i_size = XFS_FSB_TO_B(mp, offset_fsb + count_fsb);
		if (i_size > offset + count)
			i_size = offset + count;
		if (update_isize && i_size > i_size_read(inode))
			i_size_write(inode, i_size);
		i_size = xfs_new_eof(ip, i_size);
		if (i_size) {
			ip->i_d.di_size = i_size;
			xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
		}

		error = xfs_trans_commit(tp);
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
		if (error)
			return error;

		if (unlikely(!xfs_valid_startblock(ip, imap.br_startblock)))
			return xfs_alert_fsblock_zero(ip, &imap);

		if ((numblks_fsb = imap.br_blockcount) == 0) {
			/*
			 * The numblks_fsb value should always get
			 * smaller, otherwise the loop is stuck.
			 */
			ASSERT(imap.br_blockcount);
			break;
		}
		offset_fsb += numblks_fsb;
		count_fsb -= numblks_fsb;
	} while (count_fsb > 0);

	return 0;

error_on_bmapi_transaction:
	xfs_trans_cancel(tp);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;
}

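/* Does this mapping need a real allocation before data can be written? */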
static inline bool
imap_needs_alloc(
	struct inode		*inode,
	struct xfs_bmbt_irec	*imap,
	int			nimaps)
{
	return !nimaps ||
		imap->br_startblock == HOLESTARTBLOCK ||
		imap->br_startblock == DELAYSTARTBLOCK ||
		(IS_DAX(inode) && imap->br_state == XFS_EXT_UNWRITTEN);
}

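/* Zeroing only needs COW if it would overwrite a written, non-hole extent. */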
static inline bool
needs_cow_for_zeroing(
	struct xfs_bmbt_irec	*imap,
	int			nimaps)
{
	return nimaps &&
		imap->br_startblock != HOLESTARTBLOCK &&
		imap->br_state != XFS_EXT_UNWRITTEN;
}

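/*
 * Take the ilock in the mode needed for this iomap operation, honouring
 * IOMAP_NOWAIT by returning -EAGAIN rather than blocking.
 */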
static int
xfs_ilock_for_iomap(
	struct xfs_inode	*ip,
	unsigned		flags,
	unsigned		*lockmode)
{
	unsigned		mode = XFS_ILOCK_SHARED;
	bool			is_write = flags & (IOMAP_WRITE | IOMAP_ZERO);

	/*
	 * COW writes may allocate delalloc space or convert unwritten COW
	 * extents, so we need to make sure to take the lock exclusively here.
	 */
	if (xfs_is_cow_inode(ip) && is_write) {
		/*
		 * FIXME: It could still overwrite on unshared extents and not
		 * need allocation.
		 */
		if (flags & IOMAP_NOWAIT)
			return -EAGAIN;
		mode = XFS_ILOCK_EXCL;
	}

	/*
	 * Extents not yet cached require exclusive access, don't block.  This
	 * is an opencoded xfs_ilock_data_map_shared() call but with
	 * non-blocking behaviour.
	 */
	if (!(ip->i_df.if_flags & XFS_IFEXTENTS)) {
		if (flags & IOMAP_NOWAIT)
			return -EAGAIN;
		mode = XFS_ILOCK_EXCL;
	}

relock:
	if (flags & IOMAP_NOWAIT) {
		if (!xfs_ilock_nowait(ip, mode))
			return -EAGAIN;
	} else {
		xfs_ilock(ip, mode);
	}

	/*
	 * The reflink iflag could have changed since the earlier unlocked
	 * check, so if we got ILOCK_SHARED for a write but we're now a
	 * reflink inode we have to switch to ILOCK_EXCL and relock.
	 */
	if (mode == XFS_ILOCK_SHARED && is_write && xfs_is_cow_inode(ip)) {
		xfs_iunlock(ip, mode);
		mode = XFS_ILOCK_EXCL;
		goto relock;
	}

	*lockmode = mode;
	return 0;
}

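/*
 * Main iomap_begin handler: hands buffered writes off to the delalloc path
 * above, and maps (allocating if necessary) everything else.
 */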
static int
xfs_file_iomap_begin(
	struct inode		*inode,
	loff_t			offset,
	loff_t			length,
	unsigned		flags,
	struct iomap		*iomap,
	struct iomap		*srcmap)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_bmbt_irec	imap;
	xfs_fileoff_t		offset_fsb, end_fsb;
	int			nimaps = 1, error = 0;
	bool			shared = false;
	u16			iomap_flags = 0;
	unsigned		lockmode;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	if ((flags & (IOMAP_WRITE | IOMAP_ZERO)) && !(flags & IOMAP_DIRECT) &&
			!IS_DAX(inode) && !xfs_get_extsz_hint(ip)) {
		/* Reserve delalloc blocks for regular writeback. */
		return xfs_file_iomap_begin_delay(inode, offset, length, flags,
				iomap);
	}

	/*
	 * Lock the inode in the manner required for the specified operation and
	 * check for as many conditions that would result in blocking as
	 * possible. This removes most of the non-blocking checks from the
	 * mapping code below.
	 */
	error = xfs_ilock_for_iomap(ip, flags, &lockmode);
	if (error)
		return error;

	ASSERT(offset <= mp->m_super->s_maxbytes);
	if (offset > mp->m_super->s_maxbytes - length)
		length = mp->m_super->s_maxbytes - offset;
	offset_fsb = XFS_B_TO_FSBT(mp, offset);
	end_fsb = XFS_B_TO_FSB(mp, offset + length);

	error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb, &imap,
			       &nimaps, 0);
	if (error)
		goto out_unlock;

	if (flags & IOMAP_REPORT) {
		/* Trim the mapping to the nearest shared extent boundary. */
		error = xfs_reflink_trim_around_shared(ip, &imap, &shared);
		if (error)
			goto out_unlock;
	}

	/* Non-modifying mapping requested, so we are done */
	if (!(flags & (IOMAP_WRITE | IOMAP_ZERO)))
		goto out_found;

	/*
	 * Break shared extents if necessary. Checks for non-blocking IO have
	 * been done up front, so we don't need to do them here.
	 */
	if (xfs_is_cow_inode(ip)) {
		struct xfs_bmbt_irec	cmap;
		bool			directio = (flags & IOMAP_DIRECT);

		/* if zeroing doesn't need COW allocation, then we are done. */
		if ((flags & IOMAP_ZERO) &&
		    !needs_cow_for_zeroing(&imap, nimaps))
			goto out_found;

		/* may drop and re-acquire the ilock */
		error = xfs_reflink_allocate_cow(ip, &imap, &cmap, &shared,
				&lockmode, directio);
		if (error)
			goto out_unlock;

		/*
		 * For buffered writes we need to report the address of the
		 * previous block (if there was any) so that the higher level
		 * write code can perform read-modify-write operations; we
		 * won't need the CoW fork mapping until writeback.  For direct
		 * I/O, which must be block aligned, we need to report the
		 * newly allocated address.  If the data fork has a hole, copy
		 * the COW fork mapping to avoid allocating to the data fork.
		 */
		if (shared &&
		    (directio || imap.br_startblock == HOLESTARTBLOCK))
			imap = cmap;

		end_fsb = imap.br_startoff + imap.br_blockcount;
		length = XFS_FSB_TO_B(mp, end_fsb) - offset;
	}

	/* Don't need to allocate over holes when doing zeroing operations. */
	if (flags & IOMAP_ZERO)
		goto out_found;

	if (!imap_needs_alloc(inode, &imap, nimaps))
		goto out_found;

	/* If nowait is set bail since we are going to make allocations. */
	if (flags & IOMAP_NOWAIT) {
		error = -EAGAIN;
		goto out_unlock;
	}

	/*
	 * We cap the maximum length we map to a sane size to keep the chunks
	 * of work done somewhat symmetric with the work writeback does.  This
	 * is a completely arbitrary number pulled out of thin air as a best
	 * guess for initial testing.
	 *
	 * Note that the value needs to be less than 32-bits wide until the
	 * lower level functions are updated.
	 */
	length = min_t(loff_t, length, 1024 * PAGE_SIZE);

	/*
	 * xfs_iomap_write_direct() expects the shared lock. It is unlocked on
	 * return.
	 */
	if (lockmode == XFS_ILOCK_EXCL)
		xfs_ilock_demote(ip, lockmode);
	error = xfs_iomap_write_direct(ip, offset, length, &imap,
			nimaps);
	if (error)
		return error;

	iomap_flags |= IOMAP_F_NEW;
	trace_xfs_iomap_alloc(ip, offset, length, XFS_DATA_FORK, &imap);

out_finish:
	/*
	 * Writes that span EOF might trigger an IO size update on completion,
	 * so consider them to be dirty for the purposes of O_DSYNC even if
	 * no other metadata changes are pending or have been made here.
	 */
	if ((flags & IOMAP_WRITE) && offset + length > i_size_read(inode))
		iomap_flags |= IOMAP_F_DIRTY;
	if (shared)
		iomap_flags |= IOMAP_F_SHARED;
	return xfs_bmbt_to_iomap(ip, iomap, &imap, iomap_flags);

out_found:
	ASSERT(nimaps);
	xfs_iunlock(ip, lockmode);
	trace_xfs_iomap_found(ip, offset, length, XFS_DATA_FORK, &imap);
	goto out_finish;

out_unlock:
	xfs_iunlock(ip, lockmode);
	return error;
}

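/*
 * Clean up after a buffered write: punch out delalloc blocks that were
 * reserved by this write (IOMAP_F_NEW) but not covered by the bytes actually
 * written.
 */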
static int
xfs_file_iomap_end_delalloc(
	struct xfs_inode	*ip,
	loff_t			offset,
	loff_t			length,
	ssize_t			written,
	struct iomap		*iomap)
{
	struct xfs_mount	*mp = ip->i_mount;
	xfs_fileoff_t		start_fsb;
	xfs_fileoff_t		end_fsb;
	int			error = 0;

	/*
	 * Behave as if the write failed if drop writes is enabled. Set the NEW
	 * flag to force delalloc cleanup.
	 */
	if (XFS_TEST_ERROR(false, mp, XFS_ERRTAG_DROP_WRITES)) {
		iomap->flags |= IOMAP_F_NEW;
		written = 0;
	}

	/*
	 * start_fsb refers to the first unused block after a short write. If
	 * nothing was written, round offset down to point at the first block in
	 * the range.
	 */
	if (unlikely(!written))
		start_fsb = XFS_B_TO_FSBT(mp, offset);
	else
		start_fsb = XFS_B_TO_FSB(mp, offset + written);
	end_fsb = XFS_B_TO_FSB(mp, offset + length);

	/*
	 * Trim delalloc blocks if they were allocated by this write and we
	 * didn't manage to write the whole range.
	 *
	 * We don't need to care about racing delalloc as we hold i_mutex
	 * across the reserve/allocate/unreserve calls. If there are delalloc
	 * blocks in the range, they are ours.
	 */
	if ((iomap->flags & IOMAP_F_NEW) && start_fsb < end_fsb) {
		truncate_pagecache_range(VFS_I(ip), XFS_FSB_TO_B(mp, start_fsb),
					 XFS_FSB_TO_B(mp, end_fsb) - 1);

		error = xfs_bmap_punch_delalloc_range(ip, start_fsb,
					       end_fsb - start_fsb);
		if (error && !XFS_FORCED_SHUTDOWN(mp)) {
			xfs_alert(mp, "%s: unable to clean up ino %lld",
				__func__, ip->i_ino);
			return error;
		}
	}

	return 0;
}

static int
xfs_file_iomap_end(
	struct inode		*inode,
	loff_t			offset,
	loff_t			length,
	ssize_t			written,
	unsigned		flags,
	struct iomap		*iomap)
{
	if ((flags & (IOMAP_WRITE | IOMAP_ZERO)) &&
	    iomap->type == IOMAP_DELALLOC)
		return xfs_file_iomap_end_delalloc(XFS_I(inode), offset,
				length, written, iomap);
	return 0;
}

const struct iomap_ops xfs_iomap_ops = {
	.iomap_begin		= xfs_file_iomap_begin,
	.iomap_end		= xfs_file_iomap_end,
};

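/*
 * iomap_begin handler used for SEEK_HOLE/SEEK_DATA: reports data, COW and
 * hole extents without allocating anything.
 */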
static int
xfs_seek_iomap_begin(
	struct inode		*inode,
	loff_t			offset,
	loff_t			length,
	unsigned		flags,
	struct iomap		*iomap,
	struct iomap		*srcmap)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	xfs_fileoff_t		offset_fsb = XFS_B_TO_FSBT(mp, offset);
	xfs_fileoff_t		end_fsb = XFS_B_TO_FSB(mp, offset + length);
	xfs_fileoff_t		cow_fsb = NULLFILEOFF, data_fsb = NULLFILEOFF;
	struct xfs_iext_cursor	icur;
	struct xfs_bmbt_irec	imap, cmap;
	int			error = 0;
	unsigned		lockmode;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	lockmode = xfs_ilock_data_map_shared(ip);
	if (!(ip->i_df.if_flags & XFS_IFEXTENTS)) {
		error = xfs_iread_extents(NULL, ip, XFS_DATA_FORK);
		if (error)
			goto out_unlock;
	}

	if (xfs_iext_lookup_extent(ip, &ip->i_df, offset_fsb, &icur, &imap)) {
		/*
		 * If we found a data extent we are done.
		 */
		if (imap.br_startoff <= offset_fsb)
			goto done;
		data_fsb = imap.br_startoff;
	} else {
		/*
		 * Fake a hole until the end of the file.
		 */
		data_fsb = min(XFS_B_TO_FSB(mp, offset + length),
			       XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes));
	}

	/*
	 * If a COW fork extent covers the hole, report it - capped to the next
	 * data fork extent:
	 */
	if (xfs_inode_has_cow_data(ip) &&
	    xfs_iext_lookup_extent(ip, ip->i_cowfp, offset_fsb, &icur, &cmap))
		cow_fsb = cmap.br_startoff;
	if (cow_fsb != NULLFILEOFF && cow_fsb <= offset_fsb) {
		if (data_fsb < cow_fsb + cmap.br_blockcount)
			end_fsb = min(end_fsb, data_fsb);
		xfs_trim_extent(&cmap, offset_fsb, end_fsb);
		error = xfs_bmbt_to_iomap(ip, iomap, &cmap, IOMAP_F_SHARED);
		/*
		 * This is a COW extent, so we must probe the page cache
		 * because there could be dirty page cache being backed
		 * by this extent.
		 */
		iomap->type = IOMAP_UNWRITTEN;
		goto out_unlock;
	}

	/*
	 * Else report a hole, capped to the next found data or COW extent.
	 */
	if (cow_fsb != NULLFILEOFF && cow_fsb < data_fsb)
		imap.br_blockcount = cow_fsb - offset_fsb;
	else
		imap.br_blockcount = data_fsb - offset_fsb;
	imap.br_startoff = offset_fsb;
	imap.br_startblock = HOLESTARTBLOCK;
	imap.br_state = XFS_EXT_NORM;
done:
	xfs_trim_extent(&imap, offset_fsb, end_fsb);
	error = xfs_bmbt_to_iomap(ip, iomap, &imap, 0);
out_unlock:
	xfs_iunlock(ip, lockmode);
	return error;
}

const struct iomap_ops xfs_seek_iomap_ops = {
	.iomap_begin		= xfs_seek_iomap_begin,
};

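/*
 * iomap_begin handler for the attribute fork, e.g. to let FIEMAP report
 * extended attribute extents.
 */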
static int
xfs_xattr_iomap_begin(
	struct inode		*inode,
	loff_t			offset,
	loff_t			length,
	unsigned		flags,
	struct iomap		*iomap,
	struct iomap		*srcmap)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	xfs_fileoff_t		offset_fsb = XFS_B_TO_FSBT(mp, offset);
	xfs_fileoff_t		end_fsb = XFS_B_TO_FSB(mp, offset + length);
	struct xfs_bmbt_irec	imap;
	int			nimaps = 1, error = 0;
	unsigned		lockmode;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	lockmode = xfs_ilock_attr_map_shared(ip);

	/* if there is no attribute fork or no extents, return ENOENT */
	if (!XFS_IFORK_Q(ip) || !ip->i_d.di_anextents) {
		error = -ENOENT;
		goto out_unlock;
	}

	ASSERT(ip->i_d.di_aformat != XFS_DINODE_FMT_LOCAL);
	error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb, &imap,
			       &nimaps, XFS_BMAPI_ATTRFORK);
out_unlock:
	xfs_iunlock(ip, lockmode);

	if (error)
		return error;
	ASSERT(nimaps);
	return xfs_bmbt_to_iomap(ip, iomap, &imap, 0);
}

const struct iomap_ops xfs_xattr_iomap_ops = {
	.iomap_begin		= xfs_xattr_iomap_begin,
};