// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * Copyright (c) 2016-2018 Christoph Hellwig.
 * All Rights Reserved.
 */
#include <linux/iomap.h>
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_bmap_btree.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_errortag.h"
#include "xfs_error.h"
#include "xfs_trans.h"
#include "xfs_trans_space.h"
#include "xfs_inode_item.h"
#include "xfs_iomap.h"
#include "xfs_trace.h"
#include "xfs_icache.h"
#include "xfs_quota.h"
#include "xfs_dquot_item.h"
#include "xfs_dquot.h"
#include "xfs_reflink.h"


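/*
 * XFS_WRITEIO_ALIGN() rounds a byte offset down to the filesystem's write
 * I/O granularity of 2^m_writeio_log bytes.  E.g. with the default 64k
 * write size (m_writeio_log == 16), offset 0x1ffff aligns down to 0x10000.
 */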
#define XFS_WRITEIO_ALIGN(mp,off)	(((off) >> mp->m_writeio_log) \
						<< mp->m_writeio_log)

static int
xfs_alert_fsblock_zero(
	xfs_inode_t	*ip,
	xfs_bmbt_irec_t	*imap)
{
	xfs_alert_tag(ip->i_mount, XFS_PTAG_FSBLOCK_ZERO,
			"Access to block zero in inode %llu "
			"start_block: %llx start_off: %llx "
			"blkcnt: %llx extent-state: %x",
		(unsigned long long)ip->i_ino,
		(unsigned long long)imap->br_startblock,
		(unsigned long long)imap->br_startoff,
		(unsigned long long)imap->br_blockcount,
		imap->br_state);
	return -EFSCORRUPTED;
}

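/*
 * Translate an XFS extent mapping (struct xfs_bmbt_irec) into the generic
 * struct iomap consumed by the VFS iomap infrastructure.
 */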
int
xfs_bmbt_to_iomap(
	struct xfs_inode	*ip,
	struct iomap		*iomap,
	struct xfs_bmbt_irec	*imap,
	bool			shared)
{
	struct xfs_mount	*mp = ip->i_mount;

	if (unlikely(!imap->br_startblock && !XFS_IS_REALTIME_INODE(ip)))
		return xfs_alert_fsblock_zero(ip, imap);

	if (imap->br_startblock == HOLESTARTBLOCK) {
		iomap->addr = IOMAP_NULL_ADDR;
		iomap->type = IOMAP_HOLE;
	} else if (imap->br_startblock == DELAYSTARTBLOCK ||
		   isnullstartblock(imap->br_startblock)) {
		iomap->addr = IOMAP_NULL_ADDR;
		iomap->type = IOMAP_DELALLOC;
	} else {
		iomap->addr = BBTOB(xfs_fsb_to_db(ip, imap->br_startblock));
		if (imap->br_state == XFS_EXT_UNWRITTEN)
			iomap->type = IOMAP_UNWRITTEN;
		else
			iomap->type = IOMAP_MAPPED;
	}
	iomap->offset = XFS_FSB_TO_B(mp, imap->br_startoff);
	iomap->length = XFS_FSB_TO_B(mp, imap->br_blockcount);
	iomap->bdev = xfs_find_bdev_for_inode(VFS_I(ip));
	iomap->dax_dev = xfs_find_daxdev_for_inode(VFS_I(ip));

	if (xfs_ipincount(ip) &&
	    (ip->i_itemp->ili_fsync_fields & ~XFS_ILOG_TIMESTAMP))
		iomap->flags |= IOMAP_F_DIRTY;
	if (shared)
		iomap->flags |= IOMAP_F_SHARED;
	return 0;
}

static void
xfs_hole_to_iomap(
	struct xfs_inode	*ip,
	struct iomap		*iomap,
	xfs_fileoff_t		offset_fsb,
	xfs_fileoff_t		end_fsb)
{
	iomap->addr = IOMAP_NULL_ADDR;
	iomap->type = IOMAP_HOLE;
	iomap->offset = XFS_FSB_TO_B(ip->i_mount, offset_fsb);
	iomap->length = XFS_FSB_TO_B(ip->i_mount, end_fsb - offset_fsb);
	iomap->bdev = xfs_find_bdev_for_inode(VFS_I(ip));
	iomap->dax_dev = xfs_find_daxdev_for_inode(VFS_I(ip));
}

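/*
 * Compute the alignment, in filesystem blocks, that allocations extending
 * EOF should be rounded up to: the stripe unit (or stripe width with
 * "-o swalloc"), further rounded up to any extent size hint.
 */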
xfs_extlen_t
xfs_eof_alignment(
	struct xfs_inode	*ip,
	xfs_extlen_t		extsize)
{
	struct xfs_mount	*mp = ip->i_mount;
	xfs_extlen_t		align = 0;

	if (!XFS_IS_REALTIME_INODE(ip)) {
		/*
		 * Round up the allocation request to a stripe unit
		 * (m_dalign) boundary if the file size is >= stripe unit
		 * size, and we are allocating past the allocation eof.
		 *
		 * If mounted with the "-o swalloc" option the alignment is
		 * increased from the stripe unit size to the stripe width.
		 */
		if (mp->m_swidth && (mp->m_flags & XFS_MOUNT_SWALLOC))
			align = mp->m_swidth;
		else if (mp->m_dalign)
			align = mp->m_dalign;

		if (align && XFS_ISIZE(ip) < XFS_FSB_TO_B(mp, align))
			align = 0;
	}

	/*
	 * Always round up the allocation request to an extent boundary
	 * (when file on a real-time subvolume or has di_extsize hint).
	 */
	if (extsize) {
		if (align)
			align = roundup_64(align, extsize);
		else
			align = extsize;
	}

	return align;
}

STATIC int
xfs_iomap_eof_align_last_fsb(
	struct xfs_inode	*ip,
	xfs_extlen_t		extsize,
	xfs_fileoff_t		*last_fsb)
{
	xfs_extlen_t		align = xfs_eof_alignment(ip, extsize);

	if (align) {
		xfs_fileoff_t	new_last_fsb = roundup_64(*last_fsb, align);
		int		eof, error;

		error = xfs_bmap_eof(ip, new_last_fsb, XFS_DATA_FORK, &eof);
		if (error)
			return error;
		if (eof)
			*last_fsb = new_last_fsb;
	}
	return 0;
}

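/*
 * Allocate real extents for a direct I/O or DAX write.  Expects the ilock
 * to be held shared by the caller; the lock is cycled for transaction
 * setup and the inode is unlocked again on return.
 */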
int
xfs_iomap_write_direct(
	xfs_inode_t	*ip,
	xfs_off_t	offset,
	size_t		count,
	xfs_bmbt_irec_t *imap,
	int		nmaps)
{
	xfs_mount_t	*mp = ip->i_mount;
	xfs_fileoff_t	offset_fsb;
	xfs_fileoff_t	last_fsb;
	xfs_filblks_t	count_fsb, resaligned;
	xfs_extlen_t	extsz;
	int		nimaps;
	int		quota_flag;
	int		rt;
	xfs_trans_t	*tp;
	uint		qblocks, resblks, resrtextents;
	int		error;
	int		lockmode;
	int		bmapi_flags = XFS_BMAPI_PREALLOC;
	uint		tflags = 0;

	rt = XFS_IS_REALTIME_INODE(ip);
	extsz = xfs_get_extsz_hint(ip);
	lockmode = XFS_ILOCK_SHARED;	/* locked by caller */

	ASSERT(xfs_isilocked(ip, lockmode));

	offset_fsb = XFS_B_TO_FSBT(mp, offset);
	last_fsb = XFS_B_TO_FSB(mp, ((xfs_ufsize_t)(offset + count)));
	if ((offset + count) > XFS_ISIZE(ip)) {
		/*
		 * Assert that the in-core extent list is present since this can
		 * call xfs_iread_extents() and we only have the ilock shared.
		 * This should be safe because the lock was held around a bmapi
		 * call in the caller and we only need it to access the in-core
		 * list.
		 */
		ASSERT(XFS_IFORK_PTR(ip, XFS_DATA_FORK)->if_flags &
								XFS_IFEXTENTS);
		error = xfs_iomap_eof_align_last_fsb(ip, extsz, &last_fsb);
		if (error)
			goto out_unlock;
	} else {
		if (nmaps && (imap->br_startblock == HOLESTARTBLOCK))
			last_fsb = min(last_fsb, (xfs_fileoff_t)
					imap->br_blockcount +
					imap->br_startoff);
	}
	count_fsb = last_fsb - offset_fsb;
	ASSERT(count_fsb > 0);
	resaligned = xfs_aligned_fsb_count(offset_fsb, count_fsb, extsz);

	if (unlikely(rt)) {
		resrtextents = qblocks = resaligned;
		resrtextents /= mp->m_sb.sb_rextsize;
		resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
		quota_flag = XFS_QMOPT_RES_RTBLKS;
	} else {
		resrtextents = 0;
		resblks = qblocks = XFS_DIOSTRAT_SPACE_RES(mp, resaligned);
		quota_flag = XFS_QMOPT_RES_REGBLKS;
	}

	/*
	 * Drop the shared lock acquired by the caller, attach the dquot if
	 * necessary and move on to transaction setup.
	 */
	xfs_iunlock(ip, lockmode);
	error = xfs_qm_dqattach(ip);
	if (error)
		return error;

	/*
	 * For DAX, we do not allocate unwritten extents, but instead we zero
	 * the block before we commit the transaction.  Ideally we'd like to do
	 * this outside the transaction context, but if we commit and then crash
	 * we may not have zeroed the blocks and this will be exposed on
	 * recovery of the allocation. Hence we must zero before commit.
	 *
	 * Further, if we are mapping unwritten extents here, we need to zero
	 * and convert them to written so that we don't need an unwritten extent
	 * callback for DAX. This also means that we need to be able to dip into
	 * the reserve block pool for bmbt block allocation if there is no space
	 * left but we need to do unwritten extent conversion.
	 */
	if (IS_DAX(VFS_I(ip))) {
		bmapi_flags = XFS_BMAPI_CONVERT | XFS_BMAPI_ZERO;
		if (imap->br_state == XFS_EXT_UNWRITTEN) {
			tflags |= XFS_TRANS_RESERVE;
			resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0) << 1;
		}
	}
	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks, resrtextents,
			tflags, &tp);
	if (error)
		return error;

	lockmode = XFS_ILOCK_EXCL;
	xfs_ilock(ip, lockmode);

	error = xfs_trans_reserve_quota_nblks(tp, ip, qblocks, 0, quota_flag);
	if (error)
		goto out_trans_cancel;

	xfs_trans_ijoin(tp, ip, 0);

	/*
	 * From this point onwards we overwrite the imap pointer that the
	 * caller gave to us.
	 */
	nimaps = 1;
	error = xfs_bmapi_write(tp, ip, offset_fsb, count_fsb,
				bmapi_flags, resblks, imap, &nimaps);
	if (error)
		goto out_res_cancel;

	/*
	 * Complete the transaction
	 */
	error = xfs_trans_commit(tp);
	if (error)
		goto out_unlock;

	/*
	 * Copy any maps to caller's array and return any error.
	 */
	if (nimaps == 0) {
		error = -ENOSPC;
		goto out_unlock;
	}

	if (!(imap->br_startblock || XFS_IS_REALTIME_INODE(ip)))
		error = xfs_alert_fsblock_zero(ip, imap);

out_unlock:
	xfs_iunlock(ip, lockmode);
	return error;

out_res_cancel:
	xfs_trans_unreserve_quota_nblks(tp, ip, (long)qblocks, 0, quota_flag);
out_trans_cancel:
	xfs_trans_cancel(tp);
	goto out_unlock;
}

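/*
 * Decide whether this quota should throttle speculative preallocation:
 * only if the quota is active, has a high watermark configured, and the
 * projected usage is at or above the low watermark.
 */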
STATIC bool
xfs_quota_need_throttle(
	struct xfs_inode *ip,
	int type,
	xfs_fsblock_t alloc_blocks)
{
	struct xfs_dquot *dq = xfs_inode_dquot(ip, type);

	if (!dq || !xfs_this_quota_on(ip->i_mount, type))
		return false;

	/* no hi watermark, no throttle */
	if (!dq->q_prealloc_hi_wmark)
		return false;

	/* under the lo watermark, no throttle */
	if (dq->q_res_bcount + alloc_blocks < dq->q_prealloc_lo_wmark)
		return false;

	return true;
}

STATIC void
xfs_quota_calc_throttle(
	struct xfs_inode *ip,
	int type,
	xfs_fsblock_t *qblocks,
	int *qshift,
	int64_t	*qfreesp)
{
	int64_t freesp;
	int shift = 0;
	struct xfs_dquot *dq = xfs_inode_dquot(ip, type);

	/* no dq, or over hi wmark, squash the prealloc completely */
	if (!dq || dq->q_res_bcount >= dq->q_prealloc_hi_wmark) {
		*qblocks = 0;
		*qfreesp = 0;
		return;
	}

	freesp = dq->q_prealloc_hi_wmark - dq->q_res_bcount;
	if (freesp < dq->q_low_space[XFS_QLOWSP_5_PCNT]) {
		shift = 2;
		if (freesp < dq->q_low_space[XFS_QLOWSP_3_PCNT])
			shift += 2;
		if (freesp < dq->q_low_space[XFS_QLOWSP_1_PCNT])
			shift += 2;
	}

	if (freesp < *qfreesp)
		*qfreesp = freesp;

	/* only overwrite the throttle values if we are more aggressive */
	if ((freesp >> shift) < (*qblocks >> *qshift)) {
		*qblocks = freesp;
		*qshift = shift;
	}
}

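/*
 * Worked example for the sizing logic below (illustrative numbers): a
 * previous data extent of 1024 blocks doubles to a 2048 block candidate;
 * if free space has dropped below the 3% low-space threshold the shift
 * reaches 4, cutting the preallocation to 2048 >> 4 = 128 blocks (already
 * a power of two), which still sits above the m_writeio_blocks floor.
 */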
/*
 * If we are doing a write at the end of the file and there are no allocations
 * past this one, then extend the allocation out to the file system's write
 * iosize.
 *
 * If we don't have a user specified preallocation size, dynamically increase
 * the preallocation size as the size of the file grows.  Cap the maximum size
 * at a single extent or less if the filesystem is near full. The closer the
 * filesystem is to full, the smaller the maximum preallocation.
 *
 * As an exception we don't do any preallocation at all if the file is smaller
 * than the minimum preallocation and we are using the default dynamic
 * preallocation scheme, as it is likely this is the only write to the file that
 * is going to be done.
 *
 * We clean up any extra space left over when the file is closed in
 * xfs_inactive().
 */
STATIC xfs_fsblock_t
xfs_iomap_prealloc_size(
	struct xfs_inode	*ip,
	loff_t			offset,
	loff_t			count,
	struct xfs_iext_cursor	*icur)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
	xfs_fileoff_t		offset_fsb = XFS_B_TO_FSBT(mp, offset);
	struct xfs_bmbt_irec	prev;
	int			shift = 0;
	int64_t			freesp;
	xfs_fsblock_t		qblocks;
	int			qshift = 0;
	xfs_fsblock_t		alloc_blocks = 0;

	if (offset + count <= XFS_ISIZE(ip))
		return 0;

	if (!(mp->m_flags & XFS_MOUNT_DFLT_IOSIZE) &&
	    (XFS_ISIZE(ip) < XFS_FSB_TO_B(mp, mp->m_writeio_blocks)))
		return 0;

	/*
	 * If an explicit allocsize is set, the file is small, or we
	 * are writing behind a hole, then use the minimum prealloc:
	 */
	if ((mp->m_flags & XFS_MOUNT_DFLT_IOSIZE) ||
	    XFS_ISIZE(ip) < XFS_FSB_TO_B(mp, mp->m_dalign) ||
	    !xfs_iext_peek_prev_extent(ifp, icur, &prev) ||
	    prev.br_startoff + prev.br_blockcount < offset_fsb)
		return mp->m_writeio_blocks;

	/*
	 * Determine the initial size of the preallocation. We are beyond the
	 * current EOF here, but we need to take into account whether this is
	 * a sparse write or an extending write when determining the
	 * preallocation size.  Hence we need to look up the extent that ends
	 * at the current write offset and use the result to determine the
	 * preallocation size.
	 *
	 * If the extent is a hole, then preallocation is essentially disabled.
	 * Otherwise we take the size of the preceding data extent as the basis
	 * for the preallocation size. If the size of the extent is greater than
	 * half the maximum extent length, then use the current offset as the
	 * basis. This ensures that for large files the preallocation size
	 * always extends to MAXEXTLEN rather than falling short due to things
	 * like stripe unit/width alignment of real extents.
	 */
	if (prev.br_blockcount <= (MAXEXTLEN >> 1))
		alloc_blocks = prev.br_blockcount << 1;
	else
		alloc_blocks = XFS_B_TO_FSB(mp, offset);
	if (!alloc_blocks)
		goto check_writeio;
	qblocks = alloc_blocks;

	/*
	 * MAXEXTLEN is not a power of two value but we round the prealloc down
	 * to the nearest power of two value after throttling. To prevent the
	 * round down from unconditionally reducing the maximum supported prealloc
	 * size, we round up first, apply appropriate throttling, round down and
	 * cap the value to MAXEXTLEN.
	 */
	alloc_blocks = XFS_FILEOFF_MIN(roundup_pow_of_two(MAXEXTLEN),
				       alloc_blocks);

	freesp = percpu_counter_read_positive(&mp->m_fdblocks);
	if (freesp < mp->m_low_space[XFS_LOWSP_5_PCNT]) {
		shift = 2;
		if (freesp < mp->m_low_space[XFS_LOWSP_4_PCNT])
			shift++;
		if (freesp < mp->m_low_space[XFS_LOWSP_3_PCNT])
			shift++;
		if (freesp < mp->m_low_space[XFS_LOWSP_2_PCNT])
			shift++;
		if (freesp < mp->m_low_space[XFS_LOWSP_1_PCNT])
			shift++;
	}

	/*
	 * Check each quota to cap the prealloc size, provide a shift value to
	 * throttle with and adjust amount of available space.
	 */
	if (xfs_quota_need_throttle(ip, XFS_DQ_USER, alloc_blocks))
		xfs_quota_calc_throttle(ip, XFS_DQ_USER, &qblocks, &qshift,
					&freesp);
	if (xfs_quota_need_throttle(ip, XFS_DQ_GROUP, alloc_blocks))
		xfs_quota_calc_throttle(ip, XFS_DQ_GROUP, &qblocks, &qshift,
					&freesp);
	if (xfs_quota_need_throttle(ip, XFS_DQ_PROJ, alloc_blocks))
		xfs_quota_calc_throttle(ip, XFS_DQ_PROJ, &qblocks, &qshift,
					&freesp);

	/*
	 * The final prealloc size is set to the minimum of free space available
	 * in each of the quotas and the overall filesystem.
	 *
	 * The shift throttle value is set to the maximum value as determined by
	 * the global low free space values and per-quota low free space values.
	 */
	alloc_blocks = min(alloc_blocks, qblocks);
	shift = max(shift, qshift);

	if (shift)
		alloc_blocks >>= shift;
	/*
	 * rounddown_pow_of_two() returns an undefined result if we pass in
	 * alloc_blocks = 0.
	 */
	if (alloc_blocks)
		alloc_blocks = rounddown_pow_of_two(alloc_blocks);
	if (alloc_blocks > MAXEXTLEN)
		alloc_blocks = MAXEXTLEN;

	/*
	 * If we are still trying to allocate more space than is
	 * available, squash the prealloc hard. This can happen if we
	 * have a large file on a small filesystem and the above
	 * lowspace thresholds are smaller than MAXEXTLEN.
	 */
	while (alloc_blocks && alloc_blocks >= freesp)
		alloc_blocks >>= 4;
check_writeio:
	if (alloc_blocks < mp->m_writeio_blocks)
		alloc_blocks = mp->m_writeio_blocks;
	trace_xfs_iomap_prealloc_size(ip, alloc_blocks, shift,
				      mp->m_writeio_blocks);
	return alloc_blocks;
}

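/*
 * Set up a delalloc reservation (and, for reflink files, a COW fork
 * reservation) for a buffered write, including any speculative EOF
 * preallocation.
 */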
static int
xfs_file_iomap_begin_delay(
	struct inode		*inode,
	loff_t			offset,
	loff_t			count,
	unsigned		flags,
	struct iomap		*iomap)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	xfs_fileoff_t		offset_fsb = XFS_B_TO_FSBT(mp, offset);
	xfs_fileoff_t		maxbytes_fsb =
		XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);
	xfs_fileoff_t		end_fsb;
	struct xfs_bmbt_irec	imap, cmap;
	struct xfs_iext_cursor	icur, ccur;
	xfs_fsblock_t		prealloc_blocks = 0;
	bool			eof = false, cow_eof = false, shared = false;
	int			whichfork = XFS_DATA_FORK;
	int			error = 0;

	ASSERT(!XFS_IS_REALTIME_INODE(ip));
	ASSERT(!xfs_get_extsz_hint(ip));

	xfs_ilock(ip, XFS_ILOCK_EXCL);

	if (unlikely(XFS_TEST_ERROR(
	    (XFS_IFORK_FORMAT(ip, XFS_DATA_FORK) != XFS_DINODE_FMT_EXTENTS &&
	     XFS_IFORK_FORMAT(ip, XFS_DATA_FORK) != XFS_DINODE_FMT_BTREE),
	     mp, XFS_ERRTAG_BMAPIFORMAT))) {
		XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, mp);
		error = -EFSCORRUPTED;
		goto out_unlock;
	}

	XFS_STATS_INC(mp, xs_blk_mapw);

	if (!(ip->i_df.if_flags & XFS_IFEXTENTS)) {
		error = xfs_iread_extents(NULL, ip, XFS_DATA_FORK);
		if (error)
			goto out_unlock;
	}

	end_fsb = min(XFS_B_TO_FSB(mp, offset + count), maxbytes_fsb);

	/*
	 * Search the data fork first to look up our source mapping.  We
	 * always need the data fork map, as we have to return it to the
	 * iomap code so that the higher level write code can read data in to
	 * perform read-modify-write cycles for unaligned writes.
	 */
	eof = !xfs_iext_lookup_extent(ip, &ip->i_df, offset_fsb, &icur, &imap);
	if (eof)
		imap.br_startoff = end_fsb; /* fake hole until the end */

	/* We never need to allocate blocks for zeroing a hole. */
	if ((flags & IOMAP_ZERO) && imap.br_startoff > offset_fsb) {
		xfs_hole_to_iomap(ip, iomap, offset_fsb, imap.br_startoff);
		goto out_unlock;
	}

	/*
	 * Search the COW fork extent list even if we did not find a data fork
	 * extent.  This serves two purposes: first this implements the
	 * speculative preallocation using cowextsize, so that we also unshare
	 * blocks adjacent to shared blocks instead of just the shared blocks
	 * themselves.  Second the lookup in the extent list is generally faster
	 * than going out to the shared extent tree.
	 */
	if (xfs_is_reflink_inode(ip)) {
		cow_eof = !xfs_iext_lookup_extent(ip, ip->i_cowfp, offset_fsb,
				&ccur, &cmap);
		if (!cow_eof && cmap.br_startoff <= offset_fsb) {
			trace_xfs_reflink_cow_found(ip, &cmap);
			whichfork = XFS_COW_FORK;
			goto done;
		}
	}

	if (imap.br_startoff <= offset_fsb) {
		/*
		 * For reflink files we may need a delalloc reservation when
		 * overwriting shared extents.   This includes zeroing of
		 * existing extents that contain data.
		 */
		if (!xfs_is_reflink_inode(ip) ||
		    ((flags & IOMAP_ZERO) && imap.br_state != XFS_EXT_NORM)) {
			trace_xfs_iomap_found(ip, offset, count, XFS_DATA_FORK,
					&imap);
			goto done;
		}

		xfs_trim_extent(&imap, offset_fsb, end_fsb - offset_fsb);

		/* Trim the mapping to the nearest shared extent boundary. */
		error = xfs_reflink_trim_around_shared(ip, &imap, &shared);
		if (error)
			goto out_unlock;

		/* Not shared?  Just report the (potentially capped) extent. */
		if (!shared) {
			trace_xfs_iomap_found(ip, offset, count, XFS_DATA_FORK,
					&imap);
			goto done;
		}

		/*
		 * Fork all the shared blocks from our write offset until the
		 * end of the extent.
		 */
		whichfork = XFS_COW_FORK;
		end_fsb = imap.br_startoff + imap.br_blockcount;
	} else {
		/*
		 * We cap the maximum length we map here to MAX_WRITEBACK_PAGES
		 * pages to keep the chunks of work done here somewhat
		 * symmetric with the work writeback does.  This is a completely
		 * arbitrary number pulled out of thin air.
		 *
		 * Note that the value needs to be less than 32-bits wide until
		 * the lower level functions are updated.
		 */
		count = min_t(loff_t, count, 1024 * PAGE_SIZE);
		end_fsb = min(XFS_B_TO_FSB(mp, offset + count), maxbytes_fsb);
	}

	error = xfs_qm_dqattach_locked(ip, false);
	if (error)
		goto out_unlock;

	if (eof && whichfork == XFS_DATA_FORK) {
		prealloc_blocks = xfs_iomap_prealloc_size(ip, offset, count,
				&icur);
		if (prealloc_blocks) {
			xfs_extlen_t	align;
			xfs_off_t	end_offset;
			xfs_fileoff_t	p_end_fsb;

			end_offset = XFS_WRITEIO_ALIGN(mp, offset + count - 1);
			p_end_fsb = XFS_B_TO_FSBT(mp, end_offset) +
					prealloc_blocks;

			align = xfs_eof_alignment(ip, 0);
			if (align)
				p_end_fsb = roundup_64(p_end_fsb, align);

			p_end_fsb = min(p_end_fsb, maxbytes_fsb);
			ASSERT(p_end_fsb > offset_fsb);
			prealloc_blocks = p_end_fsb - end_fsb;
		}
	}

retry:
	error = xfs_bmapi_reserve_delalloc(ip, whichfork, offset_fsb,
			end_fsb - offset_fsb, prealloc_blocks,
			whichfork == XFS_DATA_FORK ? &imap : &cmap,
			whichfork == XFS_DATA_FORK ? &icur : &ccur,
			whichfork == XFS_DATA_FORK ? eof : cow_eof);
	switch (error) {
	case 0:
		break;
	case -ENOSPC:
	case -EDQUOT:
		/* retry without any preallocation */
		trace_xfs_delalloc_enospc(ip, offset, count);
		if (prealloc_blocks) {
			prealloc_blocks = 0;
			goto retry;
		}
		/*FALLTHRU*/
	default:
		goto out_unlock;
	}

	/*
	 * Flag newly allocated delalloc blocks with IOMAP_F_NEW so we punch
	 * them out if the write happens to fail.
	 */
	iomap->flags |= IOMAP_F_NEW;
	trace_xfs_iomap_alloc(ip, offset, count, whichfork,
			whichfork == XFS_DATA_FORK ? &imap : &cmap);
done:
	if (whichfork == XFS_COW_FORK) {
		if (imap.br_startoff > offset_fsb) {
			xfs_trim_extent(&cmap, offset_fsb,
					imap.br_startoff - offset_fsb);
			error = xfs_bmbt_to_iomap(ip, iomap, &cmap, true);
			goto out_unlock;
		}
		/* ensure we only report blocks we have a reservation for */
		xfs_trim_extent(&imap, cmap.br_startoff, cmap.br_blockcount);
		shared = true;
	}
	error = xfs_bmbt_to_iomap(ip, iomap, &imap, shared);
out_unlock:
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;
}

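/*
 * Convert unwritten extents in the given byte range to written, one
 * transaction at a time, logging any inode size update as we go.
 */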
int
xfs_iomap_write_unwritten(
	xfs_inode_t	*ip,
	xfs_off_t	offset,
	xfs_off_t	count,
	bool		update_isize)
{
	xfs_mount_t	*mp = ip->i_mount;
	xfs_fileoff_t	offset_fsb;
	xfs_filblks_t	count_fsb;
	xfs_filblks_t	numblks_fsb;
	int		nimaps;
	xfs_trans_t	*tp;
	xfs_bmbt_irec_t imap;
	struct inode	*inode = VFS_I(ip);
	xfs_fsize_t	i_size;
	uint		resblks;
	int		error;

	trace_xfs_unwritten_convert(ip, offset, count);

	offset_fsb = XFS_B_TO_FSBT(mp, offset);
	count_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + count);
	count_fsb = (xfs_filblks_t)(count_fsb - offset_fsb);

	/*
	 * Reserve enough blocks in this transaction for two complete extent
	 * btree splits.  We may be converting the middle part of an unwritten
	 * extent and in this case we will insert two new extents in the btree
	 * each of which could cause a full split.
	 *
	 * This reservation amount will be used in the first call to
	 * xfs_bmbt_split() to select an AG with enough space to satisfy the
	 * rest of the operation.
	 */
	resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0) << 1;

	do {
		/*
		 * Set up a transaction to convert the range of extents
		 * from unwritten to real. Do allocations in a loop until
		 * we have covered the range passed in.
		 *
		 * Note that we can't risk recursing back into the filesystem
		 * here as we might be asked to write out the same inode that we
		 * complete here and might deadlock on the iolock.
		 */
		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks, 0,
				XFS_TRANS_RESERVE | XFS_TRANS_NOFS, &tp);
		if (error)
			return error;

		xfs_ilock(ip, XFS_ILOCK_EXCL);
		xfs_trans_ijoin(tp, ip, 0);

		/*
		 * Modify the unwritten extent state of the buffer.
		 */
		nimaps = 1;
		error = xfs_bmapi_write(tp, ip, offset_fsb, count_fsb,
					XFS_BMAPI_CONVERT, resblks, &imap,
					&nimaps);
		if (error)
			goto error_on_bmapi_transaction;

		/*
		 * Log the updated inode size as we go.  We have to be careful
		 * to only log it up to the actual write offset if it is
		 * halfway into a block.
		 */
		i_size = XFS_FSB_TO_B(mp, offset_fsb + count_fsb);
		if (i_size > offset + count)
			i_size = offset + count;
		if (update_isize && i_size > i_size_read(inode))
			i_size_write(inode, i_size);
		i_size = xfs_new_eof(ip, i_size);
		if (i_size) {
			ip->i_d.di_size = i_size;
			xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
		}

		error = xfs_trans_commit(tp);
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
		if (error)
			return error;

		if (!(imap.br_startblock || XFS_IS_REALTIME_INODE(ip)))
			return xfs_alert_fsblock_zero(ip, &imap);

		if ((numblks_fsb = imap.br_blockcount) == 0) {
			/*
			 * The numblks_fsb value should always get
			 * smaller, otherwise the loop is stuck.
			 */
			ASSERT(imap.br_blockcount);
			break;
		}
		offset_fsb += numblks_fsb;
		count_fsb -= numblks_fsb;
	} while (count_fsb > 0);

	return 0;

error_on_bmapi_transaction:
	xfs_trans_cancel(tp);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;
}

static inline bool
imap_needs_alloc(
	struct inode		*inode,
	struct xfs_bmbt_irec	*imap,
	int			nimaps)
{
	return !nimaps ||
		imap->br_startblock == HOLESTARTBLOCK ||
		imap->br_startblock == DELAYSTARTBLOCK ||
		(IS_DAX(inode) && imap->br_state == XFS_EXT_UNWRITTEN);
}

static inline bool
needs_cow_for_zeroing(
	struct xfs_bmbt_irec	*imap,
	int			nimaps)
{
	return nimaps &&
		imap->br_startblock != HOLESTARTBLOCK &&
		imap->br_state != XFS_EXT_UNWRITTEN;
}

static int
xfs_ilock_for_iomap(
	struct xfs_inode	*ip,
	unsigned		flags,
	unsigned		*lockmode)
{
	unsigned		mode = XFS_ILOCK_SHARED;
	bool			is_write = flags & (IOMAP_WRITE | IOMAP_ZERO);

	/*
	 * COW writes may allocate delalloc space or convert unwritten COW
	 * extents, so we need to make sure to take the lock exclusively here.
	 */
	if (xfs_is_reflink_inode(ip) && is_write) {
		/*
		 * FIXME: It could still overwrite on unshared extents and not
		 * need allocation.
		 */
		if (flags & IOMAP_NOWAIT)
			return -EAGAIN;
		mode = XFS_ILOCK_EXCL;
	}

	/*
	 * Extents not yet cached require exclusive access, don't block.  This
	 * is an opencoded xfs_ilock_data_map_shared() call but with
	 * non-blocking behaviour.
	 */
	if (!(ip->i_df.if_flags & XFS_IFEXTENTS)) {
		if (flags & IOMAP_NOWAIT)
			return -EAGAIN;
		mode = XFS_ILOCK_EXCL;
	}

relock:
	if (flags & IOMAP_NOWAIT) {
		if (!xfs_ilock_nowait(ip, mode))
			return -EAGAIN;
	} else {
		xfs_ilock(ip, mode);
	}

	/*
	 * The reflink iflag could have changed since the earlier unlocked
	 * check, so if we got ILOCK_SHARED for a write but we're now a
	 * reflink inode we have to switch to ILOCK_EXCL and relock.
	 */
	if (mode == XFS_ILOCK_SHARED && is_write && xfs_is_reflink_inode(ip)) {
		xfs_iunlock(ip, mode);
		mode = XFS_ILOCK_EXCL;
		goto relock;
	}

	*lockmode = mode;
	return 0;
}

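/*
 * Main ->iomap_begin handler for file data: buffered writes without extent
 * size hints are handed to the delalloc path above; direct I/O and DAX
 * allocate real extents here.
 */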
static int
xfs_file_iomap_begin(
	struct inode		*inode,
	loff_t			offset,
	loff_t			length,
	unsigned		flags,
	struct iomap		*iomap)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_bmbt_irec	imap;
	xfs_fileoff_t		offset_fsb, end_fsb;
	int			nimaps = 1, error = 0;
	bool			shared = false;
	unsigned		lockmode;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	if ((flags & (IOMAP_WRITE | IOMAP_ZERO)) && !(flags & IOMAP_DIRECT) &&
			!IS_DAX(inode) && !xfs_get_extsz_hint(ip)) {
		/* Reserve delalloc blocks for regular writeback. */
		return xfs_file_iomap_begin_delay(inode, offset, length, flags,
				iomap);
	}

	/*
	 * Lock the inode in the manner required for the specified operation and
	 * check for as many conditions that would result in blocking as
	 * possible. This removes most of the non-blocking checks from the
	 * mapping code below.
	 */
	error = xfs_ilock_for_iomap(ip, flags, &lockmode);
	if (error)
		return error;

	ASSERT(offset <= mp->m_super->s_maxbytes);
	if (offset > mp->m_super->s_maxbytes - length)
		length = mp->m_super->s_maxbytes - offset;
	offset_fsb = XFS_B_TO_FSBT(mp, offset);
	end_fsb = XFS_B_TO_FSB(mp, offset + length);

	error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb, &imap,
			       &nimaps, 0);
	if (error)
		goto out_unlock;

	if (flags & IOMAP_REPORT) {
		/* Trim the mapping to the nearest shared extent boundary. */
		error = xfs_reflink_trim_around_shared(ip, &imap, &shared);
		if (error)
			goto out_unlock;
	}

	/* Non-modifying mapping requested, so we are done */
	if (!(flags & (IOMAP_WRITE | IOMAP_ZERO)))
		goto out_found;

	/*
	 * Break shared extents if necessary. Checks for non-blocking IO have
	 * been done up front, so we don't need to do them here.
	 */
	if (xfs_is_reflink_inode(ip)) {
		struct xfs_bmbt_irec	orig = imap;

		/* if zeroing doesn't need COW allocation, then we are done. */
		if ((flags & IOMAP_ZERO) &&
		    !needs_cow_for_zeroing(&imap, nimaps))
			goto out_found;

		/* may drop and re-acquire the ilock */
		error = xfs_reflink_allocate_cow(ip, &imap, &shared, &lockmode,
						 flags);
		if (error)
			goto out_unlock;

		/*
		 * For buffered writes we need to report the address of the
		 * previous block (if there was any) so that the higher level
		 * write code can perform read-modify-write operations.  For
		 * direct I/O code, which must be block aligned we need to
		 * report the newly allocated address.
		 */
		if (!(flags & IOMAP_DIRECT) &&
		    orig.br_startblock != HOLESTARTBLOCK)
			imap = orig;

		end_fsb = imap.br_startoff + imap.br_blockcount;
		length = XFS_FSB_TO_B(mp, end_fsb) - offset;
	}

	/* Don't need to allocate over holes when doing zeroing operations. */
	if (flags & IOMAP_ZERO)
		goto out_found;

	if (!imap_needs_alloc(inode, &imap, nimaps))
		goto out_found;

	/* If nowait is set bail since we are going to make allocations. */
	if (flags & IOMAP_NOWAIT) {
		error = -EAGAIN;
		goto out_unlock;
	}

	/*
	 * We cap the maximum length we map to a sane size to keep the chunks
	 * of work done here somewhat symmetric with the work writeback does.
	 * This is a completely arbitrary number pulled out of thin air as a
	 * best guess for initial testing.
	 *
	 * Note that the value needs to be less than 32-bits wide until the
	 * lower level functions are updated.
	 */
	length = min_t(loff_t, length, 1024 * PAGE_SIZE);

	/*
	 * xfs_iomap_write_direct() expects the shared lock. It is unlocked on
	 * return.
	 */
	if (lockmode == XFS_ILOCK_EXCL)
		xfs_ilock_demote(ip, lockmode);
	error = xfs_iomap_write_direct(ip, offset, length, &imap,
			nimaps);
	if (error)
		return error;

	iomap->flags |= IOMAP_F_NEW;
	trace_xfs_iomap_alloc(ip, offset, length, XFS_DATA_FORK, &imap);

out_finish:
	return xfs_bmbt_to_iomap(ip, iomap, &imap, shared);

out_found:
	ASSERT(nimaps);
	xfs_iunlock(ip, lockmode);
	trace_xfs_iomap_found(ip, offset, length, XFS_DATA_FORK, &imap);
	goto out_finish;

out_unlock:
	xfs_iunlock(ip, lockmode);
	return error;
}

static int
xfs_file_iomap_end_delalloc(
	struct xfs_inode	*ip,
	loff_t			offset,
	loff_t			length,
	ssize_t			written,
	struct iomap		*iomap)
{
	struct xfs_mount	*mp = ip->i_mount;
	xfs_fileoff_t		start_fsb;
	xfs_fileoff_t		end_fsb;
	int			error = 0;

	/*
	 * Behave as if the write failed if drop writes is enabled. Set the NEW
	 * flag to force delalloc cleanup.
	 */
	if (XFS_TEST_ERROR(false, mp, XFS_ERRTAG_DROP_WRITES)) {
		iomap->flags |= IOMAP_F_NEW;
		written = 0;
	}

	/*
	 * start_fsb refers to the first unused block after a short write. If
	 * nothing was written, round offset down to point at the first block in
	 * the range.
	 */
	if (unlikely(!written))
		start_fsb = XFS_B_TO_FSBT(mp, offset);
	else
		start_fsb = XFS_B_TO_FSB(mp, offset + written);
	end_fsb = XFS_B_TO_FSB(mp, offset + length);

	/*
	 * Trim delalloc blocks if they were allocated by this write and we
	 * didn't manage to write the whole range.
	 *
	 * We don't need to care about racing delalloc as we hold i_mutex
	 * across the reserve/allocate/unreserve calls. If there are delalloc
	 * blocks in the range, they are ours.
	 */
	if ((iomap->flags & IOMAP_F_NEW) && start_fsb < end_fsb) {
		truncate_pagecache_range(VFS_I(ip), XFS_FSB_TO_B(mp, start_fsb),
					 XFS_FSB_TO_B(mp, end_fsb) - 1);

		error = xfs_bmap_punch_delalloc_range(ip, start_fsb,
					       end_fsb - start_fsb);
		if (error && !XFS_FORCED_SHUTDOWN(mp)) {
			xfs_alert(mp, "%s: unable to clean up ino %lld",
				__func__, ip->i_ino);
			return error;
		}
	}

	return 0;
}

static int
xfs_file_iomap_end(
	struct inode		*inode,
	loff_t			offset,
	loff_t			length,
	ssize_t			written,
	unsigned		flags,
	struct iomap		*iomap)
{
	if ((flags & IOMAP_WRITE) && iomap->type == IOMAP_DELALLOC)
		return xfs_file_iomap_end_delalloc(XFS_I(inode), offset,
				length, written, iomap);
	return 0;
}

const struct iomap_ops xfs_iomap_ops = {
	.iomap_begin		= xfs_file_iomap_begin,
	.iomap_end		= xfs_file_iomap_end,
};
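/*
 * For reference, this table is consumed by the generic iomap helpers,
 * e.g. the buffered write path in xfs_file.c of this era:
 *
 *	ret = iomap_file_buffered_write(iocb, from, &xfs_iomap_ops);
 */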

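/*
 * ->iomap_begin handler backing SEEK_HOLE/SEEK_DATA: reports data, COW and
 * hole extents without allocating anything.
 */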
static int
xfs_seek_iomap_begin(
	struct inode		*inode,
	loff_t			offset,
	loff_t			length,
	unsigned		flags,
	struct iomap		*iomap)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	xfs_fileoff_t		offset_fsb = XFS_B_TO_FSBT(mp, offset);
	xfs_fileoff_t		end_fsb = XFS_B_TO_FSB(mp, offset + length);
	xfs_fileoff_t		cow_fsb = NULLFILEOFF, data_fsb = NULLFILEOFF;
	struct xfs_iext_cursor	icur;
	struct xfs_bmbt_irec	imap, cmap;
	int			error = 0;
	unsigned		lockmode;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	lockmode = xfs_ilock_data_map_shared(ip);
	if (!(ip->i_df.if_flags & XFS_IFEXTENTS)) {
		error = xfs_iread_extents(NULL, ip, XFS_DATA_FORK);
		if (error)
			goto out_unlock;
	}

	if (xfs_iext_lookup_extent(ip, &ip->i_df, offset_fsb, &icur, &imap)) {
		/*
		 * If we found a data extent we are done.
		 */
		if (imap.br_startoff <= offset_fsb)
			goto done;
		data_fsb = imap.br_startoff;
	} else {
		/*
		 * Fake a hole until the end of the file.
		 */
		data_fsb = min(XFS_B_TO_FSB(mp, offset + length),
			       XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes));
	}

	/*
	 * If a COW fork extent covers the hole, report it - capped to the next
	 * data fork extent:
	 */
	if (xfs_inode_has_cow_data(ip) &&
	    xfs_iext_lookup_extent(ip, ip->i_cowfp, offset_fsb, &icur, &cmap))
		cow_fsb = cmap.br_startoff;
	if (cow_fsb != NULLFILEOFF && cow_fsb <= offset_fsb) {
		if (data_fsb < cow_fsb + cmap.br_blockcount)
			end_fsb = min(end_fsb, data_fsb);
		xfs_trim_extent(&cmap, offset_fsb, end_fsb);
		error = xfs_bmbt_to_iomap(ip, iomap, &cmap, true);
		/*
		 * This is a COW extent, so we must probe the page cache
		 * because there could be dirty page cache being backed
		 * by this extent.
		 */
		iomap->type = IOMAP_UNWRITTEN;
		goto out_unlock;
	}

	/*
	 * Else report a hole, capped to the next found data or COW extent.
	 */
	if (cow_fsb != NULLFILEOFF && cow_fsb < data_fsb)
		imap.br_blockcount = cow_fsb - offset_fsb;
	else
		imap.br_blockcount = data_fsb - offset_fsb;
	imap.br_startoff = offset_fsb;
	imap.br_startblock = HOLESTARTBLOCK;
	imap.br_state = XFS_EXT_NORM;
done:
	xfs_trim_extent(&imap, offset_fsb, end_fsb);
	error = xfs_bmbt_to_iomap(ip, iomap, &imap, false);
out_unlock:
	xfs_iunlock(ip, lockmode);
	return error;
}

const struct iomap_ops xfs_seek_iomap_ops = {
	.iomap_begin		= xfs_seek_iomap_begin,
};

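/*
 * ->iomap_begin handler for the attribute fork (used by FIEMAP with
 * FIEMAP_FLAG_XATTR): a read-only mapping lookup.
 */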
static int
xfs_xattr_iomap_begin(
	struct inode		*inode,
	loff_t			offset,
	loff_t			length,
	unsigned		flags,
	struct iomap		*iomap)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	xfs_fileoff_t		offset_fsb = XFS_B_TO_FSBT(mp, offset);
	xfs_fileoff_t		end_fsb = XFS_B_TO_FSB(mp, offset + length);
	struct xfs_bmbt_irec	imap;
	int			nimaps = 1, error = 0;
	unsigned		lockmode;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	lockmode = xfs_ilock_attr_map_shared(ip);

	/* if there is no attribute fork or no extents, return ENOENT */
	if (!XFS_IFORK_Q(ip) || !ip->i_d.di_anextents) {
		error = -ENOENT;
		goto out_unlock;
	}

	ASSERT(ip->i_d.di_aformat != XFS_DINODE_FMT_LOCAL);
	error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb, &imap,
			       &nimaps, XFS_BMAPI_ATTRFORK);
out_unlock:
	xfs_iunlock(ip, lockmode);

	if (error)
		return error;
	ASSERT(nimaps);
	return xfs_bmbt_to_iomap(ip, iomap, &imap, false);
}

const struct iomap_ops xfs_xattr_iomap_ops = {
	.iomap_begin		= xfs_xattr_iomap_begin,
};