/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_bmap_btree.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_error.h"
#include "xfs_trans.h"
#include "xfs_trans_space.h"
#include "xfs_iomap.h"
#include "xfs_trace.h"
#include "xfs_icache.h"
#include "xfs_quota.h"
#include "xfs_dquot_item.h"
#include "xfs_dquot.h"
#include "xfs_dinode.h"


#define XFS_WRITEIO_ALIGN(mp,off)	(((off) >> mp->m_writeio_log) \
						<< mp->m_writeio_log)
#define XFS_WRITE_IMAPS		XFS_BMAP_MAX_NMAP
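
/*
 * For example (illustrative values only): with m_writeio_log = 16, i.e. a
 * 64k write I/O size, XFS_WRITEIO_ALIGN() rounds a byte offset of 0x12345
 * down to 0x10000.
 */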

STATIC int
xfs_iomap_eof_align_last_fsb(
	xfs_mount_t	*mp,
	xfs_inode_t	*ip,
	xfs_extlen_t	extsize,
	xfs_fileoff_t	*last_fsb)
{
	xfs_fileoff_t	new_last_fsb = 0;
	xfs_extlen_t	align = 0;
	int		eof, error;

	if (!XFS_IS_REALTIME_INODE(ip)) {
		/*
		 * Round up the allocation request to a stripe unit
		 * (m_dalign) boundary if the file size is >= stripe unit
		 * size, and we are allocating past the allocation eof.
		 *
		 * If mounted with the "-o swalloc" option the alignment is
		 * increased from the stripe unit size to the stripe width.
		 */
		if (mp->m_swidth && (mp->m_flags & XFS_MOUNT_SWALLOC))
			align = mp->m_swidth;
		else if (mp->m_dalign)
			align = mp->m_dalign;

		if (align && XFS_ISIZE(ip) >= XFS_FSB_TO_B(mp, align))
			new_last_fsb = roundup_64(*last_fsb, align);
	}

	/*
	 * Always round up the allocation request to an extent boundary
	 * (when the file is on a real-time subvolume or has a di_extsize hint).
	 */
	if (extsize) {
		if (new_last_fsb)
			align = roundup_64(new_last_fsb, extsize);
		else
			align = extsize;
		new_last_fsb = roundup_64(*last_fsb, align);
	}

	if (new_last_fsb) {
		error = xfs_bmap_eof(ip, new_last_fsb, XFS_DATA_FORK, &eof);
		if (error)
			return error;
		if (eof)
			*last_fsb = new_last_fsb;
	}
	return 0;
}
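
/*
 * Worked example for the alignment above, with illustrative values: given
 * a 16-block stripe unit (m_dalign = 16) and an incoming *last_fsb of 100,
 * new_last_fsb becomes roundup_64(100, 16) = 112; the rounded-up value is
 * only used if it still lies beyond the existing blocks at EOF.
 */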

STATIC int
xfs_alert_fsblock_zero(
	xfs_inode_t	*ip,
	xfs_bmbt_irec_t	*imap)
{
	xfs_alert_tag(ip->i_mount, XFS_PTAG_FSBLOCK_ZERO,
			"Access to block zero in inode %llu "
			"start_block: %llx start_off: %llx "
			"blkcnt: %llx extent-state: %x",
		(unsigned long long)ip->i_ino,
		(unsigned long long)imap->br_startblock,
		(unsigned long long)imap->br_startoff,
		(unsigned long long)imap->br_blockcount,
		imap->br_state);
	return EFSCORRUPTED;
}

int
xfs_iomap_write_direct(
	xfs_inode_t	*ip,
	xfs_off_t	offset,
	size_t		count,
	xfs_bmbt_irec_t *imap,
	int		nmaps)
{
	xfs_mount_t	*mp = ip->i_mount;
	xfs_fileoff_t	offset_fsb;
	xfs_fileoff_t	last_fsb;
	xfs_filblks_t	count_fsb, resaligned;
	xfs_fsblock_t	firstfsb;
	xfs_extlen_t	extsz, temp;
	int		nimaps;
	int		bmapi_flag;
	int		quota_flag;
	int		rt;
	xfs_trans_t	*tp;
	xfs_bmap_free_t free_list;
	uint		qblocks, resblks, resrtextents;
	int		committed;
	int		error;

	error = xfs_qm_dqattach(ip, 0);
	if (error)
		return XFS_ERROR(error);

	rt = XFS_IS_REALTIME_INODE(ip);
	extsz = xfs_get_extsz_hint(ip);

	offset_fsb = XFS_B_TO_FSBT(mp, offset);
	last_fsb = XFS_B_TO_FSB(mp, ((xfs_ufsize_t)(offset + count)));
	if ((offset + count) > XFS_ISIZE(ip)) {
		error = xfs_iomap_eof_align_last_fsb(mp, ip, extsz, &last_fsb);
		if (error)
			return XFS_ERROR(error);
	} else {
		if (nmaps && (imap->br_startblock == HOLESTARTBLOCK))
			last_fsb = MIN(last_fsb, (xfs_fileoff_t)
					imap->br_blockcount +
					imap->br_startoff);
	}
	count_fsb = last_fsb - offset_fsb;
	ASSERT(count_fsb > 0);

	resaligned = count_fsb;
	if (unlikely(extsz)) {
		if ((temp = do_mod(offset_fsb, extsz)))
			resaligned += temp;
		if ((temp = do_mod(resaligned, extsz)))
			resaligned += extsz - temp;
	}
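	/*
	 * Worked example for the alignment above, with illustrative values:
	 * offset_fsb = 5, count_fsb = 10 and an extent size hint of 4 blocks
	 * gives resaligned = 10 + 1 (leading slop) = 11, then rounded up to
	 * 12 so the reservation covers whole extent-size-aligned chunks.
	 */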

	if (unlikely(rt)) {
		resrtextents = qblocks = resaligned;
		resrtextents /= mp->m_sb.sb_rextsize;
		resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
		quota_flag = XFS_QMOPT_RES_RTBLKS;
	} else {
		resrtextents = 0;
		resblks = qblocks = XFS_DIOSTRAT_SPACE_RES(mp, resaligned);
		quota_flag = XFS_QMOPT_RES_REGBLKS;
	}

	/*
	 * Allocate and set up the transaction
	 */
	tp = xfs_trans_alloc(mp, XFS_TRANS_DIOSTRAT);
	error = xfs_trans_reserve(tp, &M_RES(mp)->tr_write,
				  resblks, resrtextents);
	/*
	 * Check for running out of space, note: need lock to return
	 */
	if (error) {
		xfs_trans_cancel(tp, 0);
		return XFS_ERROR(error);
	}

	xfs_ilock(ip, XFS_ILOCK_EXCL);

	error = xfs_trans_reserve_quota_nblks(tp, ip, qblocks, 0, quota_flag);
	if (error)
		goto out_trans_cancel;

	xfs_trans_ijoin(tp, ip, 0);

	bmapi_flag = 0;
	if (offset < XFS_ISIZE(ip) || extsz)
		bmapi_flag |= XFS_BMAPI_PREALLOC;

	/*
	 * From this point onwards we overwrite the imap pointer that the
	 * caller gave to us.
	 */
	xfs_bmap_init(&free_list, &firstfsb);
	nimaps = 1;
	error = xfs_bmapi_write(tp, ip, offset_fsb, count_fsb, bmapi_flag,
				&firstfsb, 0, imap, &nimaps, &free_list);
	if (error)
		goto out_bmap_cancel;

	/*
	 * Complete the transaction
	 */
	error = xfs_bmap_finish(&tp, &free_list, &committed);
	if (error)
		goto out_bmap_cancel;
	error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
	if (error)
		goto out_unlock;

	/*
	 * Copy any maps to caller's array and return any error.
	 */
	if (nimaps == 0) {
		error = XFS_ERROR(ENOSPC);
		goto out_unlock;
	}

	if (!(imap->br_startblock || XFS_IS_REALTIME_INODE(ip)))
		error = xfs_alert_fsblock_zero(ip, imap);

out_unlock:
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;

out_bmap_cancel:
	xfs_bmap_cancel(&free_list);
	xfs_trans_unreserve_quota_nblks(tp, ip, (long)qblocks, 0, quota_flag);
out_trans_cancel:
	xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT);
	goto out_unlock;
}

/*
 * If the caller is doing a write at the end of the file, then extend the
 * allocation out to the file system's write iosize.  We clean up any extra
 * space left over when the file is closed in xfs_inactive().
 *
 * If we find we already have delalloc preallocation beyond EOF, don't do more
 * preallocation as it is not needed.
 */
STATIC int
xfs_iomap_eof_want_preallocate(
	xfs_mount_t	*mp,
	xfs_inode_t	*ip,
	xfs_off_t	offset,
	size_t		count,
	xfs_bmbt_irec_t *imap,
	int		nimaps,
	int		*prealloc)
{
	xfs_fileoff_t   start_fsb;
	xfs_filblks_t   count_fsb;
	xfs_fsblock_t	firstblock;
	int		n, error, imaps;
	int		found_delalloc = 0;

	*prealloc = 0;
	if (offset + count <= XFS_ISIZE(ip))
		return 0;

	/*
	 * If the file is smaller than the minimum prealloc and we are using
	 * dynamic preallocation, don't do any preallocation at all as it is
	 * likely this is the only write to the file that is going to be done.
	 */
	if (!(mp->m_flags & XFS_MOUNT_DFLT_IOSIZE) &&
	    XFS_ISIZE(ip) < XFS_FSB_TO_B(mp, mp->m_writeio_blocks))
		return 0;
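
	/*
	 * For a sense of scale (illustrative numbers): with the common 64k
	 * write I/O size and 4k filesystem blocks, m_writeio_blocks is 16,
	 * so files smaller than 64k skip speculative preallocation here.
	 */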

	/*
	 * If there are any real blocks past eof, then don't
	 * do any speculative allocation.
	 */
	start_fsb = XFS_B_TO_FSBT(mp, ((xfs_ufsize_t)(offset + count - 1)));
	count_fsb = XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);
	while (count_fsb > 0) {
		imaps = nimaps;
		firstblock = NULLFSBLOCK;
		error = xfs_bmapi_read(ip, start_fsb, count_fsb, imap, &imaps,
				       0);
		if (error)
			return error;
		for (n = 0; n < imaps; n++) {
			if ((imap[n].br_startblock != HOLESTARTBLOCK) &&
			    (imap[n].br_startblock != DELAYSTARTBLOCK))
				return 0;
			start_fsb += imap[n].br_blockcount;
			count_fsb -= imap[n].br_blockcount;

			if (imap[n].br_startblock == DELAYSTARTBLOCK)
				found_delalloc = 1;
		}
	}
	if (!found_delalloc)
		*prealloc = 1;
	return 0;
}

/*
 * Determine the initial size of the preallocation. We are beyond the current
 * EOF here, but we need to take into account whether this is a sparse write or
 * an extending write when determining the preallocation size.  Hence we need to
 * look up the extent that ends at the current write offset and use the result
 * to determine the preallocation size.
 *
 * If the extent is a hole, then preallocation is essentially disabled.
 * Otherwise we take the size of the preceding data extent as the basis for the
 * preallocation size. If the size of the extent is greater than half the
 * maximum extent length, then use the current offset as the basis. This ensures
 * that for large files the preallocation size always extends to MAXEXTLEN
 * rather than falling short due to things like stripe unit/width alignment of
 * real extents.
 */
STATIC xfs_fsblock_t
xfs_iomap_eof_prealloc_initial_size(
	struct xfs_mount	*mp,
	struct xfs_inode	*ip,
	xfs_off_t		offset,
	xfs_bmbt_irec_t		*imap,
	int			nimaps)
{
	xfs_fileoff_t   start_fsb;
	int		imaps = 1;
	int		error;

	ASSERT(nimaps >= imaps);

	/* if we are using a specific prealloc size, return now */
	if (mp->m_flags & XFS_MOUNT_DFLT_IOSIZE)
		return 0;

	/* If the file is small, then use the minimum prealloc */
	if (XFS_ISIZE(ip) < XFS_FSB_TO_B(mp, mp->m_dalign))
		return 0;

	/*
	 * As we write multiple pages, the offset will always align to the
	 * start of a page and hence point to a hole at EOF. i.e. if the size is
	 * 4096 bytes, we only have one block at FSB 0, but XFS_B_TO_FSB(4096)
	 * will return FSB 1. Hence if there are blocks in the file, we want to
	 * point to the block prior to the EOF block and not the hole that maps
	 * directly at @offset.
	 */
	start_fsb = XFS_B_TO_FSB(mp, offset);
	if (start_fsb)
		start_fsb--;
	error = xfs_bmapi_read(ip, start_fsb, 1, imap, &imaps, XFS_BMAPI_ENTIRE);
	if (error)
		return 0;

	ASSERT(imaps == 1);
	if (imap[0].br_startblock == HOLESTARTBLOCK)
		return 0;
	if (imap[0].br_blockcount <= (MAXEXTLEN >> 1))
		return imap[0].br_blockcount << 1;
	return XFS_B_TO_FSB(mp, offset);
}
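
/*
 * Worked example with illustrative numbers: if the extent ending at the
 * write offset is 1000 blocks long (well under MAXEXTLEN / 2), the initial
 * preallocation basis above is 2000 blocks; once that extent grows past
 * MAXEXTLEN / 2 the basis switches to the current file offset instead.
 */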

STATIC bool
xfs_quota_need_throttle(
	struct xfs_inode *ip,
	int type,
	xfs_fsblock_t alloc_blocks)
{
	struct xfs_dquot *dq = xfs_inode_dquot(ip, type);

	if (!dq || !xfs_this_quota_on(ip->i_mount, type))
		return false;

	/* no hi watermark, no throttle */
	if (!dq->q_prealloc_hi_wmark)
		return false;

	/* under the lo watermark, no throttle */
	if (dq->q_res_bcount + alloc_blocks < dq->q_prealloc_lo_wmark)
		return false;

	return true;
}

STATIC void
xfs_quota_calc_throttle(
	struct xfs_inode *ip,
	int type,
	xfs_fsblock_t *qblocks,
	int *qshift)
{
	int64_t freesp;
	int shift = 0;
	struct xfs_dquot *dq = xfs_inode_dquot(ip, type);

	/* over hi wmark, squash the prealloc completely */
	if (dq->q_res_bcount >= dq->q_prealloc_hi_wmark) {
		*qblocks = 0;
		return;
	}

	freesp = dq->q_prealloc_hi_wmark - dq->q_res_bcount;
	if (freesp < dq->q_low_space[XFS_QLOWSP_5_PCNT]) {
		shift = 2;
		if (freesp < dq->q_low_space[XFS_QLOWSP_3_PCNT])
			shift += 2;
		if (freesp < dq->q_low_space[XFS_QLOWSP_1_PCNT])
			shift += 2;
	}

	/* only overwrite the throttle values if we are more aggressive */
	if ((freesp >> shift) < (*qblocks >> *qshift)) {
		*qblocks = freesp;
		*qshift = shift;
	}
}
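
/*
 * Worked example for the throttling above, with illustrative numbers: if a
 * dquot's reserved count leaves less free quota than its 3% low-space
 * threshold but more than the 1% threshold, the shift comes out as 4, i.e.
 * the preallocation basis for that quota is divided by 16 (and it is
 * squashed to zero outright once the high preallocation watermark is hit).
 */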

/*
 * If we don't have a user specified preallocation size, dynamically increase
 * the preallocation size as the size of the file grows. Cap the maximum size
 * at a single extent or less if the filesystem is near full. The closer the
 * filesystem is to full, the smaller the maximum preallocation.
 */
STATIC xfs_fsblock_t
xfs_iomap_prealloc_size(
	struct xfs_mount	*mp,
	struct xfs_inode	*ip,
	xfs_off_t		offset,
	struct xfs_bmbt_irec	*imap,
	int			nimaps)
{
	xfs_fsblock_t		alloc_blocks = 0;
	int			shift = 0;
	int64_t			freesp;
	xfs_fsblock_t		qblocks;
	int			qshift = 0;

	alloc_blocks = xfs_iomap_eof_prealloc_initial_size(mp, ip, offset,
							   imap, nimaps);
	if (!alloc_blocks)
		goto check_writeio;
	qblocks = alloc_blocks;

	/*
	 * MAXEXTLEN is not a power of two value but we round the prealloc down
	 * to the nearest power of two value after throttling. To prevent the
	 * round down from unconditionally reducing the maximum supported prealloc
	 * size, we round up first, apply appropriate throttling, round down and
	 * cap the value to MAXEXTLEN.
	 */
	alloc_blocks = XFS_FILEOFF_MIN(roundup_pow_of_two(MAXEXTLEN),
				       alloc_blocks);
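	/*
	 * For example: MAXEXTLEN is 2^21 - 1 blocks (21 bits of extent
	 * length), so rounding down first would halve the limit to 2^20;
	 * rounding up to 2^21 here keeps the full range available until
	 * the final cap to MAXEXTLEN below.
	 */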

	xfs_icsb_sync_counters(mp, XFS_ICSB_LAZY_COUNT);
	freesp = mp->m_sb.sb_fdblocks;
	if (freesp < mp->m_low_space[XFS_LOWSP_5_PCNT]) {
		shift = 2;
		if (freesp < mp->m_low_space[XFS_LOWSP_4_PCNT])
			shift++;
		if (freesp < mp->m_low_space[XFS_LOWSP_3_PCNT])
			shift++;
		if (freesp < mp->m_low_space[XFS_LOWSP_2_PCNT])
			shift++;
		if (freesp < mp->m_low_space[XFS_LOWSP_1_PCNT])
			shift++;
	}

	/*
	 * Check each quota to cap the prealloc size and provide a shift
	 * value to throttle with.
	 */
	if (xfs_quota_need_throttle(ip, XFS_DQ_USER, alloc_blocks))
		xfs_quota_calc_throttle(ip, XFS_DQ_USER, &qblocks, &qshift);
	if (xfs_quota_need_throttle(ip, XFS_DQ_GROUP, alloc_blocks))
		xfs_quota_calc_throttle(ip, XFS_DQ_GROUP, &qblocks, &qshift);
	if (xfs_quota_need_throttle(ip, XFS_DQ_PROJ, alloc_blocks))
		xfs_quota_calc_throttle(ip, XFS_DQ_PROJ, &qblocks, &qshift);

	/*
	 * The final prealloc size is set to the minimum of free space available
	 * in each of the quotas and the overall filesystem.
	 *
	 * The shift throttle value is set to the maximum value as determined by
	 * the global low free space values and per-quota low free space values.
	 */
	alloc_blocks = MIN(alloc_blocks, qblocks);
	shift = MAX(shift, qshift);

	if (shift)
		alloc_blocks >>= shift;
	/*
	 * rounddown_pow_of_two() returns an undefined result if we pass in
	 * alloc_blocks = 0.
	 */
	if (alloc_blocks)
		alloc_blocks = rounddown_pow_of_two(alloc_blocks);
	if (alloc_blocks > MAXEXTLEN)
		alloc_blocks = MAXEXTLEN;

	/*
	 * If we are still trying to allocate more space than is
	 * available, squash the prealloc hard. This can happen if we
	 * have a large file on a small filesystem and the above
	 * lowspace thresholds are smaller than MAXEXTLEN.
	 */
	while (alloc_blocks && alloc_blocks >= freesp)
		alloc_blocks >>= 4;

check_writeio:
	if (alloc_blocks < mp->m_writeio_blocks)
		alloc_blocks = mp->m_writeio_blocks;

	trace_xfs_iomap_prealloc_size(ip, alloc_blocks, shift,
				      mp->m_writeio_blocks);

	return alloc_blocks;
}
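
/*
 * Putting the pieces together with illustrative numbers: a 2000 block basis
 * from the preceding extent, with global free space between the 5% and 4%
 * thresholds (shift = 2) and no quota pressure, becomes 2000 >> 2 = 500
 * blocks and is then rounded down to 256 blocks; the result is never
 * allowed to drop below m_writeio_blocks.
 */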

int
xfs_iomap_write_delay(
	xfs_inode_t	*ip,
	xfs_off_t	offset,
	size_t		count,
	xfs_bmbt_irec_t *ret_imap)
{
	xfs_mount_t	*mp = ip->i_mount;
	xfs_fileoff_t	offset_fsb;
	xfs_fileoff_t	last_fsb;
	xfs_off_t	aligned_offset;
	xfs_fileoff_t	ioalign;
	xfs_extlen_t	extsz;
	int		nimaps;
	xfs_bmbt_irec_t imap[XFS_WRITE_IMAPS];
	int		prealloc;
	int		error;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));

	/*
	 * Make sure that the dquots are there. This doesn't hold
	 * the ilock across a disk read.
	 */
	error = xfs_qm_dqattach_locked(ip, 0);
	if (error)
		return XFS_ERROR(error);

	extsz = xfs_get_extsz_hint(ip);
	offset_fsb = XFS_B_TO_FSBT(mp, offset);

	error = xfs_iomap_eof_want_preallocate(mp, ip, offset, count,
				imap, XFS_WRITE_IMAPS, &prealloc);
	if (error)
		return error;

retry:
	if (prealloc) {
		xfs_fsblock_t	alloc_blocks;

		alloc_blocks = xfs_iomap_prealloc_size(mp, ip, offset, imap,
						       XFS_WRITE_IMAPS);

		aligned_offset = XFS_WRITEIO_ALIGN(mp, (offset + count - 1));
		ioalign = XFS_B_TO_FSBT(mp, aligned_offset);
		last_fsb = ioalign + alloc_blocks;
	} else {
		last_fsb = XFS_B_TO_FSB(mp, ((xfs_ufsize_t)(offset + count)));
	}

	if (prealloc || extsz) {
		error = xfs_iomap_eof_align_last_fsb(mp, ip, extsz, &last_fsb);
		if (error)
			return error;
	}

	/*
	 * Make sure preallocation does not create extents beyond the range we
	 * actually support in this filesystem.
	 */
	if (last_fsb > XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes))
		last_fsb = XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);

	ASSERT(last_fsb > offset_fsb);

	nimaps = XFS_WRITE_IMAPS;
	error = xfs_bmapi_delay(ip, offset_fsb, last_fsb - offset_fsb,
				imap, &nimaps, XFS_BMAPI_ENTIRE);
	switch (error) {
	case 0:
	case ENOSPC:
	case EDQUOT:
		break;
	default:
		return XFS_ERROR(error);
	}

	/*
	 * If bmapi returned us nothing, we got either ENOSPC or EDQUOT. Retry
	 * without EOF preallocation.
	 */
	if (nimaps == 0) {
		trace_xfs_delalloc_enospc(ip, offset, count);
		if (prealloc) {
			prealloc = 0;
			error = 0;
			goto retry;
		}
		return XFS_ERROR(error ? error : ENOSPC);
	}

	if (!(imap[0].br_startblock || XFS_IS_REALTIME_INODE(ip)))
		return xfs_alert_fsblock_zero(ip, &imap[0]);

	/*
	 * Tag the inode as speculatively preallocated so we can reclaim this
	 * space on demand, if necessary.
	 */
	if (prealloc)
		xfs_inode_set_eofblocks_tag(ip);

	*ret_imap = imap[0];
	return 0;
}

/*
 * Pass in a delayed allocate extent, convert it to real extents;
 * return to the caller the extent we create which maps on top of
 * the originating caller's request.
 *
 * Called without a lock on the inode.
 *
 * We no longer bother to look at the incoming map - all we have to
 * guarantee is that whatever we allocate fills the required range.
 */
int
xfs_iomap_write_allocate(
	xfs_inode_t	*ip,
	xfs_off_t	offset,
	xfs_bmbt_irec_t *imap)
{
	xfs_mount_t	*mp = ip->i_mount;
	xfs_fileoff_t	offset_fsb, last_block;
	xfs_fileoff_t	end_fsb, map_start_fsb;
	xfs_fsblock_t	first_block;
	xfs_bmap_free_t	free_list;
	xfs_filblks_t	count_fsb;
	xfs_trans_t	*tp;
	int		nimaps, committed;
	int		error = 0;
	int		nres;

	/*
	 * Make sure that the dquots are there.
	 */
	error = xfs_qm_dqattach(ip, 0);
	if (error)
		return XFS_ERROR(error);

	offset_fsb = XFS_B_TO_FSBT(mp, offset);
	count_fsb = imap->br_blockcount;
	map_start_fsb = imap->br_startoff;

	XFS_STATS_ADD(xs_xstrat_bytes, XFS_FSB_TO_B(mp, count_fsb));

	while (count_fsb != 0) {
		/*
		 * Set up a transaction with which to allocate the
		 * backing store for the file.  Do allocations in a
		 * loop until we get some space in the range we are
		 * interested in.  The other space that might be allocated
		 * is in the delayed allocation extent on which we sit
		 * but before our buffer starts.
		 */

		nimaps = 0;
		while (nimaps == 0) {
			tp = xfs_trans_alloc(mp, XFS_TRANS_STRAT_WRITE);
			tp->t_flags |= XFS_TRANS_RESERVE;
			nres = XFS_EXTENTADD_SPACE_RES(mp, XFS_DATA_FORK);
			error = xfs_trans_reserve(tp, &M_RES(mp)->tr_write,
						  nres, 0);
			if (error) {
				xfs_trans_cancel(tp, 0);
				return XFS_ERROR(error);
			}
			xfs_ilock(ip, XFS_ILOCK_EXCL);
			xfs_trans_ijoin(tp, ip, 0);

			xfs_bmap_init(&free_list, &first_block);

			/*
			 * it is possible that the extents have changed since
			 * we did the read call as we dropped the ilock for a
			 * while. We have to be careful about truncates or hole
			 * punches here - we are not allowed to allocate
			 * non-delalloc blocks here.
			 *
			 * The only protection against truncation is the pages
			 * for the range we are being asked to convert are
			 * locked and hence a truncate will block on them
			 * first.
			 *
			 * As a result, if we go beyond the range we really
			 * need and hit a delalloc extent boundary followed by
			 * a hole while we have excess blocks in the map, we
			 * will fill the hole incorrectly and overrun the
			 * transaction reservation.
			 *
			 * Using a single map prevents this as we are forced to
			 * check each map we look for overlap with the desired
			 * range and abort as soon as we find it. Also, given
			 * that we only return a single map, having one beyond
			 * what we can return is probably a bit silly.
			 *
			 * We also need to check that we don't go beyond EOF;
			 * this is a truncate optimisation as a truncate sets
			 * the new file size before blocking on the pages we
			 * currently have locked under writeback. Because they
			 * are about to be tossed, we don't need to write them
			 * back....
			 */
			nimaps = 1;
			end_fsb = XFS_B_TO_FSB(mp, XFS_ISIZE(ip));
			error = xfs_bmap_last_offset(NULL, ip, &last_block,
							XFS_DATA_FORK);
			if (error)
				goto trans_cancel;

			last_block = XFS_FILEOFF_MAX(last_block, end_fsb);
			if ((map_start_fsb + count_fsb) > last_block) {
				count_fsb = last_block - map_start_fsb;
				if (count_fsb == 0) {
					error = EAGAIN;
					goto trans_cancel;
				}
			}

			/*
			 * From this point onwards we overwrite the imap
			 * pointer that the caller gave to us.
			 */
			error = xfs_bmapi_write(tp, ip, map_start_fsb,
						count_fsb,
						XFS_BMAPI_STACK_SWITCH,
						&first_block, 1,
						imap, &nimaps, &free_list);
			if (error)
				goto trans_cancel;

			error = xfs_bmap_finish(&tp, &free_list, &committed);
			if (error)
				goto trans_cancel;

			error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
			if (error)
				goto error0;

			xfs_iunlock(ip, XFS_ILOCK_EXCL);
		}

		/*
		 * See if we were able to allocate an extent that
		 * covers at least part of the caller's request
		 */
		if (!(imap->br_startblock || XFS_IS_REALTIME_INODE(ip)))
			return xfs_alert_fsblock_zero(ip, imap);

		if ((offset_fsb >= imap->br_startoff) &&
		    (offset_fsb < (imap->br_startoff +
				   imap->br_blockcount))) {
			XFS_STATS_INC(xs_xstrat_quick);
			return 0;
		}

		/*
		 * So far we have not mapped the requested part of the
		 * file, just surrounding data, try again.
		 */
		count_fsb -= imap->br_blockcount;
		map_start_fsb = imap->br_startoff + imap->br_blockcount;
	}

trans_cancel:
	xfs_bmap_cancel(&free_list);
	xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT);
error0:
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return XFS_ERROR(error);
}

int
xfs_iomap_write_unwritten(
	xfs_inode_t	*ip,
	xfs_off_t	offset,
	size_t		count)
{
	xfs_mount_t	*mp = ip->i_mount;
	xfs_fileoff_t	offset_fsb;
	xfs_filblks_t	count_fsb;
	xfs_filblks_t	numblks_fsb;
	xfs_fsblock_t	firstfsb;
	int		nimaps;
	xfs_trans_t	*tp;
	xfs_bmbt_irec_t imap;
	xfs_bmap_free_t free_list;
	xfs_fsize_t	i_size;
	uint		resblks;
	int		committed;
	int		error;

	trace_xfs_unwritten_convert(ip, offset, count);

	offset_fsb = XFS_B_TO_FSBT(mp, offset);
	count_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + count);
	count_fsb = (xfs_filblks_t)(count_fsb - offset_fsb);

	/*
	 * Reserve enough blocks in this transaction for two complete extent
	 * btree splits.  We may be converting the middle part of an unwritten
	 * extent and in this case we will insert two new extents in the btree
	 * each of which could cause a full split.
	 *
	 * This reservation amount will be used in the first call to
	 * xfs_bmbt_split() to select an AG with enough space to satisfy the
	 * rest of the operation.
	 */
	resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0) << 1;

	do {
		/*
		 * set up a transaction to convert the range of extents
		 * from unwritten to real. Do allocations in a loop until
		 * we have covered the range passed in.
		 *
		 * Note that we open code the transaction allocation here
		 * to pass KM_NOFS--we can't risk recursing back into
		 * the filesystem here as we might be asked to write out
		 * the same inode that we complete here and might deadlock
		 * on the iolock.
		 */
		sb_start_intwrite(mp->m_super);
		tp = _xfs_trans_alloc(mp, XFS_TRANS_STRAT_WRITE, KM_NOFS);
		tp->t_flags |= XFS_TRANS_RESERVE | XFS_TRANS_FREEZE_PROT;
		error = xfs_trans_reserve(tp, &M_RES(mp)->tr_write,
					  resblks, 0);
		if (error) {
			xfs_trans_cancel(tp, 0);
			return XFS_ERROR(error);
		}

		xfs_ilock(ip, XFS_ILOCK_EXCL);
		xfs_trans_ijoin(tp, ip, 0);

		/*
		 * Modify the unwritten extent state of the buffer.
		 */
		xfs_bmap_init(&free_list, &firstfsb);
		nimaps = 1;
		error = xfs_bmapi_write(tp, ip, offset_fsb, count_fsb,
				  XFS_BMAPI_CONVERT, &firstfsb,
				  1, &imap, &nimaps, &free_list);
		if (error)
			goto error_on_bmapi_transaction;

		/*
		 * Log the updated inode size as we go.  We have to be careful
		 * to only log it up to the actual write offset if it is
		 * halfway into a block.
		 */
		i_size = XFS_FSB_TO_B(mp, offset_fsb + count_fsb);
		if (i_size > offset + count)
			i_size = offset + count;

		i_size = xfs_new_eof(ip, i_size);
		if (i_size) {
			ip->i_d.di_size = i_size;
			xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
		}

		error = xfs_bmap_finish(&tp, &free_list, &committed);
		if (error)
			goto error_on_bmapi_transaction;

		error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
		if (error)
			return XFS_ERROR(error);

		if (!(imap.br_startblock || XFS_IS_REALTIME_INODE(ip)))
			return xfs_alert_fsblock_zero(ip, &imap);

		if ((numblks_fsb = imap.br_blockcount) == 0) {
			/*
			 * The numblks_fsb value should always get
			 * smaller, otherwise the loop is stuck.
			 */
			ASSERT(imap.br_blockcount);
			break;
		}
		offset_fsb += numblks_fsb;
		count_fsb -= numblks_fsb;
	} while (count_fsb > 0);

	return 0;

error_on_bmapi_transaction:
	xfs_bmap_cancel(&free_list);
	xfs_trans_cancel(tp, (XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT));
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return XFS_ERROR(error);
}