/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_bmap_btree.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_error.h"
#include "xfs_trans.h"
#include "xfs_trans_space.h"
#include "xfs_iomap.h"
#include "xfs_trace.h"
#include "xfs_icache.h"
#include "xfs_quota.h"
#include "xfs_dquot_item.h"
#include "xfs_dquot.h"
#include "xfs_dinode.h"


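/*
 * XFS_WRITEIO_ALIGN() rounds a byte offset down to a multiple of the
 * preferred write I/O size (1 << m_writeio_log bytes).  Illustrative
 * example, assuming the common 64k default write size: an offset of
 * 0x12345 aligns down to 0x10000.
 */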
#define XFS_WRITEIO_ALIGN(mp,off)	(((off) >> mp->m_writeio_log) \
						<< mp->m_writeio_log)
#define XFS_WRITE_IMAPS		XFS_BMAP_MAX_NMAP

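/*
 * Round *last_fsb up for an allocation that extends the file beyond EOF.
 * For non-realtime inodes the request is aligned to the stripe unit (or
 * the stripe width when mounted with "-o swalloc") once the file is at
 * least one alignment unit in size; it is always rounded up to any extent
 * size hint.  The rounded value is only used if it still sits beyond the
 * last allocated block, so alignment is skipped when it would overlap
 * existing allocations.
 */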
STATIC int
xfs_iomap_eof_align_last_fsb(
	xfs_mount_t	*mp,
	xfs_inode_t	*ip,
	xfs_extlen_t	extsize,
	xfs_fileoff_t	*last_fsb)
{
	xfs_fileoff_t	new_last_fsb = 0;
	xfs_extlen_t	align = 0;
	int		eof, error;

	if (!XFS_IS_REALTIME_INODE(ip)) {
		/*
		 * Round up the allocation request to a stripe unit
		 * (m_dalign) boundary if the file size is >= stripe unit
		 * size, and we are allocating past the allocation eof.
		 *
		 * If mounted with the "-o swalloc" option the alignment is
 * increased from the stripe unit size to the stripe width.
		 */
		if (mp->m_swidth && (mp->m_flags & XFS_MOUNT_SWALLOC))
			align = mp->m_swidth;
		else if (mp->m_dalign)
			align = mp->m_dalign;

		if (align && XFS_ISIZE(ip) >= XFS_FSB_TO_B(mp, align))
			new_last_fsb = roundup_64(*last_fsb, align);
	}

	/*
	 * Always round up the allocation request to an extent boundary
	 * (when file on a real-time subvolume or has di_extsize hint).
	 */
	if (extsize) {
		if (new_last_fsb)
			align = roundup_64(new_last_fsb, extsize);
		else
			align = extsize;
		new_last_fsb = roundup_64(*last_fsb, align);
	}

	if (new_last_fsb) {
		error = xfs_bmap_eof(ip, new_last_fsb, XFS_DATA_FORK, &eof);
		if (error)
			return error;
		if (eof)
			*last_fsb = new_last_fsb;
	}
	return 0;
}

STATIC int
xfs_alert_fsblock_zero(
	xfs_inode_t	*ip,
	xfs_bmbt_irec_t	*imap)
{
	xfs_alert_tag(ip->i_mount, XFS_PTAG_FSBLOCK_ZERO,
			"Access to block zero in inode %llu "
			"start_block: %llx start_off: %llx "
			"blkcnt: %llx extent-state: %x",
		(unsigned long long)ip->i_ino,
		(unsigned long long)imap->br_startblock,
		(unsigned long long)imap->br_startoff,
		(unsigned long long)imap->br_blockcount,
		imap->br_state);
	return EFSCORRUPTED;
}

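/*
 * Allocate real blocks to back a direct I/O write of @count bytes at
 * @offset.  Block and quota reservations are taken up front in a single
 * transaction and, on success, *imap describes the extent that now backs
 * the range (allocated with XFS_BMAPI_PREALLOC, so it may come back as an
 * unwritten extent).
 */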
int
xfs_iomap_write_direct(
	xfs_inode_t	*ip,
	xfs_off_t	offset,
	size_t		count,
	xfs_bmbt_irec_t *imap,
	int		nmaps)
{
	xfs_mount_t	*mp = ip->i_mount;
	xfs_fileoff_t	offset_fsb;
	xfs_fileoff_t	last_fsb;
	xfs_filblks_t	count_fsb, resaligned;
	xfs_fsblock_t	firstfsb;
	xfs_extlen_t	extsz, temp;
	int		nimaps;
	int		quota_flag;
	int		rt;
	xfs_trans_t	*tp;
	xfs_bmap_free_t free_list;
	uint		qblocks, resblks, resrtextents;
	int		committed;
	int		error;

	error = xfs_qm_dqattach(ip, 0);
	if (error)
		return XFS_ERROR(error);

	rt = XFS_IS_REALTIME_INODE(ip);
	extsz = xfs_get_extsz_hint(ip);

	offset_fsb = XFS_B_TO_FSBT(mp, offset);
	last_fsb = XFS_B_TO_FSB(mp, ((xfs_ufsize_t)(offset + count)));
	if ((offset + count) > XFS_ISIZE(ip)) {
		error = xfs_iomap_eof_align_last_fsb(mp, ip, extsz, &last_fsb);
		if (error)
			return XFS_ERROR(error);
	} else {
		if (nmaps && (imap->br_startblock == HOLESTARTBLOCK))
			last_fsb = MIN(last_fsb, (xfs_fileoff_t)
					imap->br_blockcount +
					imap->br_startoff);
	}
	count_fsb = last_fsb - offset_fsb;
	ASSERT(count_fsb > 0);

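	/*
	 * Illustrative example of the extent size alignment below: with
	 * offset_fsb = 10, count_fsb = 5 and extsz = 4, the reservation
	 * is padded to resaligned = 8 blocks so that an allocation
	 * covering the aligned range [8, 16) can be satisfied.
	 */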
	resaligned = count_fsb;
	if (unlikely(extsz)) {
		if ((temp = do_mod(offset_fsb, extsz)))
			resaligned += temp;
		if ((temp = do_mod(resaligned, extsz)))
			resaligned += extsz - temp;
	}

	if (unlikely(rt)) {
		resrtextents = qblocks = resaligned;
		resrtextents /= mp->m_sb.sb_rextsize;
		resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
		quota_flag = XFS_QMOPT_RES_RTBLKS;
	} else {
		resrtextents = 0;
		resblks = qblocks = XFS_DIOSTRAT_SPACE_RES(mp, resaligned);
		quota_flag = XFS_QMOPT_RES_REGBLKS;
	}

	/*
	 * Allocate and setup the transaction
	 */
	tp = xfs_trans_alloc(mp, XFS_TRANS_DIOSTRAT);
	error = xfs_trans_reserve(tp, &M_RES(mp)->tr_write,
				  resblks, resrtextents);
	/*
	 * Check for running out of space, note: need lock to return
	 */
	if (error) {
		xfs_trans_cancel(tp, 0);
		return XFS_ERROR(error);
	}

	xfs_ilock(ip, XFS_ILOCK_EXCL);

	error = xfs_trans_reserve_quota_nblks(tp, ip, qblocks, 0, quota_flag);
	if (error)
		goto out_trans_cancel;

	xfs_trans_ijoin(tp, ip, 0);

	/*
	 * From this point onwards we overwrite the imap pointer that the
	 * caller gave to us.
	 */
	xfs_bmap_init(&free_list, &firstfsb);
	nimaps = 1;
	error = xfs_bmapi_write(tp, ip, offset_fsb, count_fsb,
				XFS_BMAPI_PREALLOC, &firstfsb, 0,
				imap, &nimaps, &free_list);
	if (error)
		goto out_bmap_cancel;

	/*
	 * Complete the transaction
	 */
	error = xfs_bmap_finish(&tp, &free_list, &committed);
	if (error)
		goto out_bmap_cancel;
	error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
	if (error)
		goto out_unlock;

	/*
	 * Copy any maps to caller's array and return any error.
	 */
	if (nimaps == 0) {
		error = XFS_ERROR(ENOSPC);
		goto out_unlock;
	}

	if (!(imap->br_startblock || XFS_IS_REALTIME_INODE(ip)))
		error = xfs_alert_fsblock_zero(ip, imap);

out_unlock:
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;

out_bmap_cancel:
	xfs_bmap_cancel(&free_list);
	xfs_trans_unreserve_quota_nblks(tp, ip, (long)qblocks, 0, quota_flag);
out_trans_cancel:
	xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT);
	goto out_unlock;
}

/*
 * If the caller is doing a write at the end of the file, then extend the
 * allocation out to the file system's write iosize.  We clean up any extra
 * space left over when the file is closed in xfs_inactive().
 *
 * If we find we already have delalloc preallocation beyond EOF, don't do more
 * preallocation as it is not needed.
 */
STATIC int
xfs_iomap_eof_want_preallocate(
	xfs_mount_t	*mp,
	xfs_inode_t	*ip,
	xfs_off_t	offset,
	size_t		count,
	xfs_bmbt_irec_t *imap,
	int		nimaps,
	int		*prealloc)
{
	xfs_fileoff_t   start_fsb;
	xfs_filblks_t   count_fsb;
	xfs_fsblock_t	firstblock;
	int		n, error, imaps;
	int		found_delalloc = 0;

	*prealloc = 0;
	if (offset + count <= XFS_ISIZE(ip))
		return 0;

	/*
	 * If the file is smaller than the minimum prealloc and we are using
	 * dynamic preallocation, don't do any preallocation at all as it is
	 * likely this is the only write to the file that is going to be done.
	 */
	if (!(mp->m_flags & XFS_MOUNT_DFLT_IOSIZE) &&
	    XFS_ISIZE(ip) < XFS_FSB_TO_B(mp, mp->m_writeio_blocks))
		return 0;

	/*
	 * If there are any real blocks past eof, then don't
	 * do any speculative allocation.
	 */
	start_fsb = XFS_B_TO_FSBT(mp, ((xfs_ufsize_t)(offset + count - 1)));
	count_fsb = XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);
	while (count_fsb > 0) {
		imaps = nimaps;
		firstblock = NULLFSBLOCK;
		error = xfs_bmapi_read(ip, start_fsb, count_fsb, imap, &imaps,
				       0);
		if (error)
			return error;
		for (n = 0; n < imaps; n++) {
			if ((imap[n].br_startblock != HOLESTARTBLOCK) &&
			    (imap[n].br_startblock != DELAYSTARTBLOCK))
				return 0;
			start_fsb += imap[n].br_blockcount;
			count_fsb -= imap[n].br_blockcount;

			if (imap[n].br_startblock == DELAYSTARTBLOCK)
				found_delalloc = 1;
		}
	}
	if (!found_delalloc)
		*prealloc = 1;
	return 0;
}

/*
 * Determine the initial size of the preallocation. We are beyond the current
 * EOF here, but we need to take into account whether this is a sparse write or
 * an extending write when determining the preallocation size.  Hence we need to
 * look up the extent that ends at the current write offset and use the result
 * to determine the preallocation size.
 *
 * If the extent is a hole, then preallocation is essentially disabled.
 * Otherwise we take the size of the preceding data extent as the basis for the
 * preallocation size. If the size of the extent is greater than half the
 * maximum extent length, then use the current offset as the basis. This ensures
 * that for large files the preallocation size always extends to MAXEXTLEN
 * rather than falling short due to things like stripe unit/width alignment of
 * real extents.
 */
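/*
 * For example, a 48 block extent ending at the write offset yields an
 * initial preallocation of 96 blocks, while an extent larger than half of
 * MAXEXTLEN makes the preallocation basis the write offset itself,
 * converted to blocks.
 */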
STATIC xfs_fsblock_t
xfs_iomap_eof_prealloc_initial_size(
	struct xfs_mount	*mp,
	struct xfs_inode	*ip,
	xfs_off_t		offset,
	xfs_bmbt_irec_t		*imap,
	int			nimaps)
{
	xfs_fileoff_t   start_fsb;
	int		imaps = 1;
	int		error;

	ASSERT(nimaps >= imaps);

	/* if we are using a specific prealloc size, return now */
	if (mp->m_flags & XFS_MOUNT_DFLT_IOSIZE)
		return 0;

	/* If the file is small, then use the minimum prealloc */
	if (XFS_ISIZE(ip) < XFS_FSB_TO_B(mp, mp->m_dalign))
		return 0;

	/*
	 * As we write multiple pages, the offset will always align to the
	 * start of a page and hence point to a hole at EOF. i.e. if the size is
	 * 4096 bytes, we only have one block at FSB 0, but XFS_B_TO_FSB(4096)
	 * will return FSB 1. Hence if there are blocks in the file, we want to
	 * point to the block prior to the EOF block and not the hole that maps
	 * directly at @offset.
	 */
	start_fsb = XFS_B_TO_FSB(mp, offset);
	if (start_fsb)
		start_fsb--;
	error = xfs_bmapi_read(ip, start_fsb, 1, imap, &imaps, XFS_BMAPI_ENTIRE);
	if (error)
		return 0;

	ASSERT(imaps == 1);
	if (imap[0].br_startblock == HOLESTARTBLOCK)
		return 0;
	if (imap[0].br_blockcount <= (MAXEXTLEN >> 1))
		return imap[0].br_blockcount << 1;
	return XFS_B_TO_FSB(mp, offset);
}

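/*
 * Decide whether preallocation should be throttled against this quota:
 * only if the quota is enforced, a preallocation high watermark is set,
 * and the current reservation plus this allocation would reach the low
 * watermark.
 */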
STATIC bool
xfs_quota_need_throttle(
	struct xfs_inode *ip,
	int type,
	xfs_fsblock_t alloc_blocks)
{
	struct xfs_dquot *dq = xfs_inode_dquot(ip, type);

	if (!dq || !xfs_this_quota_on(ip->i_mount, type))
		return false;

	/* no hi watermark, no throttle */
	if (!dq->q_prealloc_hi_wmark)
		return false;

	/* under the lo watermark, no throttle */
	if (dq->q_res_bcount + alloc_blocks < dq->q_prealloc_lo_wmark)
		return false;

	return true;
}

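/*
 * Work out how hard to throttle against this quota.  Over the high
 * watermark the preallocation is squashed to zero; otherwise qblocks is
 * capped at the space left below the high watermark and the shift grows
 * by two for each per-quota low space threshold (5%, 3%, 1%) crossed.
 * The results only replace *qblocks/*qshift if they throttle harder than
 * the values already there.
 */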
STATIC void
xfs_quota_calc_throttle(
	struct xfs_inode *ip,
	int type,
	xfs_fsblock_t *qblocks,
	int *qshift)
{
	int64_t freesp;
	int shift = 0;
	struct xfs_dquot *dq = xfs_inode_dquot(ip, type);

	/* over hi wmark, squash the prealloc completely */
	if (dq->q_res_bcount >= dq->q_prealloc_hi_wmark) {
		*qblocks = 0;
		return;
	}

	freesp = dq->q_prealloc_hi_wmark - dq->q_res_bcount;
	if (freesp < dq->q_low_space[XFS_QLOWSP_5_PCNT]) {
		shift = 2;
		if (freesp < dq->q_low_space[XFS_QLOWSP_3_PCNT])
			shift += 2;
		if (freesp < dq->q_low_space[XFS_QLOWSP_1_PCNT])
			shift += 2;
	}

	/* only overwrite the throttle values if we are more aggressive */
	if ((freesp >> shift) < (*qblocks >> *qshift)) {
		*qblocks = freesp;
		*qshift = shift;
	}
}

/*
 * If we don't have a user specified preallocation size, dynamically increase
 * the preallocation size as the size of the file grows. Cap the maximum size
 * at a single extent or less if the filesystem is near full. The closer the
 * filesystem is to full, the smaller the maximum preallocation.
 */
STATIC xfs_fsblock_t
xfs_iomap_prealloc_size(
	struct xfs_mount	*mp,
	struct xfs_inode	*ip,
	xfs_off_t		offset,
	struct xfs_bmbt_irec	*imap,
	int			nimaps)
{
	xfs_fsblock_t		alloc_blocks = 0;
	int			shift = 0;
	int64_t			freesp;
	xfs_fsblock_t		qblocks;
	int			qshift = 0;

	alloc_blocks = xfs_iomap_eof_prealloc_initial_size(mp, ip, offset,
							   imap, nimaps);
	if (!alloc_blocks)
		goto check_writeio;
	qblocks = alloc_blocks;

	/*
	 * MAXEXTLEN is not a power of two value but we round the prealloc down
	 * to the nearest power of two value after throttling. To prevent the
	 * round down from unconditionally reducing the maximum supported prealloc
	 * size, we round up first, apply appropriate throttling, round down and
	 * cap the value to MAXEXTLEN.
	 */
	alloc_blocks = XFS_FILEOFF_MIN(roundup_pow_of_two(MAXEXTLEN),
				       alloc_blocks);

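	/*
	 * Throttle against global free space: the shift starts at 2 once
	 * free space drops below the 5% threshold and grows by one for
	 * each further threshold (4%, 3%, 2%, 1%) crossed, giving a shift
	 * of 2 to 6, i.e. a reduction to between 1/4 and 1/64 of the
	 * preallocation when applied below.
	 */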
	xfs_icsb_sync_counters(mp, XFS_ICSB_LAZY_COUNT);
	freesp = mp->m_sb.sb_fdblocks;
	if (freesp < mp->m_low_space[XFS_LOWSP_5_PCNT]) {
		shift = 2;
		if (freesp < mp->m_low_space[XFS_LOWSP_4_PCNT])
			shift++;
		if (freesp < mp->m_low_space[XFS_LOWSP_3_PCNT])
			shift++;
		if (freesp < mp->m_low_space[XFS_LOWSP_2_PCNT])
			shift++;
		if (freesp < mp->m_low_space[XFS_LOWSP_1_PCNT])
			shift++;
	}

	/*
	 * Check each quota to cap the prealloc size and provide a shift
	 * value to throttle with.
	 */
	if (xfs_quota_need_throttle(ip, XFS_DQ_USER, alloc_blocks))
		xfs_quota_calc_throttle(ip, XFS_DQ_USER, &qblocks, &qshift);
	if (xfs_quota_need_throttle(ip, XFS_DQ_GROUP, alloc_blocks))
		xfs_quota_calc_throttle(ip, XFS_DQ_GROUP, &qblocks, &qshift);
	if (xfs_quota_need_throttle(ip, XFS_DQ_PROJ, alloc_blocks))
		xfs_quota_calc_throttle(ip, XFS_DQ_PROJ, &qblocks, &qshift);

	/*
	 * The final prealloc size is set to the minimum of free space available
	 * in each of the quotas and the overall filesystem.
	 *
	 * The shift throttle value is set to the maximum value as determined by
	 * the global low free space values and per-quota low free space values.
	 */
	alloc_blocks = MIN(alloc_blocks, qblocks);
	shift = MAX(shift, qshift);

	if (shift)
		alloc_blocks >>= shift;
	/*
	 * rounddown_pow_of_two() returns an undefined result if we pass in
	 * alloc_blocks = 0.
	 */
	if (alloc_blocks)
		alloc_blocks = rounddown_pow_of_two(alloc_blocks);
	if (alloc_blocks > MAXEXTLEN)
		alloc_blocks = MAXEXTLEN;

	/*
	 * If we are still trying to allocate more space than is
	 * available, squash the prealloc hard. This can happen if we
	 * have a large file on a small filesystem and the above
	 * lowspace thresholds are smaller than MAXEXTLEN.
	 */
	while (alloc_blocks && alloc_blocks >= freesp)
		alloc_blocks >>= 4;

check_writeio:
	if (alloc_blocks < mp->m_writeio_blocks)
		alloc_blocks = mp->m_writeio_blocks;

	trace_xfs_iomap_prealloc_size(ip, alloc_blocks, shift,
				      mp->m_writeio_blocks);

	return alloc_blocks;
}

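/*
 * Reserve a delayed allocation extent for a buffered write of @count bytes
 * at @offset.  Speculative EOF preallocation is applied when
 * xfs_iomap_eof_want_preallocate() allows it, and the reservation is
 * retried without preallocation if it fails with ENOSPC or EDQUOT.  On
 * success *ret_imap describes the delalloc extent covering the write.
 */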
int
xfs_iomap_write_delay(
	xfs_inode_t	*ip,
	xfs_off_t	offset,
	size_t		count,
	xfs_bmbt_irec_t *ret_imap)
{
	xfs_mount_t	*mp = ip->i_mount;
	xfs_fileoff_t	offset_fsb;
	xfs_fileoff_t	last_fsb;
	xfs_off_t	aligned_offset;
	xfs_fileoff_t	ioalign;
	xfs_extlen_t	extsz;
	int		nimaps;
	xfs_bmbt_irec_t imap[XFS_WRITE_IMAPS];
	int		prealloc;
	int		error;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));

	/*
	 * Make sure that the dquots are there. This doesn't hold
	 * the ilock across a disk read.
	 */
	error = xfs_qm_dqattach_locked(ip, 0);
	if (error)
		return XFS_ERROR(error);

	extsz = xfs_get_extsz_hint(ip);
	offset_fsb = XFS_B_TO_FSBT(mp, offset);

	error = xfs_iomap_eof_want_preallocate(mp, ip, offset, count,
				imap, XFS_WRITE_IMAPS, &prealloc);
	if (error)
		return error;

retry:
	if (prealloc) {
		xfs_fsblock_t	alloc_blocks;

		alloc_blocks = xfs_iomap_prealloc_size(mp, ip, offset, imap,
						       XFS_WRITE_IMAPS);

		aligned_offset = XFS_WRITEIO_ALIGN(mp, (offset + count - 1));
		ioalign = XFS_B_TO_FSBT(mp, aligned_offset);
		last_fsb = ioalign + alloc_blocks;
	} else {
		last_fsb = XFS_B_TO_FSB(mp, ((xfs_ufsize_t)(offset + count)));
	}

	if (prealloc || extsz) {
		error = xfs_iomap_eof_align_last_fsb(mp, ip, extsz, &last_fsb);
		if (error)
			return error;
	}

	/*
	 * Make sure preallocation does not create extents beyond the range we
	 * actually support in this filesystem.
	 */
	if (last_fsb > XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes))
		last_fsb = XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);

	ASSERT(last_fsb > offset_fsb);

	nimaps = XFS_WRITE_IMAPS;
	error = xfs_bmapi_delay(ip, offset_fsb, last_fsb - offset_fsb,
				imap, &nimaps, XFS_BMAPI_ENTIRE);
	switch (error) {
	case 0:
	case ENOSPC:
	case EDQUOT:
		break;
	default:
		return XFS_ERROR(error);
	}

	/*
	 * If bmapi returned us nothing, we got either ENOSPC or EDQUOT. Retry
	 * without EOF preallocation.
	 */
	if (nimaps == 0) {
		trace_xfs_delalloc_enospc(ip, offset, count);
		if (prealloc) {
			prealloc = 0;
			error = 0;
			goto retry;
		}
		return XFS_ERROR(error ? error : ENOSPC);
	}

	if (!(imap[0].br_startblock || XFS_IS_REALTIME_INODE(ip)))
		return xfs_alert_fsblock_zero(ip, &imap[0]);

	/*
	 * Tag the inode as speculatively preallocated so we can reclaim this
	 * space on demand, if necessary.
	 */
	if (prealloc)
		xfs_inode_set_eofblocks_tag(ip);

	*ret_imap = imap[0];
	return 0;
}

/*
 * Pass in a delayed allocate extent, convert it to real extents;
 * return to the caller the extent we create which maps on top of
 * the originating callers request.
 *
 * Called without a lock on the inode.
 *
 * We no longer bother to look at the incoming map - all we have to
 * guarantee is that whatever we allocate fills the required range.
 */
int
xfs_iomap_write_allocate(
	xfs_inode_t	*ip,
	xfs_off_t	offset,
	xfs_bmbt_irec_t *imap)
{
	xfs_mount_t	*mp = ip->i_mount;
	xfs_fileoff_t	offset_fsb, last_block;
	xfs_fileoff_t	end_fsb, map_start_fsb;
	xfs_fsblock_t	first_block;
	xfs_bmap_free_t	free_list;
	xfs_filblks_t	count_fsb;
	xfs_trans_t	*tp;
	int		nimaps, committed;
	int		error = 0;
	int		nres;

	/*
	 * Make sure that the dquots are there.
	 */
	error = xfs_qm_dqattach(ip, 0);
	if (error)
		return XFS_ERROR(error);

	offset_fsb = XFS_B_TO_FSBT(mp, offset);
	count_fsb = imap->br_blockcount;
	map_start_fsb = imap->br_startoff;

	XFS_STATS_ADD(xs_xstrat_bytes, XFS_FSB_TO_B(mp, count_fsb));

	while (count_fsb != 0) {
		/*
		 * Set up a transaction with which to allocate the
		 * backing store for the file.  Do allocations in a
		 * loop until we get some space in the range we are
		 * interested in.  The other space that might be allocated
		 * is in the delayed allocation extent on which we sit
		 * but before our buffer starts.
		 */

		nimaps = 0;
		while (nimaps == 0) {
			tp = xfs_trans_alloc(mp, XFS_TRANS_STRAT_WRITE);
			tp->t_flags |= XFS_TRANS_RESERVE;
			nres = XFS_EXTENTADD_SPACE_RES(mp, XFS_DATA_FORK);
			error = xfs_trans_reserve(tp, &M_RES(mp)->tr_write,
						  nres, 0);
			if (error) {
				xfs_trans_cancel(tp, 0);
				return XFS_ERROR(error);
			}
			xfs_ilock(ip, XFS_ILOCK_EXCL);
			xfs_trans_ijoin(tp, ip, 0);

			xfs_bmap_init(&free_list, &first_block);

			/*
			 * it is possible that the extents have changed since
			 * we did the read call as we dropped the ilock for a
			 * while. We have to be careful about truncates or hole
			 * punchs here - we are not allowed to allocate
			 * non-delalloc blocks here.
			 *
			 * The only protection against truncation is the pages
			 * for the range we are being asked to convert are
			 * locked and hence a truncate will block on them
			 * first.
			 *
			 * As a result, if we go beyond the range we really
			 * need and hit a delalloc extent boundary followed by
			 * a hole while we have excess blocks in the map, we
			 * will fill the hole incorrectly and overrun the
			 * transaction reservation.
			 *
			 * Using a single map prevents this as we are forced to
			 * check each map we look for overlap with the desired
			 * range and abort as soon as we find it. Also, given
			 * that we only return a single map, having one beyond
			 * what we can return is probably a bit silly.
			 *
			 * We also need to check that we don't go beyond EOF;
			 * this is a truncate optimisation as a truncate sets
			 * the new file size before blocking on the pages we
			 * currently have locked under writeback. Because they
			 * are about to be tossed, we don't need to write them
			 * back....
			 */
			nimaps = 1;
			end_fsb = XFS_B_TO_FSB(mp, XFS_ISIZE(ip));
			error = xfs_bmap_last_offset(NULL, ip, &last_block,
							XFS_DATA_FORK);
			if (error)
				goto trans_cancel;

			last_block = XFS_FILEOFF_MAX(last_block, end_fsb);
			if ((map_start_fsb + count_fsb) > last_block) {
				count_fsb = last_block - map_start_fsb;
				if (count_fsb == 0) {
					error = EAGAIN;
					goto trans_cancel;
				}
			}

			/*
			 * From this point onwards we overwrite the imap
			 * pointer that the caller gave to us.
			 */
			error = xfs_bmapi_write(tp, ip, map_start_fsb,
						count_fsb,
						XFS_BMAPI_STACK_SWITCH,
						&first_block, 1,
						imap, &nimaps, &free_list);
			if (error)
				goto trans_cancel;

			error = xfs_bmap_finish(&tp, &free_list, &committed);
			if (error)
				goto trans_cancel;

			error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
			if (error)
				goto error0;

			xfs_iunlock(ip, XFS_ILOCK_EXCL);
		}

		/*
		 * See if we were able to allocate an extent that
		 * covers at least part of the callers request
		 */
		if (!(imap->br_startblock || XFS_IS_REALTIME_INODE(ip)))
			return xfs_alert_fsblock_zero(ip, imap);

		if ((offset_fsb >= imap->br_startoff) &&
		    (offset_fsb < (imap->br_startoff +
				   imap->br_blockcount))) {
			XFS_STATS_INC(xs_xstrat_quick);
			return 0;
		}

		/*
		 * So far we have not mapped the requested part of the
		 * file, just surrounding data, try again.
		 */
		count_fsb -= imap->br_blockcount;
		map_start_fsb = imap->br_startoff + imap->br_blockcount;
	}

trans_cancel:
	xfs_bmap_cancel(&free_list);
	xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT);
error0:
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return XFS_ERROR(error);
}

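/*
 * Convert the unwritten extents backing [offset, offset + count) to
 * written extents, looping one transaction at a time and logging any
 * update to the on-disk file size as we go.
 */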
int
xfs_iomap_write_unwritten(
	xfs_inode_t	*ip,
	xfs_off_t	offset,
	size_t		count)
{
	xfs_mount_t	*mp = ip->i_mount;
	xfs_fileoff_t	offset_fsb;
	xfs_filblks_t	count_fsb;
	xfs_filblks_t	numblks_fsb;
	xfs_fsblock_t	firstfsb;
	int		nimaps;
	xfs_trans_t	*tp;
	xfs_bmbt_irec_t imap;
	xfs_bmap_free_t free_list;
	xfs_fsize_t	i_size;
	uint		resblks;
	int		committed;
	int		error;

	trace_xfs_unwritten_convert(ip, offset, count);

	offset_fsb = XFS_B_TO_FSBT(mp, offset);
	count_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + count);
	count_fsb = (xfs_filblks_t)(count_fsb - offset_fsb);

	/*
	 * Reserve enough blocks in this transaction for two complete extent
	 * btree splits.  We may be converting the middle part of an unwritten
	 * extent and in this case we will insert two new extents in the btree
	 * each of which could cause a full split.
	 *
	 * This reservation amount will be used in the first call to
	 * xfs_bmbt_split() to select an AG with enough space to satisfy the
	 * rest of the operation.
	 */
	resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0) << 1;

	do {
		/*
		 * set up a transaction to convert the range of extents
		 * from unwritten to real. Do allocations in a loop until
		 * we have covered the range passed in.
		 *
		 * Note that we open code the transaction allocation here
		 * to pass KM_NOFS--we can't risk recursing back into
		 * the filesystem here as we might be asked to write out
		 * the same inode that we complete here and might deadlock
		 * on the iolock.
		 */
		sb_start_intwrite(mp->m_super);
		tp = _xfs_trans_alloc(mp, XFS_TRANS_STRAT_WRITE, KM_NOFS);
		tp->t_flags |= XFS_TRANS_RESERVE | XFS_TRANS_FREEZE_PROT;
		error = xfs_trans_reserve(tp, &M_RES(mp)->tr_write,
					  resblks, 0);
		if (error) {
			xfs_trans_cancel(tp, 0);
			return XFS_ERROR(error);
		}

		xfs_ilock(ip, XFS_ILOCK_EXCL);
		xfs_trans_ijoin(tp, ip, 0);

		/*
		 * Modify the unwritten extent state of the buffer.
		 */
		xfs_bmap_init(&free_list, &firstfsb);
		nimaps = 1;
		error = xfs_bmapi_write(tp, ip, offset_fsb, count_fsb,
				  XFS_BMAPI_CONVERT, &firstfsb,
				  1, &imap, &nimaps, &free_list);
		if (error)
			goto error_on_bmapi_transaction;

		/*
		 * Log the updated inode size as we go.  We have to be careful
		 * to only log it up to the actual write offset if it is
		 * halfway into a block.
		 */
		i_size = XFS_FSB_TO_B(mp, offset_fsb + count_fsb);
		if (i_size > offset + count)
			i_size = offset + count;

		i_size = xfs_new_eof(ip, i_size);
		if (i_size) {
			ip->i_d.di_size = i_size;
			xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
		}

		error = xfs_bmap_finish(&tp, &free_list, &committed);
		if (error)
			goto error_on_bmapi_transaction;

		error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
		if (error)
			return XFS_ERROR(error);

		if (!(imap.br_startblock || XFS_IS_REALTIME_INODE(ip)))
			return xfs_alert_fsblock_zero(ip, &imap);

		if ((numblks_fsb = imap.br_blockcount) == 0) {
			/*
			 * The numblks_fsb value should always get
			 * smaller, otherwise the loop is stuck.
			 */
			ASSERT(imap.br_blockcount);
			break;
		}
		offset_fsb += numblks_fsb;
		count_fsb -= numblks_fsb;
	} while (count_fsb > 0);

	return 0;

error_on_bmapi_transaction:
	xfs_bmap_cancel(&free_list);
	xfs_trans_cancel(tp, (XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT));
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return XFS_ERROR(error);
}