// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * Copyright (c) 2016-2018 Christoph Hellwig.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_inode_item.h"
#include "xfs_alloc.h"
#include "xfs_error.h"
#include "xfs_iomap.h"
#include "xfs_trace.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_bmap_btree.h"
#include "xfs_reflink.h"
#include <linux/writeback.h>

/*
 * Writeback state owned by ->writepages and passed down to each individual
 * ->writepage call.
 */
struct xfs_writepage_ctx {
	struct xfs_bmbt_irec    imap;
	unsigned int		io_type;
	struct xfs_ioend	*ioend;
};

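/*
 * Return the block device backing this inode: the realtime device for
 * realtime files, the main data device for everything else.
 */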
struct block_device *
xfs_find_bdev_for_inode(
	struct inode		*inode)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;

	if (XFS_IS_REALTIME_INODE(ip))
		return mp->m_rtdev_targp->bt_bdev;
	else
		return mp->m_ddev_targp->bt_bdev;
}

struct dax_device *
xfs_find_daxdev_for_inode(
	struct inode		*inode)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;

	if (XFS_IS_REALTIME_INODE(ip))
		return mp->m_rtdev_targp->bt_daxdev;
	else
		return mp->m_ddev_targp->bt_daxdev;
}

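/*
 * Per-block writeback completion for a page.  With a block size smaller than
 * the page size, a page can be covered by multiple bios; the iomap_page
 * write_count tracks how many blocks are still under IO, and only the last
 * completion ends writeback on the page.
 */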
static void
xfs_finish_page_writeback(
	struct inode		*inode,
	struct bio_vec		*bvec,
	int			error)
{
	struct iomap_page	*iop = to_iomap_page(bvec->bv_page);

	if (error) {
		SetPageError(bvec->bv_page);
		mapping_set_error(inode->i_mapping, -EIO);
	}

	ASSERT(iop || i_blocksize(inode) == PAGE_SIZE);
	ASSERT(!iop || atomic_read(&iop->write_count) > 0);

	if (!iop || atomic_dec_and_test(&iop->write_count))
		end_page_writeback(bvec->bv_page);
}

/*
 * We're now finished for good with this ioend structure.  Update the page
 * state, release holds on bios, and finally free up memory.  Do not use the
 * ioend after this.
 */
STATIC void
xfs_destroy_ioend(
	struct xfs_ioend	*ioend,
	int			error)
{
	struct inode		*inode = ioend->io_inode;
	struct bio		*bio = &ioend->io_inline_bio;
	struct bio		*last = ioend->io_bio, *next;
	u64			start = bio->bi_iter.bi_sector;
	bool			quiet = bio_flagged(bio, BIO_QUIET);

	for (bio = &ioend->io_inline_bio; bio; bio = next) {
		struct bio_vec	*bvec;
		int		i;

		/*
		 * For the last bio, bi_private points to the ioend, so we
		 * need to explicitly end the iteration here.
		 */
		if (bio == last)
			next = NULL;
		else
			next = bio->bi_private;

		/* walk each page on bio, ending page IO on them */
		bio_for_each_segment_all(bvec, bio, i)
			xfs_finish_page_writeback(inode, bvec, error);
		bio_put(bio);
	}

	if (unlikely(error && !quiet)) {
		xfs_err_ratelimited(XFS_I(inode)->i_mount,
			"writeback error on sector %llu", start);
	}
}

/*
 * Fast and loose check if this write could update the on-disk inode size.
 * Done without the ilock; the authoritative check happens under
 * XFS_ILOCK_EXCL in __xfs_setfilesize() via xfs_new_eof().
 */
static inline bool xfs_ioend_is_append(struct xfs_ioend *ioend)
{
	return ioend->io_offset + ioend->io_size >
		XFS_I(ioend->io_inode)->i_d.di_size;
}

STATIC int
xfs_setfilesize_trans_alloc(
	struct xfs_ioend	*ioend)
{
	struct xfs_mount	*mp = XFS_I(ioend->io_inode)->i_mount;
	struct xfs_trans	*tp;
	int			error;

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_fsyncts, 0, 0,
				XFS_TRANS_NOFS, &tp);
	if (error)
		return error;

	ioend->io_append_trans = tp;

	/*
	 * We may pass freeze protection with a transaction.  So tell lockdep
	 * we released it.
	 */
	__sb_writers_release(ioend->io_inode->i_sb, SB_FREEZE_FS);
	/*
	 * We hand off the transaction to the completion thread now, so
	 * clear the flag here.
	 */
	current_restore_flags_nested(&tp->t_pflags, PF_MEMALLOC_NOFS);
	return 0;
}

/*
 * Update on-disk file size now that data has been written to disk.
 */
STATIC int
__xfs_setfilesize(
	struct xfs_inode	*ip,
	struct xfs_trans	*tp,
	xfs_off_t		offset,
	size_t			size)
{
	xfs_fsize_t		isize;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	isize = xfs_new_eof(ip, offset + size);
	if (!isize) {
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
		xfs_trans_cancel(tp);
		return 0;
	}

	trace_xfs_setfilesize(ip, offset, size);

	ip->i_d.di_size = isize;
	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);

	return xfs_trans_commit(tp);
}

int
xfs_setfilesize(
	struct xfs_inode	*ip,
	xfs_off_t		offset,
	size_t			size)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp;
	int			error;

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_fsyncts, 0, 0, 0, &tp);
	if (error)
		return error;

	return __xfs_setfilesize(ip, tp, offset, size);
}

STATIC int
xfs_setfilesize_ioend(
	struct xfs_ioend	*ioend,
	int			error)
{
	struct xfs_inode	*ip = XFS_I(ioend->io_inode);
	struct xfs_trans	*tp = ioend->io_append_trans;

	/*
	 * The transaction may have been allocated in the I/O submission thread,
	 * thus we need to mark ourselves as being in a transaction manually.
	 * Similarly for freeze protection.
	 */
	current_set_flags_nested(&tp->t_pflags, PF_MEMALLOC_NOFS);
	__sb_writers_acquired(VFS_I(ip)->i_sb, SB_FREEZE_FS);

	/* we abort the update if there was an IO error */
	if (error) {
		xfs_trans_cancel(tp);
		return error;
	}

	return __xfs_setfilesize(ip, tp, ioend->io_offset, ioend->io_size);
}

/*
 * IO write completion.  This runs from a workqueue, where we are allowed
 * to block and run transactions.
 */
STATIC void
xfs_end_io(
	struct work_struct *work)
{
	struct xfs_ioend	*ioend =
		container_of(work, struct xfs_ioend, io_work);
	struct xfs_inode	*ip = XFS_I(ioend->io_inode);
	xfs_off_t		offset = ioend->io_offset;
	size_t			size = ioend->io_size;
	int			error;

	/*
	 * Just clean up the in-memory structures if the fs has been shut down.
	 */
	if (XFS_FORCED_SHUTDOWN(ip->i_mount)) {
		error = -EIO;
		goto done;
	}

	/*
	 * Clean up any COW blocks on an I/O error.
	 */
	error = blk_status_to_errno(ioend->io_bio->bi_status);
	if (unlikely(error)) {
		switch (ioend->io_type) {
		case XFS_IO_COW:
			xfs_reflink_cancel_cow_range(ip, offset, size, true);
			break;
		}

		goto done;
	}

	/*
	 * Success:  commit the COW or unwritten blocks if needed.
	 */
	switch (ioend->io_type) {
	case XFS_IO_COW:
		error = xfs_reflink_end_cow(ip, offset, size);
		break;
	case XFS_IO_UNWRITTEN:
		/* writeback should never update isize */
		error = xfs_iomap_write_unwritten(ip, offset, size, false);
		break;
	default:
		ASSERT(!xfs_ioend_is_append(ioend) || ioend->io_append_trans);
		break;
	}

done:
	if (ioend->io_append_trans)
		error = xfs_setfilesize_ioend(ioend, error);
	xfs_destroy_ioend(ioend, error);
}

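/*
 * Bio completion handler.  This runs in bio completion context, where we
 * cannot block, so completions that need transactional work (unwritten
 * extent conversion, COW remapping, on-disk size updates) are punted to a
 * workqueue; everything else is destroyed inline.
 */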
STATIC void
xfs_end_bio(
	struct bio		*bio)
{
	struct xfs_ioend	*ioend = bio->bi_private;
	struct xfs_mount	*mp = XFS_I(ioend->io_inode)->i_mount;

	if (ioend->io_type == XFS_IO_UNWRITTEN || ioend->io_type == XFS_IO_COW)
		queue_work(mp->m_unwritten_workqueue, &ioend->io_work);
	else if (ioend->io_append_trans)
		queue_work(mp->m_data_workqueue, &ioend->io_work);
	else
		xfs_destroy_ioend(ioend, blk_status_to_errno(bio->bi_status));
}

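/*
 * Look up and cache the extent mapping for @offset, classifying it in
 * wpc->io_type (hole, delalloc, unwritten, overwrite or COW) so the caller
 * knows how this block range needs to be written back.
 */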
STATIC int
xfs_map_blocks(
	struct xfs_writepage_ctx *wpc,
	struct inode		*inode,
	loff_t			offset)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	ssize_t			count = i_blocksize(inode);
	xfs_fileoff_t		offset_fsb = XFS_B_TO_FSBT(mp, offset), end_fsb;
	struct xfs_bmbt_irec	imap;
	int			whichfork = XFS_DATA_FORK;
	struct xfs_iext_cursor	icur;
	bool			imap_valid;
	int			error = 0;

	/*
	 * We have to make sure the cached mapping is within EOF to protect
	 * against eofblocks trimming on file release leaving us with a stale
	 * mapping. Otherwise, a page for a subsequent file extending buffered
	 * write could get picked up by this writeback cycle and written to the
	 * wrong blocks.
	 *
	 * Note that what we really want here is a generic mapping invalidation
	 * mechanism to protect us from arbitrary extent modifying contexts, not
	 * just eofblocks.
	 */
	xfs_trim_extent_eof(&wpc->imap, ip);

	/*
	 * COW fork blocks can overlap data fork blocks even if the blocks
	 * aren't shared.  COW I/O always takes precedence, so we must always
	 * check for overlap on reflink inodes unless the mapping is already a
	 * COW one.
	 */
	imap_valid = offset_fsb >= wpc->imap.br_startoff &&
		     offset_fsb < wpc->imap.br_startoff + wpc->imap.br_blockcount;
	if (imap_valid &&
	    (!xfs_inode_has_cow_data(ip) || wpc->io_type == XFS_IO_COW))
		return 0;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	/*
	 * If we don't have a valid map, now it's time to get a new one for this
	 * offset.  This will convert delayed allocations (including COW ones)
	 * into real extents.  If we return without a valid map, it means we
	 * landed in a hole and we skip the block.
	 */
	xfs_ilock(ip, XFS_ILOCK_SHARED);
	ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE ||
	       (ip->i_df.if_flags & XFS_IFEXTENTS));
	ASSERT(offset <= mp->m_super->s_maxbytes);

	if (offset > mp->m_super->s_maxbytes - count)
		count = mp->m_super->s_maxbytes - offset;
	end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + count);

	/*
	 * Check if this offset is covered by a COW extent, and if so use it
	 * directly instead of looking up anything in the data fork.
	 */
	if (xfs_inode_has_cow_data(ip) &&
	    xfs_iext_lookup_extent(ip, ip->i_cowfp, offset_fsb, &icur, &imap) &&
	    imap.br_startoff <= offset_fsb) {
		xfs_iunlock(ip, XFS_ILOCK_SHARED);
		/*
		 * Truncate can race with writeback since writeback doesn't
		 * take the iolock and truncate decreases the file size before
		 * it starts truncating the pages between new_size and old_size.
		 * Therefore, we can end up in the situation where writeback
		 * gets a CoW fork mapping but the truncate makes the mapping
		 * invalid and we end up in here trying to get a new mapping.
		 * Bail out here so that we simply never get a valid mapping
		 * and so we drop the write altogether.  The page truncation
		 * will kill the contents anyway.
		 */
		if (offset > i_size_read(inode)) {
			wpc->io_type = XFS_IO_HOLE;
			return 0;
		}
		whichfork = XFS_COW_FORK;
		wpc->io_type = XFS_IO_COW;
		goto allocate_blocks;
	}

	/*
	 * Map valid and no COW extent in the way?  We're done.
	 */
	if (imap_valid) {
		xfs_iunlock(ip, XFS_ILOCK_SHARED);
		return 0;
	}

	/*
	 * If we don't have a valid map, now it's time to get a new one for this
	 * offset.  This will convert delayed allocations (including COW ones)
	 * into real extents.
	 */
	if (!xfs_iext_lookup_extent(ip, &ip->i_df, offset_fsb, &icur, &imap))
		imap.br_startoff = end_fsb;	/* fake a hole past EOF */
	xfs_iunlock(ip, XFS_ILOCK_SHARED);

	if (imap.br_startoff > offset_fsb) {
		/* landed in a hole or beyond EOF */
		imap.br_blockcount = imap.br_startoff - offset_fsb;
		imap.br_startoff = offset_fsb;
		imap.br_startblock = HOLESTARTBLOCK;
		wpc->io_type = XFS_IO_HOLE;
	} else {
		if (isnullstartblock(imap.br_startblock)) {
			/* got a delalloc extent */
			wpc->io_type = XFS_IO_DELALLOC;
			goto allocate_blocks;
		}

		if (imap.br_state == XFS_EXT_UNWRITTEN)
			wpc->io_type = XFS_IO_UNWRITTEN;
		else
			wpc->io_type = XFS_IO_OVERWRITE;
	}

	wpc->imap = imap;
	trace_xfs_map_blocks_found(ip, offset, count, wpc->io_type, &imap);
	return 0;
allocate_blocks:
	error = xfs_iomap_write_allocate(ip, whichfork, offset, &imap);
	if (error)
		return error;
	wpc->imap = imap;
	trace_xfs_map_blocks_alloc(ip, offset, count, wpc->io_type, &imap);
	return 0;
}

/*
 * Submit the bio for an ioend. We are passed an ioend with a bio attached to
 * it, and we submit that bio. The ioend may be used for multiple bio
 * submissions, so we only want to allocate an append transaction for the ioend
 * once. In the case of multiple bio submission, each bio will take an IO
 * reference to the ioend to ensure that the ioend completion is only done once
 * all bios have been submitted and the ioend is really done.
 *
 * If @status is non-zero, it means that we have a situation where some part of
 * the submission process has failed after we have marked pages for writeback
 * and unlocked them. In this situation, we need to fail the bio and ioend
 * rather than submit it to IO. This typically only happens on a filesystem
 * shutdown.
 */
STATIC int
xfs_submit_ioend(
	struct writeback_control *wbc,
	struct xfs_ioend	*ioend,
	int			status)
{
	/* Convert CoW extents to regular */
	if (!status && ioend->io_type == XFS_IO_COW) {
		/*
		 * Yuk. This can do memory allocation, but is not a
		 * transactional operation so everything is done in GFP_KERNEL
		 * context. That can deadlock, because we hold pages in
		 * writeback state and GFP_KERNEL allocations can block on them.
		 * Hence we must operate in nofs conditions here.
		 */
		unsigned nofs_flag;

		nofs_flag = memalloc_nofs_save();
		status = xfs_reflink_convert_cow(XFS_I(ioend->io_inode),
				ioend->io_offset, ioend->io_size);
		memalloc_nofs_restore(nofs_flag);
	}

	/* Reserve log space if we might write beyond the on-disk inode size. */
	if (!status &&
	    ioend->io_type != XFS_IO_UNWRITTEN &&
	    xfs_ioend_is_append(ioend) &&
	    !ioend->io_append_trans)
		status = xfs_setfilesize_trans_alloc(ioend);

	ioend->io_bio->bi_private = ioend;
	ioend->io_bio->bi_end_io = xfs_end_bio;
	ioend->io_bio->bi_opf = REQ_OP_WRITE | wbc_to_write_flags(wbc);

	/*
	 * If we are failing the IO now, just mark the ioend with an
	 * error and finish it. This will run IO completion immediately
	 * as there is only one reference to the ioend at this point in
	 * time.
	 */
	if (status) {
		ioend->io_bio->bi_status = errno_to_blk_status(status);
		bio_endio(ioend->io_bio);
		return status;
	}

	ioend->io_bio->bi_write_hint = ioend->io_inode->i_write_hint;
	submit_bio(ioend->io_bio);
	return 0;
}

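/*
 * Allocate an ioend together with its first bio out of xfs_ioend_bioset:
 * the ioend is recovered from the bio via container_of() on io_inline_bio,
 * so dropping the last reference on that bio frees the ioend as well.
 */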
static struct xfs_ioend *
xfs_alloc_ioend(
	struct inode		*inode,
	unsigned int		type,
	xfs_off_t		offset,
	struct block_device	*bdev,
	sector_t		sector)
{
	struct xfs_ioend	*ioend;
	struct bio		*bio;

	bio = bio_alloc_bioset(GFP_NOFS, BIO_MAX_PAGES, &xfs_ioend_bioset);
	bio_set_dev(bio, bdev);
	bio->bi_iter.bi_sector = sector;

	ioend = container_of(bio, struct xfs_ioend, io_inline_bio);
	INIT_LIST_HEAD(&ioend->io_list);
	ioend->io_type = type;
	ioend->io_inode = inode;
	ioend->io_size = 0;
	ioend->io_offset = offset;
	INIT_WORK(&ioend->io_work, xfs_end_io);
	ioend->io_append_trans = NULL;
	ioend->io_bio = bio;
	return ioend;
}

/*
 * Allocate a new bio, and chain the old bio to the new one.
 *
 * Note that we have to perform the chaining in this unintuitive order
 * so that the bi_private linkage is set up in the right direction for the
 * traversal in xfs_destroy_ioend().
 */
static void
xfs_chain_bio(
	struct xfs_ioend	*ioend,
	struct writeback_control *wbc,
	struct block_device	*bdev,
	sector_t		sector)
{
	struct bio *new;

	new = bio_alloc(GFP_NOFS, BIO_MAX_PAGES);
	bio_set_dev(new, bdev);
	new->bi_iter.bi_sector = sector;
	bio_chain(ioend->io_bio, new);
	bio_get(ioend->io_bio);		/* for xfs_destroy_ioend */
	ioend->io_bio->bi_opf = REQ_OP_WRITE | wbc_to_write_flags(wbc);
	ioend->io_bio->bi_write_hint = ioend->io_inode->i_write_hint;
	submit_bio(ioend->io_bio);
	ioend->io_bio = new;
}

/*
 * Test to see if we have an existing ioend structure that we could append to
 * first, otherwise finish off the current ioend and start another.
 */
STATIC void
xfs_add_to_ioend(
	struct inode		*inode,
	xfs_off_t		offset,
	struct page		*page,
	struct iomap_page	*iop,
	struct xfs_writepage_ctx *wpc,
	struct writeback_control *wbc,
	struct list_head	*iolist)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	struct block_device	*bdev = xfs_find_bdev_for_inode(inode);
	unsigned		len = i_blocksize(inode);
	unsigned		poff = offset & (PAGE_SIZE - 1);
	sector_t		sector;

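	/*
	 * The target sector is the disk address of the extent's start block
	 * plus the byte distance of @offset into the mapped range, in units
	 * of 512-byte sectors.  E.g. with 4k fs blocks, an offset two blocks
	 * into the mapping lands (2 * 4096) >> 9 = 16 sectors past the
	 * extent's start daddr.
	 */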
	sector = xfs_fsb_to_db(ip, wpc->imap.br_startblock) +
		((offset - XFS_FSB_TO_B(mp, wpc->imap.br_startoff)) >> 9);

	if (!wpc->ioend || wpc->io_type != wpc->ioend->io_type ||
	    sector != bio_end_sector(wpc->ioend->io_bio) ||
	    offset != wpc->ioend->io_offset + wpc->ioend->io_size) {
		if (wpc->ioend)
			list_add(&wpc->ioend->io_list, iolist);
		wpc->ioend = xfs_alloc_ioend(inode, wpc->io_type, offset,
				bdev, sector);
	}

	if (!__bio_try_merge_page(wpc->ioend->io_bio, page, len, poff)) {
		if (iop)
			atomic_inc(&iop->write_count);
		if (bio_full(wpc->ioend->io_bio))
			xfs_chain_bio(wpc->ioend, wbc, bdev, sector);
		__bio_add_page(wpc->ioend->io_bio, page, len, poff);
	}

	wpc->ioend->io_size += len;
}

STATIC void
xfs_vm_invalidatepage(
	struct page		*page,
	unsigned int		offset,
	unsigned int		length)
{
	trace_xfs_invalidatepage(page->mapping->host, page, offset, length);
	iomap_invalidatepage(page, offset, length);
}

/*
 * If the page has delalloc blocks on it, we need to punch them out before we
 * invalidate the page.  If we don't, we leave a stale delalloc mapping on the
 * inode that can trip up a later direct I/O read operation on the same region.
 *
 * We prevent this by truncating away the delalloc regions on the page.  Because
 * they are delalloc, we can do this without needing a transaction. Indeed - if
 * we get ENOSPC errors, we have to be able to do this truncation without a
 * transaction as there is no space left for block reservation (typically why we
 * see an ENOSPC in writeback).
 */
STATIC void
xfs_aops_discard_page(
	struct page		*page)
{
	struct inode		*inode = page->mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	loff_t			offset = page_offset(page);
	xfs_fileoff_t		start_fsb = XFS_B_TO_FSBT(mp, offset);
	int			error;

	if (XFS_FORCED_SHUTDOWN(mp))
		goto out_invalidate;

	xfs_alert(mp,
		"page discard on page "PTR_FMT", inode 0x%llx, offset %llu.",
			page, ip->i_ino, offset);

	error = xfs_bmap_punch_delalloc_range(ip, start_fsb,
			PAGE_SIZE / i_blocksize(inode));
	if (error && !XFS_FORCED_SHUTDOWN(mp))
		xfs_alert(mp, "page discard unable to remove delalloc mapping.");
out_invalidate:
	xfs_vm_invalidatepage(page, 0, PAGE_SIZE);
}

/*
 * We implement an immediate ioend submission policy here to avoid needing to
 * chain multiple ioends and hence nest mempool allocations which can violate
 * forward progress guarantees we need to provide. The current ioend we are
 * adding blocks to is cached on the writepage context, and if the new block
 * does not append to the cached ioend it will create a new ioend and cache that
 * instead.
 *
 * If a new ioend is created and cached, the old ioend is returned and queued
 * locally for submission once the entire page is processed or an error has been
 * detected.  While ioends are submitted immediately after they are completed,
 * batching optimisations are provided by higher level block plugging.
 *
 * At the end of a writeback pass, there will be a cached ioend remaining on the
 * writepage context that the caller will need to submit.
 */
static int
xfs_writepage_map(
	struct xfs_writepage_ctx *wpc,
	struct writeback_control *wbc,
	struct inode		*inode,
	struct page		*page,
	uint64_t		end_offset)
{
	LIST_HEAD(submit_list);
	struct iomap_page	*iop = to_iomap_page(page);
	unsigned		len = i_blocksize(inode);
	struct xfs_ioend	*ioend, *next;
	uint64_t		file_offset;	/* file offset of page */
	int			error = 0, count = 0, i;

	ASSERT(iop || i_blocksize(inode) == PAGE_SIZE);
	ASSERT(!iop || atomic_read(&iop->write_count) == 0);

	/*
	 * Walk through the page to find areas to write back. If we run off the
	 * end of the current map or find the current map invalid, grab a new
	 * one.
	 */
	for (i = 0, file_offset = page_offset(page);
	     i < (PAGE_SIZE >> inode->i_blkbits) && file_offset < end_offset;
	     i++, file_offset += len) {
		if (iop && !test_bit(i, iop->uptodate))
			continue;

		error = xfs_map_blocks(wpc, inode, file_offset);
		if (error)
			break;
		if (wpc->io_type == XFS_IO_HOLE)
			continue;
		xfs_add_to_ioend(inode, file_offset, page, iop, wpc, wbc,
				 &submit_list);
		count++;
	}

	ASSERT(wpc->ioend || list_empty(&submit_list));
	ASSERT(PageLocked(page));
	ASSERT(!PageWriteback(page));

	/*
	 * On error, we have to fail the ioend here because we may have set
	 * pages under writeback, we have to make sure we run IO completion to
	 * mark the error state of the IO appropriately, so we can't cancel the
	 * ioend directly here.  That means we have to mark this page as under
	 * writeback if we included any blocks from it in the ioend chain so
	 * that completion treats it correctly.
	 *
	 * If we didn't include the page in the ioend, then on error we can
	 * simply discard and unlock it as there are no other users of the page
	 * now.  The caller will still need to trigger submission of outstanding
	 * ioends on the writepage context so they are treated correctly on
	 * error.
	 */
	if (unlikely(error)) {
		if (!count) {
			xfs_aops_discard_page(page);
			ClearPageUptodate(page);
			unlock_page(page);
			goto done;
		}

		/*
		 * If the page was not fully cleaned, we need to ensure that the
		 * higher layers come back to it correctly.  That means we need
		 * to keep the page dirty, and for WB_SYNC_ALL writeback we need
		 * to ensure the PAGECACHE_TAG_TOWRITE index mark is not removed
		 * so another attempt to write this page in this writeback sweep
		 * will be made.
		 */
		set_page_writeback_keepwrite(page);
	} else {
		clear_page_dirty_for_io(page);
		set_page_writeback(page);
	}

	unlock_page(page);

	/*
	 * Preserve the original error if there was one, otherwise catch
	 * submission errors here and propagate into subsequent ioend
	 * submissions.
	 */
	list_for_each_entry_safe(ioend, next, &submit_list, io_list) {
		int error2;

		list_del_init(&ioend->io_list);
		error2 = xfs_submit_ioend(wbc, ioend, error);
		if (error2 && !error)
			error = error2;
	}

	/*
	 * We can end up here with no error and nothing to write only if we race
	 * with a partial page truncate on a sub-page block sized filesystem.
	 */
	if (!count)
		end_page_writeback(page);
done:
	mapping_set_error(page->mapping, error);
	return error;
}

/*
 * Write out a dirty page.
 *
 * For delalloc space on the page we need to allocate space and flush it.
 * For unwritten space on the page we need to start the conversion to
 * regular allocated space.
 */
STATIC int
xfs_do_writepage(
	struct page		*page,
	struct writeback_control *wbc,
	void			*data)
{
	struct xfs_writepage_ctx *wpc = data;
	struct inode		*inode = page->mapping->host;
	loff_t			offset;
	uint64_t		end_offset;
	pgoff_t			end_index;

	trace_xfs_writepage(inode, page, 0, 0);

	/*
	 * Refuse to write the page out if we are called from reclaim context.
	 *
	 * This avoids stack overflows when called from deeply used stacks in
	 * random callers for direct reclaim or memcg reclaim.  We explicitly
	 * allow reclaim from kswapd as the stack usage there is relatively low.
	 *
	 * This should never happen except in the case of a VM regression so
	 * warn about it.
	 */
	if (WARN_ON_ONCE((current->flags & (PF_MEMALLOC|PF_KSWAPD)) ==
			PF_MEMALLOC))
		goto redirty;

	/*
	 * Given that we do not allow direct reclaim to call us, we should
	 * never be called while in a filesystem transaction.
	 */
	if (WARN_ON_ONCE(current->flags & PF_MEMALLOC_NOFS))
		goto redirty;

	/*
	 * Is this page beyond the end of the file?
	 *
	 * If the page index is less than the end_index, adjust the end_offset
	 * to the highest offset that this page should represent.
	 * -----------------------------------------------------
	 * |			file mapping	       | <EOF> |
	 * -----------------------------------------------------
	 * | Page ... | Page N-2 | Page N-1 |  Page N  |       |
	 * ^--------------------------------^----------|--------
	 * |     desired writeback range    |      see else    |
	 * ---------------------------------^------------------|
	 */
	offset = i_size_read(inode);
	end_index = offset >> PAGE_SHIFT;
	if (page->index < end_index)
		end_offset = (xfs_off_t)(page->index + 1) << PAGE_SHIFT;
	else {
		/*
		 * Check whether the page to write out is beyond or straddles
		 * i_size or not.
		 * -------------------------------------------------------
		 * |		file mapping		        | <EOF>  |
		 * -------------------------------------------------------
		 * | Page ... | Page N-2 | Page N-1 |  Page N   | Beyond |
		 * ^--------------------------------^-----------|---------
		 * |				    |      Straddles     |
		 * ---------------------------------^-----------|--------|
		 */
		unsigned offset_into_page = offset & (PAGE_SIZE - 1);

		/*
		 * Skip the page if it is fully outside i_size, e.g. due to a
		 * truncate operation that is in progress. We must redirty the
		 * page so that reclaim stops reclaiming it. Otherwise
		 * xfs_vm_releasepage() is called on it and gets confused.
		 *
		 * Note that the end_index is unsigned long, it would overflow
		 * if the given offset is greater than 16TB on 32-bit systems
		 * and if we do check the page is fully outside i_size or not
		 * via "if (page->index >= end_index + 1)" as "end_index + 1"
		 * will be evaluated to 0.  Hence this page will be redirtied
		 * and be written out repeatedly which would result in an
		 * infinite loop; the user program that performs this operation
		 * will hang.  Instead, we can verify this situation by checking
		 * if the page to write is totally beyond the i_size or if its
		 * offset is just equal to the EOF.
		 */
		if (page->index > end_index ||
		    (page->index == end_index && offset_into_page == 0))
			goto redirty;

		/*
		 * The page straddles i_size.  It must be zeroed out on each
		 * and every writepage invocation because it may be mmapped.
		 * "A file is mapped in multiples of the page size.  For a file
		 * that is not a multiple of the page size, the remaining
		 * memory is zeroed when mapped, and writes to that region are
		 * not written out to the file."
		 */
		zero_user_segment(page, offset_into_page, PAGE_SIZE);

		/* Adjust the end_offset to the end of file */
		end_offset = offset;
	}

	return xfs_writepage_map(wpc, wbc, inode, page, end_offset);

redirty:
	redirty_page_for_writepage(wbc, page);
	unlock_page(page);
	return 0;
}

STATIC int
xfs_vm_writepage(
	struct page		*page,
	struct writeback_control *wbc)
{
	struct xfs_writepage_ctx wpc = {
		.io_type = XFS_IO_INVALID,
	};
	int			ret;

	ret = xfs_do_writepage(page, wbc, &wpc);
	if (wpc.ioend)
		ret = xfs_submit_ioend(wbc, wpc.ioend, ret);
	return ret;
}

STATIC int
xfs_vm_writepages(
	struct address_space	*mapping,
	struct writeback_control *wbc)
{
	struct xfs_writepage_ctx wpc = {
		.io_type = XFS_IO_INVALID,
	};
	int			ret;

	xfs_iflags_clear(XFS_I(mapping->host), XFS_ITRUNCATED);
	ret = write_cache_pages(mapping, wbc, xfs_do_writepage, &wpc);
	if (wpc.ioend)
		ret = xfs_submit_ioend(wbc, wpc.ioend, ret);
	return ret;
}

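/*
 * DAX writeback has no ioend machinery or block IO to run; it only needs to
 * flush dirty CPU cachelines for the mapped ranges down to media, which
 * dax_writeback_mapping_range() does directly.
 */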
STATIC int
xfs_dax_writepages(
	struct address_space	*mapping,
	struct writeback_control *wbc)
{
	xfs_iflags_clear(XFS_I(mapping->host), XFS_ITRUNCATED);
	return dax_writeback_mapping_range(mapping,
			xfs_find_bdev_for_inode(mapping->host), wbc);
}

STATIC int
xfs_vm_releasepage(
	struct page		*page,
	gfp_t			gfp_mask)
{
	trace_xfs_releasepage(page->mapping->host, page, 0, 0);
	return iomap_releasepage(page, gfp_mask);
}

STATIC sector_t
xfs_vm_bmap(
	struct address_space	*mapping,
	sector_t		block)
{
	struct xfs_inode	*ip = XFS_I(mapping->host);

	trace_xfs_vm_bmap(ip);

	/*
	 * The swap code (ab-)uses ->bmap to get a block mapping and then
	 * bypasses the file system for actual I/O.  We really can't allow
	 * that on reflink inodes, so we have to skip out here.  And yes,
	 * 0 is the magic code for a bmap error.
	 *
	 * Since we don't pass back blockdev info, we can't return bmap
	 * information for rt files either.
	 */
	if (xfs_is_reflink_inode(ip) || XFS_IS_REALTIME_INODE(ip))
		return 0;
	return iomap_bmap(mapping, block, &xfs_iomap_ops);
}

STATIC int
xfs_vm_readpage(
	struct file		*unused,
	struct page		*page)
{
	trace_xfs_vm_readpage(page->mapping->host, 1);
	return iomap_readpage(page, &xfs_iomap_ops);
}

STATIC int
xfs_vm_readpages(
	struct file		*unused,
	struct address_space	*mapping,
	struct list_head	*pages,
	unsigned		nr_pages)
{
	trace_xfs_vm_readpages(mapping->host, nr_pages);
	return iomap_readpages(mapping, pages, nr_pages, &xfs_iomap_ops);
}

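/*
 * Activate an XFS file for swapping: point the swap code at our block
 * device, then let the generic iomap helper walk and validate the file's
 * extents up front.
 */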
static int
xfs_iomap_swapfile_activate(
	struct swap_info_struct		*sis,
	struct file			*swap_file,
	sector_t			*span)
{
	sis->bdev = xfs_find_bdev_for_inode(file_inode(swap_file));
	return iomap_swapfile_activate(sis, swap_file, span, &xfs_iomap_ops);
}

const struct address_space_operations xfs_address_space_operations = {
	.readpage		= xfs_vm_readpage,
	.readpages		= xfs_vm_readpages,
	.writepage		= xfs_vm_writepage,
	.writepages		= xfs_vm_writepages,
	.set_page_dirty		= iomap_set_page_dirty,
	.releasepage		= xfs_vm_releasepage,
	.invalidatepage		= xfs_vm_invalidatepage,
	.bmap			= xfs_vm_bmap,
	.direct_IO		= noop_direct_IO,
	.migratepage		= iomap_migrate_page,
	.is_partially_uptodate  = iomap_is_partially_uptodate,
	.error_remove_page	= generic_error_remove_page,
	.swap_activate		= xfs_iomap_swapfile_activate,
};

const struct address_space_operations xfs_dax_aops = {
	.writepages		= xfs_dax_writepages,
	.direct_IO		= noop_direct_IO,
	.set_page_dirty		= noop_set_page_dirty,
	.invalidatepage		= noop_invalidatepage,
	.swap_activate		= xfs_iomap_swapfile_activate,
};