// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * Copyright (c) 2016-2018 Christoph Hellwig.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_iomap.h"
#include "xfs_trace.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_reflink.h"

/*
 * structure owned by writepages passed to individual writepage calls
 */
struct xfs_writepage_ctx {
	struct xfs_bmbt_irec    imap;
	int			fork;
	unsigned int		data_seq;
	unsigned int		cow_seq;
	struct xfs_ioend	*ioend;
};

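/*
 * Return the block device that backs the inode's data: the realtime
 * device for realtime inodes, the main data device for everything else.
 */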
struct block_device *
xfs_find_bdev_for_inode(
	struct inode		*inode)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;

	if (XFS_IS_REALTIME_INODE(ip))
		return mp->m_rtdev_targp->bt_bdev;
	else
		return mp->m_ddev_targp->bt_bdev;
}

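/*
 * As above, but return the DAX device backing the inode instead.
 */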
struct dax_device *
xfs_find_daxdev_for_inode(
	struct inode		*inode)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;

	if (XFS_IS_REALTIME_INODE(ip))
		return mp->m_rtdev_targp->bt_daxdev;
	else
		return mp->m_ddev_targp->bt_daxdev;
}

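/*
 * Finish writeback for a single page.  With a block size smaller than
 * the page size, a count of blocks under writeback is kept in the
 * iomap_page structure; end_page_writeback() is only called once the
 * last block on the page has completed.
 */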
static void
xfs_finish_page_writeback(
	struct inode		*inode,
	struct bio_vec		*bvec,
	int			error)
{
	struct iomap_page	*iop = to_iomap_page(bvec->bv_page);

	if (error) {
		SetPageError(bvec->bv_page);
		mapping_set_error(inode->i_mapping, -EIO);
	}

	ASSERT(iop || i_blocksize(inode) == PAGE_SIZE);
	ASSERT(!iop || atomic_read(&iop->write_count) > 0);

	if (!iop || atomic_dec_and_test(&iop->write_count))
		end_page_writeback(bvec->bv_page);
}

/*
 * We're now finished for good with this ioend structure.  Update the page
 * state, release holds on bios, and finally free up memory.  Do not use the
 * ioend after this.
 */
STATIC void
xfs_destroy_ioend(
	struct xfs_ioend	*ioend,
	int			error)
{
	struct inode		*inode = ioend->io_inode;
	struct bio		*bio = &ioend->io_inline_bio;
	struct bio		*last = ioend->io_bio, *next;
	u64			start = bio->bi_iter.bi_sector;
	bool			quiet = bio_flagged(bio, BIO_QUIET);

	for (bio = &ioend->io_inline_bio; bio; bio = next) {
		struct bio_vec	*bvec;
		struct bvec_iter_all iter_all;

		/*
		 * For the last bio, bi_private points to the ioend, so we
		 * need to explicitly end the iteration here.
		 */
		if (bio == last)
			next = NULL;
		else
			next = bio->bi_private;

		/* walk each page on bio, ending page IO on them */
		bio_for_each_segment_all(bvec, bio, iter_all)
			xfs_finish_page_writeback(inode, bvec, error);
		bio_put(bio);
	}

	if (unlikely(error && !quiet)) {
		xfs_err_ratelimited(XFS_I(inode)->i_mount,
			"writeback error on sector %llu", start);
	}
}

/*
 * Fast and loose check if this write could update the on-disk inode size.
 */
static inline bool xfs_ioend_is_append(struct xfs_ioend *ioend)
{
	return ioend->io_offset + ioend->io_size >
		XFS_I(ioend->io_inode)->i_d.di_size;
}

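/*
 * Allocate the transaction used to update the on-disk inode size at I/O
 * completion time ahead of the I/O submission.  Freeze protection is
 * handed over to the completion side along with the transaction.
 */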
STATIC int
xfs_setfilesize_trans_alloc(
	struct xfs_ioend	*ioend)
{
	struct xfs_mount	*mp = XFS_I(ioend->io_inode)->i_mount;
	struct xfs_trans	*tp;
	int			error;

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_fsyncts, 0, 0, 0, &tp);
	if (error)
		return error;

	ioend->io_append_trans = tp;

	/*
	 * We may pass freeze protection with a transaction.  So tell lockdep
	 * we released it.
	 */
	__sb_writers_release(ioend->io_inode->i_sb, SB_FREEZE_FS);
	/*
	 * We hand off the transaction to the completion thread now, so
	 * clear the flag here.
	 */
	current_restore_flags_nested(&tp->t_pflags, PF_MEMALLOC_NOFS);
	return 0;
}

/*
 * Update on-disk file size now that data has been written to disk.
 */
STATIC int
__xfs_setfilesize(
	struct xfs_inode	*ip,
	struct xfs_trans	*tp,
	xfs_off_t		offset,
	size_t			size)
{
	xfs_fsize_t		isize;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	isize = xfs_new_eof(ip, offset + size);
	if (!isize) {
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
		xfs_trans_cancel(tp);
		return 0;
	}

	trace_xfs_setfilesize(ip, offset, size);

	ip->i_d.di_size = isize;
	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);

	return xfs_trans_commit(tp);
}

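/*
 * Convenience wrapper that allocates its own transaction before updating
 * the on-disk file size.
 */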
int
xfs_setfilesize(
	struct xfs_inode	*ip,
	xfs_off_t		offset,
	size_t			size)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp;
	int			error;

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_fsyncts, 0, 0, 0, &tp);
	if (error)
		return error;

	return __xfs_setfilesize(ip, tp, offset, size);
}

STATIC int
xfs_setfilesize_ioend(
	struct xfs_ioend	*ioend,
	int			error)
{
	struct xfs_inode	*ip = XFS_I(ioend->io_inode);
	struct xfs_trans	*tp = ioend->io_append_trans;

	/*
	 * The transaction may have been allocated in the I/O submission thread,
	 * thus we need to mark ourselves as being in a transaction manually.
	 * Similarly for freeze protection.
	 */
	current_set_flags_nested(&tp->t_pflags, PF_MEMALLOC_NOFS);
	__sb_writers_acquired(VFS_I(ip)->i_sb, SB_FREEZE_FS);

	/* we abort the update if there was an IO error */
	if (error) {
		xfs_trans_cancel(tp);
		return error;
	}

	return __xfs_setfilesize(ip, tp, ioend->io_offset, ioend->io_size);
}

/*
 * IO write completion.
 */
STATIC void
xfs_end_ioend(
	struct xfs_ioend	*ioend)
{
	struct list_head	ioend_list;
	struct xfs_inode	*ip = XFS_I(ioend->io_inode);
	xfs_off_t		offset = ioend->io_offset;
	size_t			size = ioend->io_size;
	unsigned int		nofs_flag;
	int			error;

	/*
	 * We can allocate memory here while doing writeback on behalf of
	 * memory reclaim.  To avoid memory allocation deadlocks set the
	 * task-wide nofs context for the following operations.
	 */
	nofs_flag = memalloc_nofs_save();

	/*
	 * Just clean up the in-memory structures if the fs has been shut down.
	 */
	if (XFS_FORCED_SHUTDOWN(ip->i_mount)) {
		error = -EIO;
		goto done;
	}

	/*
	 * Clean up any COW blocks on an I/O error.
	 */
	error = blk_status_to_errno(ioend->io_bio->bi_status);
	if (unlikely(error)) {
		if (ioend->io_fork == XFS_COW_FORK)
			xfs_reflink_cancel_cow_range(ip, offset, size, true);
		goto done;
	}

	/*
	 * Success: commit the COW or unwritten blocks if needed.
	 */
	if (ioend->io_fork == XFS_COW_FORK)
		error = xfs_reflink_end_cow(ip, offset, size);
	else if (ioend->io_state == XFS_EXT_UNWRITTEN)
		error = xfs_iomap_write_unwritten(ip, offset, size, false);
	else
		ASSERT(!xfs_ioend_is_append(ioend) || ioend->io_append_trans);

done:
	if (ioend->io_append_trans)
		error = xfs_setfilesize_ioend(ioend, error);
	list_replace_init(&ioend->io_list, &ioend_list);
	xfs_destroy_ioend(ioend, error);

	while (!list_empty(&ioend_list)) {
		ioend = list_first_entry(&ioend_list, struct xfs_ioend,
				io_list);
		list_del_init(&ioend->io_list);
		xfs_destroy_ioend(ioend, error);
	}

	memalloc_nofs_restore(nofs_flag);
}

/*
 * We can merge two adjacent ioends if they have the same set of work to do.
 */
static bool
xfs_ioend_can_merge(
	struct xfs_ioend	*ioend,
	struct xfs_ioend	*next)
{
	if (ioend->io_bio->bi_status != next->io_bio->bi_status)
		return false;
	if ((ioend->io_fork == XFS_COW_FORK) ^ (next->io_fork == XFS_COW_FORK))
		return false;
	if ((ioend->io_state == XFS_EXT_UNWRITTEN) ^
	    (next->io_state == XFS_EXT_UNWRITTEN))
		return false;
	if (ioend->io_offset + ioend->io_size != next->io_offset)
		return false;
	return true;
}

/*
 * If the ioend being merged has a preallocated transaction for file
 * size updates, we need to ensure the ioend it is merged into also
 * has one.  If it already has one, we can simply cancel the incoming
 * transaction, as it is guaranteed to be clean.
 */
static void
xfs_ioend_merge_append_transactions(
	struct xfs_ioend	*ioend,
	struct xfs_ioend	*next)
{
	if (!ioend->io_append_trans) {
		ioend->io_append_trans = next->io_append_trans;
		next->io_append_trans = NULL;
	} else {
		xfs_setfilesize_ioend(next, -ECANCELED);
	}
}

/* Try to merge adjacent completions. */
STATIC void
xfs_ioend_try_merge(
	struct xfs_ioend	*ioend,
	struct list_head	*more_ioends)
{
	struct xfs_ioend	*next_ioend;

	while (!list_empty(more_ioends)) {
		next_ioend = list_first_entry(more_ioends, struct xfs_ioend,
				io_list);
		if (!xfs_ioend_can_merge(ioend, next_ioend))
			break;
		list_move_tail(&next_ioend->io_list, &ioend->io_list);
		ioend->io_size += next_ioend->io_size;
		if (next_ioend->io_append_trans)
			xfs_ioend_merge_append_transactions(ioend, next_ioend);
	}
}

/* list_sort compare function for ioends */
static int
xfs_ioend_compare(
	void			*priv,
	struct list_head	*a,
	struct list_head	*b)
{
	struct xfs_ioend	*ia;
	struct xfs_ioend	*ib;

	ia = container_of(a, struct xfs_ioend, io_list);
	ib = container_of(b, struct xfs_ioend, io_list);
	if (ia->io_offset < ib->io_offset)
		return -1;
	else if (ia->io_offset > ib->io_offset)
		return 1;
	return 0;
}

/* Finish all pending io completions. */
void
xfs_end_io(
	struct work_struct	*work)
{
	struct xfs_inode	*ip;
	struct xfs_ioend	*ioend;
	struct list_head	completion_list;
	unsigned long		flags;

	ip = container_of(work, struct xfs_inode, i_ioend_work);

	spin_lock_irqsave(&ip->i_ioend_lock, flags);
	list_replace_init(&ip->i_ioend_list, &completion_list);
	spin_unlock_irqrestore(&ip->i_ioend_lock, flags);

	list_sort(NULL, &completion_list, xfs_ioend_compare);

	while (!list_empty(&completion_list)) {
		ioend = list_first_entry(&completion_list, struct xfs_ioend,
				io_list);
		list_del_init(&ioend->io_list);
		xfs_ioend_try_merge(ioend, &completion_list);
		xfs_end_ioend(ioend);
	}
}

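/*
 * Completion handler for the final bio of an ioend.  Anything that needs
 * transaction context (COW remapping, unwritten extent conversion, file
 * size updates) is deferred to a workqueue; everything else is completed
 * directly in bio completion context.
 */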
STATIC void
xfs_end_bio(
	struct bio		*bio)
{
	struct xfs_ioend	*ioend = bio->bi_private;
	struct xfs_inode	*ip = XFS_I(ioend->io_inode);
	struct xfs_mount	*mp = ip->i_mount;
	unsigned long		flags;

	if (ioend->io_fork == XFS_COW_FORK ||
	    ioend->io_state == XFS_EXT_UNWRITTEN ||
	    ioend->io_append_trans != NULL) {
		spin_lock_irqsave(&ip->i_ioend_lock, flags);
		if (list_empty(&ip->i_ioend_list))
			WARN_ON_ONCE(!queue_work(mp->m_unwritten_workqueue,
						 &ip->i_ioend_work));
		list_add_tail(&ioend->io_list, &ip->i_ioend_list);
		spin_unlock_irqrestore(&ip->i_ioend_lock, flags);
	} else
		xfs_destroy_ioend(ioend, blk_status_to_errno(bio->bi_status));
}

/*
 * Fast revalidation of the cached writeback mapping. Return true if the current
 * mapping is valid, false otherwise.
 */
static bool
xfs_imap_valid(
	struct xfs_writepage_ctx	*wpc,
	struct xfs_inode		*ip,
	xfs_fileoff_t			offset_fsb)
{
	if (offset_fsb < wpc->imap.br_startoff ||
	    offset_fsb >= wpc->imap.br_startoff + wpc->imap.br_blockcount)
		return false;
	/*
	 * If this is a COW mapping, it is sufficient to check that the mapping
	 * covers the offset. Be careful to check this first because the caller
	 * can revalidate a COW mapping without updating the data seqno.
	 */
	if (wpc->fork == XFS_COW_FORK)
		return true;

	/*
	 * This is not a COW mapping. Check the sequence number of the data fork
	 * because concurrent changes could have invalidated the extent. Check
	 * the COW fork because concurrent changes since the last time we
	 * checked (and found nothing at this offset) could have added
	 * overlapping blocks.
	 */
	if (wpc->data_seq != READ_ONCE(ip->i_df.if_seq))
		return false;
	if (xfs_inode_has_cow_data(ip) &&
	    wpc->cow_seq != READ_ONCE(ip->i_cowfp->if_seq))
		return false;
	return true;
}

454 455 456 457 458
/*
 * Pass in a dellalloc extent and convert it to real extents, return the real
 * extent that maps offset_fsb in wpc->imap.
 *
 * The current page is held locked so nothing could have removed the block
459 460
 * backing offset_fsb, although it could have moved from the COW to the data
 * fork by another thread.
461 462 463 464 465 466 467 468 469 470 471 472 473 474 475 476 477 478 479 480 481 482 483 484 485 486
 */
static int
xfs_convert_blocks(
	struct xfs_writepage_ctx *wpc,
	struct xfs_inode	*ip,
	xfs_fileoff_t		offset_fsb)
{
	int			error;

	/*
	 * Attempt to allocate whatever delalloc extent currently backs
	 * offset_fsb and put the result into wpc->imap.  Allocate in a loop
	 * because it may take several attempts to allocate real blocks for a
	 * contiguous delalloc extent if free space is sufficiently fragmented.
	 */
	do {
		error = xfs_bmapi_convert_delalloc(ip, wpc->fork, offset_fsb,
				&wpc->imap, wpc->fork == XFS_COW_FORK ?
					&wpc->cow_seq : &wpc->data_seq);
		if (error)
			return error;
	} while (wpc->imap.br_startoff + wpc->imap.br_blockcount <= offset_fsb);

	return 0;
}

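/*
 * Find the extent that maps offset into wpc->imap for writeback, checking
 * the COW fork first on reflink inodes and falling back to the data fork.
 * Delayed allocations are converted to real extents here.
 */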
STATIC int
xfs_map_blocks(
	struct xfs_writepage_ctx *wpc,
	struct inode		*inode,
	loff_t			offset)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	ssize_t			count = i_blocksize(inode);
	xfs_fileoff_t		offset_fsb = XFS_B_TO_FSBT(mp, offset);
	xfs_fileoff_t		end_fsb = XFS_B_TO_FSB(mp, offset + count);
	xfs_fileoff_t		cow_fsb = NULLFILEOFF;
	struct xfs_bmbt_irec	imap;
	struct xfs_iext_cursor	icur;
	int			retries = 0;
	int			error = 0;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	/*
	 * COW fork blocks can overlap data fork blocks even if the blocks
	 * aren't shared.  COW I/O always takes precedence, so we must always
	 * check for overlap on reflink inodes unless the mapping is already a
	 * COW one, or the COW fork hasn't changed from the last time we looked
	 * at it.
	 *
	 * It's safe to check the COW fork if_seq here without the ILOCK because
	 * we've indirectly protected against concurrent updates: writeback has
	 * the page locked, which prevents concurrent invalidations by reflink
	 * and directio and prevents concurrent buffered writes to the same
	 * page.  Changes to if_seq always happen under i_lock, which protects
	 * against concurrent updates and provides a memory barrier on the way
	 * out that ensures that we always see the current value.
	 */
	if (xfs_imap_valid(wpc, ip, offset_fsb))
		return 0;

	/*
	 * If we don't have a valid map, now it's time to get a new one for this
	 * offset.  This will convert delayed allocations (including COW ones)
	 * into real extents.  If we return without a valid map, it means we
	 * landed in a hole and we skip the block.
	 */
retry:
	xfs_ilock(ip, XFS_ILOCK_SHARED);
	ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE ||
	       (ip->i_df.if_flags & XFS_IFEXTENTS));

	/*
	 * Check if this offset is covered by a COW extent, and if so use
	 * it directly instead of looking up anything in the data fork.
	 */
	if (xfs_inode_has_cow_data(ip) &&
	    xfs_iext_lookup_extent(ip, ip->i_cowfp, offset_fsb, &icur, &imap))
		cow_fsb = imap.br_startoff;
	if (cow_fsb != NULLFILEOFF && cow_fsb <= offset_fsb) {
		wpc->cow_seq = READ_ONCE(ip->i_cowfp->if_seq);
		xfs_iunlock(ip, XFS_ILOCK_SHARED);

		wpc->fork = XFS_COW_FORK;
		goto allocate_blocks;
	}

	/*
	 * No COW extent overlap. Revalidate now that we may have updated
	 * ->cow_seq. If the data mapping is still valid, we're done.
	 */
	if (xfs_imap_valid(wpc, ip, offset_fsb)) {
		xfs_iunlock(ip, XFS_ILOCK_SHARED);
		return 0;
	}

	/*
	 * If we don't have a valid map, now it's time to get a new one for this
	 * offset.  This will convert delayed allocations (including COW ones)
	 * into real extents.
	 */
	if (!xfs_iext_lookup_extent(ip, &ip->i_df, offset_fsb, &icur, &imap))
		imap.br_startoff = end_fsb;	/* fake a hole past EOF */
	wpc->data_seq = READ_ONCE(ip->i_df.if_seq);
	xfs_iunlock(ip, XFS_ILOCK_SHARED);

	wpc->fork = XFS_DATA_FORK;

	/* landed in a hole or beyond EOF? */
	if (imap.br_startoff > offset_fsb) {
		imap.br_blockcount = imap.br_startoff - offset_fsb;
		imap.br_startoff = offset_fsb;
		imap.br_startblock = HOLESTARTBLOCK;
		imap.br_state = XFS_EXT_NORM;
	}

	/*
	 * Truncate to the next COW extent if there is one.  This is the only
	 * opportunity to do this because we can skip COW fork lookups for the
	 * subsequent blocks in the mapping; however, the requirement to treat
	 * the COW range separately remains.
	 */
	if (cow_fsb != NULLFILEOFF &&
	    cow_fsb < imap.br_startoff + imap.br_blockcount)
		imap.br_blockcount = cow_fsb - imap.br_startoff;

	/* got a delalloc extent? */
	if (imap.br_startblock != HOLESTARTBLOCK &&
	    isnullstartblock(imap.br_startblock))
		goto allocate_blocks;

	wpc->imap = imap;
	trace_xfs_map_blocks_found(ip, offset, count, wpc->fork, &imap);
	return 0;
allocate_blocks:
	error = xfs_convert_blocks(wpc, ip, offset_fsb);
	if (error) {
		/*
		 * If we failed to find the extent in the COW fork we might have
		 * raced with a COW to data fork conversion or truncate.
		 * Restart the lookup to catch the extent in the data fork for
		 * the former case, but prevent additional retries to avoid
		 * looping forever for the latter case.
		 */
		if (error == -EAGAIN && wpc->fork == XFS_COW_FORK && !retries++)
			goto retry;
		ASSERT(error != -EAGAIN);
		return error;
	}

	/*
	 * Due to merging the returned real extent might be larger than the
	 * original delalloc one.  Trim the returned extent to the next COW
	 * boundary again to force a re-lookup.
	 */
	if (wpc->fork != XFS_COW_FORK && cow_fsb != NULLFILEOFF &&
	    cow_fsb < wpc->imap.br_startoff + wpc->imap.br_blockcount)
		wpc->imap.br_blockcount = cow_fsb - wpc->imap.br_startoff;

	ASSERT(wpc->imap.br_startoff <= offset_fsb);
	ASSERT(wpc->imap.br_startoff + wpc->imap.br_blockcount > offset_fsb);
	trace_xfs_map_blocks_alloc(ip, offset, count, wpc->fork, &imap);
	return 0;
}

/*
 * Submit the bio for an ioend. We are passed an ioend with a bio attached to
 * it, and we submit that bio. The ioend may be used for multiple bio
 * submissions, so we only want to allocate an append transaction for the ioend
 * once. In the case of multiple bio submission, each bio will take an IO
 * reference to the ioend to ensure that the ioend completion is only done once
 * all bios have been submitted and the ioend is really done.
 *
 * If @status is non-zero, it means that we have a situation where some part of
 * the submission process has failed after we have marked pages for writeback
 * and unlocked them. In this situation, we need to fail the bio and ioend
 * rather than submit it to IO. This typically only happens on a filesystem
 * shutdown.
 */
STATIC int
xfs_submit_ioend(
	struct writeback_control *wbc,
	struct xfs_ioend	*ioend,
	int			status)
{
	unsigned int		nofs_flag;

	/*
	 * We can allocate memory here while doing writeback on behalf of
	 * memory reclaim.  To avoid memory allocation deadlocks set the
	 * task-wide nofs context for the following operations.
	 */
	nofs_flag = memalloc_nofs_save();

	/* Convert CoW extents to regular */
	if (!status && ioend->io_fork == XFS_COW_FORK) {
		status = xfs_reflink_convert_cow(XFS_I(ioend->io_inode),
				ioend->io_offset, ioend->io_size);
	}

	/* Reserve log space if we might write beyond the on-disk inode size. */
	if (!status &&
	    (ioend->io_fork == XFS_COW_FORK ||
	     ioend->io_state != XFS_EXT_UNWRITTEN) &&
	    xfs_ioend_is_append(ioend) &&
	    !ioend->io_append_trans)
		status = xfs_setfilesize_trans_alloc(ioend);

	memalloc_nofs_restore(nofs_flag);

	ioend->io_bio->bi_private = ioend;
	ioend->io_bio->bi_end_io = xfs_end_bio;

	/*
	 * If we are failing the IO now, just mark the ioend with an
	 * error and finish it. This will run IO completion immediately
	 * as there is only one reference to the ioend at this point in
	 * time.
	 */
	if (status) {
		ioend->io_bio->bi_status = errno_to_blk_status(status);
		bio_endio(ioend->io_bio);
		return status;
	}

	submit_bio(ioend->io_bio);
	return 0;
}

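/*
 * Allocate a new ioend together with its initial bio.  The ioend is
 * embedded in the bio allocation, so putting the last bio reference also
 * frees the ioend.
 */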
static struct xfs_ioend *
xfs_alloc_ioend(
	struct inode		*inode,
	int			fork,
	xfs_exntst_t		state,
	xfs_off_t		offset,
	struct block_device	*bdev,
	sector_t		sector,
	struct writeback_control *wbc)
{
	struct xfs_ioend	*ioend;
	struct bio		*bio;

	bio = bio_alloc_bioset(GFP_NOFS, BIO_MAX_PAGES, &xfs_ioend_bioset);
	bio_set_dev(bio, bdev);
	bio->bi_iter.bi_sector = sector;
	bio->bi_opf = REQ_OP_WRITE | wbc_to_write_flags(wbc);
	bio->bi_write_hint = inode->i_write_hint;
	wbc_init_bio(wbc, bio);

	ioend = container_of(bio, struct xfs_ioend, io_inline_bio);
	INIT_LIST_HEAD(&ioend->io_list);
	ioend->io_fork = fork;
	ioend->io_state = state;
	ioend->io_inode = inode;
	ioend->io_size = 0;
	ioend->io_offset = offset;
	ioend->io_append_trans = NULL;
	ioend->io_bio = bio;
	return ioend;
}

/*
 * Allocate a new bio, and chain the old bio to the new one.
 *
 * Note that we have to perform the chaining in this unintuitive order
 * so that the bi_private linkage is set up in the right direction for the
 * traversal in xfs_destroy_ioend().
 */
static struct bio *
xfs_chain_bio(
	struct bio		*prev)
{
	struct bio *new;

	new = bio_alloc(GFP_NOFS, BIO_MAX_PAGES);
	bio_copy_dev(new, prev);	/* also copies over blkcg information */
	new->bi_iter.bi_sector = bio_end_sector(prev);
	new->bi_opf = prev->bi_opf;
	new->bi_write_hint = prev->bi_write_hint;

	bio_chain(prev, new);
	bio_get(prev);		/* for xfs_destroy_ioend */
	submit_bio(prev);
	return new;
}

/*
 * Test to see if we have an existing ioend structure that we could append to
 * first, otherwise finish off the current ioend and start another.
 */
STATIC void
xfs_add_to_ioend(
	struct inode		*inode,
	xfs_off_t		offset,
	struct page		*page,
	struct iomap_page	*iop,
	struct xfs_writepage_ctx *wpc,
	struct writeback_control *wbc,
	struct list_head	*iolist)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	struct block_device	*bdev = xfs_find_bdev_for_inode(inode);
	unsigned		len = i_blocksize(inode);
	unsigned		poff = offset & (PAGE_SIZE - 1);
	bool			merged, same_page = false;
	sector_t		sector;

	sector = xfs_fsb_to_db(ip, wpc->imap.br_startblock) +
		((offset - XFS_FSB_TO_B(mp, wpc->imap.br_startoff)) >> 9);

	if (!wpc->ioend ||
	    wpc->fork != wpc->ioend->io_fork ||
	    wpc->imap.br_state != wpc->ioend->io_state ||
	    sector != bio_end_sector(wpc->ioend->io_bio) ||
	    offset != wpc->ioend->io_offset + wpc->ioend->io_size) {
		if (wpc->ioend)
			list_add(&wpc->ioend->io_list, iolist);
		wpc->ioend = xfs_alloc_ioend(inode, wpc->fork,
				wpc->imap.br_state, offset, bdev, sector, wbc);
	}

	merged = __bio_try_merge_page(wpc->ioend->io_bio, page, len, poff,
			&same_page);

	if (iop && !same_page)
		atomic_inc(&iop->write_count);

	if (!merged) {
		if (bio_full(wpc->ioend->io_bio, len))
			wpc->ioend->io_bio = xfs_chain_bio(wpc->ioend->io_bio);
		bio_add_page(wpc->ioend->io_bio, page, len, poff);
	}

	wpc->ioend->io_size += len;
	wbc_account_cgroup_owner(wbc, page, len);
}

STATIC void
xfs_vm_invalidatepage(
	struct page		*page,
	unsigned int		offset,
	unsigned int		length)
{
	trace_xfs_invalidatepage(page->mapping->host, page, offset, length);
	iomap_invalidatepage(page, offset, length);
}

/*
 * If the page has delalloc blocks on it, we need to punch them out before we
 * invalidate the page.  If we don't, we leave a stale delalloc mapping on the
 * inode that can trip up a later direct I/O read operation on the same region.
 *
 * We prevent this by truncating away the delalloc regions on the page.  Because
 * they are delalloc, we can do this without needing a transaction. Indeed - if
 * we get ENOSPC errors, we have to be able to do this truncation without a
 * transaction as there is no space left for block reservation (typically why we
 * see an ENOSPC in writeback).
 */
STATIC void
xfs_aops_discard_page(
	struct page		*page)
{
	struct inode		*inode = page->mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	loff_t			offset = page_offset(page);
	xfs_fileoff_t		start_fsb = XFS_B_TO_FSBT(mp, offset);
	int			error;

	if (XFS_FORCED_SHUTDOWN(mp))
		goto out_invalidate;

	xfs_alert(mp,
		"page discard on page "PTR_FMT", inode 0x%llx, offset %llu.",
			page, ip->i_ino, offset);

	error = xfs_bmap_punch_delalloc_range(ip, start_fsb,
			PAGE_SIZE / i_blocksize(inode));
	if (error && !XFS_FORCED_SHUTDOWN(mp))
		xfs_alert(mp, "page discard unable to remove delalloc mapping.");
out_invalidate:
	xfs_vm_invalidatepage(page, 0, PAGE_SIZE);
}

/*
 * We implement an immediate ioend submission policy here to avoid needing to
 * chain multiple ioends and hence nest mempool allocations which can violate
 * forward progress guarantees we need to provide. The current ioend we are
 * adding blocks to is cached on the writepage context, and if the new block
 * does not append to the cached ioend it will create a new ioend and cache that
 * instead.
 *
 * If a new ioend is created and cached, the old ioend is returned and queued
 * locally for submission once the entire page is processed or an error has been
 * detected.  While ioends are submitted immediately after they are completed,
 * batching optimisations are provided by higher level block plugging.
 *
 * At the end of a writeback pass, there will be a cached ioend remaining on the
 * writepage context that the caller will need to submit.
 */
static int
xfs_writepage_map(
	struct xfs_writepage_ctx *wpc,
	struct writeback_control *wbc,
	struct inode		*inode,
	struct page		*page,
	uint64_t		end_offset)
{
	LIST_HEAD(submit_list);
	struct iomap_page	*iop = to_iomap_page(page);
	unsigned		len = i_blocksize(inode);
	struct xfs_ioend	*ioend, *next;
	uint64_t		file_offset;	/* file offset of page */
	int			error = 0, count = 0, i;

	ASSERT(iop || i_blocksize(inode) == PAGE_SIZE);
	ASSERT(!iop || atomic_read(&iop->write_count) == 0);

	/*
	 * Walk through the page to find areas to write back. If we run off the
	 * end of the current map or find the current map invalid, grab a new
	 * one.
	 */
	for (i = 0, file_offset = page_offset(page);
	     i < (PAGE_SIZE >> inode->i_blkbits) && file_offset < end_offset;
	     i++, file_offset += len) {
		if (iop && !test_bit(i, iop->uptodate))
			continue;

		error = xfs_map_blocks(wpc, inode, file_offset);
		if (error)
			break;
		if (wpc->imap.br_startblock == HOLESTARTBLOCK)
			continue;
		xfs_add_to_ioend(inode, file_offset, page, iop, wpc, wbc,
				 &submit_list);
		count++;
	}

	ASSERT(wpc->ioend || list_empty(&submit_list));
	ASSERT(PageLocked(page));
	ASSERT(!PageWriteback(page));

	/*
	 * On error, we have to fail the ioend here because we may have set
	 * pages under writeback; we have to make sure we run IO completion to
	 * mark the error state of the IO appropriately, so we can't cancel the
	 * ioend directly here.  That means we have to mark this page as under
	 * writeback if we included any blocks from it in the ioend chain so
	 * that completion treats it correctly.
	 *
	 * If we didn't include the page in the ioend, then on error we can
	 * simply discard and unlock it as there are no other users of the page
	 * now.  The caller will still need to trigger submission of outstanding
	 * ioends on the writepage context so they are treated correctly on
	 * error.
	 */
	if (unlikely(error)) {
		if (!count) {
			xfs_aops_discard_page(page);
			ClearPageUptodate(page);
			unlock_page(page);
			goto done;
		}

		/*
		 * If the page was not fully cleaned, we need to ensure that the
		 * higher layers come back to it correctly.  That means we need
		 * to keep the page dirty, and for WB_SYNC_ALL writeback we need
		 * to ensure the PAGECACHE_TAG_TOWRITE index mark is not removed
		 * so another attempt to write this page in this writeback sweep
		 * will be made.
		 */
		set_page_writeback_keepwrite(page);
	} else {
		clear_page_dirty_for_io(page);
		set_page_writeback(page);
	}

	unlock_page(page);

	/*
	 * Preserve the original error if there was one, otherwise catch
	 * submission errors here and propagate into subsequent ioend
	 * submissions.
	 */
	list_for_each_entry_safe(ioend, next, &submit_list, io_list) {
		int error2;

		list_del_init(&ioend->io_list);
		error2 = xfs_submit_ioend(wbc, ioend, error);
		if (error2 && !error)
			error = error2;
	}

	/*
	 * We can end up here with no error and nothing to write only if we race
	 * with a partial page truncate on a sub-page block sized filesystem.
	 */
	if (!count)
		end_page_writeback(page);
done:
	mapping_set_error(page->mapping, error);
	return error;
}

/*
 * Write out a dirty page.
 *
 * For delalloc space on the page we need to allocate space and flush it.
 * For unwritten space on the page we need to start the conversion to
 * regular allocated space.
 */
STATIC int
xfs_do_writepage(
	struct page		*page,
	struct writeback_control *wbc,
	void			*data)
{
	struct xfs_writepage_ctx *wpc = data;
	struct inode		*inode = page->mapping->host;
	loff_t			offset;
	uint64_t		end_offset;
	pgoff_t			end_index;

	trace_xfs_writepage(inode, page, 0, 0);

	/*
	 * Refuse to write the page out if we are called from reclaim context.
	 *
	 * This avoids stack overflows when called from deeply used stacks in
	 * random callers for direct reclaim or memcg reclaim.  We explicitly
	 * allow reclaim from kswapd as the stack usage there is relatively low.
	 *
	 * This should never happen except in the case of a VM regression so
	 * warn about it.
	 */
	if (WARN_ON_ONCE((current->flags & (PF_MEMALLOC|PF_KSWAPD)) ==
			PF_MEMALLOC))
		goto redirty;

	/*
	 * Given that we do not allow direct reclaim to call us, we should
	 * never be called while in a filesystem transaction.
	 */
	if (WARN_ON_ONCE(current->flags & PF_MEMALLOC_NOFS))
		goto redirty;

	/*
	 * Is this page beyond the end of the file?
	 *
	 * The page index is less than the end_index, adjust the end_offset
	 * to the highest offset that this page should represent.
	 * -----------------------------------------------------
	 * |			file mapping	       | <EOF> |
	 * -----------------------------------------------------
	 * | Page ... | Page N-2 | Page N-1 |  Page N  |       |
	 * ^--------------------------------^----------|--------
	 * |     desired writeback range    |      see else    |
	 * ---------------------------------^------------------|
	 */
	offset = i_size_read(inode);
	end_index = offset >> PAGE_SHIFT;
	if (page->index < end_index)
		end_offset = (xfs_off_t)(page->index + 1) << PAGE_SHIFT;
	else {
		/*
		 * Check whether the page to write out is beyond or straddles
		 * i_size or not.
		 * -------------------------------------------------------
		 * |		file mapping		        | <EOF>  |
		 * -------------------------------------------------------
		 * | Page ... | Page N-2 | Page N-1 |  Page N   | Beyond |
		 * ^--------------------------------^-----------|---------
		 * |				    |      Straddles     |
		 * ---------------------------------^-----------|--------|
		 */
		unsigned offset_into_page = offset & (PAGE_SIZE - 1);

		/*
		 * Skip the page if it is fully outside i_size, e.g. due to a
		 * truncate operation that is in progress. We must redirty the
		 * page so that reclaim stops reclaiming it. Otherwise
		 * xfs_vm_releasepage() is called on it and gets confused.
		 *
		 * Note that the end_index is unsigned long, it would overflow
		 * if the given offset is greater than 16TB on a 32-bit system
		 * and if we do check the page is fully outside i_size or not
		 * via "if (page->index >= end_index + 1)" as "end_index + 1"
		 * will be evaluated to 0.  Hence this page will be redirtied
		 * and be written out repeatedly which would result in an
		 * infinite loop; the user program that performs this operation
		 * will hang.  Instead, we can verify this situation by checking
		 * if the page to write is totally beyond the i_size or if its
		 * offset is just equal to the EOF.
		 */
		if (page->index > end_index ||
		    (page->index == end_index && offset_into_page == 0))
			goto redirty;

		/*
		 * The page straddles i_size.  It must be zeroed out on each
		 * and every writepage invocation because it may be mmapped.
		 * "A file is mapped in multiples of the page size.  For a file
		 * that is not a multiple of the page size, the remaining
		 * memory is zeroed when mapped, and writes to that region are
		 * not written out to the file."
		 */
		zero_user_segment(page, offset_into_page, PAGE_SIZE);

		/* Adjust the end_offset to the end of file */
		end_offset = offset;
	}

	return xfs_writepage_map(wpc, wbc, inode, page, end_offset);

redirty:
	redirty_page_for_writepage(wbc, page);
	unlock_page(page);
	return 0;
}

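/*
 * ->writepage entry point: write a single page through the common
 * writepage code and submit any ioend that was built for it.
 */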
STATIC int
xfs_vm_writepage(
	struct page		*page,
	struct writeback_control *wbc)
{
	struct xfs_writepage_ctx wpc = { };
	int			ret;

	ret = xfs_do_writepage(page, wbc, &wpc);
	if (wpc.ioend)
		ret = xfs_submit_ioend(wbc, wpc.ioend, ret);
	return ret;
}

STATIC int
xfs_vm_writepages(
	struct address_space	*mapping,
	struct writeback_control *wbc)
{
	struct xfs_writepage_ctx wpc = { };
	int			ret;

	xfs_iflags_clear(XFS_I(mapping->host), XFS_ITRUNCATED);
	ret = write_cache_pages(mapping, wbc, xfs_do_writepage, &wpc);
	if (wpc.ioend)
		ret = xfs_submit_ioend(wbc, wpc.ioend, ret);
	return ret;
}

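/*
 * DAX variant of ->writepages: there are no bios to build, so just write
 * back dirty DAX mappings through dax_writeback_mapping_range().
 */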
STATIC int
xfs_dax_writepages(
	struct address_space	*mapping,
	struct writeback_control *wbc)
{
	xfs_iflags_clear(XFS_I(mapping->host), XFS_ITRUNCATED);
	return dax_writeback_mapping_range(mapping,
			xfs_find_bdev_for_inode(mapping->host), wbc);
}

STATIC int
xfs_vm_releasepage(
	struct page		*page,
	gfp_t			gfp_mask)
{
	trace_xfs_releasepage(page->mapping->host, page, 0, 0);
	return iomap_releasepage(page, gfp_mask);
}

STATIC sector_t
xfs_vm_bmap(
	struct address_space	*mapping,
	sector_t		block)
{
	struct xfs_inode	*ip = XFS_I(mapping->host);

	trace_xfs_vm_bmap(ip);

	/*
	 * The swap code (ab-)uses ->bmap to get a block mapping and then
	 * bypasses the file system for actual I/O.  We really can't allow
	 * that on reflink inodes, so we have to skip out here.  And yes,
	 * 0 is the magic code for a bmap error.
	 *
	 * Since we don't pass back blockdev info, we can't return bmap
	 * information for rt files either.
	 */
	if (xfs_is_cow_inode(ip) || XFS_IS_REALTIME_INODE(ip))
		return 0;
	return iomap_bmap(mapping, block, &xfs_iomap_ops);
}

STATIC int
xfs_vm_readpage(
	struct file		*unused,
	struct page		*page)
{
	trace_xfs_vm_readpage(page->mapping->host, 1);
	return iomap_readpage(page, &xfs_iomap_ops);
}

STATIC int
xfs_vm_readpages(
	struct file		*unused,
	struct address_space	*mapping,
	struct list_head	*pages,
	unsigned		nr_pages)
{
	trace_xfs_vm_readpages(mapping->host, nr_pages);
	return iomap_readpages(mapping, pages, nr_pages, &xfs_iomap_ops);
}

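/*
 * Set up a swap file by mapping its extents through the generic iomap
 * swapfile code.
 */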
static int
xfs_iomap_swapfile_activate(
	struct swap_info_struct		*sis,
	struct file			*swap_file,
	sector_t			*span)
{
	sis->bdev = xfs_find_bdev_for_inode(file_inode(swap_file));
	return iomap_swapfile_activate(sis, swap_file, span, &xfs_iomap_ops);
}

const struct address_space_operations xfs_address_space_operations = {
	.readpage		= xfs_vm_readpage,
	.readpages		= xfs_vm_readpages,
	.writepage		= xfs_vm_writepage,
	.writepages		= xfs_vm_writepages,
	.set_page_dirty		= iomap_set_page_dirty,
	.releasepage		= xfs_vm_releasepage,
	.invalidatepage		= xfs_vm_invalidatepage,
	.bmap			= xfs_vm_bmap,
	.direct_IO		= noop_direct_IO,
	.migratepage		= iomap_migrate_page,
	.is_partially_uptodate  = iomap_is_partially_uptodate,
	.error_remove_page	= generic_error_remove_page,
	.swap_activate		= xfs_iomap_swapfile_activate,
};

const struct address_space_operations xfs_dax_aops = {
	.writepages		= xfs_dax_writepages,
	.direct_IO		= noop_direct_IO,
	.set_page_dirty		= noop_set_page_dirty,
	.invalidatepage		= noop_invalidatepage,
	.swap_activate		= xfs_iomap_swapfile_activate,
};