// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * Copyright (c) 2016-2018 Christoph Hellwig.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_iomap.h"
#include "xfs_trace.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_reflink.h"

/*
 * Structure owned by writepages and passed to each individual writepage call.
 */
struct xfs_writepage_ctx {
	struct iomap		iomap;
	unsigned int		data_seq;
	unsigned int		cow_seq;
	struct xfs_ioend	*ioend;
};

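/*
 * Note: data_seq and cow_seq record the data and COW fork extent list
 * sequence numbers (if_seq) sampled when the mapping cached in ->iomap was
 * established; xfs_imap_valid() compares them against the current fork
 * values to detect a stale cached mapping.
 */
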
struct block_device *
xfs_find_bdev_for_inode(
	struct inode		*inode)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;

	if (XFS_IS_REALTIME_INODE(ip))
		return mp->m_rtdev_targp->bt_bdev;
	else
		return mp->m_ddev_targp->bt_bdev;
}

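/*
 * As above, but return the DAX device backing the inode: the realtime
 * device for realtime inodes, the main data device otherwise.
 */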
struct dax_device *
xfs_find_daxdev_for_inode(
	struct inode		*inode)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;

	if (XFS_IS_REALTIME_INODE(ip))
		return mp->m_rtdev_targp->bt_daxdev;
	else
		return mp->m_ddev_targp->bt_daxdev;
}

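/*
 * Per-block writeback completion handling for a page.  With block size
 * smaller than page size, the iomap_page structure tracks the number of
 * in-flight writeback segments on the page (write_count); the page only
 * leaves writeback state once the last segment completes.
 */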
static void
xfs_finish_page_writeback(
	struct inode		*inode,
	struct bio_vec		*bvec,
	int			error)
{
	struct iomap_page	*iop = to_iomap_page(bvec->bv_page);

	if (error) {
		SetPageError(bvec->bv_page);
		mapping_set_error(inode->i_mapping, -EIO);
	}

	ASSERT(iop || i_blocksize(inode) == PAGE_SIZE);
	ASSERT(!iop || atomic_read(&iop->write_count) > 0);

	if (!iop || atomic_dec_and_test(&iop->write_count))
		end_page_writeback(bvec->bv_page);
}

/*
 * We're now finished for good with this ioend structure.  Update the page
 * state, release holds on bios, and finally free up memory.  Do not use the
 * ioend after this.
 */
STATIC void
xfs_destroy_ioend(
	struct xfs_ioend	*ioend,
	int			error)
{
	struct inode		*inode = ioend->io_inode;
	struct bio		*bio = &ioend->io_inline_bio;
	struct bio		*last = ioend->io_bio, *next;
	u64			start = bio->bi_iter.bi_sector;
	bool			quiet = bio_flagged(bio, BIO_QUIET);

	for (bio = &ioend->io_inline_bio; bio; bio = next) {
		struct bio_vec	*bvec;
		struct bvec_iter_all iter_all;

		/*
		 * For the last bio, bi_private points to the ioend, so we
		 * need to explicitly end the iteration here.
		 */
		if (bio == last)
			next = NULL;
		else
			next = bio->bi_private;

		/* walk each page on bio, ending page IO on them */
		bio_for_each_segment_all(bvec, bio, iter_all)
			xfs_finish_page_writeback(inode, bvec, error);
		bio_put(bio);
	}

	if (unlikely(error && !quiet)) {
		xfs_err_ratelimited(XFS_I(inode)->i_mount,
			"writeback error on sector %llu", start);
	}
}

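/*
 * Tear down an ioend together with every ioend that was previously merged
 * onto its ->io_list by xfs_ioend_try_merge().
 */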
static void
xfs_destroy_ioends(
	struct xfs_ioend	*ioend,
	int			error)
{
	struct list_head	tmp;

	list_replace_init(&ioend->io_list, &tmp);
	xfs_destroy_ioend(ioend, error);
	while ((ioend = list_first_entry_or_null(&tmp, struct xfs_ioend,
			io_list))) {
		list_del_init(&ioend->io_list);
		xfs_destroy_ioend(ioend, error);
	}
}

/*
 * Fast and loose check if this write could update the on-disk inode size.
 */
static inline bool xfs_ioend_is_append(struct xfs_ioend *ioend)
{
	return ioend->io_offset + ioend->io_size >
		XFS_I(ioend->io_inode)->i_d.di_size;
}

STATIC int
xfs_setfilesize_trans_alloc(
	struct xfs_ioend	*ioend)
{
	struct xfs_mount	*mp = XFS_I(ioend->io_inode)->i_mount;
	struct xfs_trans	*tp;
	int			error;

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_fsyncts, 0, 0, 0, &tp);
	if (error)
		return error;

	ioend->io_private = tp;

	/*
	 * We may pass freeze protection with a transaction.  So tell lockdep
	 * we released it.
	 */
	__sb_writers_release(ioend->io_inode->i_sb, SB_FREEZE_FS);
	/*
	 * We hand off the transaction to the completion thread now, so
	 * clear the flag here.
	 */
	current_restore_flags_nested(&tp->t_pflags, PF_MEMALLOC_NOFS);
	return 0;
}

/*
 * Update on-disk file size now that data has been written to disk.
 */
STATIC int
__xfs_setfilesize(
	struct xfs_inode	*ip,
	struct xfs_trans	*tp,
	xfs_off_t		offset,
	size_t			size)
{
	xfs_fsize_t		isize;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	isize = xfs_new_eof(ip, offset + size);
	if (!isize) {
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
		xfs_trans_cancel(tp);
		return 0;
	}

	trace_xfs_setfilesize(ip, offset, size);

	ip->i_d.di_size = isize;
	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);

	return xfs_trans_commit(tp);
}

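/*
 * Variant that allocates and commits its own transaction.  The writeback
 * ioend path instead reuses a transaction preallocated at submission time
 * (see xfs_setfilesize_trans_alloc() above), so that freeze protection is
 * taken before the I/O is submitted.
 */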
int
xfs_setfilesize(
	struct xfs_inode	*ip,
	xfs_off_t		offset,
	size_t			size)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp;
	int			error;

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_fsyncts, 0, 0, 0, &tp);
	if (error)
		return error;

	return __xfs_setfilesize(ip, tp, offset, size);
}

STATIC int
xfs_setfilesize_ioend(
	struct xfs_ioend	*ioend,
	int			error)
{
	struct xfs_inode	*ip = XFS_I(ioend->io_inode);
	struct xfs_trans	*tp = ioend->io_private;

	/*
	 * The transaction may have been allocated in the I/O submission thread,
	 * thus we need to mark ourselves as being in a transaction manually.
	 * Similarly for freeze protection.
	 */
	current_set_flags_nested(&tp->t_pflags, PF_MEMALLOC_NOFS);
	__sb_writers_acquired(VFS_I(ip)->i_sb, SB_FREEZE_FS);

	/* we abort the update if there was an IO error */
	if (error) {
		xfs_trans_cancel(tp);
		return error;
	}

	return __xfs_setfilesize(ip, tp, ioend->io_offset, ioend->io_size);
}

/*
 * I/O write completion.
 */
STATIC void
xfs_end_ioend(
	struct xfs_ioend	*ioend)
{
	struct xfs_inode	*ip = XFS_I(ioend->io_inode);
	xfs_off_t		offset = ioend->io_offset;
	size_t			size = ioend->io_size;
	unsigned int		nofs_flag;
	int			error;

	/*
	 * We can allocate memory here while doing writeback on behalf of
	 * memory reclaim.  To avoid memory allocation deadlocks set the
	 * task-wide nofs context for the following operations.
	 */
	nofs_flag = memalloc_nofs_save();

	/*
	 * Just clean up the in-memory structures if the fs has been shut down.
	 */
	if (XFS_FORCED_SHUTDOWN(ip->i_mount)) {
		error = -EIO;
		goto done;
	}

	/*
	 * Clean up any COW blocks on an I/O error.
	 */
	error = blk_status_to_errno(ioend->io_bio->bi_status);
	if (unlikely(error)) {
		if (ioend->io_flags & IOMAP_F_SHARED)
			xfs_reflink_cancel_cow_range(ip, offset, size, true);
		goto done;
	}

	/*
	 * Success: commit the COW or unwritten blocks if needed.
	 */
	if (ioend->io_flags & IOMAP_F_SHARED)
		error = xfs_reflink_end_cow(ip, offset, size);
	else if (ioend->io_type == IOMAP_UNWRITTEN)
		error = xfs_iomap_write_unwritten(ip, offset, size, false);
	else
		ASSERT(!xfs_ioend_is_append(ioend) || ioend->io_private);

done:
	if (ioend->io_private)
		error = xfs_setfilesize_ioend(ioend, error);
	xfs_destroy_ioends(ioend, error);
	memalloc_nofs_restore(nofs_flag);
}

/*
 * We can merge two adjacent ioends if they have the same set of work to do.
 */
static bool
xfs_ioend_can_merge(
	struct xfs_ioend	*ioend,
	struct xfs_ioend	*next)
{
	if (ioend->io_bio->bi_status != next->io_bio->bi_status)
		return false;
	if ((ioend->io_flags & IOMAP_F_SHARED) ^
	    (next->io_flags & IOMAP_F_SHARED))
		return false;
	if ((ioend->io_type == IOMAP_UNWRITTEN) ^
	    (next->io_type == IOMAP_UNWRITTEN))
		return false;
	if (ioend->io_offset + ioend->io_size != next->io_offset)
		return false;
	return true;
}

/*
 * If the to-be-merged ioend has a preallocated transaction for file
 * size updates we need to ensure the ioend it is merged into also
 * has one.  If it already has one we can simply cancel the transaction
 * as it is guaranteed to be clean.
 */
static void
xfs_ioend_merge_private(
	struct xfs_ioend	*ioend,
	struct xfs_ioend	*next)
{
	if (!ioend->io_private) {
		ioend->io_private = next->io_private;
		next->io_private = NULL;
	} else {
		xfs_setfilesize_ioend(next, -ECANCELED);
	}
}

/* Try to merge adjacent completions. */
STATIC void
xfs_ioend_try_merge(
	struct xfs_ioend	*ioend,
	struct list_head	*more_ioends)
{
	struct xfs_ioend	*next;

	INIT_LIST_HEAD(&ioend->io_list);

	while ((next = list_first_entry_or_null(more_ioends, struct xfs_ioend,
			io_list))) {
		if (!xfs_ioend_can_merge(ioend, next))
			break;
		list_move_tail(&next->io_list, &ioend->io_list);
		ioend->io_size += next->io_size;
		if (next->io_private)
			xfs_ioend_merge_private(ioend, next);
	}
}

/* list_sort compare function for ioends */
static int
xfs_ioend_compare(
	void			*priv,
	struct list_head	*a,
	struct list_head	*b)
{
	struct xfs_ioend	*ia;
	struct xfs_ioend	*ib;

	ia = container_of(a, struct xfs_ioend, io_list);
	ib = container_of(b, struct xfs_ioend, io_list);
	if (ia->io_offset < ib->io_offset)
		return -1;
	else if (ia->io_offset > ib->io_offset)
		return 1;
	return 0;
}

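/*
 * Sort pending ioend completions by ascending file offset.  This makes
 * mergeable ioends adjacent in the list, so xfs_ioend_try_merge() only ever
 * has to look at the head of the remaining list.
 */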
static void
xfs_sort_ioends(
	struct list_head	*ioend_list)
{
	list_sort(NULL, ioend_list, xfs_ioend_compare);
}

/* Finish all pending io completions. */
void
xfs_end_io(
	struct work_struct	*work)
{
	struct xfs_inode	*ip =
		container_of(work, struct xfs_inode, i_ioend_work);
	struct xfs_ioend	*ioend;
	struct list_head	tmp;
	unsigned long		flags;

	spin_lock_irqsave(&ip->i_ioend_lock, flags);
	list_replace_init(&ip->i_ioend_list, &tmp);
	spin_unlock_irqrestore(&ip->i_ioend_lock, flags);

	xfs_sort_ioends(&tmp);
	while ((ioend = list_first_entry_or_null(&tmp, struct xfs_ioend,
			io_list))) {
		list_del_init(&ioend->io_list);
		xfs_ioend_try_merge(ioend, &tmp);
		xfs_end_ioend(ioend);
	}
}

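/*
 * Completions that involve further work (an on-disk file size update, an
 * unwritten extent conversion, or a COW remap) cannot be run directly from
 * bio completion context and are deferred to a workqueue instead.
 */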
static inline bool xfs_ioend_needs_workqueue(struct xfs_ioend *ioend)
{
	return ioend->io_private ||
		ioend->io_type == IOMAP_UNWRITTEN ||
		(ioend->io_flags & IOMAP_F_SHARED);
}

STATIC void
xfs_end_bio(
	struct bio		*bio)
{
	struct xfs_ioend	*ioend = bio->bi_private;
	struct xfs_inode	*ip = XFS_I(ioend->io_inode);
	struct xfs_mount	*mp = ip->i_mount;
	unsigned long		flags;

	if (xfs_ioend_needs_workqueue(ioend)) {
		spin_lock_irqsave(&ip->i_ioend_lock, flags);
		if (list_empty(&ip->i_ioend_list))
			WARN_ON_ONCE(!queue_work(mp->m_unwritten_workqueue,
						 &ip->i_ioend_work));
		list_add_tail(&ioend->io_list, &ip->i_ioend_list);
		spin_unlock_irqrestore(&ip->i_ioend_lock, flags);
	} else
		xfs_destroy_ioend(ioend, blk_status_to_errno(bio->bi_status));
}

/*
 * Fast revalidation of the cached writeback mapping. Return true if the current
 * mapping is valid, false otherwise.
 */
static bool
xfs_imap_valid(
	struct xfs_writepage_ctx	*wpc,
	struct xfs_inode		*ip,
	loff_t				offset)
{
	if (offset < wpc->iomap.offset ||
	    offset >= wpc->iomap.offset + wpc->iomap.length)
		return false;
	/*
	 * If this is a COW mapping, it is sufficient to check that the mapping
	 * covers the offset. Be careful to check this first because the caller
	 * can revalidate a COW mapping without updating the data seqno.
	 */
	if (wpc->iomap.flags & IOMAP_F_SHARED)
		return true;

	/*
	 * This is not a COW mapping. Check the sequence number of the data fork
	 * because concurrent changes could have invalidated the extent. Check
	 * the COW fork because concurrent changes since the last time we
	 * checked (and found nothing at this offset) could have added
	 * overlapping blocks.
	 */
	if (wpc->data_seq != READ_ONCE(ip->i_df.if_seq))
		return false;
	if (xfs_inode_has_cow_data(ip) &&
	    wpc->cow_seq != READ_ONCE(ip->i_cowfp->if_seq))
		return false;
	return true;
}

/*
 * Pass in a delalloc extent and convert it to real extents, return the real
 * extent that maps offset_fsb in wpc->iomap.
 *
 * The current page is held locked so nothing could have removed the block
 * backing offset_fsb, although it could have moved from the COW to the data
 * fork by another thread.
 */
static int
xfs_convert_blocks(
	struct xfs_writepage_ctx *wpc,
	struct xfs_inode	*ip,
	int			whichfork,
	loff_t			offset)
{
	int			error;

	/*
	 * Attempt to allocate whatever delalloc extent currently backs offset
	 * and put the result into wpc->iomap.  Allocate in a loop because it
	 * may take several attempts to allocate real blocks for a contiguous
	 * delalloc extent if free space is sufficiently fragmented.
	 */
	do {
		error = xfs_bmapi_convert_delalloc(ip, whichfork, offset,
				&wpc->iomap, whichfork == XFS_COW_FORK ?
					&wpc->cow_seq : &wpc->data_seq);
		if (error)
			return error;
	} while (wpc->iomap.offset + wpc->iomap.length <= offset);

	return 0;
}

STATIC int
xfs_map_blocks(
	struct xfs_writepage_ctx *wpc,
	struct inode		*inode,
	loff_t			offset)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	ssize_t			count = i_blocksize(inode);
	xfs_fileoff_t		offset_fsb = XFS_B_TO_FSBT(mp, offset);
	xfs_fileoff_t		end_fsb = XFS_B_TO_FSB(mp, offset + count);
	xfs_fileoff_t		cow_fsb = NULLFILEOFF;
	int			whichfork = XFS_DATA_FORK;
	struct xfs_bmbt_irec	imap;
	struct xfs_iext_cursor	icur;
	int			retries = 0;
	int			error = 0;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	/*
	 * COW fork blocks can overlap data fork blocks even if the blocks
	 * aren't shared.  COW I/O always takes precedence, so we must always
	 * check for overlap on reflink inodes unless the mapping is already a
	 * COW one, or the COW fork hasn't changed from the last time we looked
	 * at it.
	 *
	 * It's safe to check the COW fork if_seq here without the ILOCK because
	 * we've indirectly protected against concurrent updates: writeback has
	 * the page locked, which prevents concurrent invalidations by reflink
	 * and directio and prevents concurrent buffered writes to the same
	 * page.  Changes to if_seq always happen under i_lock, which protects
	 * against concurrent updates and provides a memory barrier on the way
	 * out that ensures that we always see the current value.
	 */
	if (xfs_imap_valid(wpc, ip, offset))
		return 0;

	/*
	 * If we don't have a valid map, now it's time to get a new one for this
	 * offset.  This will convert delayed allocations (including COW ones)
	 * into real extents.  If we return without a valid map, it means we
	 * landed in a hole and we skip the block.
	 */
retry:
	xfs_ilock(ip, XFS_ILOCK_SHARED);
	ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE ||
	       (ip->i_df.if_flags & XFS_IFEXTENTS));

	/*
	 * Check if this offset is covered by a COW extent, and if so use
	 * it directly instead of looking up anything in the data fork.
	 */
	if (xfs_inode_has_cow_data(ip) &&
	    xfs_iext_lookup_extent(ip, ip->i_cowfp, offset_fsb, &icur, &imap))
		cow_fsb = imap.br_startoff;
	if (cow_fsb != NULLFILEOFF && cow_fsb <= offset_fsb) {
		wpc->cow_seq = READ_ONCE(ip->i_cowfp->if_seq);
		xfs_iunlock(ip, XFS_ILOCK_SHARED);

		whichfork = XFS_COW_FORK;
		goto allocate_blocks;
	}

	/*
	 * No COW extent overlap. Revalidate now that we may have updated
	 * ->cow_seq. If the data mapping is still valid, we're done.
	 */
	if (xfs_imap_valid(wpc, ip, offset)) {
		xfs_iunlock(ip, XFS_ILOCK_SHARED);
		return 0;
	}

	/*
	 * If we don't have a valid map, now it's time to get a new one for this
	 * offset.  This will convert delayed allocations (including COW ones)
	 * into real extents.
	 */
	if (!xfs_iext_lookup_extent(ip, &ip->i_df, offset_fsb, &icur, &imap))
		imap.br_startoff = end_fsb;	/* fake a hole past EOF */
	wpc->data_seq = READ_ONCE(ip->i_df.if_seq);
	xfs_iunlock(ip, XFS_ILOCK_SHARED);

	/* landed in a hole or beyond EOF? */
	if (imap.br_startoff > offset_fsb) {
		imap.br_blockcount = imap.br_startoff - offset_fsb;
		imap.br_startoff = offset_fsb;
		imap.br_startblock = HOLESTARTBLOCK;
		imap.br_state = XFS_EXT_NORM;
	}

	/*
	 * Truncate to the next COW extent if there is one.  This is the only
	 * opportunity to do this because we can skip COW fork lookups for the
	 * subsequent blocks in the mapping; however, the requirement to treat
	 * the COW range separately remains.
	 */
	if (cow_fsb != NULLFILEOFF &&
	    cow_fsb < imap.br_startoff + imap.br_blockcount)
		imap.br_blockcount = cow_fsb - imap.br_startoff;

	/* got a delalloc extent? */
	if (imap.br_startblock != HOLESTARTBLOCK &&
	    isnullstartblock(imap.br_startblock))
		goto allocate_blocks;

	xfs_bmbt_to_iomap(ip, &wpc->iomap, &imap, 0);
	trace_xfs_map_blocks_found(ip, offset, count, whichfork, &imap);
	return 0;
allocate_blocks:
	error = xfs_convert_blocks(wpc, ip, whichfork, offset);
	if (error) {
		/*
		 * If we failed to find the extent in the COW fork we might have
		 * raced with a COW to data fork conversion or truncate.
		 * Restart the lookup to catch the extent in the data fork for
		 * the former case, but prevent additional retries to avoid
		 * looping forever for the latter case.
		 */
		if (error == -EAGAIN && whichfork == XFS_COW_FORK && !retries++)
			goto retry;
		ASSERT(error != -EAGAIN);
		return error;
	}

	/*
	 * Due to merging the return real extent might be larger than the
	 * original delalloc one.  Trim the return extent to the next COW
	 * boundary again to force a re-lookup.
	 */
	if (whichfork != XFS_COW_FORK && cow_fsb != NULLFILEOFF) {
		loff_t		cow_offset = XFS_FSB_TO_B(mp, cow_fsb);

		if (cow_offset < wpc->iomap.offset + wpc->iomap.length)
			wpc->iomap.length = cow_offset - wpc->iomap.offset;
	}

	ASSERT(wpc->iomap.offset <= offset);
	ASSERT(wpc->iomap.offset + wpc->iomap.length > offset);
	trace_xfs_map_blocks_alloc(ip, offset, count, whichfork, &imap);
	return 0;
}

/*
 * Submit the bio for an ioend. We are passed an ioend with a bio attached to
 * it, and we submit that bio. The ioend may be used for multiple bio
 * submissions, so we only want to allocate an append transaction for the ioend
 * once. In the case of multiple bio submission, each bio will take an IO
 * reference to the ioend to ensure that the ioend completion is only done once
 * all bios have been submitted and the ioend is really done.
 *
 * If @status is non-zero, it means that we have a situation where some part of
 * the submission process has failed after we have marked pages for writeback
 * and unlocked them. In this situation, we need to fail the bio and ioend
 * rather than submit it to IO. This typically only happens on a filesystem
 * shutdown.
 */
STATIC int
xfs_submit_ioend(
	struct writeback_control *wbc,
	struct xfs_ioend	*ioend,
	int			status)
{
	unsigned int		nofs_flag;

	/*
	 * We can allocate memory here while doing writeback on behalf of
	 * memory reclaim.  To avoid memory allocation deadlocks set the
	 * task-wide nofs context for the following operations.
	 */
	nofs_flag = memalloc_nofs_save();

	/* Convert CoW extents to regular */
	if (!status && (ioend->io_flags & IOMAP_F_SHARED)) {
		status = xfs_reflink_convert_cow(XFS_I(ioend->io_inode),
				ioend->io_offset, ioend->io_size);
	}

	/* Reserve log space if we might write beyond the on-disk inode size. */
	if (!status &&
	    ((ioend->io_flags & IOMAP_F_SHARED) ||
	     ioend->io_type != IOMAP_UNWRITTEN) &&
	    xfs_ioend_is_append(ioend) &&
	    !ioend->io_private)
		status = xfs_setfilesize_trans_alloc(ioend);

	memalloc_nofs_restore(nofs_flag);

	ioend->io_bio->bi_private = ioend;
	ioend->io_bio->bi_end_io = xfs_end_bio;

	/*
	 * If we are failing the IO now, just mark the ioend with an
	 * error and finish it. This will run IO completion immediately
	 * as there is only one reference to the ioend at this point in
	 * time.
	 */
	if (status) {
		ioend->io_bio->bi_status = errno_to_blk_status(status);
		bio_endio(ioend->io_bio);
		return status;
	}

	submit_bio(ioend->io_bio);
	return 0;
}

static struct xfs_ioend *
xfs_alloc_ioend(
	struct inode		*inode,
	struct xfs_writepage_ctx *wpc,
	xfs_off_t		offset,
	sector_t		sector,
	struct writeback_control *wbc)
{
	struct xfs_ioend	*ioend;
	struct bio		*bio;

	bio = bio_alloc_bioset(GFP_NOFS, BIO_MAX_PAGES, &xfs_ioend_bioset);
	bio_set_dev(bio, wpc->iomap.bdev);
	bio->bi_iter.bi_sector = sector;
	bio->bi_opf = REQ_OP_WRITE | wbc_to_write_flags(wbc);
	bio->bi_write_hint = inode->i_write_hint;
	wbc_init_bio(wbc, bio);

	ioend = container_of(bio, struct xfs_ioend, io_inline_bio);
	INIT_LIST_HEAD(&ioend->io_list);
	ioend->io_type = wpc->iomap.type;
	ioend->io_flags = wpc->iomap.flags;
	ioend->io_inode = inode;
	ioend->io_size = 0;
	ioend->io_offset = offset;
	ioend->io_private = NULL;
	ioend->io_bio = bio;
	return ioend;
}
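
/*
 * Note that the ioend is embedded in its first bio: the allocation above
 * comes from xfs_ioend_bioset via the io_inline_bio member, so dropping the
 * last reference on that bio in xfs_destroy_ioend() frees the ioend itself.
 */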

/*
 * Allocate a new bio, and chain the old bio to the new one.
 *
 * Note that we have to perform the chaining in this unintuitive order
 * so that the bi_private linkage is set up in the right direction for the
 * traversal in xfs_destroy_ioend().
 */
static struct bio *
xfs_chain_bio(
	struct bio		*prev)
{
	struct bio *new;

	new = bio_alloc(GFP_NOFS, BIO_MAX_PAGES);
	bio_copy_dev(new, prev);	/* also copies over blkcg information */
	new->bi_iter.bi_sector = bio_end_sector(prev);
	new->bi_opf = prev->bi_opf;
	new->bi_write_hint = prev->bi_write_hint;

	bio_chain(prev, new);
	bio_get(prev);		/* for xfs_destroy_ioend */
	submit_bio(prev);
	return new;
}

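/*
 * Check whether a new block can simply be appended to the current ioend:
 * it must match the ioend's operation type and shared (COW) state, follow
 * it directly in file offset space, and follow the ioend's bio directly in
 * on-disk sector space.
 */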
static bool
xfs_can_add_to_ioend(
	struct xfs_writepage_ctx *wpc,
	xfs_off_t		offset,
	sector_t		sector)
{
	if ((wpc->iomap.flags & IOMAP_F_SHARED) !=
	    (wpc->ioend->io_flags & IOMAP_F_SHARED))
		return false;
	if (wpc->iomap.type != wpc->ioend->io_type)
		return false;
	if (offset != wpc->ioend->io_offset + wpc->ioend->io_size)
		return false;
	if (sector != bio_end_sector(wpc->ioend->io_bio))
		return false;
	return true;
}

/*
 * Test to see if we have an existing ioend structure that we could append to
 * first, otherwise finish off the current ioend and start another.
 */
STATIC void
xfs_add_to_ioend(
	struct inode		*inode,
	xfs_off_t		offset,
	struct page		*page,
	struct iomap_page	*iop,
	struct xfs_writepage_ctx *wpc,
	struct writeback_control *wbc,
	struct list_head	*iolist)
{
	sector_t		sector = iomap_sector(&wpc->iomap, offset);
	unsigned		len = i_blocksize(inode);
	unsigned		poff = offset & (PAGE_SIZE - 1);
	bool			merged, same_page = false;

	if (!wpc->ioend || !xfs_can_add_to_ioend(wpc, offset, sector)) {
		if (wpc->ioend)
			list_add(&wpc->ioend->io_list, iolist);
		wpc->ioend = xfs_alloc_ioend(inode, wpc, offset, sector, wbc);
	}

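	/*
	 * iop->write_count counts the bio segments that cover this page.
	 * Only skip the increment when this block merged into a segment that
	 * already covers the same page.
	 */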
	merged = __bio_try_merge_page(wpc->ioend->io_bio, page, len, poff,
			&same_page);

	if (iop && !same_page)
		atomic_inc(&iop->write_count);

	if (!merged) {
		if (bio_full(wpc->ioend->io_bio, len))
			wpc->ioend->io_bio = xfs_chain_bio(wpc->ioend->io_bio);
		bio_add_page(wpc->ioend->io_bio, page, len, poff);
	}

	wpc->ioend->io_size += len;
	wbc_account_cgroup_owner(wbc, page, len);
}

STATIC void
xfs_vm_invalidatepage(
	struct page		*page,
	unsigned int		offset,
	unsigned int		length)
{
	trace_xfs_invalidatepage(page->mapping->host, page, offset, length);
	iomap_invalidatepage(page, offset, length);
}

/*
 * If the page has delalloc blocks on it, we need to punch them out before we
 * invalidate the page.  If we don't, we leave a stale delalloc mapping on the
 * inode that can trip up a later direct I/O read operation on the same region.
 *
 * We prevent this by truncating away the delalloc regions on the page.  Because
 * they are delalloc, we can do this without needing a transaction. Indeed - if
 * we get ENOSPC errors, we have to be able to do this truncation without a
 * transaction as there is no space left for block reservation (typically why we
 * see an ENOSPC in writeback).
 */
STATIC void
xfs_aops_discard_page(
	struct page		*page)
{
	struct inode		*inode = page->mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	loff_t			offset = page_offset(page);
	xfs_fileoff_t		start_fsb = XFS_B_TO_FSBT(mp, offset);
	int			error;

	if (XFS_FORCED_SHUTDOWN(mp))
		goto out_invalidate;

	xfs_alert(mp,
		"page discard on page "PTR_FMT", inode 0x%llx, offset %llu.",
			page, ip->i_ino, offset);

	error = xfs_bmap_punch_delalloc_range(ip, start_fsb,
			PAGE_SIZE / i_blocksize(inode));
	if (error && !XFS_FORCED_SHUTDOWN(mp))
		xfs_alert(mp, "page discard unable to remove delalloc mapping.");
out_invalidate:
	xfs_vm_invalidatepage(page, 0, PAGE_SIZE);
}

/*
 * We implement an immediate ioend submission policy here to avoid needing to
 * chain multiple ioends and hence nest mempool allocations which can violate
 * forward progress guarantees we need to provide. The current ioend we are
 * adding blocks to is cached on the writepage context, and if the new block
 * does not append to the cached ioend it will create a new ioend and cache that
 * instead.
 *
 * If a new ioend is created and cached, the old ioend is returned and queued
 * locally for submission once the entire page is processed or an error has been
 * detected.  While ioends are submitted immediately after they are completed,
 * batching optimisations are provided by higher level block plugging.
 *
 * At the end of a writeback pass, there will be a cached ioend remaining on the
 * writepage context that the caller will need to submit.
 */
static int
xfs_writepage_map(
	struct xfs_writepage_ctx *wpc,
	struct writeback_control *wbc,
	struct inode		*inode,
	struct page		*page,
	uint64_t		end_offset)
{
	LIST_HEAD(submit_list);
	struct iomap_page	*iop = to_iomap_page(page);
	unsigned		len = i_blocksize(inode);
	struct xfs_ioend	*ioend, *next;
	uint64_t		file_offset;	/* file offset of page */
	int			error = 0, count = 0, i;

	ASSERT(iop || i_blocksize(inode) == PAGE_SIZE);
	ASSERT(!iop || atomic_read(&iop->write_count) == 0);

	/*
	 * Walk through the page to find areas to write back. If we run off the
	 * end of the current map or find the current map invalid, grab a new
	 * one.
	 */
	for (i = 0, file_offset = page_offset(page);
	     i < (PAGE_SIZE >> inode->i_blkbits) && file_offset < end_offset;
	     i++, file_offset += len) {
		if (iop && !test_bit(i, iop->uptodate))
			continue;

		error = xfs_map_blocks(wpc, inode, file_offset);
		if (error)
			break;
		if (wpc->iomap.type == IOMAP_HOLE)
			continue;
		xfs_add_to_ioend(inode, file_offset, page, iop, wpc, wbc,
				 &submit_list);
		count++;
	}

	ASSERT(wpc->ioend || list_empty(&submit_list));
	ASSERT(PageLocked(page));
	ASSERT(!PageWriteback(page));

	/*
	 * On error, we have to fail the ioend here because we may have set
	 * pages under writeback, we have to make sure we run IO completion to
	 * mark the error state of the IO appropriately, so we can't cancel the
	 * ioend directly here.  That means we have to mark this page as under
	 * writeback if we included any blocks from it in the ioend chain so
	 * that completion treats it correctly.
	 *
	 * If we didn't include the page in the ioend, then on error we can
	 * simply discard and unlock it as there are no other users of the page
	 * now.  The caller will still need to trigger submission of outstanding
	 * ioends on the writepage context so they are treated correctly on
	 * error.
	 */
	if (unlikely(error)) {
		if (!count) {
			xfs_aops_discard_page(page);
			ClearPageUptodate(page);
			unlock_page(page);
			goto done;
		}

		/*
		 * If the page was not fully cleaned, we need to ensure that the
		 * higher layers come back to it correctly.  That means we need
		 * to keep the page dirty, and for WB_SYNC_ALL writeback we need
		 * to ensure the PAGECACHE_TAG_TOWRITE index mark is not removed
		 * so another attempt to write this page in this writeback sweep
		 * will be made.
		 */
		set_page_writeback_keepwrite(page);
	} else {
		clear_page_dirty_for_io(page);
		set_page_writeback(page);
	}

	unlock_page(page);

	/*
	 * Preserve the original error if there was one, otherwise catch
	 * submission errors here and propagate into subsequent ioend
	 * submissions.
	 */
	list_for_each_entry_safe(ioend, next, &submit_list, io_list) {
		int error2;

		list_del_init(&ioend->io_list);
		error2 = xfs_submit_ioend(wbc, ioend, error);
		if (error2 && !error)
			error = error2;
	}

	/*
	 * We can end up here with no error and nothing to write only if we race
	 * with a partial page truncate on a sub-page block sized filesystem.
	 */
	if (!count)
		end_page_writeback(page);
done:
	mapping_set_error(page->mapping, error);
	return error;
}

/*
 * Write out a dirty page.
 *
 * For delalloc space on the page we need to allocate space and flush it.
 * For unwritten space on the page we need to start the conversion to
 * regular allocated space.
 */
STATIC int
xfs_do_writepage(
	struct page		*page,
	struct writeback_control *wbc,
	void			*data)
{
	struct xfs_writepage_ctx *wpc = data;
	struct inode		*inode = page->mapping->host;
	loff_t			offset;
	uint64_t		end_offset;
	pgoff_t			end_index;

	trace_xfs_writepage(inode, page, 0, 0);

	/*
	 * Refuse to write the page out if we are called from reclaim context.
	 *
	 * This avoids stack overflows when called from deeply used stacks in
	 * random callers for direct reclaim or memcg reclaim.  We explicitly
	 * allow reclaim from kswapd as the stack usage there is relatively low.
	 *
	 * This should never happen except in the case of a VM regression so
	 * warn about it.
	 */
	if (WARN_ON_ONCE((current->flags & (PF_MEMALLOC|PF_KSWAPD)) ==
			PF_MEMALLOC))
		goto redirty;

	/*
	 * Given that we do not allow direct reclaim to call us, we should
	 * never be called while in a filesystem transaction.
	 */
	if (WARN_ON_ONCE(current->flags & PF_MEMALLOC_NOFS))
		goto redirty;

	/*
1038 1039
	 * Is this page beyond the end of the file?
	 *
1040 1041 1042 1043 1044 1045 1046 1047 1048 1049
	 * The page index is less than the end_index, adjust the end_offset
	 * to the highest offset that this page should represent.
	 * -----------------------------------------------------
	 * |			file mapping	       | <EOF> |
	 * -----------------------------------------------------
	 * | Page ... | Page N-2 | Page N-1 |  Page N  |       |
	 * ^--------------------------------^----------|--------
	 * |     desired writeback range    |      see else    |
	 * ---------------------------------^------------------|
	 */
1050
	offset = i_size_read(inode);
1051
	end_index = offset >> PAGE_SHIFT;
1052
	if (page->index < end_index)
1053
		end_offset = (xfs_off_t)(page->index + 1) << PAGE_SHIFT;
1054 1055 1056 1057 1058 1059 1060 1061 1062 1063 1064 1065
	else {
		/*
		 * Check whether the page to write out is beyond or straddles
		 * i_size or not.
		 * -------------------------------------------------------
		 * |		file mapping		        | <EOF>  |
		 * -------------------------------------------------------
		 * | Page ... | Page N-2 | Page N-1 |  Page N   | Beyond |
		 * ^--------------------------------^-----------|---------
		 * |				    |      Straddles     |
		 * ---------------------------------^-----------|--------|
		 */
1066
		unsigned offset_into_page = offset & (PAGE_SIZE - 1);
1067 1068

		/*
1069 1070 1071 1072
		 * Skip the page if it is fully outside i_size, e.g. due to a
		 * truncate operation that is in progress. We must redirty the
		 * page so that reclaim stops reclaiming it. Otherwise
		 * xfs_vm_releasepage() is called on it and gets confused.
1073 1074 1075 1076 1077 1078 1079 1080 1081 1082 1083
		 *
		 * Note that the end_index is unsigned long, it would overflow
		 * if the given offset is greater than 16TB on 32-bit system
		 * and if we do check the page is fully outside i_size or not
		 * via "if (page->index >= end_index + 1)" as "end_index + 1"
		 * will be evaluated to 0.  Hence this page will be redirtied
		 * and be written out repeatedly which would result in an
		 * infinite loop, the user program that perform this operation
		 * will hang.  Instead, we can verify this situation by checking
		 * if the page to write is totally beyond the i_size or if it's
		 * offset is just equal to the EOF.
1084
		 */
1085 1086
		if (page->index > end_index ||
		    (page->index == end_index && offset_into_page == 0))
1087
			goto redirty;
1088 1089 1090 1091 1092

		/*
		 * The page straddles i_size.  It must be zeroed out on each
		 * and every writepage invocation because it may be mmapped.
		 * "A file is mapped in multiples of the page size.  For a file
1093
		 * that is not a multiple of the page size, the remaining
1094 1095 1096
		 * memory is zeroed when mapped, and writes to that region are
		 * not written out to the file."
		 */
1097
		zero_user_segment(page, offset_into_page, PAGE_SIZE);
1098 1099 1100

		/* Adjust the end_offset to the end of file */
		end_offset = offset;
L
Linus Torvalds 已提交
1101 1102
	}

1103
	return xfs_writepage_map(wpc, wbc, inode, page, end_offset);
1104

redirty:
	redirty_page_for_writepage(wbc, page);
	unlock_page(page);
	return 0;
}

STATIC int
xfs_vm_writepage(
	struct page		*page,
	struct writeback_control *wbc)
{
	struct xfs_writepage_ctx wpc = { };
	int			ret;

	ret = xfs_do_writepage(page, wbc, &wpc);
	if (wpc.ioend)
		ret = xfs_submit_ioend(wbc, wpc.ioend, ret);
	return ret;
}

STATIC int
xfs_vm_writepages(
	struct address_space	*mapping,
	struct writeback_control *wbc)
{
	struct xfs_writepage_ctx wpc = { };
	int			ret;

	xfs_iflags_clear(XFS_I(mapping->host), XFS_ITRUNCATED);
	ret = write_cache_pages(mapping, wbc, xfs_do_writepage, &wpc);
	if (wpc.ioend)
		ret = xfs_submit_ioend(wbc, wpc.ioend, ret);
	return ret;
}

STATIC int
xfs_dax_writepages(
	struct address_space	*mapping,
	struct writeback_control *wbc)
{
	xfs_iflags_clear(XFS_I(mapping->host), XFS_ITRUNCATED);
	return dax_writeback_mapping_range(mapping,
			xfs_find_bdev_for_inode(mapping->host), wbc);
}

STATIC int
xfs_vm_releasepage(
	struct page		*page,
	gfp_t			gfp_mask)
{
	trace_xfs_releasepage(page->mapping->host, page, 0, 0);
	return iomap_releasepage(page, gfp_mask);
}

STATIC sector_t
xfs_vm_bmap(
	struct address_space	*mapping,
	sector_t		block)
{
	struct xfs_inode	*ip = XFS_I(mapping->host);

	trace_xfs_vm_bmap(ip);

	/*
	 * The swap code (ab-)uses ->bmap to get a block mapping and then
	 * bypasses the file system for actual I/O.  We really can't allow
	 * that on reflink inodes, so we have to skip out here.  And yes,
	 * 0 is the magic code for a bmap error.
	 *
	 * Since we don't pass back blockdev info, we can't return bmap
	 * information for rt files either.
	 */
	if (xfs_is_cow_inode(ip) || XFS_IS_REALTIME_INODE(ip))
		return 0;
	return iomap_bmap(mapping, block, &xfs_iomap_ops);
}

STATIC int
xfs_vm_readpage(
	struct file		*unused,
	struct page		*page)
{
	trace_xfs_vm_readpage(page->mapping->host, 1);
	return iomap_readpage(page, &xfs_iomap_ops);
}

STATIC int
xfs_vm_readpages(
	struct file		*unused,
	struct address_space	*mapping,
	struct list_head	*pages,
	unsigned		nr_pages)
{
	trace_xfs_vm_readpages(mapping->host, nr_pages);
	return iomap_readpages(mapping, pages, nr_pages, &xfs_iomap_ops);
}

static int
xfs_iomap_swapfile_activate(
	struct swap_info_struct		*sis,
	struct file			*swap_file,
	sector_t			*span)
{
	sis->bdev = xfs_find_bdev_for_inode(file_inode(swap_file));
	return iomap_swapfile_activate(sis, swap_file, span, &xfs_iomap_ops);
}

const struct address_space_operations xfs_address_space_operations = {
	.readpage		= xfs_vm_readpage,
	.readpages		= xfs_vm_readpages,
	.writepage		= xfs_vm_writepage,
	.writepages		= xfs_vm_writepages,
	.set_page_dirty		= iomap_set_page_dirty,
	.releasepage		= xfs_vm_releasepage,
	.invalidatepage		= xfs_vm_invalidatepage,
	.bmap			= xfs_vm_bmap,
	.direct_IO		= noop_direct_IO,
	.migratepage		= iomap_migrate_page,
	.is_partially_uptodate  = iomap_is_partially_uptodate,
	.error_remove_page	= generic_error_remove_page,
	.swap_activate		= xfs_iomap_swapfile_activate,
};

const struct address_space_operations xfs_dax_aops = {
	.writepages		= xfs_dax_writepages,
	.direct_IO		= noop_direct_IO,
	.set_page_dirty		= noop_set_page_dirty,
	.invalidatepage		= noop_invalidatepage,
	.swap_activate		= xfs_iomap_swapfile_activate,
};