// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_inode_item.h"
#include "xfs_alloc.h"
#include "xfs_error.h"
#include "xfs_iomap.h"
#include "xfs_trace.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_bmap_btree.h"
#include "xfs_reflink.h"
#include <linux/gfp.h>
#include <linux/mpage.h>
#include <linux/pagevec.h>
#include <linux/writeback.h>

/*
 * structure owned by writepages passed to individual writepage calls
 */
struct xfs_writepage_ctx {
	struct xfs_bmbt_irec    imap;
	bool			imap_valid;
	unsigned int		io_type;
	struct xfs_ioend	*ioend;
	sector_t		last_block;
};

void
xfs_count_page_state(
	struct page		*page,
	int			*delalloc,
	int			*unwritten)
{
	struct buffer_head	*bh, *head;

	*delalloc = *unwritten = 0;

	bh = head = page_buffers(page);
	do {
		if (buffer_unwritten(bh))
			(*unwritten) = 1;
		else if (buffer_delay(bh))
			(*delalloc) = 1;
	} while ((bh = bh->b_this_page) != head);
}

struct block_device *
xfs_find_bdev_for_inode(
	struct inode		*inode)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;

	if (XFS_IS_REALTIME_INODE(ip))
		return mp->m_rtdev_targp->bt_bdev;
	else
		return mp->m_ddev_targp->bt_bdev;
}

struct dax_device *
xfs_find_daxdev_for_inode(
	struct inode		*inode)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;

	if (XFS_IS_REALTIME_INODE(ip))
		return mp->m_rtdev_targp->bt_daxdev;
	else
		return mp->m_ddev_targp->bt_daxdev;
}
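
/*
 * A minimal usage sketch, assuming a caller that already holds a referenced
 * inode. The helper name xfs_example_pick_targets() and the
 * XFS_AOPS_EXAMPLES guard are hypothetical, purely for illustration.
 */
#ifdef XFS_AOPS_EXAMPLES
static void
xfs_example_pick_targets(
	struct inode		*inode)
{
	/* Realtime inodes resolve to the RT device, all others to the data device. */
	struct block_device	*bdev = xfs_find_bdev_for_inode(inode);
	struct dax_device	*daxdev = xfs_find_daxdev_for_inode(inode);

	(void)bdev;
	(void)daxdev;
}
#endif /* XFS_AOPS_EXAMPLES */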

/*
 * We're now finished for good with this page.  Update the page state via the
 * associated buffer_heads, paying attention to the start and end offsets that
 * we need to process on the page.
 *
 * Note that we open code the action in end_buffer_async_write here so that we
 * only have to iterate over the buffers attached to the page once.  This is not
 * only more efficient, but also ensures that we only call end_page_writeback
 * at the end of the iteration, and thus avoids the pitfall of having the page
 * and buffers potentially freed after every call to end_buffer_async_write.
 */
static void
xfs_finish_page_writeback(
	struct inode		*inode,
	struct bio_vec		*bvec,
	int			error)
{
	struct buffer_head	*head = page_buffers(bvec->bv_page), *bh = head;
	bool			busy = false;
	unsigned int		off = 0;
	unsigned long		flags;

	ASSERT(bvec->bv_offset < PAGE_SIZE);
	ASSERT((bvec->bv_offset & (i_blocksize(inode) - 1)) == 0);
	ASSERT(bvec->bv_offset + bvec->bv_len <= PAGE_SIZE);
	ASSERT((bvec->bv_len & (i_blocksize(inode) - 1)) == 0);

	local_irq_save(flags);
	bit_spin_lock(BH_Uptodate_Lock, &head->b_state);
	do {
		if (off >= bvec->bv_offset &&
		    off < bvec->bv_offset + bvec->bv_len) {
			ASSERT(buffer_async_write(bh));
			ASSERT(bh->b_end_io == NULL);

			if (error) {
				mark_buffer_write_io_error(bh);
				clear_buffer_uptodate(bh);
				SetPageError(bvec->bv_page);
			} else {
				set_buffer_uptodate(bh);
			}
			clear_buffer_async_write(bh);
			unlock_buffer(bh);
		} else if (buffer_async_write(bh)) {
			ASSERT(buffer_locked(bh));
			busy = true;
		}
		off += bh->b_size;
	} while ((bh = bh->b_this_page) != head);
	bit_spin_unlock(BH_Uptodate_Lock, &head->b_state);
	local_irq_restore(flags);

	if (!busy)
		end_page_writeback(bvec->bv_page);
}

/*
 * We're now finished for good with this ioend structure.  Update the page
 * state, release holds on bios, and finally free up memory.  Do not use the
 * ioend after this.
 */
STATIC void
xfs_destroy_ioend(
	struct xfs_ioend	*ioend,
	int			error)
{
	struct inode		*inode = ioend->io_inode;
	struct bio		*bio = &ioend->io_inline_bio;
	struct bio		*last = ioend->io_bio, *next;
	u64			start = bio->bi_iter.bi_sector;
	bool			quiet = bio_flagged(bio, BIO_QUIET);

	for (bio = &ioend->io_inline_bio; bio; bio = next) {
		struct bio_vec	*bvec;
		int		i;

		/*
		 * For the last bio, bi_private points to the ioend, so we
		 * need to explicitly end the iteration here.
		 */
		if (bio == last)
			next = NULL;
		else
			next = bio->bi_private;

		/* walk each page on bio, ending page IO on them */
		bio_for_each_segment_all(bvec, bio, i)
			xfs_finish_page_writeback(inode, bvec, error);

		bio_put(bio);
	}

	if (unlikely(error && !quiet)) {
		xfs_err_ratelimited(XFS_I(inode)->i_mount,
			"writeback error on sector %llu", start);
	}
}

/*
 * Fast and loose check if this write could update the on-disk inode size.
 */
static inline bool xfs_ioend_is_append(struct xfs_ioend *ioend)
{
	return ioend->io_offset + ioend->io_size >
		XFS_I(ioend->io_inode)->i_d.di_size;
}
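
/*
 * A worked example of the check above, with illustrative numbers: an ioend
 * covering io_offset = 4096 and io_size = 8192 ends at byte 12288; if the
 * on-disk di_size is still 8192, the write extends the file and a
 * setfilesize update will be needed at I/O completion time.
 */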

STATIC int
xfs_setfilesize_trans_alloc(
	struct xfs_ioend	*ioend)
{
	struct xfs_mount	*mp = XFS_I(ioend->io_inode)->i_mount;
	struct xfs_trans	*tp;
	int			error;

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_fsyncts, 0, 0,
				XFS_TRANS_NOFS, &tp);
	if (error)
		return error;

	ioend->io_append_trans = tp;

	/*
	 * We may pass freeze protection with a transaction.  So tell lockdep
	 * we released it.
	 */
	__sb_writers_release(ioend->io_inode->i_sb, SB_FREEZE_FS);
	/*
	 * We hand off the transaction to the completion thread now, so
	 * clear the flag here.
	 */
	current_restore_flags_nested(&tp->t_pflags, PF_MEMALLOC_NOFS);
	return 0;
}

/*
 * Update on-disk file size now that data has been written to disk.
 */
STATIC int
__xfs_setfilesize(
	struct xfs_inode	*ip,
	struct xfs_trans	*tp,
	xfs_off_t		offset,
	size_t			size)
{
	xfs_fsize_t		isize;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	isize = xfs_new_eof(ip, offset + size);
	if (!isize) {
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
		xfs_trans_cancel(tp);
		return 0;
	}

	trace_xfs_setfilesize(ip, offset, size);

	ip->i_d.di_size = isize;
	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);

	return xfs_trans_commit(tp);
}

int
xfs_setfilesize(
	struct xfs_inode	*ip,
	xfs_off_t		offset,
	size_t			size)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp;
	int			error;

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_fsyncts, 0, 0, 0, &tp);
	if (error)
		return error;

	return __xfs_setfilesize(ip, tp, offset, size);
}
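
/*
 * A minimal usage sketch, assuming data for an extending write to @ip has
 * safely reached disk; xfs_example_update_isize() and the
 * XFS_AOPS_EXAMPLES guard are hypothetical names, not kernel APIs.
 */
#ifdef XFS_AOPS_EXAMPLES
static int
xfs_example_update_isize(
	struct xfs_inode	*ip,
	xfs_off_t		offset,
	size_t			size)
{
	/* Allocates its own transaction and commits the new on-disk size. */
	return xfs_setfilesize(ip, offset, size);
}
#endif /* XFS_AOPS_EXAMPLES */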

STATIC int
xfs_setfilesize_ioend(
	struct xfs_ioend	*ioend,
	int			error)
{
	struct xfs_inode	*ip = XFS_I(ioend->io_inode);
	struct xfs_trans	*tp = ioend->io_append_trans;

	/*
	 * The transaction may have been allocated in the I/O submission thread,
	 * thus we need to mark ourselves as being in a transaction manually.
	 * Similarly for freeze protection.
	 */
	current_set_flags_nested(&tp->t_pflags, PF_MEMALLOC_NOFS);
	__sb_writers_acquired(VFS_I(ip)->i_sb, SB_FREEZE_FS);

	/* we abort the update if there was an IO error */
	if (error) {
		xfs_trans_cancel(tp);
		return error;
	}

	return __xfs_setfilesize(ip, tp, ioend->io_offset, ioend->io_size);
}

/*
 * IO write completion.
 */
STATIC void
xfs_end_io(
	struct work_struct *work)
{
	struct xfs_ioend	*ioend =
		container_of(work, struct xfs_ioend, io_work);
	struct xfs_inode	*ip = XFS_I(ioend->io_inode);
	xfs_off_t		offset = ioend->io_offset;
	size_t			size = ioend->io_size;
	int			error;

	/*
	 * Just clean up the in-memory structures if the fs has been shut down.
	 */
	if (XFS_FORCED_SHUTDOWN(ip->i_mount)) {
		error = -EIO;
		goto done;
	}

	/*
	 * Clean up any COW blocks on an I/O error.
	 */
	error = blk_status_to_errno(ioend->io_bio->bi_status);
	if (unlikely(error)) {
		switch (ioend->io_type) {
		case XFS_IO_COW:
			xfs_reflink_cancel_cow_range(ip, offset, size, true);
			break;
		}

		goto done;
	}

	/*
	 * Success:  commit the COW or unwritten blocks if needed.
	 */
	switch (ioend->io_type) {
	case XFS_IO_COW:
		error = xfs_reflink_end_cow(ip, offset, size);
		break;
	case XFS_IO_UNWRITTEN:
		/* writeback should never update isize */
		error = xfs_iomap_write_unwritten(ip, offset, size, false);
		break;
	default:
		ASSERT(!xfs_ioend_is_append(ioend) || ioend->io_append_trans);
		break;
	}

done:
	if (ioend->io_append_trans)
		error = xfs_setfilesize_ioend(ioend, error);
	xfs_destroy_ioend(ioend, error);
}

STATIC void
xfs_end_bio(
	struct bio		*bio)
{
	struct xfs_ioend	*ioend = bio->bi_private;
	struct xfs_mount	*mp = XFS_I(ioend->io_inode)->i_mount;

	if (ioend->io_type == XFS_IO_UNWRITTEN || ioend->io_type == XFS_IO_COW)
		queue_work(mp->m_unwritten_workqueue, &ioend->io_work);
	else if (ioend->io_append_trans)
		queue_work(mp->m_data_workqueue, &ioend->io_work);
	else
		xfs_destroy_ioend(ioend, blk_status_to_errno(bio->bi_status));
}

STATIC int
xfs_map_blocks(
	struct inode		*inode,
	loff_t			offset,
	struct xfs_bmbt_irec	*imap,
	int			type)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	ssize_t			count = i_blocksize(inode);
	xfs_fileoff_t		offset_fsb, end_fsb;
	int			error = 0;
	int			bmapi_flags = XFS_BMAPI_ENTIRE;
	int			nimaps = 1;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	/*
	 * Truncate can race with writeback since writeback doesn't take the
	 * iolock and truncate decreases the file size before it starts
	 * truncating the pages between new_size and old_size.  Therefore, we
	 * can end up in the situation where writeback gets a CoW fork mapping
	 * but the truncate makes the mapping invalid and we end up in here
	 * trying to get a new mapping.  Bail out here so that we simply never
	 * get a valid mapping and so we drop the write altogether.  The page
	 * truncation will kill the contents anyway.
	 */
	if (type == XFS_IO_COW && offset > i_size_read(inode))
		return 0;

	ASSERT(type != XFS_IO_COW);
	if (type == XFS_IO_UNWRITTEN)
		bmapi_flags |= XFS_BMAPI_IGSTATE;

	xfs_ilock(ip, XFS_ILOCK_SHARED);
	ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE ||
	       (ip->i_df.if_flags & XFS_IFEXTENTS));
	ASSERT(offset <= mp->m_super->s_maxbytes);

	if (offset > mp->m_super->s_maxbytes - count)
		count = mp->m_super->s_maxbytes - offset;
	end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + count);
	offset_fsb = XFS_B_TO_FSBT(mp, offset);
	error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb,
				imap, &nimaps, bmapi_flags);
	/*
	 * Truncate an overwrite extent if there's a pending CoW
	 * reservation before the end of this extent.  This forces us
	 * to come back to writepage to take care of the CoW.
	 */
	if (nimaps && type == XFS_IO_OVERWRITE)
		xfs_reflink_trim_irec_to_next_cow(ip, offset_fsb, imap);
	xfs_iunlock(ip, XFS_ILOCK_SHARED);

	if (error)
		return error;

	if (type == XFS_IO_DELALLOC &&
	    (!nimaps || isnullstartblock(imap->br_startblock))) {
		error = xfs_iomap_write_allocate(ip, XFS_DATA_FORK, offset,
				imap);
		if (!error)
			trace_xfs_map_blocks_alloc(ip, offset, count, type, imap);
		return error;
	}

#ifdef DEBUG
	if (type == XFS_IO_UNWRITTEN) {
		ASSERT(nimaps);
		ASSERT(imap->br_startblock != HOLESTARTBLOCK);
		ASSERT(imap->br_startblock != DELAYSTARTBLOCK);
	}
#endif
	if (nimaps)
		trace_xfs_map_blocks_found(ip, offset, count, type, imap);
	return 0;
}

STATIC bool
xfs_imap_valid(
	struct inode		*inode,
	struct xfs_bmbt_irec	*imap,
	xfs_off_t		offset)
{
	offset >>= inode->i_blkbits;

	/*
	 * We have to make sure the cached mapping is within EOF to protect
	 * against eofblocks trimming on file release leaving us with a stale
	 * mapping. Otherwise, a page for a subsequent file extending buffered
	 * write could get picked up by this writeback cycle and written to the
	 * wrong blocks.
	 *
	 * Note that what we really want here is a generic mapping invalidation
	 * mechanism to protect us from arbitrary extent modifying contexts, not
	 * just eofblocks.
	 */
	xfs_trim_extent_eof(imap, XFS_I(inode));

	return offset >= imap->br_startoff &&
		offset < imap->br_startoff + imap->br_blockcount;
}
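
/*
 * A worked example of the comparison above, with illustrative numbers: with
 * 4k blocks (i_blkbits == 12), byte offset 65536 becomes file system block
 * 16; a cached imap with br_startoff = 8 and br_blockcount = 16 covers
 * blocks [8, 24), so the mapping is still valid for this offset.
 */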

STATIC void
xfs_start_buffer_writeback(
	struct buffer_head	*bh)
{
	ASSERT(buffer_mapped(bh));
	ASSERT(buffer_locked(bh));
	ASSERT(!buffer_delay(bh));
	ASSERT(!buffer_unwritten(bh));

	bh->b_end_io = NULL;
	set_buffer_async_write(bh);
	set_buffer_uptodate(bh);
	clear_buffer_dirty(bh);
}

STATIC void
xfs_start_page_writeback(
	struct page		*page,
	int			clear_dirty)
{
	ASSERT(PageLocked(page));
	ASSERT(!PageWriteback(page));

	/*
	 * if the page was not fully cleaned, we need to ensure that the higher
	 * layers come back to it correctly. That means we need to keep the page
	 * dirty, and for WB_SYNC_ALL writeback we need to ensure the
	 * PAGECACHE_TAG_TOWRITE index mark is not removed so another attempt to
	 * write this page in this writeback sweep will be made.
	 */
	if (clear_dirty) {
		clear_page_dirty_for_io(page);
		set_page_writeback(page);
	} else
		set_page_writeback_keepwrite(page);

	unlock_page(page);
}

static inline int xfs_bio_add_buffer(struct bio *bio, struct buffer_head *bh)
{
	return bio_add_page(bio, bh->b_page, bh->b_size, bh_offset(bh));
}

/*
 * Submit the bio for an ioend. We are passed an ioend with a bio attached to
 * it, and we submit that bio. The ioend may be used for multiple bio
 * submissions, so we only want to allocate an append transaction for the ioend
 * once. In the case of multiple bio submission, each bio will take an IO
 * reference to the ioend to ensure that the ioend completion is only done once
 * all bios have been submitted and the ioend is really done.
519 520 521
 *
 * If @fail is non-zero, it means that we have a situation where some part of
 * the submission process has failed after we have marked paged for writeback
522 523 524
 * and unlocked them. In this situation, we need to fail the bio and ioend
 * rather than submit it to IO. This typically only happens on a filesystem
 * shutdown.
 */
STATIC int
xfs_submit_ioend(
	struct writeback_control *wbc,
	struct xfs_ioend	*ioend,
	int			status)
{
	/* Convert CoW extents to regular */
	if (!status && ioend->io_type == XFS_IO_COW) {
		/*
		 * Yuk. This can do memory allocation, but is not a
		 * transactional operation so everything is done in GFP_KERNEL
		 * context. That can deadlock, because we hold pages in
		 * writeback state and GFP_KERNEL allocations can block on them.
		 * Hence we must operate in nofs conditions here.
		 */
		unsigned nofs_flag;

		nofs_flag = memalloc_nofs_save();
		status = xfs_reflink_convert_cow(XFS_I(ioend->io_inode),
				ioend->io_offset, ioend->io_size);
		memalloc_nofs_restore(nofs_flag);
	}

	/* Reserve log space if we might write beyond the on-disk inode size. */
	if (!status &&
	    ioend->io_type != XFS_IO_UNWRITTEN &&
	    xfs_ioend_is_append(ioend) &&
	    !ioend->io_append_trans)
		status = xfs_setfilesize_trans_alloc(ioend);

	ioend->io_bio->bi_private = ioend;
	ioend->io_bio->bi_end_io = xfs_end_bio;
	ioend->io_bio->bi_opf = REQ_OP_WRITE | wbc_to_write_flags(wbc);

	/*
	 * If we are failing the IO now, just mark the ioend with an
	 * error and finish it. This will run IO completion immediately
	 * as there is only one reference to the ioend at this point in
	 * time.
	 */
	if (status) {
		ioend->io_bio->bi_status = errno_to_blk_status(status);
		bio_endio(ioend->io_bio);
		return status;
	}

	ioend->io_bio->bi_write_hint = ioend->io_inode->i_write_hint;
	submit_bio(ioend->io_bio);
	return 0;
}

static void
xfs_init_bio_from_bh(
	struct bio		*bio,
	struct buffer_head	*bh)
{
	bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9);
	bio_set_dev(bio, bh->b_bdev);
}

static struct xfs_ioend *
xfs_alloc_ioend(
	struct inode		*inode,
	unsigned int		type,
	xfs_off_t		offset,
	struct buffer_head	*bh)
{
	struct xfs_ioend	*ioend;
	struct bio		*bio;

	bio = bio_alloc_bioset(GFP_NOFS, BIO_MAX_PAGES, &xfs_ioend_bioset);
	xfs_init_bio_from_bh(bio, bh);

	ioend = container_of(bio, struct xfs_ioend, io_inline_bio);
	INIT_LIST_HEAD(&ioend->io_list);
	ioend->io_type = type;
	ioend->io_inode = inode;
	ioend->io_size = 0;
	ioend->io_offset = offset;
	INIT_WORK(&ioend->io_work, xfs_end_io);
	ioend->io_append_trans = NULL;
	ioend->io_bio = bio;
	return ioend;
}

/*
 * Allocate a new bio, and chain the old bio to the new one.
 *
 * Note that we have to perform the chaining in this unintuitive order
 * so that the bi_private linkage is set up in the right direction for the
 * traversal in xfs_destroy_ioend().
 */
static void
xfs_chain_bio(
	struct xfs_ioend	*ioend,
	struct writeback_control *wbc,
	struct buffer_head	*bh)
{
	struct bio *new;

	new = bio_alloc(GFP_NOFS, BIO_MAX_PAGES);
	xfs_init_bio_from_bh(new, bh);

	bio_chain(ioend->io_bio, new);
	bio_get(ioend->io_bio);		/* for xfs_destroy_ioend */
	ioend->io_bio->bi_opf = REQ_OP_WRITE | wbc_to_write_flags(wbc);
	ioend->io_bio->bi_write_hint = ioend->io_inode->i_write_hint;
	submit_bio(ioend->io_bio);
	ioend->io_bio = new;
}
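
/*
 * An illustrative sketch of the linkage the two functions above build:
 * after two chainings the ioend's bios form a bi_private chain that
 * xfs_destroy_ioend() walks from the embedded bio to the last submitted
 * one:
 *
 *	io_inline_bio -> bio 2 -> io_bio (last; its bi_private is the ioend)
 *
 * where each "->" is the bi_private pointer set up by bio_chain() and
 * xfs_submit_ioend().
 */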

/*
 * Test to see if we've been building up a completion structure for
 * earlier buffers -- if so, we try to append to this ioend if we
 * can, otherwise we finish off any current ioend and start another.
 * Return the ioend we finished off so that the caller can submit it
 * once it has finished processing the dirty page.
 */
STATIC void
xfs_add_to_ioend(
	struct inode		*inode,
	struct buffer_head	*bh,
	xfs_off_t		offset,
	struct xfs_writepage_ctx *wpc,
	struct writeback_control *wbc,
	struct list_head	*iolist)
{
	if (!wpc->ioend || wpc->io_type != wpc->ioend->io_type ||
	    bh->b_blocknr != wpc->last_block + 1 ||
	    offset != wpc->ioend->io_offset + wpc->ioend->io_size) {
		if (wpc->ioend)
			list_add(&wpc->ioend->io_list, iolist);
		wpc->ioend = xfs_alloc_ioend(inode, wpc->io_type, offset, bh);
	}

	/*
	 * If the buffer doesn't fit into the bio we need to allocate a new
	 * one.  This shouldn't happen more than once for a given buffer.
	 */
	while (xfs_bio_add_buffer(wpc->ioend->io_bio, bh) != bh->b_size)
		xfs_chain_bio(wpc->ioend, wbc, bh);

	wpc->ioend->io_size += bh->b_size;
	wpc->last_block = bh->b_blocknr;
	xfs_start_buffer_writeback(bh);
}

STATIC void
xfs_map_buffer(
	struct inode		*inode,
	struct buffer_head	*bh,
	struct xfs_bmbt_irec	*imap,
	xfs_off_t		offset)
{
	sector_t		bn;
	struct xfs_mount	*m = XFS_I(inode)->i_mount;
	xfs_off_t		iomap_offset = XFS_FSB_TO_B(m, imap->br_startoff);
	xfs_daddr_t		iomap_bn = xfs_fsb_to_db(XFS_I(inode), imap->br_startblock);

	ASSERT(imap->br_startblock != HOLESTARTBLOCK);
	ASSERT(imap->br_startblock != DELAYSTARTBLOCK);

	bn = (iomap_bn >> (inode->i_blkbits - BBSHIFT)) +
	      ((offset - iomap_offset) >> inode->i_blkbits);

	ASSERT(bn || XFS_IS_REALTIME_INODE(XFS_I(inode)));

	bh->b_blocknr = bn;
	set_buffer_mapped(bh);
}
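
/*
 * A worked example of the sector arithmetic above, with illustrative
 * numbers: with 4k blocks (i_blkbits == 12, BBSHIFT == 9) and iomap_bn =
 * 800 basic blocks, the extent starts at b_blocknr 800 >> 3 = 100; a
 * buffer two blocks into the extent (offset - iomap_offset == 8192) adds
 * 8192 >> 12 = 2, so bn = 102.
 */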

STATIC void
xfs_map_at_offset(
	struct inode		*inode,
	struct buffer_head	*bh,
	struct xfs_bmbt_irec	*imap,
	xfs_off_t		offset)
{
	ASSERT(imap->br_startblock != HOLESTARTBLOCK);
	ASSERT(imap->br_startblock != DELAYSTARTBLOCK);

	xfs_map_buffer(inode, bh, imap, offset);
	set_buffer_mapped(bh);
	clear_buffer_delay(bh);
	clear_buffer_unwritten(bh);
}

/*
 * Test if a given page contains at least one buffer of a given @type.
 * If @check_all_buffers is true, then we walk all the buffers in the page to
 * try to find one of the type passed in. If it is not set, then the caller only
 * needs to check the first buffer on the page for a match.
 */
STATIC bool
xfs_check_page_type(
	struct page		*page,
	unsigned int		type,
	bool			check_all_buffers)
{
	struct buffer_head	*bh;
	struct buffer_head	*head;

	if (PageWriteback(page))
		return false;
	if (!page->mapping)
		return false;
	if (!page_has_buffers(page))
		return false;

	bh = head = page_buffers(page);
	do {
		if (buffer_unwritten(bh)) {
			if (type == XFS_IO_UNWRITTEN)
				return true;
		} else if (buffer_delay(bh)) {
			if (type == XFS_IO_DELALLOC)
				return true;
		} else if (buffer_dirty(bh) && buffer_mapped(bh)) {
			if (type == XFS_IO_OVERWRITE)
				return true;
		}

		/* If we are only checking the first buffer, we are done now. */
		if (!check_all_buffers)
			break;
	} while ((bh = bh->b_this_page) != head);

	return false;
}

STATIC void
xfs_vm_invalidatepage(
	struct page		*page,
	unsigned int		offset,
	unsigned int		length)
{
	trace_xfs_invalidatepage(page->mapping->host, page, offset,
				 length);

	/*
	 * If we are invalidating the entire page, clear the dirty state from it
	 * so that we can check for attempts to release dirty cached pages in
	 * xfs_vm_releasepage().
	 */
	if (offset == 0 && length >= PAGE_SIZE)
		cancel_dirty_page(page);
	block_invalidatepage(page, offset, length);
}

/*
 * If the page has delalloc buffers on it, we need to punch them out before we
 * invalidate the page. If we don't, we leave a stale delalloc mapping on the
 * inode that can trip a BUG() in xfs_get_blocks() later on if a direct IO read
 * is done on that same region - the delalloc extent is returned when none is
 * supposed to be there.
 *
 * We prevent this by truncating away the delalloc regions on the page before
 * invalidating it. Because they are delalloc, we can do this without needing a
 * transaction. Indeed - if we get ENOSPC errors, we have to be able to do this
 * truncation without a transaction as there is no space left for block
 * reservation (typically why we see a ENOSPC in writeback).
 *
 * This is not a performance critical path, so for now just do the punching a
 * buffer head at a time.
 */
STATIC void
xfs_aops_discard_page(
	struct page		*page)
{
	struct inode		*inode = page->mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	struct buffer_head	*bh, *head;
	loff_t			offset = page_offset(page);

	if (!xfs_check_page_type(page, XFS_IO_DELALLOC, true))
		goto out_invalidate;

	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
		goto out_invalidate;

	xfs_alert(ip->i_mount,
		"page discard on page "PTR_FMT", inode 0x%llx, offset %llu.",
			page, ip->i_ino, offset);

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	bh = head = page_buffers(page);
	do {
		int		error;
		xfs_fileoff_t	start_fsb;

		if (!buffer_delay(bh))
			goto next_buffer;

		start_fsb = XFS_B_TO_FSBT(ip->i_mount, offset);
		error = xfs_bmap_punch_delalloc_range(ip, start_fsb, 1);
		if (error) {
			/* something screwed, just bail */
			if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) {
				xfs_alert(ip->i_mount,
			"page discard unable to remove delalloc mapping.");
			}
			break;
		}
next_buffer:
		offset += i_blocksize(inode);

	} while ((bh = bh->b_this_page) != head);

	xfs_iunlock(ip, XFS_ILOCK_EXCL);
out_invalidate:
	xfs_vm_invalidatepage(page, 0, PAGE_SIZE);
	return;
}

static int
xfs_map_cow(
	struct xfs_writepage_ctx *wpc,
	struct inode		*inode,
	loff_t			offset,
	unsigned int		*new_type)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_bmbt_irec	imap;
	bool			is_cow = false;
	int			error;

	/*
	 * If we already have a valid COW mapping keep using it.
	 */
	if (wpc->io_type == XFS_IO_COW) {
		wpc->imap_valid = xfs_imap_valid(inode, &wpc->imap, offset);
		if (wpc->imap_valid) {
			*new_type = XFS_IO_COW;
			return 0;
		}
	}

	/*
	 * Else we need to check if there is a COW mapping at this offset.
	 */
	xfs_ilock(ip, XFS_ILOCK_SHARED);
	is_cow = xfs_reflink_find_cow_mapping(ip, offset, &imap);
	xfs_iunlock(ip, XFS_ILOCK_SHARED);

	if (!is_cow)
		return 0;

	/*
	 * And if the COW mapping has a delayed extent here we need to
	 * allocate real space for it now.
	 */
	if (isnullstartblock(imap.br_startblock)) {
		error = xfs_iomap_write_allocate(ip, XFS_COW_FORK, offset,
				&imap);
		if (error)
			return error;
	}

	wpc->io_type = *new_type = XFS_IO_COW;
	wpc->imap_valid = true;
	wpc->imap = imap;
	return 0;
}

/*
 * We implement an immediate ioend submission policy here to avoid needing to
 * chain multiple ioends and hence nest mempool allocations which can violate
 * forward progress guarantees we need to provide. The current ioend we are
 * adding buffers to is cached on the writepage context, and if the new buffer
 * does not append to the cached ioend it will create a new ioend and cache that
 * instead.
 *
 * If a new ioend is created and cached, the old ioend is returned and queued
 * locally for submission once the entire page is processed or an error has been
 * detected.  While ioends are submitted immediately after they are completed,
 * batching optimisations are provided by higher level block plugging.
 *
 * At the end of a writeback pass, there will be a cached ioend remaining on the
 * writepage context that the caller will need to submit.
 */
static int
xfs_writepage_map(
	struct xfs_writepage_ctx *wpc,
	struct writeback_control *wbc,
	struct inode		*inode,
	struct page		*page,
	uint64_t		end_offset)
{
	LIST_HEAD(submit_list);
	struct xfs_ioend	*ioend, *next;
	struct buffer_head	*bh, *head;
	ssize_t			len = i_blocksize(inode);
	uint64_t		offset;
	int			error = 0;
	int			count = 0;
	int			uptodate = 1;
	unsigned int		new_type;

	bh = head = page_buffers(page);
	offset = page_offset(page);
	do {
		if (offset >= end_offset)
			break;
		if (!buffer_uptodate(bh))
			uptodate = 0;

		/*
		 * set_page_dirty dirties all buffers in a page, independent
		 * of their state.  The dirty state however is entirely
		 * meaningless for holes (!mapped && uptodate), so skip
		 * buffers covering holes here.
		 */
		if (!buffer_mapped(bh) && buffer_uptodate(bh)) {
			wpc->imap_valid = false;
			continue;
		}

		if (buffer_unwritten(bh))
			new_type = XFS_IO_UNWRITTEN;
		else if (buffer_delay(bh))
			new_type = XFS_IO_DELALLOC;
		else if (buffer_uptodate(bh))
			new_type = XFS_IO_OVERWRITE;
		else {
			if (PageUptodate(page))
				ASSERT(buffer_mapped(bh));
			/*
			 * This buffer is not uptodate and will not be
			 * written to disk.  Ensure that we will put any
			 * subsequent writeable buffers into a new
			 * ioend.
			 */
			wpc->imap_valid = false;
			continue;
		}

		if (xfs_is_reflink_inode(XFS_I(inode))) {
			error = xfs_map_cow(wpc, inode, offset, &new_type);
			if (error)
				goto out;
		}

		if (wpc->io_type != new_type) {
			wpc->io_type = new_type;
			wpc->imap_valid = false;
		}

		if (wpc->imap_valid)
			wpc->imap_valid = xfs_imap_valid(inode, &wpc->imap,
							 offset);
		if (!wpc->imap_valid) {
			error = xfs_map_blocks(inode, offset, &wpc->imap,
					     wpc->io_type);
			if (error)
				goto out;
			wpc->imap_valid = xfs_imap_valid(inode, &wpc->imap,
							 offset);
		}
		if (wpc->imap_valid) {
			lock_buffer(bh);
			if (wpc->io_type != XFS_IO_OVERWRITE)
				xfs_map_at_offset(inode, bh, &wpc->imap, offset);
			xfs_add_to_ioend(inode, bh, offset, wpc, wbc, &submit_list);
			count++;
		}

	} while (offset += len, ((bh = bh->b_this_page) != head));

	if (uptodate && bh == head)
		SetPageUptodate(page);

	ASSERT(wpc->ioend || list_empty(&submit_list));

out:
	/*
	 * On error, we have to fail the ioend here because we have locked
	 * buffers in the ioend. If we don't do this, we'll deadlock
	 * invalidating the page as that tries to lock the buffers on the page.
	 * Also, because we may have set pages under writeback, we have to make
	 * sure we run IO completion to mark the error state of the IO
	 * appropriately, so we can't cancel the ioend directly here. That means
	 * we have to mark this page as under writeback if we included any
	 * buffers from it in the ioend chain so that completion treats it
	 * correctly.
	 *
	 * If we didn't include the page in the ioend, then on error we can
	 * simply discard and unlock it as there are no other users of the page
	 * or its buffers right now. The caller will still need to trigger
	 * submission of outstanding ioends on the writepage context so they are
	 * treated correctly on error.
	 */
	if (count) {
		xfs_start_page_writeback(page, !error);

		/*
		 * Preserve the original error if there was one, otherwise catch
		 * submission errors here and propagate into subsequent ioend
		 * submissions.
		 */
		list_for_each_entry_safe(ioend, next, &submit_list, io_list) {
			int error2;

			list_del_init(&ioend->io_list);
			error2 = xfs_submit_ioend(wbc, ioend, error);
			if (error2 && !error)
				error = error2;
		}
	} else if (error) {
		xfs_aops_discard_page(page);
		ClearPageUptodate(page);
		unlock_page(page);
	} else {
		/*
		 * We can end up here with no error and nothing to write if we
		 * race with a partial page truncate on a sub-page block sized
		 * filesystem. In that case we need to mark the page clean.
		 */
		xfs_start_page_writeback(page, 1);
		end_page_writeback(page);
	}

	mapping_set_error(page->mapping, error);
	return error;
}

/*
 * Write out a dirty page.
 *
 * For delalloc space on the page we need to allocate space and flush it.
 * For unwritten space on the page we need to start the conversion to
 * regular allocated space.
 * For any other dirty buffer heads on the page we should flush them.
 */
STATIC int
xfs_do_writepage(
	struct page		*page,
	struct writeback_control *wbc,
	void			*data)
{
	struct xfs_writepage_ctx *wpc = data;
	struct inode		*inode = page->mapping->host;
	loff_t			offset;
	uint64_t		end_offset;
	pgoff_t			end_index;

	trace_xfs_writepage(inode, page, 0, 0);

	ASSERT(page_has_buffers(page));

	/*
	 * Refuse to write the page out if we are called from reclaim context.
	 *
	 * This avoids stack overflows when called from deeply used stacks in
	 * random callers for direct reclaim or memcg reclaim.  We explicitly
	 * allow reclaim from kswapd as the stack usage there is relatively low.
	 *
	 * This should never happen except in the case of a VM regression so
	 * warn about it.
	 */
	if (WARN_ON_ONCE((current->flags & (PF_MEMALLOC|PF_KSWAPD)) ==
			PF_MEMALLOC))
		goto redirty;

	/*
	 * Given that we do not allow direct reclaim to call us, we should
	 * never be called while in a filesystem transaction.
	 */
	if (WARN_ON_ONCE(current->flags & PF_MEMALLOC_NOFS))
		goto redirty;

	/*
	 * Is this page beyond the end of the file?
	 *
	 * The page index is less than the end_index, adjust the end_offset
	 * to the highest offset that this page should represent.
	 * -----------------------------------------------------
	 * |			file mapping	       | <EOF> |
	 * -----------------------------------------------------
	 * | Page ... | Page N-2 | Page N-1 |  Page N  |       |
	 * ^--------------------------------^----------|--------
	 * |     desired writeback range    |      see else    |
	 * ---------------------------------^------------------|
	 */
	offset = i_size_read(inode);
	end_index = offset >> PAGE_SHIFT;
	if (page->index < end_index)
		end_offset = (xfs_off_t)(page->index + 1) << PAGE_SHIFT;
	else {
		/*
		 * Check whether the page to write out is beyond or straddles
		 * i_size or not.
		 * -------------------------------------------------------
		 * |		file mapping		        | <EOF>  |
		 * -------------------------------------------------------
		 * | Page ... | Page N-2 | Page N-1 |  Page N   | Beyond |
		 * ^--------------------------------^-----------|---------
		 * |				    |      Straddles     |
		 * ---------------------------------^-----------|--------|
		 */
		unsigned offset_into_page = offset & (PAGE_SIZE - 1);

		/*
		 * Skip the page if it is fully outside i_size, e.g. due to a
		 * truncate operation that is in progress. We must redirty the
		 * page so that reclaim stops reclaiming it. Otherwise
		 * xfs_vm_releasepage() is called on it and gets confused.
		 *
		 * Note that the end_index is unsigned long, it would overflow
		 * if the given offset is greater than 16TB on 32-bit system
		 * and if we do check the page is fully outside i_size or not
		 * via "if (page->index >= end_index + 1)" as "end_index + 1"
		 * will be evaluated to 0.  Hence this page will be redirtied
		 * and be written out repeatedly which would result in an
		 * infinite loop, the user program that perform this operation
		 * will hang.  Instead, we can verify this situation by checking
		 * if the page to write is totally beyond the i_size or if it's
		 * offset is just equal to the EOF.
		 */
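		/*
		 * Worked example of that overflow, with illustrative numbers:
		 * on a 32-bit system with 4k pages, an i_size just below 16TB
		 * gives end_index = 0xffffffff, so "end_index + 1" wraps to 0
		 * and a "page->index >= end_index + 1" test would hold for
		 * every page, redirtying them forever.
		 */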
		if (page->index > end_index ||
		    (page->index == end_index && offset_into_page == 0))
			goto redirty;

		/*
		 * The page straddles i_size.  It must be zeroed out on each
		 * and every writepage invocation because it may be mmapped.
		 * "A file is mapped in multiples of the page size.  For a file
		 * that is not a multiple of the page size, the remaining
		 * memory is zeroed when mapped, and writes to that region are
		 * not written out to the file."
		 */
		zero_user_segment(page, offset_into_page, PAGE_SIZE);

		/* Adjust the end_offset to the end of file */
		end_offset = offset;
	}

	return xfs_writepage_map(wpc, wbc, inode, page, end_offset);

redirty:
	redirty_page_for_writepage(wbc, page);
	unlock_page(page);
	return 0;
}

STATIC int
xfs_vm_writepage(
	struct page		*page,
	struct writeback_control *wbc)
{
	struct xfs_writepage_ctx wpc = {
		.io_type = XFS_IO_INVALID,
	};
	int			ret;

	ret = xfs_do_writepage(page, wbc, &wpc);
	if (wpc.ioend)
		ret = xfs_submit_ioend(wbc, wpc.ioend, ret);
	return ret;
}

STATIC int
xfs_vm_writepages(
	struct address_space	*mapping,
	struct writeback_control *wbc)
{
	struct xfs_writepage_ctx wpc = {
		.io_type = XFS_IO_INVALID,
	};
	int			ret;

	xfs_iflags_clear(XFS_I(mapping->host), XFS_ITRUNCATED);
	ret = write_cache_pages(mapping, wbc, xfs_do_writepage, &wpc);
	if (wpc.ioend)
		ret = xfs_submit_ioend(wbc, wpc.ioend, ret);
	return ret;
}

STATIC int
xfs_dax_writepages(
	struct address_space	*mapping,
	struct writeback_control *wbc)
{
	xfs_iflags_clear(XFS_I(mapping->host), XFS_ITRUNCATED);
	return dax_writeback_mapping_range(mapping,
			xfs_find_bdev_for_inode(mapping->host), wbc);
}

/*
 * Called to move a page into cleanable state - and from there
 * to be released. The page should already be clean. We always
 * have buffer heads in this call.
 *
 * Returns 1 if the page is ok to release, 0 otherwise.
 */
STATIC int
xfs_vm_releasepage(
	struct page		*page,
	gfp_t			gfp_mask)
{
	int			delalloc, unwritten;

	trace_xfs_releasepage(page->mapping->host, page, 0, 0);

	/*
	 * mm accommodates an old ext3 case where clean pages might not have had
	 * the dirty bit cleared. Thus, it can send actual dirty pages to
	 * ->releasepage() via shrink_active_list(). Conversely,
	 * block_invalidatepage() can send pages that are still marked dirty but
	 * otherwise have invalidated buffers.
	 *
	 * We want to release the latter to avoid unnecessary buildup of the
	 * LRU, so xfs_vm_invalidatepage() clears the page dirty flag on pages
	 * that are entirely invalidated and need to be released.  Hence the
	 * only time we should get dirty pages here is through
	 * shrink_active_list() and so we can simply skip those now.
	 *
	 * warn if we've left any lingering delalloc/unwritten buffers on clean
	 * or invalidated pages we are about to release.
	 */
	if (PageDirty(page))
		return 0;

	xfs_count_page_state(page, &delalloc, &unwritten);

	if (WARN_ON_ONCE(delalloc))
		return 0;
	if (WARN_ON_ONCE(unwritten))
		return 0;

	return try_to_free_buffers(page);
}

/*
 * If this is O_DIRECT or the mpage code calling tell them how large the mapping
 * is, so that we can avoid repeated get_blocks calls.
 *
 * If the mapping spans EOF, then we have to break the mapping up as the mapping
 * for blocks beyond EOF must be marked new so that sub block regions can be
 * correctly zeroed. We can't do this for mappings within EOF unless the mapping
 * was just allocated or is unwritten, otherwise the callers would overwrite
 * existing data with zeros. Hence we have to split the mapping into a range up
 * to and including EOF, and a second mapping for beyond EOF.
 */
static void
xfs_map_trim_size(
	struct inode		*inode,
	sector_t		iblock,
	struct buffer_head	*bh_result,
	struct xfs_bmbt_irec	*imap,
	xfs_off_t		offset,
	ssize_t			size)
{
	xfs_off_t		mapping_size;

	mapping_size = imap->br_startoff + imap->br_blockcount - iblock;
	mapping_size <<= inode->i_blkbits;

	ASSERT(mapping_size > 0);
	if (mapping_size > size)
		mapping_size = size;
	if (offset < i_size_read(inode) &&
	    (xfs_ufsize_t)offset + mapping_size >= i_size_read(inode)) {
		/* limit mapping to block that spans EOF */
		mapping_size = roundup_64(i_size_read(inode) - offset,
					  i_blocksize(inode));
	}
	if (mapping_size > LONG_MAX)
		mapping_size = LONG_MAX;

	bh_result->b_size = mapping_size;
}
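
/*
 * A worked example of the trimming above, with illustrative numbers: with
 * 4k blocks, an imap with br_startoff = 0 and br_blockcount = 16 queried
 * at iblock = 4 yields mapping_size = (0 + 16 - 4) << 12 = 49152 bytes,
 * which is then clamped to the requested size and, when the request
 * crosses i_size, rounded to the block that spans EOF.
 */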

static int
xfs_get_blocks(
	struct inode		*inode,
	sector_t		iblock,
	struct buffer_head	*bh_result,
	int			create)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	xfs_fileoff_t		offset_fsb, end_fsb;
	int			error = 0;
	int			lockmode = 0;
	struct xfs_bmbt_irec	imap;
	int			nimaps = 1;
	xfs_off_t		offset;
	ssize_t			size;

	BUG_ON(create);

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	offset = (xfs_off_t)iblock << inode->i_blkbits;
	ASSERT(bh_result->b_size >= i_blocksize(inode));
	size = bh_result->b_size;

	if (offset >= i_size_read(inode))
		return 0;

	/*
	 * Direct I/O is usually done on preallocated files, so try getting
	 * a block mapping without an exclusive lock first.
	 */
	lockmode = xfs_ilock_data_map_shared(ip);

	ASSERT(offset <= mp->m_super->s_maxbytes);
	if (offset > mp->m_super->s_maxbytes - size)
		size = mp->m_super->s_maxbytes - offset;
	end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + size);
	offset_fsb = XFS_B_TO_FSBT(mp, offset);

	error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb, &imap,
			&nimaps, 0);
	if (error)
		goto out_unlock;
	if (!nimaps) {
		trace_xfs_get_blocks_notfound(ip, offset, size);
		goto out_unlock;
	}

	trace_xfs_get_blocks_found(ip, offset, size,
		imap.br_state == XFS_EXT_UNWRITTEN ?
			XFS_IO_UNWRITTEN : XFS_IO_OVERWRITE, &imap);
	xfs_iunlock(ip, lockmode);

	/* trim mapping down to size requested */
	xfs_map_trim_size(inode, iblock, bh_result, &imap, offset, size);

	/*
	 * For unwritten extents do not report a disk address in the buffered
	 * read case (treat as if we're reading into a hole).
	 */
	if (xfs_bmap_is_real_extent(&imap))
		xfs_map_buffer(inode, bh_result, &imap, offset);

	/*
	 * If this is a realtime file, data may be on a different device
	 * to that pointed to from the buffer_head b_bdev currently.
	 */
	bh_result->b_bdev = xfs_find_bdev_for_inode(inode);
	return 0;

out_unlock:
	xfs_iunlock(ip, lockmode);
	return error;
}

STATIC sector_t
xfs_vm_bmap(
	struct address_space	*mapping,
	sector_t		block)
{
	struct xfs_inode	*ip = XFS_I(mapping->host);

	trace_xfs_vm_bmap(ip);

	/*
	 * The swap code (ab-)uses ->bmap to get a block mapping and then
	 * bypasses the file system for actual I/O.  We really can't allow
	 * that on reflink inodes, so we have to skip out here.  And yes,
	 * 0 is the magic code for a bmap error.
	 *
	 * Since we don't pass back blockdev info, we can't return bmap
	 * information for rt files either.
	 */
	if (xfs_is_reflink_inode(ip) || XFS_IS_REALTIME_INODE(ip))
		return 0;
	return iomap_bmap(mapping, block, &xfs_iomap_ops);
}

STATIC int
xfs_vm_readpage(
	struct file		*unused,
	struct page		*page)
{
	trace_xfs_vm_readpage(page->mapping->host, 1);
	if (i_blocksize(page->mapping->host) == PAGE_SIZE)
		return iomap_readpage(page, &xfs_iomap_ops);
	return mpage_readpage(page, xfs_get_blocks);
}

STATIC int
xfs_vm_readpages(
	struct file		*unused,
	struct address_space	*mapping,
	struct list_head	*pages,
	unsigned		nr_pages)
{
	trace_xfs_vm_readpages(mapping->host, nr_pages);
	if (i_blocksize(mapping->host) == PAGE_SIZE)
		return iomap_readpages(mapping, pages, nr_pages, &xfs_iomap_ops);
	return mpage_readpages(mapping, pages, nr_pages, xfs_get_blocks);
}

/*
 * This is basically a copy of __set_page_dirty_buffers() with one
 * small tweak: buffers beyond EOF do not get marked dirty. If we mark them
 * dirty, we'll never be able to clean them because we don't write buffers
 * beyond EOF, and that means we can't invalidate pages that span EOF
 * that have been marked dirty. Further, the dirty state can leak into
 * the file interior if the file is extended, resulting in all sorts of
 * bad things happening as the state does not match the underlying data.
 *
 * XXX: this really indicates that bufferheads in XFS need to die. Warts like
 * this only exist because of bufferheads and how the generic code manages them.
 */
STATIC int
xfs_vm_set_page_dirty(
	struct page		*page)
{
	struct address_space	*mapping = page->mapping;
	struct inode		*inode = mapping->host;
	loff_t			end_offset;
	loff_t			offset;
	int			newly_dirty;

	if (unlikely(!mapping))
		return !TestSetPageDirty(page);

	end_offset = i_size_read(inode);
	offset = page_offset(page);

	spin_lock(&mapping->private_lock);
	if (page_has_buffers(page)) {
		struct buffer_head *head = page_buffers(page);
		struct buffer_head *bh = head;

		do {
			if (offset < end_offset)
				set_buffer_dirty(bh);
			bh = bh->b_this_page;
			offset += i_blocksize(inode);
		} while (bh != head);
	}
	/*
	 * Lock out page->mem_cgroup migration to keep PageDirty
	 * synchronized with per-memcg dirty page counters.
	 */
	lock_page_memcg(page);
	newly_dirty = !TestSetPageDirty(page);
	spin_unlock(&mapping->private_lock);

	if (newly_dirty)
		__set_page_dirty(page, mapping, 1);
	unlock_page_memcg(page);
	if (newly_dirty)
		__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
	return newly_dirty;
}

static int
xfs_iomap_swapfile_activate(
	struct swap_info_struct		*sis,
	struct file			*swap_file,
	sector_t			*span)
{
	sis->bdev = xfs_find_bdev_for_inode(file_inode(swap_file));
	return iomap_swapfile_activate(sis, swap_file, span, &xfs_iomap_ops);
}

const struct address_space_operations xfs_address_space_operations = {
	.readpage		= xfs_vm_readpage,
	.readpages		= xfs_vm_readpages,
	.writepage		= xfs_vm_writepage,
	.writepages		= xfs_vm_writepages,
	.set_page_dirty		= xfs_vm_set_page_dirty,
	.releasepage		= xfs_vm_releasepage,
	.invalidatepage		= xfs_vm_invalidatepage,
	.bmap			= xfs_vm_bmap,
	.direct_IO		= noop_direct_IO,
	.migratepage		= buffer_migrate_page,
	.is_partially_uptodate  = block_is_partially_uptodate,
	.error_remove_page	= generic_error_remove_page,
	.swap_activate		= xfs_iomap_swapfile_activate,
};

const struct address_space_operations xfs_dax_aops = {
	.writepages		= xfs_dax_writepages,
	.direct_IO		= noop_direct_IO,
	.set_page_dirty		= noop_set_page_dirty,
	.invalidatepage		= noop_invalidatepage,
	.swap_activate		= xfs_iomap_swapfile_activate,
};