/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_inode_item.h"
#include "xfs_alloc.h"
#include "xfs_error.h"
#include "xfs_iomap.h"
#include "xfs_trace.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_bmap_btree.h"
#include "xfs_reflink.h"
#include <linux/gfp.h>
#include <linux/mpage.h>
#include <linux/pagevec.h>
#include <linux/writeback.h>

/*
 * Structure owned by ->writepages and passed to each ->writepage call.
 */
struct xfs_writepage_ctx {
	struct xfs_bmbt_irec    imap;
	bool			imap_valid;
	unsigned int		io_type;
	struct xfs_ioend	*ioend;
	sector_t		last_block;
};

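/*
 * Report whether any buffer_head on @page is still in delalloc or
 * unwritten state.  Used by xfs_vm_releasepage() below to warn about
 * pages released with allocation work still pending.
 */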
void
xfs_count_page_state(
	struct page		*page,
	int			*delalloc,
	int			*unwritten)
{
	struct buffer_head	*bh, *head;

	*delalloc = *unwritten = 0;

	bh = head = page_buffers(page);
	do {
		if (buffer_unwritten(bh))
			(*unwritten) = 1;
		else if (buffer_delay(bh))
			(*delalloc) = 1;
	} while ((bh = bh->b_this_page) != head);
}

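/*
 * Return the block device backing this inode's data: the realtime device
 * for realtime inodes, otherwise the main data device.
 */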
struct block_device *
xfs_find_bdev_for_inode(
	struct inode		*inode)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;

	if (XFS_IS_REALTIME_INODE(ip))
		return mp->m_rtdev_targp->bt_bdev;
	else
		return mp->m_ddev_targp->bt_bdev;
}

struct dax_device *
xfs_find_daxdev_for_inode(
	struct inode		*inode)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;

	if (XFS_IS_REALTIME_INODE(ip))
		return mp->m_rtdev_targp->bt_daxdev;
	else
		return mp->m_ddev_targp->bt_daxdev;
}

/*
 * We're now finished for good with this page.  Update the page state via the
 * associated buffer_heads, paying attention to the start and end offsets that
 * we need to process on the page.
 *
 * Note that we open code the action in end_buffer_async_write here so that we
 * only have to iterate over the buffers attached to the page once.  This is
 * not only more efficient, but also ensures that we only call
 * end_page_writeback at the end of the iteration, and thus avoid the pitfall
 * of having the page and buffers potentially freed after every call to
 * end_buffer_async_write.
 */
static void
xfs_finish_page_writeback(
	struct inode		*inode,
	struct bio_vec		*bvec,
	int			error)
{
	struct buffer_head	*head = page_buffers(bvec->bv_page), *bh = head;
	bool			busy = false;
	unsigned int		off = 0;
	unsigned long		flags;

	ASSERT(bvec->bv_offset < PAGE_SIZE);
	ASSERT((bvec->bv_offset & (i_blocksize(inode) - 1)) == 0);
	ASSERT(bvec->bv_offset + bvec->bv_len <= PAGE_SIZE);
	ASSERT((bvec->bv_len & (i_blocksize(inode) - 1)) == 0);

	local_irq_save(flags);
	bit_spin_lock(BH_Uptodate_Lock, &head->b_state);
	do {
		if (off >= bvec->bv_offset &&
		    off < bvec->bv_offset + bvec->bv_len) {
			ASSERT(buffer_async_write(bh));
			ASSERT(bh->b_end_io == NULL);

			if (error) {
				mark_buffer_write_io_error(bh);
				clear_buffer_uptodate(bh);
				SetPageError(bvec->bv_page);
			} else {
				set_buffer_uptodate(bh);
			}
			clear_buffer_async_write(bh);
			unlock_buffer(bh);
		} else if (buffer_async_write(bh)) {
			ASSERT(buffer_locked(bh));
			busy = true;
		}
		off += bh->b_size;
	} while ((bh = bh->b_this_page) != head);
	bit_spin_unlock(BH_Uptodate_Lock, &head->b_state);
	local_irq_restore(flags);

	if (!busy)
		end_page_writeback(bvec->bv_page);
}

/*
 * We're now finished for good with this ioend structure.  Update the page
 * state, release holds on bios, and finally free up memory.  Do not use the
 * ioend after this.
 */
STATIC void
xfs_destroy_ioend(
	struct xfs_ioend	*ioend,
	int			error)
{
	struct inode		*inode = ioend->io_inode;
	struct bio		*bio = &ioend->io_inline_bio;
	struct bio		*last = ioend->io_bio, *next;
	u64			start = bio->bi_iter.bi_sector;
	bool			quiet = bio_flagged(bio, BIO_QUIET);

	for (bio = &ioend->io_inline_bio; bio; bio = next) {
		struct bio_vec	*bvec;
		int		i;

		/*
		 * For the last bio, bi_private points to the ioend, so we
		 * need to explicitly end the iteration here.
		 */
		if (bio == last)
			next = NULL;
		else
			next = bio->bi_private;

		/* walk each page on bio, ending page IO on them */
		bio_for_each_segment_all(bvec, bio, i)
			xfs_finish_page_writeback(inode, bvec, error);

		bio_put(bio);
	}

	if (unlikely(error && !quiet)) {
		xfs_err_ratelimited(XFS_I(inode)->i_mount,
			"writeback error on sector %llu", start);
	}
}

/*
 * Fast and loose check if this write could update the on-disk inode size.
 */
static inline bool xfs_ioend_is_append(struct xfs_ioend *ioend)
{
	return ioend->io_offset + ioend->io_size >
		XFS_I(ioend->io_inode)->i_d.di_size;
}

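/*
 * Reserve a transaction up front for the on-disk file size update that
 * may be needed at I/O completion time.  It is carried to the completion
 * thread in ioend->io_append_trans, so freeze protection and the
 * PF_MEMALLOC_NOFS state must be handed over as well, as done below.
 */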
STATIC int
xfs_setfilesize_trans_alloc(
	struct xfs_ioend	*ioend)
{
	struct xfs_mount	*mp = XFS_I(ioend->io_inode)->i_mount;
	struct xfs_trans	*tp;
	int			error;

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_fsyncts, 0, 0,
				XFS_TRANS_NOFS, &tp);
	if (error)
		return error;

	ioend->io_append_trans = tp;

	/*
	 * We may pass freeze protection with a transaction.  So tell lockdep
	 * we released it.
	 */
	__sb_writers_release(ioend->io_inode->i_sb, SB_FREEZE_FS);
	/*
	 * We hand off the transaction to the completion thread now, so
	 * clear the flag here.
	 */
	current_restore_flags_nested(&tp->t_pflags, PF_MEMALLOC_NOFS);
	return 0;
}

/*
 * Update on-disk file size now that data has been written to disk.
 */
STATIC int
__xfs_setfilesize(
	struct xfs_inode	*ip,
	struct xfs_trans	*tp,
	xfs_off_t		offset,
	size_t			size)
{
	xfs_fsize_t		isize;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	isize = xfs_new_eof(ip, offset + size);
	if (!isize) {
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
		xfs_trans_cancel(tp);
		return 0;
	}

	trace_xfs_setfilesize(ip, offset, size);

	ip->i_d.di_size = isize;
	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);

	return xfs_trans_commit(tp);
}

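/*
 * Variant of the size update that allocates its own transaction, for
 * callers that did not reserve one at I/O submission time.
 */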
int
xfs_setfilesize(
	struct xfs_inode	*ip,
	xfs_off_t		offset,
	size_t			size)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp;
	int			error;

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_fsyncts, 0, 0, 0, &tp);
	if (error)
		return error;

	return __xfs_setfilesize(ip, tp, offset, size);
}

STATIC int
xfs_setfilesize_ioend(
	struct xfs_ioend	*ioend,
	int			error)
{
	struct xfs_inode	*ip = XFS_I(ioend->io_inode);
	struct xfs_trans	*tp = ioend->io_append_trans;

	/*
	 * The transaction may have been allocated in the I/O submission thread,
	 * thus we need to mark ourselves as being in a transaction manually.
	 * Similarly for freeze protection.
	 */
	current_set_flags_nested(&tp->t_pflags, PF_MEMALLOC_NOFS);
	__sb_writers_acquired(VFS_I(ip)->i_sb, SB_FREEZE_FS);

	/* we abort the update if there was an IO error */
	if (error) {
		xfs_trans_cancel(tp);
		return error;
	}

	return __xfs_setfilesize(ip, tp, ioend->io_offset, ioend->io_size);
}

/*
 * IO write completion.
 */
STATIC void
xfs_end_io(
	struct work_struct *work)
{
	struct xfs_ioend	*ioend =
		container_of(work, struct xfs_ioend, io_work);
	struct xfs_inode	*ip = XFS_I(ioend->io_inode);
	xfs_off_t		offset = ioend->io_offset;
	size_t			size = ioend->io_size;
	int			error;

	/*
	 * Just clean up the in-memory structures if the fs has been shut down.
	 */
	if (XFS_FORCED_SHUTDOWN(ip->i_mount)) {
		error = -EIO;
		goto done;
	}

	/*
	 * Clean up any COW blocks on an I/O error.
	 */
	error = blk_status_to_errno(ioend->io_bio->bi_status);
	if (unlikely(error)) {
		switch (ioend->io_type) {
		case XFS_IO_COW:
			xfs_reflink_cancel_cow_range(ip, offset, size, true);
			break;
		}

		goto done;
	}

	/*
	 * Success: commit the COW or unwritten blocks if needed.
	 */
	switch (ioend->io_type) {
	case XFS_IO_COW:
		error = xfs_reflink_end_cow(ip, offset, size);
		break;
	case XFS_IO_UNWRITTEN:
		/* writeback should never update isize */
		error = xfs_iomap_write_unwritten(ip, offset, size, false);
		break;
	default:
		ASSERT(!xfs_ioend_is_append(ioend) || ioend->io_append_trans);
		break;
	}

done:
	if (ioend->io_append_trans)
		error = xfs_setfilesize_ioend(ioend, error);
	xfs_destroy_ioend(ioend, error);
}

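/*
 * Bio completion handler.  Completion work that needs a transaction or
 * extent conversion cannot run here (this may be called in interrupt
 * context), so it is punted to a workqueue; trivial completions are
 * finished inline.
 */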
STATIC void
xfs_end_bio(
	struct bio		*bio)
{
	struct xfs_ioend	*ioend = bio->bi_private;
	struct xfs_mount	*mp = XFS_I(ioend->io_inode)->i_mount;

	if (ioend->io_type == XFS_IO_UNWRITTEN || ioend->io_type == XFS_IO_COW)
		queue_work(mp->m_unwritten_workqueue, &ioend->io_work);
	else if (ioend->io_append_trans)
		queue_work(mp->m_data_workqueue, &ioend->io_work);
	else
		xfs_destroy_ioend(ioend, blk_status_to_errno(bio->bi_status));
}

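/*
 * Look up the extent that backs @offset for writeback.  For delalloc
 * regions with no blocks allocated yet, this also performs the real
 * allocation via xfs_iomap_write_allocate().
 */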
STATIC int
xfs_map_blocks(
	struct inode		*inode,
	loff_t			offset,
	struct xfs_bmbt_irec	*imap,
	int			type)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	ssize_t			count = i_blocksize(inode);
	xfs_fileoff_t		offset_fsb, end_fsb;
	int			error = 0;
	int			bmapi_flags = XFS_BMAPI_ENTIRE;
	int			nimaps = 1;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	/*
	 * Truncate can race with writeback since writeback doesn't take the
	 * iolock and truncate decreases the file size before it starts
	 * truncating the pages between new_size and old_size.  Therefore, we
	 * can end up in the situation where writeback gets a CoW fork mapping
	 * but the truncate makes the mapping invalid and we end up in here
	 * trying to get a new mapping.  Bail out here so that we simply never
	 * get a valid mapping and so we drop the write altogether.  The page
	 * truncation will kill the contents anyway.
	 */
	if (type == XFS_IO_COW && offset > i_size_read(inode))
		return 0;

	ASSERT(type != XFS_IO_COW);
	if (type == XFS_IO_UNWRITTEN)
		bmapi_flags |= XFS_BMAPI_IGSTATE;

	xfs_ilock(ip, XFS_ILOCK_SHARED);
	ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE ||
	       (ip->i_df.if_flags & XFS_IFEXTENTS));
	ASSERT(offset <= mp->m_super->s_maxbytes);

	if (offset > mp->m_super->s_maxbytes - count)
		count = mp->m_super->s_maxbytes - offset;
	end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + count);
	offset_fsb = XFS_B_TO_FSBT(mp, offset);
	error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb,
				imap, &nimaps, bmapi_flags);
	/*
	 * Truncate an overwrite extent if there's a pending CoW
	 * reservation before the end of this extent.  This forces us
	 * to come back to writepage to take care of the CoW.
	 */
	if (nimaps && type == XFS_IO_OVERWRITE)
		xfs_reflink_trim_irec_to_next_cow(ip, offset_fsb, imap);
	xfs_iunlock(ip, XFS_ILOCK_SHARED);

	if (error)
		return error;

	if (type == XFS_IO_DELALLOC &&
	    (!nimaps || isnullstartblock(imap->br_startblock))) {
		error = xfs_iomap_write_allocate(ip, XFS_DATA_FORK, offset,
				imap);
		if (!error)
			trace_xfs_map_blocks_alloc(ip, offset, count, type, imap);
		return error;
	}

#ifdef DEBUG
	if (type == XFS_IO_UNWRITTEN) {
		ASSERT(nimaps);
		ASSERT(imap->br_startblock != HOLESTARTBLOCK);
		ASSERT(imap->br_startblock != DELAYSTARTBLOCK);
	}
#endif
	if (nimaps)
		trace_xfs_map_blocks_found(ip, offset, count, type, imap);
	return 0;
}

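/*
 * Check whether the (EOF-trimmed) cached mapping still covers the block
 * that contains @offset.
 */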
STATIC bool
xfs_imap_valid(
	struct inode		*inode,
	struct xfs_bmbt_irec	*imap,
	xfs_off_t		offset)
{
	offset >>= inode->i_blkbits;

	/*
	 * We have to make sure the cached mapping is within EOF to protect
	 * against eofblocks trimming on file release leaving us with a stale
	 * mapping. Otherwise, a page for a subsequent file extending buffered
	 * write could get picked up by this writeback cycle and written to the
	 * wrong blocks.
	 *
	 * Note that what we really want here is a generic mapping invalidation
	 * mechanism to protect us from arbitrary extent modifying contexts, not
	 * just eofblocks.
	 */
	xfs_trim_extent_eof(imap, XFS_I(inode));

	return offset >= imap->br_startoff &&
		offset < imap->br_startoff + imap->br_blockcount;
}

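/*
 * Flag a buffer for async write completion handling.  The buffer must be
 * locked and mapped, with real blocks allocated behind it (neither
 * delalloc nor unwritten), as the ASSERTs below check.
 */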
STATIC void
xfs_start_buffer_writeback(
	struct buffer_head	*bh)
{
	ASSERT(buffer_mapped(bh));
	ASSERT(buffer_locked(bh));
	ASSERT(!buffer_delay(bh));
	ASSERT(!buffer_unwritten(bh));

	bh->b_end_io = NULL;
	set_buffer_async_write(bh);
	set_buffer_uptodate(bh);
	clear_buffer_dirty(bh);
}

STATIC void
xfs_start_page_writeback(
	struct page		*page,
	int			clear_dirty)
{
	ASSERT(PageLocked(page));
	ASSERT(!PageWriteback(page));

	/*
	 * if the page was not fully cleaned, we need to ensure that the higher
	 * layers come back to it correctly. That means we need to keep the page
	 * dirty, and for WB_SYNC_ALL writeback we need to ensure the
	 * PAGECACHE_TAG_TOWRITE index mark is not removed so another attempt to
	 * write this page in this writeback sweep will be made.
	 */
	if (clear_dirty) {
		clear_page_dirty_for_io(page);
		set_page_writeback(page);
	} else
		set_page_writeback_keepwrite(page);

	unlock_page(page);
}

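/*
 * Add a buffer to the current bio.  bio_add_page() returns the number of
 * bytes actually added, so a short return here means the bio is full and
 * the caller must chain a new one.
 */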
static inline int xfs_bio_add_buffer(struct bio *bio, struct buffer_head *bh)
{
	return bio_add_page(bio, bh->b_page, bh->b_size, bh_offset(bh));
}

/*
 * Submit the bio for an ioend. We are passed an ioend with a bio attached to
 * it, and we submit that bio. The ioend may be used for multiple bio
 * submissions, so we only want to allocate an append transaction for the ioend
 * once. In the case of multiple bio submission, each bio will take an IO
 * reference to the ioend to ensure that the ioend completion is only done once
 * all bios have been submitted and the ioend is really done.
 *
 * If @status is non-zero, it means that we have a situation where some part of
 * the submission process has failed after we have marked pages for writeback
 * and unlocked them. In this situation, we need to fail the bio and ioend
 * rather than submit it to IO. This typically only happens on a filesystem
 * shutdown.
 */
STATIC int
xfs_submit_ioend(
	struct writeback_control *wbc,
	struct xfs_ioend	*ioend,
	int			status)
{
	/* Convert CoW extents to regular */
	if (!status && ioend->io_type == XFS_IO_COW) {
		status = xfs_reflink_convert_cow(XFS_I(ioend->io_inode),
				ioend->io_offset, ioend->io_size);
	}

	/* Reserve log space if we might write beyond the on-disk inode size. */
	if (!status &&
	    ioend->io_type != XFS_IO_UNWRITTEN &&
	    xfs_ioend_is_append(ioend) &&
	    !ioend->io_append_trans)
		status = xfs_setfilesize_trans_alloc(ioend);

	ioend->io_bio->bi_private = ioend;
	ioend->io_bio->bi_end_io = xfs_end_bio;
	ioend->io_bio->bi_opf = REQ_OP_WRITE | wbc_to_write_flags(wbc);

	/*
	 * If we are failing the IO now, just mark the ioend with an
	 * error and finish it. This will run IO completion immediately
	 * as there is only one reference to the ioend at this point in
	 * time.
	 */
	if (status) {
		ioend->io_bio->bi_status = errno_to_blk_status(status);
		bio_endio(ioend->io_bio);
		return status;
	}

	ioend->io_bio->bi_write_hint = ioend->io_inode->i_write_hint;
	submit_bio(ioend->io_bio);
	return 0;
}

static void
xfs_init_bio_from_bh(
	struct bio		*bio,
	struct buffer_head	*bh)
{
	bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9);
	bio_set_dev(bio, bh->b_bdev);
}

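/*
 * Allocate an ioend for writeback starting at @offset.  The ioend is
 * embedded in front of its first bio (io_inline_bio), so one allocation
 * from xfs_ioend_bioset produces both.
 */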
static struct xfs_ioend *
xfs_alloc_ioend(
	struct inode		*inode,
	unsigned int		type,
	xfs_off_t		offset,
	struct buffer_head	*bh)
{
	struct xfs_ioend	*ioend;
	struct bio		*bio;

	bio = bio_alloc_bioset(GFP_NOFS, BIO_MAX_PAGES, &xfs_ioend_bioset);
	xfs_init_bio_from_bh(bio, bh);

	ioend = container_of(bio, struct xfs_ioend, io_inline_bio);
	INIT_LIST_HEAD(&ioend->io_list);
	ioend->io_type = type;
	ioend->io_inode = inode;
	ioend->io_size = 0;
	ioend->io_offset = offset;
	INIT_WORK(&ioend->io_work, xfs_end_io);
	ioend->io_append_trans = NULL;
	ioend->io_bio = bio;
	return ioend;
}

/*
 * Allocate a new bio, and chain the old bio to the new one.
 *
 * Note that we have to perform the chaining in this unintuitive order
 * so that the bi_private linkage is set up in the right direction for the
 * traversal in xfs_destroy_ioend().
 */
static void
xfs_chain_bio(
	struct xfs_ioend	*ioend,
	struct writeback_control *wbc,
	struct buffer_head	*bh)
{
	struct bio *new;

	new = bio_alloc(GFP_NOFS, BIO_MAX_PAGES);
	xfs_init_bio_from_bh(new, bh);

	bio_chain(ioend->io_bio, new);
	bio_get(ioend->io_bio);		/* for xfs_destroy_ioend */
	ioend->io_bio->bi_opf = REQ_OP_WRITE | wbc_to_write_flags(wbc);
	ioend->io_bio->bi_write_hint = ioend->io_inode->i_write_hint;
	submit_bio(ioend->io_bio);
	ioend->io_bio = new;
}

/*
 * Test to see if we've been building up a completion structure for
 * earlier buffers -- if so, we try to append to this ioend if we
 * can, otherwise we finish off any current ioend and start another.
 * Finished ioends are added to @iolist so that the caller can submit
 * them once it has finished processing the dirty page.
 */
STATIC void
xfs_add_to_ioend(
	struct inode		*inode,
	struct buffer_head	*bh,
	xfs_off_t		offset,
	struct xfs_writepage_ctx *wpc,
	struct writeback_control *wbc,
	struct list_head	*iolist)
{
	if (!wpc->ioend || wpc->io_type != wpc->ioend->io_type ||
	    bh->b_blocknr != wpc->last_block + 1 ||
	    offset != wpc->ioend->io_offset + wpc->ioend->io_size) {
		if (wpc->ioend)
			list_add(&wpc->ioend->io_list, iolist);
		wpc->ioend = xfs_alloc_ioend(inode, wpc->io_type, offset, bh);
	}

	/*
	 * If the buffer doesn't fit into the bio we need to allocate a new
	 * one.  This shouldn't happen more than once for a given buffer.
	 */
	while (xfs_bio_add_buffer(wpc->ioend->io_bio, bh) != bh->b_size)
		xfs_chain_bio(wpc->ioend, wbc, bh);

	wpc->ioend->io_size += bh->b_size;
	wpc->last_block = bh->b_blocknr;
	xfs_start_buffer_writeback(bh);
}

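/*
 * Map @bh to its backing disk block: convert the extent's start from
 * 512-byte basic blocks (xfs_daddr_t) to units of the inode's block size
 * and add the offset of @offset within the extent.
 */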
STATIC void
xfs_map_buffer(
	struct inode		*inode,
	struct buffer_head	*bh,
	struct xfs_bmbt_irec	*imap,
	xfs_off_t		offset)
{
	sector_t		bn;
	struct xfs_mount	*m = XFS_I(inode)->i_mount;
	xfs_off_t		iomap_offset = XFS_FSB_TO_B(m, imap->br_startoff);
	xfs_daddr_t		iomap_bn = xfs_fsb_to_db(XFS_I(inode), imap->br_startblock);

	ASSERT(imap->br_startblock != HOLESTARTBLOCK);
	ASSERT(imap->br_startblock != DELAYSTARTBLOCK);

	bn = (iomap_bn >> (inode->i_blkbits - BBSHIFT)) +
	      ((offset - iomap_offset) >> inode->i_blkbits);

	ASSERT(bn || XFS_IS_REALTIME_INODE(XFS_I(inode)));

	bh->b_blocknr = bn;
	set_buffer_mapped(bh);
}

STATIC void
xfs_map_at_offset(
	struct inode		*inode,
	struct buffer_head	*bh,
	struct xfs_bmbt_irec	*imap,
	xfs_off_t		offset)
{
	ASSERT(imap->br_startblock != HOLESTARTBLOCK);
	ASSERT(imap->br_startblock != DELAYSTARTBLOCK);

	xfs_map_buffer(inode, bh, imap, offset);
	set_buffer_mapped(bh);
	clear_buffer_delay(bh);
	clear_buffer_unwritten(bh);
}

/*
 * Test if a given page contains at least one buffer of a given @type.
 * If @check_all_buffers is true, then we walk all the buffers in the page to
 * try to find one of the type passed in. If it is not set, then the caller only
 * needs to check the first buffer on the page for a match.
 */
STATIC bool
xfs_check_page_type(
	struct page		*page,
	unsigned int		type,
	bool			check_all_buffers)
{
	struct buffer_head	*bh;
	struct buffer_head	*head;

	if (PageWriteback(page))
		return false;
	if (!page->mapping)
		return false;
	if (!page_has_buffers(page))
		return false;

	bh = head = page_buffers(page);
	do {
		if (buffer_unwritten(bh)) {
			if (type == XFS_IO_UNWRITTEN)
				return true;
		} else if (buffer_delay(bh)) {
			if (type == XFS_IO_DELALLOC)
				return true;
		} else if (buffer_dirty(bh) && buffer_mapped(bh)) {
			if (type == XFS_IO_OVERWRITE)
				return true;
		}

		/* If we are only checking the first buffer, we are done now. */
		if (!check_all_buffers)
			break;
	} while ((bh = bh->b_this_page) != head);

	return false;
}

STATIC void
xfs_vm_invalidatepage(
	struct page		*page,
	unsigned int		offset,
	unsigned int		length)
{
	trace_xfs_invalidatepage(page->mapping->host, page, offset,
				 length);

	/*
	 * If we are invalidating the entire page, clear the dirty state from it
	 * so that we can check for attempts to release dirty cached pages in
	 * xfs_vm_releasepage().
	 */
	if (offset == 0 && length >= PAGE_SIZE)
		cancel_dirty_page(page);
	block_invalidatepage(page, offset, length);
}

/*
 * If the page has delalloc buffers on it, we need to punch them out before we
 * invalidate the page. If we don't, we leave a stale delalloc mapping on the
 * inode that can trip a BUG() in xfs_get_blocks() later on if a direct IO read
 * is done on that same region - the delalloc extent is returned when none is
 * supposed to be there.
 *
 * We prevent this by truncating away the delalloc regions on the page before
 * invalidating it. Because they are delalloc, we can do this without needing a
 * transaction. Indeed - if we get ENOSPC errors, we have to be able to do this
 * truncation without a transaction as there is no space left for block
 * reservation (typically why we see an ENOSPC in writeback).
 *
 * This is not a performance critical path, so for now just do the punching a
 * buffer head at a time.
 */
STATIC void
xfs_aops_discard_page(
	struct page		*page)
{
	struct inode		*inode = page->mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	struct buffer_head	*bh, *head;
	loff_t			offset = page_offset(page);

	if (!xfs_check_page_type(page, XFS_IO_DELALLOC, true))
		goto out_invalidate;

	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
		goto out_invalidate;

	xfs_alert(ip->i_mount,
		"page discard on page "PTR_FMT", inode 0x%llx, offset %llu.",
			page, ip->i_ino, offset);

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	bh = head = page_buffers(page);
	do {
		int		error;
		xfs_fileoff_t	start_fsb;

		if (!buffer_delay(bh))
			goto next_buffer;

		start_fsb = XFS_B_TO_FSBT(ip->i_mount, offset);
		error = xfs_bmap_punch_delalloc_range(ip, start_fsb, 1);
		if (error) {
			/* something screwed, just bail */
			if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) {
				xfs_alert(ip->i_mount,
			"page discard unable to remove delalloc mapping.");
			}
			break;
		}
next_buffer:
		offset += i_blocksize(inode);

	} while ((bh = bh->b_this_page) != head);

	xfs_iunlock(ip, XFS_ILOCK_EXCL);
out_invalidate:
	xfs_vm_invalidatepage(page, 0, PAGE_SIZE);
	return;
}

static int
xfs_map_cow(
	struct xfs_writepage_ctx *wpc,
	struct inode		*inode,
	loff_t			offset,
	unsigned int		*new_type)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_bmbt_irec	imap;
	bool			is_cow = false;
	int			error;

	/*
	 * If we already have a valid COW mapping keep using it.
	 */
	if (wpc->io_type == XFS_IO_COW) {
		wpc->imap_valid = xfs_imap_valid(inode, &wpc->imap, offset);
		if (wpc->imap_valid) {
			*new_type = XFS_IO_COW;
			return 0;
		}
	}

	/*
	 * Else we need to check if there is a COW mapping at this offset.
	 */
	xfs_ilock(ip, XFS_ILOCK_SHARED);
	is_cow = xfs_reflink_find_cow_mapping(ip, offset, &imap);
	xfs_iunlock(ip, XFS_ILOCK_SHARED);

	if (!is_cow)
		return 0;

	/*
	 * And if the COW mapping has a delayed extent here we need to
	 * allocate real space for it now.
	 */
	if (isnullstartblock(imap.br_startblock)) {
		error = xfs_iomap_write_allocate(ip, XFS_COW_FORK, offset,
				&imap);
		if (error)
			return error;
	}

	wpc->io_type = *new_type = XFS_IO_COW;
	wpc->imap_valid = true;
	wpc->imap = imap;
	return 0;
}

/*
 * We implement an immediate ioend submission policy here to avoid needing to
 * chain multiple ioends and hence nest mempool allocations which can violate
 * forward progress guarantees we need to provide. The current ioend we are
 * adding buffers to is cached on the writepage context, and if the new buffer
 * does not append to the cached ioend it will create a new ioend and cache that
 * instead.
 *
 * If a new ioend is created and cached, the old ioend is returned and queued
 * locally for submission once the entire page is processed or an error has been
 * detected.  While ioends are submitted immediately after they are completed,
 * batching optimisations are provided by higher level block plugging.
 *
 * At the end of a writeback pass, there will be a cached ioend remaining on the
 * writepage context that the caller will need to submit.
 */
static int
xfs_writepage_map(
	struct xfs_writepage_ctx *wpc,
	struct writeback_control *wbc,
	struct inode		*inode,
	struct page		*page,
	uint64_t		end_offset)
{
	LIST_HEAD(submit_list);
	struct xfs_ioend	*ioend, *next;
	struct buffer_head	*bh, *head;
	ssize_t			len = i_blocksize(inode);
	uint64_t		offset;
	int			error = 0;
	int			count = 0;
	int			uptodate = 1;
	unsigned int		new_type;

	bh = head = page_buffers(page);
	offset = page_offset(page);
	do {
		if (offset >= end_offset)
			break;
		if (!buffer_uptodate(bh))
			uptodate = 0;

		/*
		 * set_page_dirty dirties all buffers in a page, independent
		 * of their state.  The dirty state however is entirely
		 * meaningless for holes (!mapped && uptodate), so skip
		 * buffers covering holes here.
		 */
		if (!buffer_mapped(bh) && buffer_uptodate(bh)) {
			wpc->imap_valid = false;
			continue;
		}

		if (buffer_unwritten(bh))
			new_type = XFS_IO_UNWRITTEN;
		else if (buffer_delay(bh))
			new_type = XFS_IO_DELALLOC;
		else if (buffer_uptodate(bh))
			new_type = XFS_IO_OVERWRITE;
		else {
			if (PageUptodate(page))
				ASSERT(buffer_mapped(bh));
			/*
			 * This buffer is not uptodate and will not be
			 * written to disk.  Ensure that we will put any
			 * subsequent writeable buffers into a new
			 * ioend.
			 */
			wpc->imap_valid = false;
			continue;
		}

		if (xfs_is_reflink_inode(XFS_I(inode))) {
			error = xfs_map_cow(wpc, inode, offset, &new_type);
			if (error)
				goto out;
		}

		if (wpc->io_type != new_type) {
			wpc->io_type = new_type;
			wpc->imap_valid = false;
		}

		if (wpc->imap_valid)
			wpc->imap_valid = xfs_imap_valid(inode, &wpc->imap,
							 offset);
		if (!wpc->imap_valid) {
			error = xfs_map_blocks(inode, offset, &wpc->imap,
					     wpc->io_type);
			if (error)
				goto out;
			wpc->imap_valid = xfs_imap_valid(inode, &wpc->imap,
							 offset);
		}
		if (wpc->imap_valid) {
			lock_buffer(bh);
			if (wpc->io_type != XFS_IO_OVERWRITE)
				xfs_map_at_offset(inode, bh, &wpc->imap, offset);
			xfs_add_to_ioend(inode, bh, offset, wpc, wbc, &submit_list);
			count++;
		}

	} while (offset += len, ((bh = bh->b_this_page) != head));

	if (uptodate && bh == head)
		SetPageUptodate(page);

	ASSERT(wpc->ioend || list_empty(&submit_list));

out:
	/*
	 * On error, we have to fail the ioend here because we have locked
	 * buffers in the ioend. If we don't do this, we'll deadlock
	 * invalidating the page as that tries to lock the buffers on the page.
	 * Also, because we may have set pages under writeback, we have to make
	 * sure we run IO completion to mark the error state of the IO
	 * appropriately, so we can't cancel the ioend directly here. That means
	 * we have to mark this page as under writeback if we included any
	 * buffers from it in the ioend chain so that completion treats it
	 * correctly.
	 *
	 * If we didn't include the page in the ioend, then on error we can
	 * simply discard and unlock it as there are no other users of the page
	 * or its buffers right now. The caller will still need to trigger
	 * submission of outstanding ioends on the writepage context so they are
	 * treated correctly on error.
	 */
	if (count) {
		xfs_start_page_writeback(page, !error);

		/*
		 * Preserve the original error if there was one, otherwise catch
		 * submission errors here and propagate into subsequent ioend
		 * submissions.
		 */
		list_for_each_entry_safe(ioend, next, &submit_list, io_list) {
			int error2;

			list_del_init(&ioend->io_list);
			error2 = xfs_submit_ioend(wbc, ioend, error);
			if (error2 && !error)
				error = error2;
		}
	} else if (error) {
		xfs_aops_discard_page(page);
		ClearPageUptodate(page);
		unlock_page(page);
	} else {
		/*
		 * We can end up here with no error and nothing to write if we
		 * race with a partial page truncate on a sub-page block sized
		 * filesystem. In that case we need to mark the page clean.
		 */
		xfs_start_page_writeback(page, 1);
		end_page_writeback(page);
	}

	mapping_set_error(page->mapping, error);
	return error;
}

/*
 * Write out a dirty page.
 *
 * For delalloc space on the page we need to allocate space and flush it.
 * For unwritten space on the page we need to start the conversion to
 * regular allocated space.
 * For any other dirty buffer heads on the page we should flush them.
 */
STATIC int
xfs_do_writepage(
	struct page		*page,
	struct writeback_control *wbc,
	void			*data)
{
	struct xfs_writepage_ctx *wpc = data;
	struct inode		*inode = page->mapping->host;
	loff_t			offset;
	uint64_t		end_offset;
	pgoff_t			end_index;

	trace_xfs_writepage(inode, page, 0, 0);

	ASSERT(page_has_buffers(page));

	/*
	 * Refuse to write the page out if we are called from reclaim context.
	 *
	 * This avoids stack overflows when called from deeply used stacks in
	 * random callers for direct reclaim or memcg reclaim.  We explicitly
	 * allow reclaim from kswapd as the stack usage there is relatively low.
	 *
	 * This should never happen except in the case of a VM regression so
	 * warn about it.
	 */
	if (WARN_ON_ONCE((current->flags & (PF_MEMALLOC|PF_KSWAPD)) ==
			PF_MEMALLOC))
		goto redirty;

	/*
	 * Given that we do not allow direct reclaim to call us, we should
	 * never be called while in a filesystem transaction.
	 */
	if (WARN_ON_ONCE(current->flags & PF_MEMALLOC_NOFS))
		goto redirty;

	/*
	 * Is this page beyond the end of the file?
	 *
	 * If the page index is less than the end_index, adjust the end_offset
	 * to the highest offset that this page should represent.
	 * -----------------------------------------------------
	 * |			file mapping	       | <EOF> |
	 * -----------------------------------------------------
	 * | Page ... | Page N-2 | Page N-1 |  Page N  |       |
	 * ^--------------------------------^----------|--------
	 * |     desired writeback range    |      see else    |
	 * ---------------------------------^------------------|
	 */
	offset = i_size_read(inode);
	end_index = offset >> PAGE_SHIFT;
	if (page->index < end_index)
		end_offset = (xfs_off_t)(page->index + 1) << PAGE_SHIFT;
	else {
		/*
		 * Check whether the page to write out is beyond or straddles
		 * i_size or not.
		 * -------------------------------------------------------
		 * |		file mapping		        | <EOF>  |
		 * -------------------------------------------------------
		 * | Page ... | Page N-2 | Page N-1 |  Page N   | Beyond |
		 * ^--------------------------------^-----------|---------
		 * |				    |      Straddles     |
		 * ---------------------------------^-----------|--------|
		 */
		unsigned offset_into_page = offset & (PAGE_SIZE - 1);

		/*
		 * Skip the page if it is fully outside i_size, e.g. due to a
		 * truncate operation that is in progress. We must redirty the
		 * page so that reclaim stops reclaiming it. Otherwise
		 * xfs_vm_releasepage() is called on it and gets confused.
		 *
		 * Note that end_index is unsigned long; it would overflow
		 * if the given offset is greater than 16TB on a 32-bit system
		 * and if we do check whether the page is fully outside i_size
		 * via "if (page->index >= end_index + 1)", as "end_index + 1"
		 * would be evaluated to 0.  Hence this page would be redirtied
		 * and written out repeatedly, which would result in an
		 * infinite loop; the user program that performs this operation
		 * would hang.  Instead, we can verify this situation by
		 * checking if the page to write is totally beyond the i_size
		 * or if its offset is just equal to the EOF.
		 */
		if (page->index > end_index ||
		    (page->index == end_index && offset_into_page == 0))
			goto redirty;

		/*
		 * The page straddles i_size.  It must be zeroed out on each
		 * and every writepage invocation because it may be mmapped.
		 * "A file is mapped in multiples of the page size.  For a file
		 * that is not a multiple of the page size, the remaining
		 * memory is zeroed when mapped, and writes to that region are
		 * not written out to the file."
		 */
		zero_user_segment(page, offset_into_page, PAGE_SIZE);

		/* Adjust the end_offset to the end of file */
		end_offset = offset;
	}

	return xfs_writepage_map(wpc, wbc, inode, page, end_offset);

redirty:
	redirty_page_for_writepage(wbc, page);
	unlock_page(page);
	return 0;
}

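/*
 * ->writepage entry point: run the common writepage code for a single
 * page with a local writepage context, then submit any ioend left cached
 * on it.
 */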
STATIC int
xfs_vm_writepage(
	struct page		*page,
	struct writeback_control *wbc)
{
	struct xfs_writepage_ctx wpc = {
		.io_type = XFS_IO_INVALID,
	};
	int			ret;

	ret = xfs_do_writepage(page, wbc, &wpc);
	if (wpc.ioend)
		ret = xfs_submit_ioend(wbc, wpc.ioend, ret);
	return ret;
}

STATIC int
xfs_vm_writepages(
	struct address_space	*mapping,
	struct writeback_control *wbc)
{
	struct xfs_writepage_ctx wpc = {
		.io_type = XFS_IO_INVALID,
	};
	int			ret;

	xfs_iflags_clear(XFS_I(mapping->host), XFS_ITRUNCATED);
	ret = write_cache_pages(mapping, wbc, xfs_do_writepage, &wpc);
	if (wpc.ioend)
		ret = xfs_submit_ioend(wbc, wpc.ioend, ret);
	return ret;
}

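/*
 * DAX inodes have no buffer_heads or ioends; dirty mappings are flushed
 * directly via dax_writeback_mapping_range().
 */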
STATIC int
xfs_dax_writepages(
	struct address_space	*mapping,
	struct writeback_control *wbc)
{
	xfs_iflags_clear(XFS_I(mapping->host), XFS_ITRUNCATED);
	return dax_writeback_mapping_range(mapping,
			xfs_find_bdev_for_inode(mapping->host), wbc);
}

/*
 * Called to move a page into cleanable state - and from there
 * to be released. The page should already be clean. We always
 * have buffer heads in this call.
 *
 * Returns 1 if the page is ok to release, 0 otherwise.
 */
STATIC int
xfs_vm_releasepage(
	struct page		*page,
	gfp_t			gfp_mask)
{
	int			delalloc, unwritten;

	trace_xfs_releasepage(page->mapping->host, page, 0, 0);

	/*
	 * mm accommodates an old ext3 case where clean pages might not have had
	 * the dirty bit cleared. Thus, it can send actual dirty pages to
	 * ->releasepage() via shrink_active_list(). Conversely,
	 * block_invalidatepage() can send pages that are still marked dirty but
	 * otherwise have invalidated buffers.
	 *
	 * We want to release the latter to avoid unnecessary buildup of the
	 * LRU, so xfs_vm_invalidatepage() clears the page dirty flag on pages
	 * that are entirely invalidated and need to be released.  Hence the
	 * only time we should get dirty pages here is through
	 * shrink_active_list() and so we can simply skip those now.
	 *
	 * warn if we've left any lingering delalloc/unwritten buffers on clean
	 * or invalidated pages we are about to release.
	 */
	if (PageDirty(page))
		return 0;

	xfs_count_page_state(page, &delalloc, &unwritten);

	if (WARN_ON_ONCE(delalloc))
		return 0;
	if (WARN_ON_ONCE(unwritten))
		return 0;

	return try_to_free_buffers(page);
}

/*
 * If this is O_DIRECT or the mpage code calling, tell them how large the
 * mapping is, so that we can avoid repeated get_blocks calls.
 *
 * If the mapping spans EOF, then we have to break the mapping up as the mapping
 * for blocks beyond EOF must be marked new so that sub block regions can be
 * correctly zeroed. We can't do this for mappings within EOF unless the mapping
 * was just allocated or is unwritten, otherwise the callers would overwrite
 * existing data with zeros. Hence we have to split the mapping into a range up
 * to and including EOF, and a second mapping for beyond EOF.
 */
static void
xfs_map_trim_size(
	struct inode		*inode,
	sector_t		iblock,
	struct buffer_head	*bh_result,
	struct xfs_bmbt_irec	*imap,
	xfs_off_t		offset,
	ssize_t			size)
{
	xfs_off_t		mapping_size;

	mapping_size = imap->br_startoff + imap->br_blockcount - iblock;
	mapping_size <<= inode->i_blkbits;

	ASSERT(mapping_size > 0);
	if (mapping_size > size)
		mapping_size = size;
	if (offset < i_size_read(inode) &&
	    (xfs_ufsize_t)offset + mapping_size >= i_size_read(inode)) {
		/* limit mapping to block that spans EOF */
		mapping_size = roundup_64(i_size_read(inode) - offset,
					  i_blocksize(inode));
	}
	if (mapping_size > LONG_MAX)
		mapping_size = LONG_MAX;

	bh_result->b_size = mapping_size;
}

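/*
 * Read-only get_blocks callback.  In this file it only backs the read and
 * bmap paths (mpage_readpage(s) and generic_block_bmap), so block
 * allocation is never required here, hence the BUG_ON(create).
 */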
static int
xfs_get_blocks(
	struct inode		*inode,
	sector_t		iblock,
	struct buffer_head	*bh_result,
	int			create)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	xfs_fileoff_t		offset_fsb, end_fsb;
	int			error = 0;
	int			lockmode = 0;
	struct xfs_bmbt_irec	imap;
	int			nimaps = 1;
	xfs_off_t		offset;
	ssize_t			size;

	BUG_ON(create);

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	offset = (xfs_off_t)iblock << inode->i_blkbits;
	ASSERT(bh_result->b_size >= i_blocksize(inode));
	size = bh_result->b_size;

	if (offset >= i_size_read(inode))
		return 0;

	/*
	 * Direct I/O is usually done on preallocated files, so try getting
	 * a block mapping without an exclusive lock first.
	 */
	lockmode = xfs_ilock_data_map_shared(ip);

	ASSERT(offset <= mp->m_super->s_maxbytes);
	if (offset > mp->m_super->s_maxbytes - size)
		size = mp->m_super->s_maxbytes - offset;
	end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + size);
	offset_fsb = XFS_B_TO_FSBT(mp, offset);

	error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb, &imap,
			&nimaps, 0);
	if (error)
		goto out_unlock;
	if (!nimaps) {
		trace_xfs_get_blocks_notfound(ip, offset, size);
		goto out_unlock;
	}

	trace_xfs_get_blocks_found(ip, offset, size,
		imap.br_state == XFS_EXT_UNWRITTEN ?
			XFS_IO_UNWRITTEN : XFS_IO_OVERWRITE, &imap);
	xfs_iunlock(ip, lockmode);

	/* trim mapping down to size requested */
	xfs_map_trim_size(inode, iblock, bh_result, &imap, offset, size);

	/*
	 * For unwritten extents do not report a disk address in the buffered
	 * read case (treat as if we're reading into a hole).
	 */
	if (xfs_bmap_is_real_extent(&imap))
		xfs_map_buffer(inode, bh_result, &imap, offset);

	/*
	 * If this is a realtime file, data may be on a different device
	 * to that pointed to from the buffer_head b_bdev currently.
	 */
	bh_result->b_bdev = xfs_find_bdev_for_inode(inode);
	return 0;

out_unlock:
	xfs_iunlock(ip, lockmode);
	return error;
}

STATIC sector_t
xfs_vm_bmap(
	struct address_space	*mapping,
	sector_t		block)
{
	struct inode		*inode = (struct inode *)mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);

	trace_xfs_vm_bmap(XFS_I(inode));

	/*
	 * The swap code (ab-)uses ->bmap to get a block mapping and then
	 * bypasses the file system for actual I/O.  We really can't allow
	 * that on reflink inodes, so we have to skip out here.  And yes,
	 * 0 is the magic code for a bmap error.
	 *
	 * Since we don't pass back blockdev info, we can't return bmap
	 * information for rt files either.
	 */
	if (xfs_is_reflink_inode(ip) || XFS_IS_REALTIME_INODE(ip))
		return 0;

	filemap_write_and_wait(mapping);
	return generic_block_bmap(mapping, block, xfs_get_blocks);
}

STATIC int
xfs_vm_readpage(
	struct file		*unused,
	struct page		*page)
{
	trace_xfs_vm_readpage(page->mapping->host, 1);
	return mpage_readpage(page, xfs_get_blocks);
}

STATIC int
xfs_vm_readpages(
	struct file		*unused,
	struct address_space	*mapping,
	struct list_head	*pages,
	unsigned		nr_pages)
{
	trace_xfs_vm_readpages(mapping->host, nr_pages);
	return mpage_readpages(mapping, pages, nr_pages, xfs_get_blocks);
}
1422 1423 1424 1425 1426 1427 1428 1429 1430 1431 1432 1433 1434 1435 1436 1437 1438 1439 1440 1441 1442 1443 1444 1445 1446 1447 1448 1449 1450 1451 1452 1453 1454 1455 1456 1457 1458
/*
 * This is basically a copy of __set_page_dirty_buffers() with one
 * small tweak: buffers beyond EOF do not get marked dirty. If we mark them
 * dirty, we'll never be able to clean them because we don't write buffers
 * beyond EOF, and that means we can't invalidate pages that span EOF
 * that have been marked dirty. Further, the dirty state can leak into
 * the file interior if the file is extended, resulting in all sorts of
 * bad things happening as the state does not match the underlying data.
 *
 * XXX: this really indicates that bufferheads in XFS need to die. Warts like
 * this only exist because of bufferheads and how the generic code manages them.
 */
STATIC int
xfs_vm_set_page_dirty(
	struct page		*page)
{
	struct address_space	*mapping = page->mapping;
	struct inode		*inode = mapping->host;
	loff_t			end_offset;
	loff_t			offset;
	int			newly_dirty;

	if (unlikely(!mapping))
		return !TestSetPageDirty(page);

	end_offset = i_size_read(inode);
	offset = page_offset(page);

	spin_lock(&mapping->private_lock);
	if (page_has_buffers(page)) {
		struct buffer_head *head = page_buffers(page);
		struct buffer_head *bh = head;

		do {
			if (offset < end_offset)
				set_buffer_dirty(bh);
			bh = bh->b_this_page;
F
Fabian Frederick 已提交
1459
			offset += i_blocksize(inode);
1460 1461
		} while (bh != head);
	}
1462
	/*
1463 1464
	 * Lock out page->mem_cgroup migration to keep PageDirty
	 * synchronized with per-memcg dirty page counters.
1465
	 */
J
Johannes Weiner 已提交
1466
	lock_page_memcg(page);
1467 1468 1469
	newly_dirty = !TestSetPageDirty(page);
	spin_unlock(&mapping->private_lock);

M
Matthew Wilcox 已提交
1470 1471
	if (newly_dirty)
		__set_page_dirty(page, mapping, 1);
J
Johannes Weiner 已提交
1472
	unlock_page_memcg(page);
1473 1474
	if (newly_dirty)
		__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
1475 1476 1477
	return newly_dirty;
}

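/*
 * Address space operations for regular (buffered, non-DAX) XFS inodes.
 */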
const struct address_space_operations xfs_address_space_operations = {
	.readpage		= xfs_vm_readpage,
	.readpages		= xfs_vm_readpages,
	.writepage		= xfs_vm_writepage,
	.writepages		= xfs_vm_writepages,
	.set_page_dirty		= xfs_vm_set_page_dirty,
	.releasepage		= xfs_vm_releasepage,
	.invalidatepage		= xfs_vm_invalidatepage,
	.bmap			= xfs_vm_bmap,
	.direct_IO		= noop_direct_IO,
	.migratepage		= buffer_migrate_page,
	.is_partially_uptodate  = block_is_partially_uptodate,
	.error_remove_page	= generic_error_remove_page,
};

const struct address_space_operations xfs_dax_aops = {
	.writepages		= xfs_dax_writepages,
	.direct_IO		= noop_direct_IO,
	.set_page_dirty		= noop_set_page_dirty,
	.invalidatepage		= noop_invalidatepage,
};