/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_inode_item.h"
#include "xfs_alloc.h"
#include "xfs_error.h"
#include "xfs_iomap.h"
#include "xfs_trace.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_bmap_btree.h"
#include "xfs_reflink.h"
#include <linux/gfp.h>
#include <linux/mpage.h>
#include <linux/pagevec.h>
#include <linux/writeback.h>

/*
 * structure owned by writepages passed to individual writepage calls
 */
struct xfs_writepage_ctx {
	struct xfs_bmbt_irec    imap;
	bool			imap_valid;
	unsigned int		io_type;
	struct xfs_ioend	*ioend;
	sector_t		last_block;
};

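/*
 * Walk the buffers attached to @page and report whether any of them are in
 * delalloc or unwritten state.
 */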
void
xfs_count_page_state(
	struct page		*page,
	int			*delalloc,
	int			*unwritten)
{
	struct buffer_head	*bh, *head;

	*delalloc = *unwritten = 0;

	bh = head = page_buffers(page);
	do {
		if (buffer_unwritten(bh))
			(*unwritten) = 1;
		else if (buffer_delay(bh))
			(*delalloc) = 1;
	} while ((bh = bh->b_this_page) != head);
}

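/*
 * Return the block device backing this inode's data: the realtime device for
 * realtime inodes, otherwise the main data device.
 */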
struct block_device *
xfs_find_bdev_for_inode(
	struct inode		*inode)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;

	if (XFS_IS_REALTIME_INODE(ip))
		return mp->m_rtdev_targp->bt_bdev;
	else
		return mp->m_ddev_targp->bt_bdev;
}

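/* As above, but return the DAX device backing this inode's data. */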
struct dax_device *
xfs_find_daxdev_for_inode(
	struct inode		*inode)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;

	if (XFS_IS_REALTIME_INODE(ip))
		return mp->m_rtdev_targp->bt_daxdev;
	else
		return mp->m_ddev_targp->bt_daxdev;
}

/*
 * We're now finished for good with this page.  Update the page state via the
 * associated buffer_heads, paying attention to the start and end offsets that
 * we need to process on the page.
 *
 * Note that we open code the action in end_buffer_async_write here so that we
 * only have to iterate over the buffers attached to the page once.  This is not
 * only more efficient, but also ensures that we only call end_page_writeback
 * at the end of the iteration, and thus avoids the pitfall of having the page
 * and buffers potentially freed after every call to end_buffer_async_write.
 */
static void
xfs_finish_page_writeback(
	struct inode		*inode,
	struct bio_vec		*bvec,
	int			error)
{
	struct buffer_head	*head = page_buffers(bvec->bv_page), *bh = head;
	bool			busy = false;
	unsigned int		off = 0;
	unsigned long		flags;

	ASSERT(bvec->bv_offset < PAGE_SIZE);
	ASSERT((bvec->bv_offset & (i_blocksize(inode) - 1)) == 0);
	ASSERT(bvec->bv_offset + bvec->bv_len <= PAGE_SIZE);
	ASSERT((bvec->bv_len & (i_blocksize(inode) - 1)) == 0);

	local_irq_save(flags);
	bit_spin_lock(BH_Uptodate_Lock, &head->b_state);
	do {
		if (off >= bvec->bv_offset &&
		    off < bvec->bv_offset + bvec->bv_len) {
			ASSERT(buffer_async_write(bh));
			ASSERT(bh->b_end_io == NULL);

			if (error) {
				mark_buffer_write_io_error(bh);
				clear_buffer_uptodate(bh);
				SetPageError(bvec->bv_page);
			} else {
				set_buffer_uptodate(bh);
			}
			clear_buffer_async_write(bh);
			unlock_buffer(bh);
		} else if (buffer_async_write(bh)) {
			ASSERT(buffer_locked(bh));
			busy = true;
		}
		off += bh->b_size;
	} while ((bh = bh->b_this_page) != head);
	bit_spin_unlock(BH_Uptodate_Lock, &head->b_state);
	local_irq_restore(flags);

	if (!busy)
		end_page_writeback(bvec->bv_page);
}

/*
 * We're now finished for good with this ioend structure.  Update the page
 * state, release holds on bios, and finally free up memory.  Do not use the
 * ioend after this.
 */
STATIC void
xfs_destroy_ioend(
	struct xfs_ioend	*ioend,
	int			error)
{
	struct inode		*inode = ioend->io_inode;
	struct bio		*bio = &ioend->io_inline_bio;
	struct bio		*last = ioend->io_bio, *next;
	u64			start = bio->bi_iter.bi_sector;
	bool			quiet = bio_flagged(bio, BIO_QUIET);

	for (bio = &ioend->io_inline_bio; bio; bio = next) {
		struct bio_vec	*bvec;
		int		i;

		/*
		 * For the last bio, bi_private points to the ioend, so we
		 * need to explicitly end the iteration here.
		 */
		if (bio == last)
			next = NULL;
		else
			next = bio->bi_private;

		/* walk each page on bio, ending page IO on them */
		bio_for_each_segment_all(bvec, bio, i)
			xfs_finish_page_writeback(inode, bvec, error);

		bio_put(bio);
	}

	if (unlikely(error && !quiet)) {
		xfs_err_ratelimited(XFS_I(inode)->i_mount,
			"writeback error on sector %llu", start);
	}
}

/*
 * Fast and loose check if this write could update the on-disk inode size.
 */
static inline bool xfs_ioend_is_append(struct xfs_ioend *ioend)
{
	return ioend->io_offset + ioend->io_size >
		XFS_I(ioend->io_inode)->i_d.di_size;
}

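/*
 * Reserve a transaction up front for a possible on-disk file size update at
 * I/O completion, and hand ownership of it to the ioend.
 */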
STATIC int
xfs_setfilesize_trans_alloc(
	struct xfs_ioend	*ioend)
{
	struct xfs_mount	*mp = XFS_I(ioend->io_inode)->i_mount;
	struct xfs_trans	*tp;
	int			error;

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_fsyncts, 0, 0, 0, &tp);
	if (error)
		return error;

	ioend->io_append_trans = tp;

	/*
	 * We may pass freeze protection with a transaction.  So tell lockdep
	 * we released it.
	 */
	__sb_writers_release(ioend->io_inode->i_sb, SB_FREEZE_FS);
	/*
	 * We hand off the transaction to the completion thread now, so
	 * clear the flag here.
	 */
	current_restore_flags_nested(&tp->t_pflags, PF_MEMALLOC_NOFS);
	return 0;
}

/*
 * Update on-disk file size now that data has been written to disk.
 */
STATIC int
__xfs_setfilesize(
	struct xfs_inode	*ip,
	struct xfs_trans	*tp,
	xfs_off_t		offset,
	size_t			size)
{
	xfs_fsize_t		isize;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	isize = xfs_new_eof(ip, offset + size);
	if (!isize) {
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
		xfs_trans_cancel(tp);
		return 0;
	}

	trace_xfs_setfilesize(ip, offset, size);

	ip->i_d.di_size = isize;
	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);

	return xfs_trans_commit(tp);
}

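/*
 * Allocate the size-update transaction and apply it immediately, for callers
 * that did not reserve one at I/O submission time.
 */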
int
xfs_setfilesize(
	struct xfs_inode	*ip,
	xfs_off_t		offset,
	size_t			size)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp;
	int			error;

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_fsyncts, 0, 0, 0, &tp);
	if (error)
		return error;

	return __xfs_setfilesize(ip, tp, offset, size);
}

STATIC int
xfs_setfilesize_ioend(
	struct xfs_ioend	*ioend,
	int			error)
{
	struct xfs_inode	*ip = XFS_I(ioend->io_inode);
	struct xfs_trans	*tp = ioend->io_append_trans;

	/*
	 * The transaction may have been allocated in the I/O submission thread,
	 * thus we need to mark ourselves as being in a transaction manually.
	 * Similarly for freeze protection.
	 */
	current_set_flags_nested(&tp->t_pflags, PF_MEMALLOC_NOFS);
	__sb_writers_acquired(VFS_I(ip)->i_sb, SB_FREEZE_FS);

	/* we abort the update if there was an IO error */
	if (error) {
		xfs_trans_cancel(tp);
		return error;
	}

	return __xfs_setfilesize(ip, tp, ioend->io_offset, ioend->io_size);
}

/*
 * IO write completion.
 */
STATIC void
xfs_end_io(
	struct work_struct *work)
{
	struct xfs_ioend	*ioend =
		container_of(work, struct xfs_ioend, io_work);
	struct xfs_inode	*ip = XFS_I(ioend->io_inode);
	xfs_off_t		offset = ioend->io_offset;
	size_t			size = ioend->io_size;
	int			error;

	/*
	 * Just clean up the in-memory structures if the fs has been shut down.
	 */
	if (XFS_FORCED_SHUTDOWN(ip->i_mount)) {
		error = -EIO;
		goto done;
	}

	/*
	 * Clean up any COW blocks on an I/O error.
	 */
	error = blk_status_to_errno(ioend->io_bio->bi_status);
	if (unlikely(error)) {
		switch (ioend->io_type) {
		case XFS_IO_COW:
			xfs_reflink_cancel_cow_range(ip, offset, size, true);
			break;
		}

		goto done;
	}

	/*
	 * Success:  commit the COW or unwritten blocks if needed.
	 */
	switch (ioend->io_type) {
	case XFS_IO_COW:
		error = xfs_reflink_end_cow(ip, offset, size);
		break;
	case XFS_IO_UNWRITTEN:
		/* writeback should never update isize */
		error = xfs_iomap_write_unwritten(ip, offset, size, false);
		break;
	default:
		ASSERT(!xfs_ioend_is_append(ioend) || ioend->io_append_trans);
		break;
	}

done:
	if (ioend->io_append_trans)
		error = xfs_setfilesize_ioend(ioend, error);
	xfs_destroy_ioend(ioend, error);
}

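/*
 * Completion handler for ioend bios.  Unwritten extent conversion, COW
 * remapping and file size updates are punted to a workqueue; everything else
 * is finished inline.
 */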
STATIC void
xfs_end_bio(
	struct bio		*bio)
{
	struct xfs_ioend	*ioend = bio->bi_private;
	struct xfs_mount	*mp = XFS_I(ioend->io_inode)->i_mount;

	if (ioend->io_type == XFS_IO_UNWRITTEN || ioend->io_type == XFS_IO_COW)
		queue_work(mp->m_unwritten_workqueue, &ioend->io_work);
	else if (ioend->io_append_trans)
		queue_work(mp->m_data_workqueue, &ioend->io_work);
	else
		xfs_destroy_ioend(ioend, blk_status_to_errno(bio->bi_status));
}

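/*
 * Find the extent backing @offset for writeback, allocating real space for a
 * delayed allocation extent if required.
 */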
STATIC int
xfs_map_blocks(
	struct inode		*inode,
	loff_t			offset,
	struct xfs_bmbt_irec	*imap,
	int			type)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	ssize_t			count = i_blocksize(inode);
	xfs_fileoff_t		offset_fsb, end_fsb;
	int			error = 0;
	int			bmapi_flags = XFS_BMAPI_ENTIRE;
	int			nimaps = 1;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	ASSERT(type != XFS_IO_COW);
	if (type == XFS_IO_UNWRITTEN)
		bmapi_flags |= XFS_BMAPI_IGSTATE;

	xfs_ilock(ip, XFS_ILOCK_SHARED);
	ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE ||
	       (ip->i_df.if_flags & XFS_IFEXTENTS));
	ASSERT(offset <= mp->m_super->s_maxbytes);

	if (offset + count > mp->m_super->s_maxbytes)
		count = mp->m_super->s_maxbytes - offset;
	end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + count);
	offset_fsb = XFS_B_TO_FSBT(mp, offset);
	error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb,
				imap, &nimaps, bmapi_flags);
	/*
	 * Truncate an overwrite extent if there's a pending CoW
	 * reservation before the end of this extent.  This forces us
	 * to come back to writepage to take care of the CoW.
	 */
	if (nimaps && type == XFS_IO_OVERWRITE)
		xfs_reflink_trim_irec_to_next_cow(ip, offset_fsb, imap);
	xfs_iunlock(ip, XFS_ILOCK_SHARED);

	if (error)
		return error;

	if (type == XFS_IO_DELALLOC &&
	    (!nimaps || isnullstartblock(imap->br_startblock))) {
		error = xfs_iomap_write_allocate(ip, XFS_DATA_FORK, offset,
				imap);
		if (!error)
			trace_xfs_map_blocks_alloc(ip, offset, count, type, imap);
		return error;
	}

#ifdef DEBUG
	if (type == XFS_IO_UNWRITTEN) {
		ASSERT(nimaps);
		ASSERT(imap->br_startblock != HOLESTARTBLOCK);
		ASSERT(imap->br_startblock != DELAYSTARTBLOCK);
	}
#endif
	if (nimaps)
		trace_xfs_map_blocks_found(ip, offset, count, type, imap);
	return 0;
}

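/* Return true if the file offset lies within the cached extent mapping. */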
STATIC bool
xfs_imap_valid(
	struct inode		*inode,
	struct xfs_bmbt_irec	*imap,
	xfs_off_t		offset)
{
	offset >>= inode->i_blkbits;

	return offset >= imap->br_startoff &&
		offset < imap->br_startoff + imap->br_blockcount;
}

STATIC void
xfs_start_buffer_writeback(
	struct buffer_head	*bh)
{
	ASSERT(buffer_mapped(bh));
	ASSERT(buffer_locked(bh));
	ASSERT(!buffer_delay(bh));
	ASSERT(!buffer_unwritten(bh));

	bh->b_end_io = NULL;
	set_buffer_async_write(bh);
	set_buffer_uptodate(bh);
	clear_buffer_dirty(bh);
}

STATIC void
xfs_start_page_writeback(
	struct page		*page,
	int			clear_dirty)
{
	ASSERT(PageLocked(page));
	ASSERT(!PageWriteback(page));

	/*
	 * if the page was not fully cleaned, we need to ensure that the higher
	 * layers come back to it correctly. That means we need to keep the page
	 * dirty, and for WB_SYNC_ALL writeback we need to ensure the
	 * PAGECACHE_TAG_TOWRITE index mark is not removed so another attempt to
	 * write this page in this writeback sweep will be made.
	 */
	if (clear_dirty) {
		clear_page_dirty_for_io(page);
		set_page_writeback(page);
	} else
		set_page_writeback_keepwrite(page);

	unlock_page(page);
}

static inline int xfs_bio_add_buffer(struct bio *bio, struct buffer_head *bh)
{
	return bio_add_page(bio, bh->b_page, bh->b_size, bh_offset(bh));
}

/*
 * Submit the bio for an ioend. We are passed an ioend with a bio attached to
 * it, and we submit that bio. The ioend may be used for multiple bio
 * submissions, so we only want to allocate an append transaction for the ioend
 * once. In the case of multiple bio submission, each bio will take an IO
 * reference to the ioend to ensure that the ioend completion is only done once
 * all bios have been submitted and the ioend is really done.
 *
 * If @status is non-zero, it means that we have a situation where some part of
 * the submission process has failed after we have marked pages for writeback
 * and unlocked them. In this situation, we need to fail the bio and ioend
 * rather than submit it to IO. This typically only happens on a filesystem
 * shutdown.
 */
STATIC int
xfs_submit_ioend(
	struct writeback_control *wbc,
	struct xfs_ioend	*ioend,
	int			status)
{
	/* Convert CoW extents to regular */
	if (!status && ioend->io_type == XFS_IO_COW) {
		status = xfs_reflink_convert_cow(XFS_I(ioend->io_inode),
				ioend->io_offset, ioend->io_size);
	}

	/* Reserve log space if we might write beyond the on-disk inode size. */
	if (!status &&
	    ioend->io_type != XFS_IO_UNWRITTEN &&
	    xfs_ioend_is_append(ioend) &&
	    !ioend->io_append_trans)
		status = xfs_setfilesize_trans_alloc(ioend);

	ioend->io_bio->bi_private = ioend;
	ioend->io_bio->bi_end_io = xfs_end_bio;
	ioend->io_bio->bi_opf = REQ_OP_WRITE | wbc_to_write_flags(wbc);

	/*
	 * If we are failing the IO now, just mark the ioend with an
	 * error and finish it. This will run IO completion immediately
	 * as there is only one reference to the ioend at this point in
	 * time.
	 */
	if (status) {
		ioend->io_bio->bi_status = errno_to_blk_status(status);
		bio_endio(ioend->io_bio);
		return status;
	}

	ioend->io_bio->bi_write_hint = ioend->io_inode->i_write_hint;
	submit_bio(ioend->io_bio);
	return 0;
}

static void
xfs_init_bio_from_bh(
	struct bio		*bio,
	struct buffer_head	*bh)
{
	bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9);
	bio_set_dev(bio, bh->b_bdev);
}

static struct xfs_ioend *
xfs_alloc_ioend(
	struct inode		*inode,
	unsigned int		type,
	xfs_off_t		offset,
	struct buffer_head	*bh)
{
	struct xfs_ioend	*ioend;
	struct bio		*bio;

	bio = bio_alloc_bioset(GFP_NOFS, BIO_MAX_PAGES, xfs_ioend_bioset);
	xfs_init_bio_from_bh(bio, bh);

	ioend = container_of(bio, struct xfs_ioend, io_inline_bio);
	INIT_LIST_HEAD(&ioend->io_list);
	ioend->io_type = type;
	ioend->io_inode = inode;
	ioend->io_size = 0;
	ioend->io_offset = offset;
	INIT_WORK(&ioend->io_work, xfs_end_io);
	ioend->io_append_trans = NULL;
	ioend->io_bio = bio;
	return ioend;
}

/*
 * Allocate a new bio, and chain the old bio to the new one.
 *
 * Note that we have to perform the chaining in this unintuitive order
 * so that the bi_private linkage is set up in the right direction for the
 * traversal in xfs_destroy_ioend().
 */
static void
xfs_chain_bio(
	struct xfs_ioend	*ioend,
	struct writeback_control *wbc,
	struct buffer_head	*bh)
{
	struct bio *new;

	new = bio_alloc(GFP_NOFS, BIO_MAX_PAGES);
	xfs_init_bio_from_bh(new, bh);

	bio_chain(ioend->io_bio, new);
	bio_get(ioend->io_bio);		/* for xfs_destroy_ioend */
	ioend->io_bio->bi_opf = REQ_OP_WRITE | wbc_to_write_flags(wbc);
	ioend->io_bio->bi_write_hint = ioend->io_inode->i_write_hint;
	submit_bio(ioend->io_bio);
	ioend->io_bio = new;
}

/*
 * Test to see if we've been building up a completion structure for
 * earlier buffers -- if so, we try to append to this ioend if we
 * can, otherwise we finish off any current ioend and start another.
 * Any ioend we finish off is queued on @iolist so that the caller can
 * submit it once it has finished processing the dirty page.
 */
STATIC void
xfs_add_to_ioend(
	struct inode		*inode,
	struct buffer_head	*bh,
	xfs_off_t		offset,
	struct xfs_writepage_ctx *wpc,
	struct writeback_control *wbc,
	struct list_head	*iolist)
{
	if (!wpc->ioend || wpc->io_type != wpc->ioend->io_type ||
	    bh->b_blocknr != wpc->last_block + 1 ||
	    offset != wpc->ioend->io_offset + wpc->ioend->io_size) {
		if (wpc->ioend)
			list_add(&wpc->ioend->io_list, iolist);
		wpc->ioend = xfs_alloc_ioend(inode, wpc->io_type, offset, bh);
	}

	/*
	 * If the buffer doesn't fit into the bio we need to allocate a new
	 * one.  This shouldn't happen more than once for a given buffer.
	 */
	while (xfs_bio_add_buffer(wpc->ioend->io_bio, bh) != bh->b_size)
		xfs_chain_bio(wpc->ioend, wbc, bh);

	wpc->ioend->io_size += bh->b_size;
	wpc->last_block = bh->b_blocknr;
	xfs_start_buffer_writeback(bh);
}

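/* Stamp @bh with the disk block that backs @offset according to @imap. */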
STATIC void
xfs_map_buffer(
	struct inode		*inode,
	struct buffer_head	*bh,
	struct xfs_bmbt_irec	*imap,
	xfs_off_t		offset)
{
	sector_t		bn;
	struct xfs_mount	*m = XFS_I(inode)->i_mount;
	xfs_off_t		iomap_offset = XFS_FSB_TO_B(m, imap->br_startoff);
	xfs_daddr_t		iomap_bn = xfs_fsb_to_db(XFS_I(inode), imap->br_startblock);

	ASSERT(imap->br_startblock != HOLESTARTBLOCK);
	ASSERT(imap->br_startblock != DELAYSTARTBLOCK);

	bn = (iomap_bn >> (inode->i_blkbits - BBSHIFT)) +
	      ((offset - iomap_offset) >> inode->i_blkbits);

	ASSERT(bn || XFS_IS_REALTIME_INODE(XFS_I(inode)));

	bh->b_blocknr = bn;
	set_buffer_mapped(bh);
}

STATIC void
xfs_map_at_offset(
	struct inode		*inode,
	struct buffer_head	*bh,
	struct xfs_bmbt_irec	*imap,
	xfs_off_t		offset)
{
	ASSERT(imap->br_startblock != HOLESTARTBLOCK);
	ASSERT(imap->br_startblock != DELAYSTARTBLOCK);

	xfs_map_buffer(inode, bh, imap, offset);
	set_buffer_mapped(bh);
	clear_buffer_delay(bh);
	clear_buffer_unwritten(bh);
}

/*
 * Test if a given page contains at least one buffer of a given @type.
 * If @check_all_buffers is true, then we walk all the buffers in the page to
 * try to find one of the type passed in. If it is not set, then the caller only
 * needs to check the first buffer on the page for a match.
 */
STATIC bool
xfs_check_page_type(
	struct page		*page,
	unsigned int		type,
	bool			check_all_buffers)
{
	struct buffer_head	*bh;
	struct buffer_head	*head;

	if (PageWriteback(page))
		return false;
	if (!page->mapping)
		return false;
	if (!page_has_buffers(page))
		return false;

	bh = head = page_buffers(page);
	do {
		if (buffer_unwritten(bh)) {
			if (type == XFS_IO_UNWRITTEN)
				return true;
		} else if (buffer_delay(bh)) {
			if (type == XFS_IO_DELALLOC)
				return true;
		} else if (buffer_dirty(bh) && buffer_mapped(bh)) {
			if (type == XFS_IO_OVERWRITE)
				return true;
		}

		/* If we are only checking the first buffer, we are done now. */
		if (!check_all_buffers)
			break;
	} while ((bh = bh->b_this_page) != head);

	return false;
}

STATIC void
xfs_vm_invalidatepage(
	struct page		*page,
	unsigned int		offset,
	unsigned int		length)
{
	trace_xfs_invalidatepage(page->mapping->host, page, offset,
				 length);

	/*
	 * If we are invalidating the entire page, clear the dirty state from it
	 * so that we can check for attempts to release dirty cached pages in
	 * xfs_vm_releasepage().
	 */
	if (offset == 0 && length >= PAGE_SIZE)
		cancel_dirty_page(page);
	block_invalidatepage(page, offset, length);
}

/*
 * If the page has delalloc buffers on it, we need to punch them out before we
 * invalidate the page. If we don't, we leave a stale delalloc mapping on the
 * inode that can trip a BUG() in xfs_get_blocks() later on if a direct IO read
 * is done on that same region - the delalloc extent is returned when none is
 * supposed to be there.
 *
 * We prevent this by truncating away the delalloc regions on the page before
 * invalidating it. Because they are delalloc, we can do this without needing a
 * transaction. Indeed - if we get ENOSPC errors, we have to be able to do this
 * truncation without a transaction as there is no space left for block
 * reservation (typically why we see a ENOSPC in writeback).
 *
 * This is not a performance critical path, so for now just do the punching a
 * buffer head at a time.
 */
STATIC void
xfs_aops_discard_page(
	struct page		*page)
{
	struct inode		*inode = page->mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	struct buffer_head	*bh, *head;
	loff_t			offset = page_offset(page);

	if (!xfs_check_page_type(page, XFS_IO_DELALLOC, true))
		goto out_invalidate;

	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
		goto out_invalidate;

	xfs_alert(ip->i_mount,
		"page discard on page %p, inode 0x%llx, offset %llu.",
			page, ip->i_ino, offset);

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	bh = head = page_buffers(page);
	do {
		int		error;
		xfs_fileoff_t	start_fsb;

		if (!buffer_delay(bh))
			goto next_buffer;

		start_fsb = XFS_B_TO_FSBT(ip->i_mount, offset);
		error = xfs_bmap_punch_delalloc_range(ip, start_fsb, 1);
		if (error) {
			/* something screwed, just bail */
			if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) {
				xfs_alert(ip->i_mount,
			"page discard unable to remove delalloc mapping.");
			}
			break;
		}
next_buffer:
		offset += i_blocksize(inode);

	} while ((bh = bh->b_this_page) != head);

	xfs_iunlock(ip, XFS_ILOCK_EXCL);
out_invalidate:
	xfs_vm_invalidatepage(page, 0, PAGE_SIZE);
	return;
}

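/*
 * Check for, and if necessary allocate, a COW mapping covering @offset, and
 * switch the writeback context over to XFS_IO_COW if one exists.
 */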
static int
xfs_map_cow(
	struct xfs_writepage_ctx *wpc,
	struct inode		*inode,
	loff_t			offset,
	unsigned int		*new_type)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_bmbt_irec	imap;
	bool			is_cow = false;
	int			error;

	/*
	 * If we already have a valid COW mapping keep using it.
	 */
	if (wpc->io_type == XFS_IO_COW) {
		wpc->imap_valid = xfs_imap_valid(inode, &wpc->imap, offset);
		if (wpc->imap_valid) {
			*new_type = XFS_IO_COW;
			return 0;
		}
	}

	/*
	 * Else we need to check if there is a COW mapping at this offset.
	 */
	xfs_ilock(ip, XFS_ILOCK_SHARED);
	is_cow = xfs_reflink_find_cow_mapping(ip, offset, &imap);
	xfs_iunlock(ip, XFS_ILOCK_SHARED);

	if (!is_cow)
		return 0;

	/*
	 * And if the COW mapping has a delayed extent here we need to
	 * allocate real space for it now.
	 */
	if (isnullstartblock(imap.br_startblock)) {
		error = xfs_iomap_write_allocate(ip, XFS_COW_FORK, offset,
				&imap);
		if (error)
			return error;
	}

	wpc->io_type = *new_type = XFS_IO_COW;
	wpc->imap_valid = true;
	wpc->imap = imap;
	return 0;
}

/*
 * We implement an immediate ioend submission policy here to avoid needing to
 * chain multiple ioends and hence nest mempool allocations which can violate
 * forward progress guarantees we need to provide. The current ioend we are
 * adding buffers to is cached on the writepage context, and if the new buffer
 * does not append to the cached ioend it will create a new ioend and cache that
 * instead.
 *
 * If a new ioend is created and cached, the old ioend is returned and queued
 * locally for submission once the entire page is processed or an error has been
 * detected.  While ioends are submitted immediately after they are completed,
 * batching optimisations are provided by higher level block plugging.
 *
 * At the end of a writeback pass, there will be a cached ioend remaining on the
 * writepage context that the caller will need to submit.
 */
static int
xfs_writepage_map(
	struct xfs_writepage_ctx *wpc,
	struct writeback_control *wbc,
	struct inode		*inode,
	struct page		*page,
	loff_t			offset,
	uint64_t		end_offset)
{
	LIST_HEAD(submit_list);
	struct xfs_ioend	*ioend, *next;
	struct buffer_head	*bh, *head;
	ssize_t			len = i_blocksize(inode);
	int			error = 0;
	int			count = 0;
	int			uptodate = 1;
	unsigned int		new_type;

	bh = head = page_buffers(page);
	offset = page_offset(page);
	do {
		if (offset >= end_offset)
			break;
		if (!buffer_uptodate(bh))
			uptodate = 0;

		/*
		 * set_page_dirty dirties all buffers in a page, independent
		 * of their state.  The dirty state however is entirely
		 * meaningless for holes (!mapped && uptodate), so skip
		 * buffers covering holes here.
		 */
		if (!buffer_mapped(bh) && buffer_uptodate(bh)) {
			wpc->imap_valid = false;
			continue;
		}

		if (buffer_unwritten(bh))
			new_type = XFS_IO_UNWRITTEN;
		else if (buffer_delay(bh))
			new_type = XFS_IO_DELALLOC;
		else if (buffer_uptodate(bh))
			new_type = XFS_IO_OVERWRITE;
		else {
			if (PageUptodate(page))
				ASSERT(buffer_mapped(bh));
			/*
			 * This buffer is not uptodate and will not be
			 * written to disk.  Ensure that we will put any
			 * subsequent writeable buffers into a new
			 * ioend.
			 */
			wpc->imap_valid = false;
			continue;
		}

		if (xfs_is_reflink_inode(XFS_I(inode))) {
			error = xfs_map_cow(wpc, inode, offset, &new_type);
			if (error)
				goto out;
		}

		if (wpc->io_type != new_type) {
			wpc->io_type = new_type;
			wpc->imap_valid = false;
		}

		if (wpc->imap_valid)
			wpc->imap_valid = xfs_imap_valid(inode, &wpc->imap,
							 offset);
		if (!wpc->imap_valid) {
			error = xfs_map_blocks(inode, offset, &wpc->imap,
					     wpc->io_type);
			if (error)
				goto out;
			wpc->imap_valid = xfs_imap_valid(inode, &wpc->imap,
							 offset);
		}
		if (wpc->imap_valid) {
			lock_buffer(bh);
			if (wpc->io_type != XFS_IO_OVERWRITE)
				xfs_map_at_offset(inode, bh, &wpc->imap, offset);
			xfs_add_to_ioend(inode, bh, offset, wpc, wbc, &submit_list);
			count++;
		}

	} while (offset += len, ((bh = bh->b_this_page) != head));

	if (uptodate && bh == head)
		SetPageUptodate(page);

	ASSERT(wpc->ioend || list_empty(&submit_list));

out:
	/*
	 * On error, we have to fail the ioend here because we have locked
	 * buffers in the ioend. If we don't do this, we'll deadlock
	 * invalidating the page as that tries to lock the buffers on the page.
	 * Also, because we may have set pages under writeback, we have to make
	 * sure we run IO completion to mark the error state of the IO
	 * appropriately, so we can't cancel the ioend directly here. That means
	 * we have to mark this page as under writeback if we included any
	 * buffers from it in the ioend chain so that completion treats it
	 * correctly.
	 *
	 * If we didn't include the page in the ioend, then on error we can
	 * simply discard and unlock it as there are no other users of the page
	 * or its buffers right now. The caller will still need to trigger
	 * submission of outstanding ioends on the writepage context so they are
	 * treated correctly on error.
	 */
	if (count) {
		xfs_start_page_writeback(page, !error);

		/*
		 * Preserve the original error if there was one, otherwise catch
		 * submission errors here and propagate into subsequent ioend
		 * submissions.
		 */
		list_for_each_entry_safe(ioend, next, &submit_list, io_list) {
			int error2;

			list_del_init(&ioend->io_list);
			error2 = xfs_submit_ioend(wbc, ioend, error);
			if (error2 && !error)
				error = error2;
		}
	} else if (error) {
		xfs_aops_discard_page(page);
		ClearPageUptodate(page);
		unlock_page(page);
	} else {
		/*
		 * We can end up here with no error and nothing to write if we
		 * race with a partial page truncate on a sub-page block sized
		 * filesystem. In that case we need to mark the page clean.
		 */
		xfs_start_page_writeback(page, 1);
		end_page_writeback(page);
	}

	mapping_set_error(page->mapping, error);
	return error;
}

/*
 * Write out a dirty page.
 *
 * For delalloc space on the page we need to allocate space and flush it.
 * For unwritten space on the page we need to start the conversion to
 * regular allocated space.
 * For any other dirty buffer heads on the page we should flush them.
 */
STATIC int
xfs_do_writepage(
	struct page		*page,
	struct writeback_control *wbc,
	void			*data)
{
	struct xfs_writepage_ctx *wpc = data;
	struct inode		*inode = page->mapping->host;
	loff_t			offset;
	uint64_t		end_offset;
	pgoff_t			end_index;

	trace_xfs_writepage(inode, page, 0, 0);

	ASSERT(page_has_buffers(page));

	/*
	 * Refuse to write the page out if we are called from reclaim context.
	 *
	 * This avoids stack overflows when called from deeply used stacks in
	 * random callers for direct reclaim or memcg reclaim.  We explicitly
	 * allow reclaim from kswapd as the stack usage there is relatively low.
	 *
	 * This should never happen except in the case of a VM regression so
	 * warn about it.
	 */
	if (WARN_ON_ONCE((current->flags & (PF_MEMALLOC|PF_KSWAPD)) ==
			PF_MEMALLOC))
		goto redirty;

	/*
	 * Given that we do not allow direct reclaim to call us, we should
	 * never be called while in a filesystem transaction.
	 */
	if (WARN_ON_ONCE(current->flags & PF_MEMALLOC_NOFS))
		goto redirty;

	/*
	 * Is this page beyond the end of the file?
	 *
	 * The page index is less than the end_index, adjust the end_offset
	 * to the highest offset that this page should represent.
	 * -----------------------------------------------------
	 * |			file mapping	       | <EOF> |
	 * -----------------------------------------------------
	 * | Page ... | Page N-2 | Page N-1 |  Page N  |       |
	 * ^--------------------------------^----------|--------
	 * |     desired writeback range    |      see else    |
	 * ---------------------------------^------------------|
	 */
	offset = i_size_read(inode);
	end_index = offset >> PAGE_SHIFT;
	if (page->index < end_index)
		end_offset = (xfs_off_t)(page->index + 1) << PAGE_SHIFT;
	else {
		/*
		 * Check whether the page to write out is beyond or straddles
		 * i_size or not.
		 * -------------------------------------------------------
		 * |		file mapping		        | <EOF>  |
		 * -------------------------------------------------------
		 * | Page ... | Page N-2 | Page N-1 |  Page N   | Beyond |
		 * ^--------------------------------^-----------|---------
		 * |				    |      Straddles     |
		 * ---------------------------------^-----------|--------|
		 */
		unsigned offset_into_page = offset & (PAGE_SIZE - 1);

		/*
		 * Skip the page if it is fully outside i_size, e.g. due to a
		 * truncate operation that is in progress. We must redirty the
		 * page so that reclaim stops reclaiming it. Otherwise
		 * xfs_vm_releasepage() is called on it and gets confused.
		 *
		 * Note that the end_index is unsigned long, it would overflow
		 * if the given offset is greater than 16TB on 32-bit system
		 * and if we do check the page is fully outside i_size or not
		 * via "if (page->index >= end_index + 1)" as "end_index + 1"
		 * will be evaluated to 0.  Hence this page will be redirtied
		 * and be written out repeatedly which would result in an
		 * infinite loop, the user program that performs this operation
		 * will hang.  Instead, we can verify this situation by checking
		 * if the page to write is totally beyond the i_size or if its
		 * offset is just equal to the EOF.
		 */
		if (page->index > end_index ||
		    (page->index == end_index && offset_into_page == 0))
			goto redirty;

		/*
		 * The page straddles i_size.  It must be zeroed out on each
		 * and every writepage invocation because it may be mmapped.
		 * "A file is mapped in multiples of the page size.  For a file
		 * that is not a multiple of the page size, the remaining
		 * memory is zeroed when mapped, and writes to that region are
		 * not written out to the file."
		 */
		zero_user_segment(page, offset_into_page, PAGE_SIZE);

		/* Adjust the end_offset to the end of file */
		end_offset = offset;
	}

	return xfs_writepage_map(wpc, wbc, inode, page, offset, end_offset);

redirty:
	redirty_page_for_writepage(wbc, page);
	unlock_page(page);
	return 0;
}

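/* ->writepage entry point: write one page, then submit any cached ioend. */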
STATIC int
xfs_vm_writepage(
	struct page		*page,
	struct writeback_control *wbc)
{
	struct xfs_writepage_ctx wpc = {
		.io_type = XFS_IO_INVALID,
	};
	int			ret;

	ret = xfs_do_writepage(page, wbc, &wpc);
	if (wpc.ioend)
		ret = xfs_submit_ioend(wbc, wpc.ioend, ret);
	return ret;
}

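/*
 * ->writepages entry point: push a range of dirty pages through
 * write_cache_pages(), sharing a single writeback context across the range.
 */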
STATIC int
xfs_vm_writepages(
	struct address_space	*mapping,
	struct writeback_control *wbc)
{
	struct xfs_writepage_ctx wpc = {
		.io_type = XFS_IO_INVALID,
	};
	int			ret;

	xfs_iflags_clear(XFS_I(mapping->host), XFS_ITRUNCATED);
	if (dax_mapping(mapping))
		return dax_writeback_mapping_range(mapping,
				xfs_find_bdev_for_inode(mapping->host), wbc);

	ret = write_cache_pages(mapping, wbc, xfs_do_writepage, &wpc);
	if (wpc.ioend)
		ret = xfs_submit_ioend(wbc, wpc.ioend, ret);
	return ret;
}

/*
 * Called to move a page into cleanable state - and from there
 * to be released. The page should already be clean. We always
 * have buffer heads in this call.
 *
 * Returns 1 if the page is ok to release, 0 otherwise.
 */
STATIC int
xfs_vm_releasepage(
	struct page		*page,
	gfp_t			gfp_mask)
{
	int			delalloc, unwritten;

	trace_xfs_releasepage(page->mapping->host, page, 0, 0);

	/*
	 * mm accommodates an old ext3 case where clean pages might not have had
	 * the dirty bit cleared. Thus, it can send actual dirty pages to
	 * ->releasepage() via shrink_active_list(). Conversely,
	 * block_invalidatepage() can send pages that are still marked dirty but
	 * otherwise have invalidated buffers.
	 *
	 * We want to release the latter to avoid unnecessary buildup of the
	 * LRU, so xfs_vm_invalidatepage() clears the page dirty flag on pages
	 * that are entirely invalidated and need to be released.  Hence the
	 * only time we should get dirty pages here is through
	 * shrink_active_list() and so we can simply skip those now.
	 *
	 * warn if we've left any lingering delalloc/unwritten buffers on clean
	 * or invalidated pages we are about to release.
	 */
	if (PageDirty(page))
		return 0;

	xfs_count_page_state(page, &delalloc, &unwritten);

	if (WARN_ON_ONCE(delalloc))
		return 0;
	if (WARN_ON_ONCE(unwritten))
		return 0;

	return try_to_free_buffers(page);
}

/*
 * If this is O_DIRECT or the mpage code calling tell them how large the mapping
 * is, so that we can avoid repeated get_blocks calls.
 *
 * If the mapping spans EOF, then we have to break the mapping up as the mapping
 * for blocks beyond EOF must be marked new so that sub block regions can be
 * correctly zeroed. We can't do this for mappings within EOF unless the mapping
 * was just allocated or is unwritten, otherwise the callers would overwrite
 * existing data with zeros. Hence we have to split the mapping into a range up
 * to and including EOF, and a second mapping for beyond EOF.
 */
static void
xfs_map_trim_size(
	struct inode		*inode,
	sector_t		iblock,
	struct buffer_head	*bh_result,
	struct xfs_bmbt_irec	*imap,
	xfs_off_t		offset,
	ssize_t			size)
{
	xfs_off_t		mapping_size;

	mapping_size = imap->br_startoff + imap->br_blockcount - iblock;
	mapping_size <<= inode->i_blkbits;

	ASSERT(mapping_size > 0);
	if (mapping_size > size)
		mapping_size = size;
	if (offset < i_size_read(inode) &&
	    offset + mapping_size >= i_size_read(inode)) {
		/* limit mapping to block that spans EOF */
		mapping_size = roundup_64(i_size_read(inode) - offset,
					  i_blocksize(inode));
	}
	if (mapping_size > LONG_MAX)
		mapping_size = LONG_MAX;

	bh_result->b_size = mapping_size;
}

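/*
 * Read-side get_blocks callback, used by the mpage read paths and by
 * xfs_vm_bmap(); mapping blocks for write (create != 0) is not supported.
 */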
static int
xfs_get_blocks(
	struct inode		*inode,
	sector_t		iblock,
	struct buffer_head	*bh_result,
	int			create)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	xfs_fileoff_t		offset_fsb, end_fsb;
	int			error = 0;
	int			lockmode = 0;
	struct xfs_bmbt_irec	imap;
	int			nimaps = 1;
	xfs_off_t		offset;
	ssize_t			size;

	BUG_ON(create);

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	offset = (xfs_off_t)iblock << inode->i_blkbits;
	ASSERT(bh_result->b_size >= i_blocksize(inode));
	size = bh_result->b_size;

	if (offset >= i_size_read(inode))
		return 0;

	/*
	 * Direct I/O is usually done on preallocated files, so try getting
	 * a block mapping without an exclusive lock first.
	 */
	lockmode = xfs_ilock_data_map_shared(ip);

	ASSERT(offset <= mp->m_super->s_maxbytes);
	if (offset + size > mp->m_super->s_maxbytes)
		size = mp->m_super->s_maxbytes - offset;
	end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + size);
	offset_fsb = XFS_B_TO_FSBT(mp, offset);

	error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb,
				&imap, &nimaps, XFS_BMAPI_ENTIRE);
	if (error)
		goto out_unlock;

	if (nimaps) {
		trace_xfs_get_blocks_found(ip, offset, size,
			imap.br_state == XFS_EXT_UNWRITTEN ?
				XFS_IO_UNWRITTEN : XFS_IO_OVERWRITE, &imap);
		xfs_iunlock(ip, lockmode);
	} else {
		trace_xfs_get_blocks_notfound(ip, offset, size);
		goto out_unlock;
	}

	/* trim mapping down to size requested */
	xfs_map_trim_size(inode, iblock, bh_result, &imap, offset, size);

	/*
	 * For unwritten extents do not report a disk address in the buffered
	 * read case (treat as if we're reading into a hole).
	 */
	if (xfs_bmap_is_real_extent(&imap))
		xfs_map_buffer(inode, bh_result, &imap, offset);

	/*
	 * If this is a realtime file, data may be on a different device
	 * to that pointed to from the buffer_head b_bdev currently.
	 */
	bh_result->b_bdev = xfs_find_bdev_for_inode(inode);
	return 0;

out_unlock:
	xfs_iunlock(ip, lockmode);
	return error;
}

STATIC ssize_t
xfs_vm_direct_IO(
	struct kiocb		*iocb,
	struct iov_iter		*iter)
{
	/*
	 * We just need the method present so that open/fcntl allow direct I/O.
	 */
	return -EINVAL;
}

STATIC sector_t
xfs_vm_bmap(
	struct address_space	*mapping,
	sector_t		block)
{
	struct inode		*inode = (struct inode *)mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);

	trace_xfs_vm_bmap(XFS_I(inode));

	/*
	 * The swap code (ab-)uses ->bmap to get a block mapping and then
	 * bypasses the file system for actual I/O.  We really can't allow
	 * that on reflink inodes, so we have to skip out here.  And yes,
	 * 0 is the magic code for a bmap error.
	 *
	 * Since we don't pass back blockdev info, we can't return bmap
	 * information for rt files either.
	 */
	if (xfs_is_reflink_inode(ip) || XFS_IS_REALTIME_INODE(ip))
		return 0;

	filemap_write_and_wait(mapping);
	return generic_block_bmap(mapping, block, xfs_get_blocks);
}

STATIC int
xfs_vm_readpage(
	struct file		*unused,
	struct page		*page)
{
	trace_xfs_vm_readpage(page->mapping->host, 1);
	return mpage_readpage(page, xfs_get_blocks);
}

STATIC int
xfs_vm_readpages(
	struct file		*unused,
	struct address_space	*mapping,
	struct list_head	*pages,
	unsigned		nr_pages)
{
	trace_xfs_vm_readpages(mapping->host, nr_pages);
	return mpage_readpages(mapping, pages, nr_pages, xfs_get_blocks);
}

/*
 * This is basically a copy of __set_page_dirty_buffers() with one
 * small tweak: buffers beyond EOF do not get marked dirty. If we mark them
 * dirty, we'll never be able to clean them because we don't write buffers
 * beyond EOF, and that means we can't invalidate pages that span EOF
 * that have been marked dirty. Further, the dirty state can leak into
 * the file interior if the file is extended, resulting in all sorts of
 * bad things happening as the state does not match the underlying data.
 *
 * XXX: this really indicates that bufferheads in XFS need to die. Warts like
 * this only exist because of bufferheads and how the generic code manages them.
 */
STATIC int
xfs_vm_set_page_dirty(
	struct page		*page)
{
	struct address_space	*mapping = page->mapping;
	struct inode		*inode = mapping->host;
	loff_t			end_offset;
	loff_t			offset;
	int			newly_dirty;

	if (unlikely(!mapping))
		return !TestSetPageDirty(page);

	end_offset = i_size_read(inode);
	offset = page_offset(page);

	spin_lock(&mapping->private_lock);
	if (page_has_buffers(page)) {
		struct buffer_head *head = page_buffers(page);
		struct buffer_head *bh = head;

		do {
			if (offset < end_offset)
				set_buffer_dirty(bh);
			bh = bh->b_this_page;
			offset += i_blocksize(inode);
		} while (bh != head);
	}
	/*
	 * Lock out page->mem_cgroup migration to keep PageDirty
	 * synchronized with per-memcg dirty page counters.
	 */
	lock_page_memcg(page);
	newly_dirty = !TestSetPageDirty(page);
	spin_unlock(&mapping->private_lock);

	if (newly_dirty) {
		/* sigh - __set_page_dirty() is static, so copy it here, too */
		unsigned long flags;

		spin_lock_irqsave(&mapping->tree_lock, flags);
		if (page->mapping) {	/* Race with truncate? */
			WARN_ON_ONCE(!PageUptodate(page));
			account_page_dirtied(page, mapping);
			radix_tree_tag_set(&mapping->page_tree,
					page_index(page), PAGECACHE_TAG_DIRTY);
		}
		spin_unlock_irqrestore(&mapping->tree_lock, flags);
	}
	unlock_page_memcg(page);
	if (newly_dirty)
		__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
	return newly_dirty;
}

const struct address_space_operations xfs_address_space_operations = {
	.readpage		= xfs_vm_readpage,
	.readpages		= xfs_vm_readpages,
	.writepage		= xfs_vm_writepage,
	.writepages		= xfs_vm_writepages,
	.set_page_dirty		= xfs_vm_set_page_dirty,
	.releasepage		= xfs_vm_releasepage,
	.invalidatepage		= xfs_vm_invalidatepage,
	.bmap			= xfs_vm_bmap,
	.direct_IO		= xfs_vm_direct_IO,
	.migratepage		= buffer_migrate_page,
	.is_partially_uptodate  = block_is_partially_uptodate,
	.error_remove_page	= generic_error_remove_page,
};