/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_inode_item.h"
#include "xfs_alloc.h"
#include "xfs_error.h"
#include "xfs_iomap.h"
#include "xfs_trace.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_bmap_btree.h"
#include "xfs_reflink.h"
#include <linux/gfp.h>
#include <linux/mpage.h>
#include <linux/pagevec.h>
#include <linux/writeback.h>

/*
 * structure owned by writepages passed to individual writepage calls
 */
struct xfs_writepage_ctx {
	struct xfs_bmbt_irec    imap;
	bool			imap_valid;
	unsigned int		io_type;
	struct xfs_ioend	*ioend;
	sector_t		last_block;
};

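/*
 * Walk the buffers attached to this page and report whether any of them
 * are in the delalloc or unwritten state.
 */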
void
xfs_count_page_state(
	struct page		*page,
	int			*delalloc,
	int			*unwritten)
{
	struct buffer_head	*bh, *head;

	*delalloc = *unwritten = 0;

	bh = head = page_buffers(page);
	do {
		if (buffer_unwritten(bh))
			(*unwritten) = 1;
		else if (buffer_delay(bh))
			(*delalloc) = 1;
	} while ((bh = bh->b_this_page) != head);
}

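/*
 * Return the block device backing this inode's data: the realtime device
 * for realtime inodes, the data device otherwise.
 */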
struct block_device *
xfs_find_bdev_for_inode(
	struct inode		*inode)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;

	if (XFS_IS_REALTIME_INODE(ip))
		return mp->m_rtdev_targp->bt_bdev;
	else
		return mp->m_ddev_targp->bt_bdev;
}

/*
 * We're now finished for good with this page.  Update the page state via the
 * associated buffer_heads, paying attention to the start and end offsets that
 * we need to process on the page.
 *
 * Landmine Warning: bh->b_end_io() will call end_page_writeback() on the last
 * buffer in the IO. Once it does this, it is unsafe to access the bufferhead or
 * the page at all, as we may be racing with memory reclaim and it can free both
 * the bufferhead chain and the page as it will see the page as clean and
 * unused.
 */
static void
xfs_finish_page_writeback(
	struct inode		*inode,
	struct bio_vec		*bvec,
	int			error)
{
	unsigned int		end = bvec->bv_offset + bvec->bv_len - 1;
	struct buffer_head	*head, *bh, *next;
	unsigned int		off = 0;
	unsigned int		bsize;

	ASSERT(bvec->bv_offset < PAGE_SIZE);
	ASSERT((bvec->bv_offset & (i_blocksize(inode) - 1)) == 0);
	ASSERT(end < PAGE_SIZE);
	ASSERT((bvec->bv_len & (i_blocksize(inode) - 1)) == 0);

	bh = head = page_buffers(bvec->bv_page);

	bsize = bh->b_size;
	do {
		if (off > end)
			break;
		next = bh->b_this_page;
		if (off < bvec->bv_offset)
			goto next_bh;
		bh->b_end_io(bh, !error);
next_bh:
		off += bsize;
	} while ((bh = next) != head);
}

/*
 * We're now finished for good with this ioend structure.  Update the page
 * state, release holds on bios, and finally free up memory.  Do not use the
 * ioend after this.
 */
STATIC void
xfs_destroy_ioend(
	struct xfs_ioend	*ioend,
	int			error)
{
	struct inode		*inode = ioend->io_inode;
	struct bio		*last = ioend->io_bio;
	struct bio		*bio, *next;

	for (bio = &ioend->io_inline_bio; bio; bio = next) {
		struct bio_vec	*bvec;
		int		i;

		/*
		 * For the last bio, bi_private points to the ioend, so we
		 * need to explicitly end the iteration here.
		 */
		if (bio == last)
			next = NULL;
		else
			next = bio->bi_private;

		/* walk each page on bio, ending page IO on them */
		bio_for_each_segment_all(bvec, bio, i)
			xfs_finish_page_writeback(inode, bvec, error);

		bio_put(bio);
	}
}

/*
 * Fast and loose check if this write could update the on-disk inode size.
 */
static inline bool xfs_ioend_is_append(struct xfs_ioend *ioend)
{
	return ioend->io_offset + ioend->io_size >
		XFS_I(ioend->io_inode)->i_d.di_size;
}

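/*
 * Reserve log space up front for the file size update that may be needed
 * at I/O completion; the transaction is carried on the ioend and committed
 * from the completion path.
 */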
STATIC int
xfs_setfilesize_trans_alloc(
	struct xfs_ioend	*ioend)
{
	struct xfs_mount	*mp = XFS_I(ioend->io_inode)->i_mount;
	struct xfs_trans	*tp;
	int			error;

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_fsyncts, 0, 0, 0, &tp);
	if (error)
		return error;

	ioend->io_append_trans = tp;

	/*
	 * We may pass freeze protection with a transaction.  So tell lockdep
	 * we released it.
	 */
	__sb_writers_release(ioend->io_inode->i_sb, SB_FREEZE_FS);
	/*
	 * We hand off the transaction to the completion thread now, so
	 * clear the flag here.
	 */
	current_restore_flags_nested(&tp->t_pflags, PF_MEMALLOC_NOFS);
	return 0;
}

/*
 * Update on-disk file size now that data has been written to disk.
 */
STATIC int
__xfs_setfilesize(
	struct xfs_inode	*ip,
	struct xfs_trans	*tp,
	xfs_off_t		offset,
	size_t			size)
{
	xfs_fsize_t		isize;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	isize = xfs_new_eof(ip, offset + size);
	if (!isize) {
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
		xfs_trans_cancel(tp);
		return 0;
	}

	trace_xfs_setfilesize(ip, offset, size);

	ip->i_d.di_size = isize;
	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);

	return xfs_trans_commit(tp);
}

int
xfs_setfilesize(
	struct xfs_inode	*ip,
	xfs_off_t		offset,
	size_t			size)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp;
	int			error;

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_fsyncts, 0, 0, 0, &tp);
	if (error)
		return error;

	return __xfs_setfilesize(ip, tp, offset, size);
}

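/*
 * Commit the file size update transaction queued on a completed ioend,
 * or cancel it if the I/O failed.
 */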
STATIC int
xfs_setfilesize_ioend(
	struct xfs_ioend	*ioend,
	int			error)
{
	struct xfs_inode	*ip = XFS_I(ioend->io_inode);
	struct xfs_trans	*tp = ioend->io_append_trans;

	/*
	 * The transaction may have been allocated in the I/O submission thread,
	 * thus we need to mark ourselves as being in a transaction manually.
	 * Similarly for freeze protection.
	 */
	current_set_flags_nested(&tp->t_pflags, PF_MEMALLOC_NOFS);
	__sb_writers_acquired(VFS_I(ip)->i_sb, SB_FREEZE_FS);

	/* we abort the update if there was an IO error */
	if (error) {
		xfs_trans_cancel(tp);
		return error;
	}

	return __xfs_setfilesize(ip, tp, ioend->io_offset, ioend->io_size);
}

/*
 * IO write completion.
 */
STATIC void
xfs_end_io(
	struct work_struct *work)
{
	struct xfs_ioend	*ioend =
		container_of(work, struct xfs_ioend, io_work);
	struct xfs_inode	*ip = XFS_I(ioend->io_inode);
	xfs_off_t		offset = ioend->io_offset;
	size_t			size = ioend->io_size;
	int			error;

	/*
	 * Just clean up the in-memory structures if the fs has been shut down.
	 */
	if (XFS_FORCED_SHUTDOWN(ip->i_mount)) {
		error = -EIO;
		goto done;
	}

	/*
	 * Clean up any COW blocks on an I/O error.
	 */
	error = blk_status_to_errno(ioend->io_bio->bi_status);
	if (unlikely(error)) {
		switch (ioend->io_type) {
		case XFS_IO_COW:
			xfs_reflink_cancel_cow_range(ip, offset, size, true);
			break;
		}

		goto done;
	}

	/*
	 * Success:  commit the COW or unwritten blocks if needed.
	 */
	switch (ioend->io_type) {
	case XFS_IO_COW:
		error = xfs_reflink_end_cow(ip, offset, size);
		break;
	case XFS_IO_UNWRITTEN:
		error = xfs_iomap_write_unwritten(ip, offset, size);
		break;
	default:
		ASSERT(!xfs_ioend_is_append(ioend) || ioend->io_append_trans);
		break;
	}

done:
	if (ioend->io_append_trans)
		error = xfs_setfilesize_ioend(ioend, error);
	xfs_destroy_ioend(ioend, error);
}

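/*
 * Bio completion handler: defer completions that need transaction context
 * (unwritten extent conversion, COW remapping, size updates) to a
 * workqueue, and finish everything else immediately.
 */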
STATIC void
xfs_end_bio(
	struct bio		*bio)
{
	struct xfs_ioend	*ioend = bio->bi_private;
	struct xfs_mount	*mp = XFS_I(ioend->io_inode)->i_mount;

	if (ioend->io_type == XFS_IO_UNWRITTEN || ioend->io_type == XFS_IO_COW)
		queue_work(mp->m_unwritten_workqueue, &ioend->io_work);
	else if (ioend->io_append_trans)
		queue_work(mp->m_data_workqueue, &ioend->io_work);
	else
		xfs_destroy_ioend(ioend, blk_status_to_errno(bio->bi_status));
}

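/*
 * Look up the extent mapping covering @offset for the given I/O type,
 * allocating real blocks for delalloc extents on the data fork if needed.
 */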
STATIC int
xfs_map_blocks(
	struct inode		*inode,
	loff_t			offset,
	struct xfs_bmbt_irec	*imap,
	int			type)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	ssize_t			count = i_blocksize(inode);
	xfs_fileoff_t		offset_fsb, end_fsb;
	int			error = 0;
	int			bmapi_flags = XFS_BMAPI_ENTIRE;
	int			nimaps = 1;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	ASSERT(type != XFS_IO_COW);
	if (type == XFS_IO_UNWRITTEN)
		bmapi_flags |= XFS_BMAPI_IGSTATE;

	xfs_ilock(ip, XFS_ILOCK_SHARED);
	ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE ||
	       (ip->i_df.if_flags & XFS_IFEXTENTS));
	ASSERT(offset <= mp->m_super->s_maxbytes);

	if (offset + count > mp->m_super->s_maxbytes)
		count = mp->m_super->s_maxbytes - offset;
	end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + count);
	offset_fsb = XFS_B_TO_FSBT(mp, offset);
	error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb,
				imap, &nimaps, bmapi_flags);
	/*
	 * Truncate an overwrite extent if there's a pending CoW
	 * reservation before the end of this extent.  This forces us
	 * to come back to writepage to take care of the CoW.
	 */
	if (nimaps && type == XFS_IO_OVERWRITE)
		xfs_reflink_trim_irec_to_next_cow(ip, offset_fsb, imap);
	xfs_iunlock(ip, XFS_ILOCK_SHARED);

	if (error)
		return error;

	if (type == XFS_IO_DELALLOC &&
	    (!nimaps || isnullstartblock(imap->br_startblock))) {
		error = xfs_iomap_write_allocate(ip, XFS_DATA_FORK, offset,
				imap);
		if (!error)
			trace_xfs_map_blocks_alloc(ip, offset, count, type, imap);
		return error;
	}

#ifdef DEBUG
	if (type == XFS_IO_UNWRITTEN) {
		ASSERT(nimaps);
		ASSERT(imap->br_startblock != HOLESTARTBLOCK);
		ASSERT(imap->br_startblock != DELAYSTARTBLOCK);
	}
#endif
	if (nimaps)
		trace_xfs_map_blocks_found(ip, offset, count, type, imap);
	return 0;
}

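/*
 * Return true if the cached mapping covers the file offset we are writing.
 */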
STATIC bool
xfs_imap_valid(
	struct inode		*inode,
	struct xfs_bmbt_irec	*imap,
	xfs_off_t		offset)
{
	offset >>= inode->i_blkbits;

	return offset >= imap->br_startoff &&
		offset < imap->br_startoff + imap->br_blockcount;
}

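/*
 * Transition a locked, mapped buffer into the async write state: mark it
 * uptodate and clear the dirty bit before the bio is submitted.
 */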
STATIC void
xfs_start_buffer_writeback(
	struct buffer_head	*bh)
{
	ASSERT(buffer_mapped(bh));
	ASSERT(buffer_locked(bh));
	ASSERT(!buffer_delay(bh));
	ASSERT(!buffer_unwritten(bh));

	mark_buffer_async_write(bh);
	set_buffer_uptodate(bh);
	clear_buffer_dirty(bh);
}

STATIC void
xfs_start_page_writeback(
	struct page		*page,
	int			clear_dirty)
{
	ASSERT(PageLocked(page));
	ASSERT(!PageWriteback(page));

	/*
	 * if the page was not fully cleaned, we need to ensure that the higher
	 * layers come back to it correctly. That means we need to keep the page
	 * dirty, and for WB_SYNC_ALL writeback we need to ensure the
	 * PAGECACHE_TAG_TOWRITE index mark is not removed so another attempt to
	 * write this page in this writeback sweep will be made.
	 */
	if (clear_dirty) {
		clear_page_dirty_for_io(page);
		set_page_writeback(page);
	} else
		set_page_writeback_keepwrite(page);

	unlock_page(page);
}

static inline int xfs_bio_add_buffer(struct bio *bio, struct buffer_head *bh)
{
	return bio_add_page(bio, bh->b_page, bh->b_size, bh_offset(bh));
}

/*
 * Submit the bio for an ioend. We are passed an ioend with a bio attached to
 * it, and we submit that bio. The ioend may be used for multiple bio
 * submissions, so we only want to allocate an append transaction for the ioend
 * once. In the case of multiple bio submission, each bio will take an IO
 * reference to the ioend to ensure that the ioend completion is only done once
 * all bios have been submitted and the ioend is really done.
 *
 * If @status is non-zero, it means that we have a situation where some part of
 * the submission process has failed after we have marked pages for writeback
 * and unlocked them. In this situation, we need to fail the bio and ioend
 * rather than submit it to IO. This typically only happens on a filesystem
 * shutdown.
 */
STATIC int
xfs_submit_ioend(
	struct writeback_control *wbc,
	struct xfs_ioend	*ioend,
	int			status)
{
	/* Convert CoW extents to regular */
	if (!status && ioend->io_type == XFS_IO_COW) {
		status = xfs_reflink_convert_cow(XFS_I(ioend->io_inode),
				ioend->io_offset, ioend->io_size);
	}

	/* Reserve log space if we might write beyond the on-disk inode size. */
	if (!status &&
	    ioend->io_type != XFS_IO_UNWRITTEN &&
	    xfs_ioend_is_append(ioend) &&
	    !ioend->io_append_trans)
		status = xfs_setfilesize_trans_alloc(ioend);

	ioend->io_bio->bi_private = ioend;
	ioend->io_bio->bi_end_io = xfs_end_bio;
	ioend->io_bio->bi_opf = REQ_OP_WRITE | wbc_to_write_flags(wbc);

	/*
	 * If we are failing the IO now, just mark the ioend with an
	 * error and finish it. This will run IO completion immediately
	 * as there is only one reference to the ioend at this point in
	 * time.
	 */
	if (status) {
		ioend->io_bio->bi_status = errno_to_blk_status(status);
		bio_endio(ioend->io_bio);
		return status;
	}

	ioend->io_bio->bi_write_hint = ioend->io_inode->i_write_hint;
	submit_bio(ioend->io_bio);
	return 0;
}

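/*
 * Point a freshly allocated bio at the device and sector described by a
 * buffer_head.
 */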
static void
xfs_init_bio_from_bh(
	struct bio		*bio,
	struct buffer_head	*bh)
{
	bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9);
	bio->bi_bdev = bh->b_bdev;
}

static struct xfs_ioend *
xfs_alloc_ioend(
	struct inode		*inode,
	unsigned int		type,
	xfs_off_t		offset,
	struct buffer_head	*bh)
{
	struct xfs_ioend	*ioend;
	struct bio		*bio;

	bio = bio_alloc_bioset(GFP_NOFS, BIO_MAX_PAGES, xfs_ioend_bioset);
	xfs_init_bio_from_bh(bio, bh);

	ioend = container_of(bio, struct xfs_ioend, io_inline_bio);
	INIT_LIST_HEAD(&ioend->io_list);
	ioend->io_type = type;
	ioend->io_inode = inode;
	ioend->io_size = 0;
	ioend->io_offset = offset;
	INIT_WORK(&ioend->io_work, xfs_end_io);
	ioend->io_append_trans = NULL;
	ioend->io_bio = bio;
	return ioend;
}

/*
 * Allocate a new bio, and chain the old bio to the new one.
 *
 * Note that we have to perform the chaining in this unintuitive order
 * so that the bi_private linkage is set up in the right direction for the
 * traversal in xfs_destroy_ioend().
 */
static void
xfs_chain_bio(
	struct xfs_ioend	*ioend,
	struct writeback_control *wbc,
	struct buffer_head	*bh)
{
	struct bio *new;

	new = bio_alloc(GFP_NOFS, BIO_MAX_PAGES);
	xfs_init_bio_from_bh(new, bh);

	bio_chain(ioend->io_bio, new);
	bio_get(ioend->io_bio);		/* for xfs_destroy_ioend */
	ioend->io_bio->bi_opf = REQ_OP_WRITE | wbc_to_write_flags(wbc);
	ioend->io_bio->bi_write_hint = ioend->io_inode->i_write_hint;
	submit_bio(ioend->io_bio);
	ioend->io_bio = new;
}

/*
 * Test to see if we've been building up a completion structure for
 * earlier buffers -- if so, we try to append to this ioend if we
 * can, otherwise we finish off any current ioend and start another.
 * The ioend we finish off is queued on @iolist so that the caller can
 * submit it once it has finished processing the dirty page.
 */
STATIC void
xfs_add_to_ioend(
	struct inode		*inode,
	struct buffer_head	*bh,
	xfs_off_t		offset,
	struct xfs_writepage_ctx *wpc,
	struct writeback_control *wbc,
	struct list_head	*iolist)
{
	if (!wpc->ioend || wpc->io_type != wpc->ioend->io_type ||
	    bh->b_blocknr != wpc->last_block + 1 ||
	    offset != wpc->ioend->io_offset + wpc->ioend->io_size) {
		if (wpc->ioend)
			list_add(&wpc->ioend->io_list, iolist);
		wpc->ioend = xfs_alloc_ioend(inode, wpc->io_type, offset, bh);
	}

	/*
	 * If the buffer doesn't fit into the bio we need to allocate a new
	 * one.  This shouldn't happen more than once for a given buffer.
	 */
	while (xfs_bio_add_buffer(wpc->ioend->io_bio, bh) != bh->b_size)
		xfs_chain_bio(wpc->ioend, wbc, bh);

	wpc->ioend->io_size += bh->b_size;
	wpc->last_block = bh->b_blocknr;
	xfs_start_buffer_writeback(bh);
}

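/*
 * Translate the file offset into the on-disk block number for this buffer
 * using the extent mapping, and mark the buffer mapped.
 */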
STATIC void
xfs_map_buffer(
	struct inode		*inode,
	struct buffer_head	*bh,
	struct xfs_bmbt_irec	*imap,
	xfs_off_t		offset)
{
	sector_t		bn;
	struct xfs_mount	*m = XFS_I(inode)->i_mount;
	xfs_off_t		iomap_offset = XFS_FSB_TO_B(m, imap->br_startoff);
	xfs_daddr_t		iomap_bn = xfs_fsb_to_db(XFS_I(inode), imap->br_startblock);

	ASSERT(imap->br_startblock != HOLESTARTBLOCK);
	ASSERT(imap->br_startblock != DELAYSTARTBLOCK);

	bn = (iomap_bn >> (inode->i_blkbits - BBSHIFT)) +
	      ((offset - iomap_offset) >> inode->i_blkbits);

	ASSERT(bn || XFS_IS_REALTIME_INODE(XFS_I(inode)));

	bh->b_blocknr = bn;
	set_buffer_mapped(bh);
}

STATIC void
xfs_map_at_offset(
	struct inode		*inode,
	struct buffer_head	*bh,
	struct xfs_bmbt_irec	*imap,
	xfs_off_t		offset)
{
	ASSERT(imap->br_startblock != HOLESTARTBLOCK);
	ASSERT(imap->br_startblock != DELAYSTARTBLOCK);

	xfs_map_buffer(inode, bh, imap, offset);
	set_buffer_mapped(bh);
	clear_buffer_delay(bh);
	clear_buffer_unwritten(bh);
}

/*
 * Test if a given page contains at least one buffer of a given @type.
 * If @check_all_buffers is true, then we walk all the buffers in the page to
 * try to find one of the type passed in. If it is not set, then the caller only
 * needs to check the first buffer on the page for a match.
 */
STATIC bool
xfs_check_page_type(
	struct page		*page,
	unsigned int		type,
	bool			check_all_buffers)
{
	struct buffer_head	*bh;
	struct buffer_head	*head;

	if (PageWriteback(page))
		return false;
	if (!page->mapping)
		return false;
	if (!page_has_buffers(page))
		return false;

	bh = head = page_buffers(page);
	do {
		if (buffer_unwritten(bh)) {
			if (type == XFS_IO_UNWRITTEN)
				return true;
		} else if (buffer_delay(bh)) {
			if (type == XFS_IO_DELALLOC)
				return true;
		} else if (buffer_dirty(bh) && buffer_mapped(bh)) {
			if (type == XFS_IO_OVERWRITE)
				return true;
		}

		/* If we are only checking the first buffer, we are done now. */
		if (!check_all_buffers)
			break;
	} while ((bh = bh->b_this_page) != head);

	return false;
}

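/*
 * ->invalidatepage hook: trace the range being invalidated and let
 * block_invalidatepage() tear down the buffers over that range.
 */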
STATIC void
xfs_vm_invalidatepage(
	struct page		*page,
	unsigned int		offset,
	unsigned int		length)
{
	trace_xfs_invalidatepage(page->mapping->host, page, offset,
				 length);
	block_invalidatepage(page, offset, length);
}

/*
 * If the page has delalloc buffers on it, we need to punch them out before we
 * invalidate the page. If we don't, we leave a stale delalloc mapping on the
 * inode that can trip a BUG() in xfs_get_blocks() later on if a direct IO read
 * is done on that same region - the delalloc extent is returned when none is
 * supposed to be there.
 *
 * We prevent this by truncating away the delalloc regions on the page before
 * invalidating it. Because they are delalloc, we can do this without needing a
 * transaction. Indeed - if we get ENOSPC errors, we have to be able to do this
 * truncation without a transaction as there is no space left for block
 * reservation (typically why we see an ENOSPC in writeback).
 *
 * This is not a performance critical path, so for now just do the punching a
 * buffer head at a time.
 */
STATIC void
xfs_aops_discard_page(
	struct page		*page)
{
	struct inode		*inode = page->mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	struct buffer_head	*bh, *head;
	loff_t			offset = page_offset(page);

	if (!xfs_check_page_type(page, XFS_IO_DELALLOC, true))
		goto out_invalidate;

	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
		goto out_invalidate;

	xfs_alert(ip->i_mount,
		"page discard on page %p, inode 0x%llx, offset %llu.",
			page, ip->i_ino, offset);

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	bh = head = page_buffers(page);
	do {
		int		error;
		xfs_fileoff_t	start_fsb;

		if (!buffer_delay(bh))
			goto next_buffer;

		start_fsb = XFS_B_TO_FSBT(ip->i_mount, offset);
		error = xfs_bmap_punch_delalloc_range(ip, start_fsb, 1);
		if (error) {
			/* something screwed, just bail */
			if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) {
				xfs_alert(ip->i_mount,
			"page discard unable to remove delalloc mapping.");
			}
			break;
		}
next_buffer:
		offset += i_blocksize(inode);

	} while ((bh = bh->b_this_page) != head);

	xfs_iunlock(ip, XFS_ILOCK_EXCL);
out_invalidate:
	xfs_vm_invalidatepage(page, 0, PAGE_SIZE);
	return;
}

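/*
 * Check whether the write at @offset is covered by a COW mapping and, if
 * so, make sure real blocks are allocated for it and cache the mapping in
 * the writepage context.
 */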
static int
xfs_map_cow(
	struct xfs_writepage_ctx *wpc,
	struct inode		*inode,
	loff_t			offset,
	unsigned int		*new_type)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_bmbt_irec	imap;
	bool			is_cow = false;
	int			error;

	/*
	 * If we already have a valid COW mapping keep using it.
	 */
	if (wpc->io_type == XFS_IO_COW) {
		wpc->imap_valid = xfs_imap_valid(inode, &wpc->imap, offset);
		if (wpc->imap_valid) {
			*new_type = XFS_IO_COW;
			return 0;
		}
	}

	/*
	 * Else we need to check if there is a COW mapping at this offset.
	 */
	xfs_ilock(ip, XFS_ILOCK_SHARED);
	is_cow = xfs_reflink_find_cow_mapping(ip, offset, &imap);
	xfs_iunlock(ip, XFS_ILOCK_SHARED);

	if (!is_cow)
		return 0;

	/*
	 * And if the COW mapping has a delayed extent here we need to
	 * allocate real space for it now.
	 */
	if (isnullstartblock(imap.br_startblock)) {
		error = xfs_iomap_write_allocate(ip, XFS_COW_FORK, offset,
				&imap);
		if (error)
			return error;
	}

	wpc->io_type = *new_type = XFS_IO_COW;
	wpc->imap_valid = true;
	wpc->imap = imap;
	return 0;
}

/*
 * We implement an immediate ioend submission policy here to avoid needing to
 * chain multiple ioends and hence nest mempool allocations which can violate
 * forward progress guarantees we need to provide. The current ioend we are
 * adding buffers to is cached on the writepage context, and if the new buffer
 * does not append to the cached ioend it will create a new ioend and cache that
 * instead.
 *
 * If a new ioend is created and cached, the old ioend is returned and queued
 * locally for submission once the entire page is processed or an error has been
 * detected.  While ioends are submitted immediately after they are completed,
 * batching optimisations are provided by higher level block plugging.
 *
 * At the end of a writeback pass, there will be a cached ioend remaining on the
 * writepage context that the caller will need to submit.
 */
static int
xfs_writepage_map(
	struct xfs_writepage_ctx *wpc,
	struct writeback_control *wbc,
	struct inode		*inode,
	struct page		*page,
	loff_t			offset,
	uint64_t		end_offset)
{
	LIST_HEAD(submit_list);
	struct xfs_ioend	*ioend, *next;
	struct buffer_head	*bh, *head;
	ssize_t			len = i_blocksize(inode);
	int			error = 0;
	int			count = 0;
	int			uptodate = 1;
	unsigned int		new_type;

	bh = head = page_buffers(page);
	offset = page_offset(page);
	do {
		if (offset >= end_offset)
			break;
		if (!buffer_uptodate(bh))
			uptodate = 0;

		/*
		 * set_page_dirty dirties all buffers in a page, independent
		 * of their state.  The dirty state however is entirely
		 * meaningless for holes (!mapped && uptodate), so skip
		 * buffers covering holes here.
		 */
		if (!buffer_mapped(bh) && buffer_uptodate(bh)) {
			wpc->imap_valid = false;
			continue;
		}

		if (buffer_unwritten(bh))
			new_type = XFS_IO_UNWRITTEN;
		else if (buffer_delay(bh))
			new_type = XFS_IO_DELALLOC;
		else if (buffer_uptodate(bh))
			new_type = XFS_IO_OVERWRITE;
		else {
			if (PageUptodate(page))
				ASSERT(buffer_mapped(bh));
			/*
			 * This buffer is not uptodate and will not be
			 * written to disk.  Ensure that we will put any
			 * subsequent writeable buffers into a new
			 * ioend.
			 */
			wpc->imap_valid = false;
			continue;
		}

		if (xfs_is_reflink_inode(XFS_I(inode))) {
			error = xfs_map_cow(wpc, inode, offset, &new_type);
			if (error)
				goto out;
		}

		if (wpc->io_type != new_type) {
			wpc->io_type = new_type;
			wpc->imap_valid = false;
		}

		if (wpc->imap_valid)
			wpc->imap_valid = xfs_imap_valid(inode, &wpc->imap,
							 offset);
		if (!wpc->imap_valid) {
			error = xfs_map_blocks(inode, offset, &wpc->imap,
					     wpc->io_type);
			if (error)
				goto out;
			wpc->imap_valid = xfs_imap_valid(inode, &wpc->imap,
							 offset);
		}
		if (wpc->imap_valid) {
			lock_buffer(bh);
			if (wpc->io_type != XFS_IO_OVERWRITE)
				xfs_map_at_offset(inode, bh, &wpc->imap, offset);
			xfs_add_to_ioend(inode, bh, offset, wpc, wbc, &submit_list);
			count++;
		}

	} while (offset += len, ((bh = bh->b_this_page) != head));

	if (uptodate && bh == head)
		SetPageUptodate(page);

	ASSERT(wpc->ioend || list_empty(&submit_list));

out:
	/*
	 * On error, we have to fail the ioend here because we have locked
	 * buffers in the ioend. If we don't do this, we'll deadlock
	 * invalidating the page as that tries to lock the buffers on the page.
	 * Also, because we may have set pages under writeback, we have to make
	 * sure we run IO completion to mark the error state of the IO
	 * appropriately, so we can't cancel the ioend directly here. That means
	 * we have to mark this page as under writeback if we included any
	 * buffers from it in the ioend chain so that completion treats it
	 * correctly.
	 *
	 * If we didn't include the page in the ioend, then on error we can
	 * simply discard and unlock it as there are no other users of the page
	 * or its buffers right now. The caller will still need to trigger
	 * submission of outstanding ioends on the writepage context so they are
	 * treated correctly on error.
	 */
	if (count) {
		xfs_start_page_writeback(page, !error);

		/*
		 * Preserve the original error if there was one, otherwise catch
		 * submission errors here and propagate into subsequent ioend
		 * submissions.
		 */
		list_for_each_entry_safe(ioend, next, &submit_list, io_list) {
			int error2;

			list_del_init(&ioend->io_list);
			error2 = xfs_submit_ioend(wbc, ioend, error);
			if (error2 && !error)
				error = error2;
		}
	} else if (error) {
		xfs_aops_discard_page(page);
		ClearPageUptodate(page);
		unlock_page(page);
	} else {
		/*
		 * We can end up here with no error and nothing to write if we
		 * race with a partial page truncate on a sub-page block sized
		 * filesystem. In that case we need to mark the page clean.
		 */
		xfs_start_page_writeback(page, 1);
		end_page_writeback(page);
	}

	mapping_set_error(page->mapping, error);
	return error;
}

/*
 * Write out a dirty page.
 *
 * For delalloc space on the page we need to allocate space and flush it.
 * For unwritten space on the page we need to start the conversion to
 * regular allocated space.
 * For any other dirty buffer heads on the page we should flush them.
 */
STATIC int
xfs_do_writepage(
	struct page		*page,
	struct writeback_control *wbc,
	void			*data)
{
	struct xfs_writepage_ctx *wpc = data;
	struct inode		*inode = page->mapping->host;
	loff_t			offset;
	uint64_t		end_offset;
	pgoff_t			end_index;

	trace_xfs_writepage(inode, page, 0, 0);

	ASSERT(page_has_buffers(page));

	/*
	 * Refuse to write the page out if we are called from reclaim context.
	 *
	 * This avoids stack overflows when called from deeply used stacks in
	 * random callers for direct reclaim or memcg reclaim.  We explicitly
	 * allow reclaim from kswapd as the stack usage there is relatively low.
	 *
	 * This should never happen except in the case of a VM regression so
	 * warn about it.
	 */
	if (WARN_ON_ONCE((current->flags & (PF_MEMALLOC|PF_KSWAPD)) ==
			PF_MEMALLOC))
		goto redirty;

	/*
	 * Given that we do not allow direct reclaim to call us, we should
	 * never be called while in a filesystem transaction.
	 */
	if (WARN_ON_ONCE(current->flags & PF_MEMALLOC_NOFS))
		goto redirty;

	/*
	 * Is this page beyond the end of the file?
	 *
	 * The page index is less than the end_index, adjust the end_offset
	 * to the highest offset that this page should represent.
	 * -----------------------------------------------------
	 * |			file mapping	       | <EOF> |
	 * -----------------------------------------------------
	 * | Page ... | Page N-2 | Page N-1 |  Page N  |       |
	 * ^--------------------------------^----------|--------
	 * |     desired writeback range    |      see else    |
	 * ---------------------------------^------------------|
	 */
	offset = i_size_read(inode);
	end_index = offset >> PAGE_SHIFT;
	if (page->index < end_index)
		end_offset = (xfs_off_t)(page->index + 1) << PAGE_SHIFT;
	else {
		/*
		 * Check whether the page to write out is beyond or straddles
		 * i_size or not.
		 * -------------------------------------------------------
		 * |		file mapping		        | <EOF>  |
		 * -------------------------------------------------------
		 * | Page ... | Page N-2 | Page N-1 |  Page N   | Beyond |
		 * ^--------------------------------^-----------|---------
		 * |				    |      Straddles     |
		 * ---------------------------------^-----------|--------|
		 */
		unsigned offset_into_page = offset & (PAGE_SIZE - 1);

		/*
		 * Skip the page if it is fully outside i_size, e.g. due to a
		 * truncate operation that is in progress. We must redirty the
		 * page so that reclaim stops reclaiming it. Otherwise
		 * xfs_vm_releasepage() is called on it and gets confused.
		 *
		 * Note that the end_index is unsigned long, it would overflow
		 * if the given offset is greater than 16TB on a 32-bit system
		 * and if we do check the page is fully outside i_size or not
		 * via "if (page->index >= end_index + 1)" as "end_index + 1"
		 * will be evaluated to 0.  Hence this page will be redirtied
		 * and be written out repeatedly, which would result in an
		 * infinite loop; the user program that performs this operation
		 * will hang.  Instead, we can verify this situation by checking
		 * if the page to write is totally beyond the i_size or if its
		 * offset is just equal to the EOF.
		 */
		if (page->index > end_index ||
		    (page->index == end_index && offset_into_page == 0))
			goto redirty;

		/*
		 * The page straddles i_size.  It must be zeroed out on each
		 * and every writepage invocation because it may be mmapped.
		 * "A file is mapped in multiples of the page size.  For a file
		 * that is not a multiple of the page size, the remaining
		 * memory is zeroed when mapped, and writes to that region are
		 * not written out to the file."
		 */
		zero_user_segment(page, offset_into_page, PAGE_SIZE);

		/* Adjust the end_offset to the end of file */
		end_offset = offset;
	}

	return xfs_writepage_map(wpc, wbc, inode, page, offset, end_offset);

redirty:
	redirty_page_for_writepage(wbc, page);
	unlock_page(page);
	return 0;
}

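/*
 * ->writepage entry point: write a single page with a fresh writepage
 * context and submit any ioend left cached on it.
 */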
STATIC int
xfs_vm_writepage(
	struct page		*page,
	struct writeback_control *wbc)
{
	struct xfs_writepage_ctx wpc = {
		.io_type = XFS_IO_INVALID,
	};
	int			ret;

	ret = xfs_do_writepage(page, wbc, &wpc);
	if (wpc.ioend)
		ret = xfs_submit_ioend(wbc, wpc.ioend, ret);
	return ret;
}

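/*
 * ->writepages entry point: write back a range of dirty pages sharing a
 * single writepage context so that ioends can span pages.  DAX mappings
 * take a separate path.
 */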
STATIC int
xfs_vm_writepages(
	struct address_space	*mapping,
	struct writeback_control *wbc)
{
	struct xfs_writepage_ctx wpc = {
		.io_type = XFS_IO_INVALID,
	};
	int			ret;

	xfs_iflags_clear(XFS_I(mapping->host), XFS_ITRUNCATED);
	if (dax_mapping(mapping))
		return dax_writeback_mapping_range(mapping,
				xfs_find_bdev_for_inode(mapping->host), wbc);

	ret = write_cache_pages(mapping, wbc, xfs_do_writepage, &wpc);
	if (wpc.ioend)
		ret = xfs_submit_ioend(wbc, wpc.ioend, ret);
	return ret;
}

/*
 * Called to move a page into cleanable state - and from there
 * to be released. The page should already be clean. We always
 * have buffer heads in this call.
 *
 * Returns 1 if the page is ok to release, 0 otherwise.
 */
STATIC int
xfs_vm_releasepage(
	struct page		*page,
	gfp_t			gfp_mask)
{
	int			delalloc, unwritten;

	trace_xfs_releasepage(page->mapping->host, page, 0, 0);

	/*
	 * mm accommodates an old ext3 case where clean pages might not have had
	 * the dirty bit cleared. Thus, it can send actual dirty pages to
	 * ->releasepage() via shrink_active_list(). Conversely,
	 * block_invalidatepage() can send pages that are still marked dirty
	 * but otherwise have invalidated buffers.
	 *
	 * We want to release the latter to avoid unnecessary buildup of the
	 * LRU, skip the former and warn if we've left any lingering
	 * delalloc/unwritten buffers on clean pages. Skip pages with delalloc
	 * or unwritten buffers and warn if the page is not dirty. Otherwise
	 * try to release the buffers.
	 */
	xfs_count_page_state(page, &delalloc, &unwritten);

	if (delalloc) {
		WARN_ON_ONCE(!PageDirty(page));
		return 0;
	}
	if (unwritten) {
		WARN_ON_ONCE(!PageDirty(page));
		return 0;
	}

	return try_to_free_buffers(page);
}

/*
 * If this is O_DIRECT or the mpage code calling, tell them how large the
 * mapping is, so that we can avoid repeated get_blocks calls.
 *
 * If the mapping spans EOF, then we have to break the mapping up as the mapping
 * for blocks beyond EOF must be marked new so that sub block regions can be
 * correctly zeroed. We can't do this for mappings within EOF unless the mapping
 * was just allocated or is unwritten, otherwise the callers would overwrite
 * existing data with zeros. Hence we have to split the mapping into a range up
 * to and including EOF, and a second mapping for beyond EOF.
 */
static void
xfs_map_trim_size(
	struct inode		*inode,
	sector_t		iblock,
	struct buffer_head	*bh_result,
	struct xfs_bmbt_irec	*imap,
	xfs_off_t		offset,
	ssize_t			size)
{
	xfs_off_t		mapping_size;

	mapping_size = imap->br_startoff + imap->br_blockcount - iblock;
	mapping_size <<= inode->i_blkbits;

	ASSERT(mapping_size > 0);
	if (mapping_size > size)
		mapping_size = size;
	if (offset < i_size_read(inode) &&
	    offset + mapping_size >= i_size_read(inode)) {
		/* limit mapping to block that spans EOF */
		mapping_size = roundup_64(i_size_read(inode) - offset,
					  i_blocksize(inode));
	}
	if (mapping_size > LONG_MAX)
		mapping_size = LONG_MAX;

	bh_result->b_size = mapping_size;
}

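/*
 * Read-only get_blocks callback used by the generic mpage read paths and
 * xfs_vm_bmap(); block allocation (create != 0) is never requested here.
 */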
static int
xfs_get_blocks(
	struct inode		*inode,
	sector_t		iblock,
	struct buffer_head	*bh_result,
	int			create)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	xfs_fileoff_t		offset_fsb, end_fsb;
	int			error = 0;
	int			lockmode = 0;
	struct xfs_bmbt_irec	imap;
	int			nimaps = 1;
	xfs_off_t		offset;
	ssize_t			size;

	BUG_ON(create);

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	offset = (xfs_off_t)iblock << inode->i_blkbits;
	ASSERT(bh_result->b_size >= i_blocksize(inode));
	size = bh_result->b_size;

	if (offset >= i_size_read(inode))
		return 0;

	/*
	 * Direct I/O is usually done on preallocated files, so try getting
	 * a block mapping without an exclusive lock first.
	 */
	lockmode = xfs_ilock_data_map_shared(ip);

	ASSERT(offset <= mp->m_super->s_maxbytes);
	if (offset + size > mp->m_super->s_maxbytes)
		size = mp->m_super->s_maxbytes - offset;
	end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + size);
	offset_fsb = XFS_B_TO_FSBT(mp, offset);

	error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb,
				&imap, &nimaps, XFS_BMAPI_ENTIRE);
	if (error)
		goto out_unlock;

	if (nimaps) {
		trace_xfs_get_blocks_found(ip, offset, size,
			imap.br_state == XFS_EXT_UNWRITTEN ?
				XFS_IO_UNWRITTEN : XFS_IO_OVERWRITE, &imap);
		xfs_iunlock(ip, lockmode);
	} else {
		trace_xfs_get_blocks_notfound(ip, offset, size);
		goto out_unlock;
	}

	/* trim mapping down to size requested */
	xfs_map_trim_size(inode, iblock, bh_result, &imap, offset, size);

	/*
	 * For unwritten extents do not report a disk address in the buffered
	 * read case (treat as if we're reading into a hole).
	 */
	if (xfs_bmap_is_real_extent(&imap))
		xfs_map_buffer(inode, bh_result, &imap, offset);

	/*
	 * If this is a realtime file, data may be on a different device
	 * to that pointed to from the buffer_head b_bdev currently.
	 */
	bh_result->b_bdev = xfs_find_bdev_for_inode(inode);
	return 0;

out_unlock:
	xfs_iunlock(ip, lockmode);
	return error;
}

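/*
 * Direct I/O is handled elsewhere; this stub only exists so that
 * open/fcntl accept the O_DIRECT flag.
 */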
STATIC ssize_t
xfs_vm_direct_IO(
	struct kiocb		*iocb,
	struct iov_iter		*iter)
{
	/*
	 * We just need the method present so that open/fcntl allow direct I/O.
	 */
	return -EINVAL;
}

STATIC sector_t
xfs_vm_bmap(
	struct address_space	*mapping,
	sector_t		block)
{
	struct inode		*inode = (struct inode *)mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);

	trace_xfs_vm_bmap(XFS_I(inode));

	/*
	 * The swap code (ab-)uses ->bmap to get a block mapping and then
	 * bypasses the file system for actual I/O.  We really can't allow
	 * that on reflink inodes, so we have to skip out here.  And yes,
	 * 0 is the magic code for a bmap error.
	 *
	 * Since we don't pass back blockdev info, we can't return bmap
	 * information for rt files either.
	 */
	if (xfs_is_reflink_inode(ip) || XFS_IS_REALTIME_INODE(ip))
		return 0;

	filemap_write_and_wait(mapping);
	return generic_block_bmap(mapping, block, xfs_get_blocks);
}

STATIC int
xfs_vm_readpage(
	struct file		*unused,
	struct page		*page)
{
	trace_xfs_vm_readpage(page->mapping->host, 1);
	return mpage_readpage(page, xfs_get_blocks);
}

STATIC int
xfs_vm_readpages(
	struct file		*unused,
	struct address_space	*mapping,
	struct list_head	*pages,
	unsigned		nr_pages)
{
	trace_xfs_vm_readpages(mapping->host, nr_pages);
	return mpage_readpages(mapping, pages, nr_pages, xfs_get_blocks);
}

/*
 * This is basically a copy of __set_page_dirty_buffers() with one
 * small tweak: buffers beyond EOF do not get marked dirty. If we mark them
 * dirty, we'll never be able to clean them because we don't write buffers
 * beyond EOF, and that means we can't invalidate pages that span EOF
 * that have been marked dirty. Further, the dirty state can leak into
 * the file interior if the file is extended, resulting in all sorts of
 * bad things happening as the state does not match the underlying data.
 *
 * XXX: this really indicates that bufferheads in XFS need to die. Warts like
 * this only exist because of bufferheads and how the generic code manages them.
 */
STATIC int
xfs_vm_set_page_dirty(
	struct page		*page)
{
	struct address_space	*mapping = page->mapping;
	struct inode		*inode = mapping->host;
	loff_t			end_offset;
	loff_t			offset;
	int			newly_dirty;

	if (unlikely(!mapping))
		return !TestSetPageDirty(page);

	end_offset = i_size_read(inode);
	offset = page_offset(page);

	spin_lock(&mapping->private_lock);
	if (page_has_buffers(page)) {
		struct buffer_head *head = page_buffers(page);
		struct buffer_head *bh = head;

		do {
			if (offset < end_offset)
				set_buffer_dirty(bh);
			bh = bh->b_this_page;
			offset += i_blocksize(inode);
		} while (bh != head);
	}
	/*
	 * Lock out page->mem_cgroup migration to keep PageDirty
	 * synchronized with per-memcg dirty page counters.
	 */
	lock_page_memcg(page);
	newly_dirty = !TestSetPageDirty(page);
	spin_unlock(&mapping->private_lock);

	if (newly_dirty) {
		/* sigh - __set_page_dirty() is static, so copy it here, too */
		unsigned long flags;

		spin_lock_irqsave(&mapping->tree_lock, flags);
		if (page->mapping) {	/* Race with truncate? */
			WARN_ON_ONCE(!PageUptodate(page));
			account_page_dirtied(page, mapping);
			radix_tree_tag_set(&mapping->page_tree,
					page_index(page), PAGECACHE_TAG_DIRTY);
		}
		spin_unlock_irqrestore(&mapping->tree_lock, flags);
	}
	unlock_page_memcg(page);
	if (newly_dirty)
		__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
	return newly_dirty;
}

const struct address_space_operations xfs_address_space_operations = {
	.readpage		= xfs_vm_readpage,
	.readpages		= xfs_vm_readpages,
	.writepage		= xfs_vm_writepage,
	.writepages		= xfs_vm_writepages,
	.set_page_dirty		= xfs_vm_set_page_dirty,
	.releasepage		= xfs_vm_releasepage,
	.invalidatepage		= xfs_vm_invalidatepage,
	.bmap			= xfs_vm_bmap,
	.direct_IO		= xfs_vm_direct_IO,
	.migratepage		= buffer_migrate_page,
	.is_partially_uptodate  = block_is_partially_uptodate,
	.error_remove_page	= generic_error_remove_page,
};