// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_inode_item.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_dir2.h"
#include "xfs_dir2_priv.h"
#include "xfs_ioctl.h"
#include "xfs_trace.h"
#include "xfs_log.h"
#include "xfs_icache.h"
#include "xfs_pnfs.h"
#include "xfs_iomap.h"
#include "xfs_reflink.h"

#include <linux/falloc.h>
#include <linux/backing-dev.h>
#include <linux/mman.h>
#include <linux/fadvise.h>
#include <linux/mount.h>

static const struct vm_operations_struct xfs_file_vm_ops;

36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68
/*
 * Decide if the given file range is aligned to the size of the fundamental
 * allocation unit for the file.
 */
static bool
xfs_is_falloc_aligned(
	struct xfs_inode	*ip,
	loff_t			pos,
	long long int		len)
{
	struct xfs_mount	*mp = ip->i_mount;
	uint64_t		mask;

	if (XFS_IS_REALTIME_INODE(ip)) {
		if (!is_power_of_2(mp->m_sb.sb_rextsize)) {
			u64	rextbytes;
			u32	mod;

			rextbytes = XFS_FSB_TO_B(mp, mp->m_sb.sb_rextsize);
			div_u64_rem(pos, rextbytes, &mod);
			if (mod)
				return false;
			div_u64_rem(len, rextbytes, &mod);
			return mod == 0;
		}
		mask = XFS_FSB_TO_B(mp, mp->m_sb.sb_rextsize) - 1;
	} else {
		mask = mp->m_sb.sb_blocksize - 1;
	}

	return !((pos | len) & mask);
}
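
/*
 * For illustration: on a data-device inode with sb_blocksize = 4096 the
 * mask is 0xfff, so pos = 8192, len = 4096 passes the check
 * (((8192 | 4096) & 0xfff) == 0) while pos = 8192, len = 1000 fails
 * ((1000 & 0xfff) != 0).  Realtime inodes whose extent size is not a
 * power of two take the div_u64_rem() path instead.
 */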

/*
 * Fsync operations on directories are much simpler than on regular files,
 * as there is no file data to flush, and thus also no need for explicit
 * cache flush operations, and there are no non-transaction metadata updates
 * on directories either.
 */
STATIC int
xfs_dir_fsync(
	struct file		*file,
	loff_t			start,
	loff_t			end,
	int			datasync)
{
	struct xfs_inode	*ip = XFS_I(file->f_mapping->host);

	trace_xfs_dir_fsync(ip);
	return xfs_log_force_inode(ip);
}

static xfs_csn_t
xfs_fsync_seq(
	struct xfs_inode	*ip,
	bool			datasync)
{
	if (!xfs_ipincount(ip))
		return 0;
	if (datasync && !(ip->i_itemp->ili_fsync_fields & ~XFS_ILOG_TIMESTAMP))
		return 0;
	return ip->i_itemp->ili_commit_seq;
}

/*
 * All metadata updates are logged, which means that we just have to flush the
 * log up to the latest LSN that touched the inode.
 *
 * If we have concurrent fsync/fdatasync() calls, we need them to all block on
 * the log force before we clear the ili_fsync_fields field. This ensures that
 * we don't get a racing sync operation that does not wait for the metadata to
 * hit the journal before returning.  If we race with clearing ili_fsync_fields,
 * then all that will happen is the log force will do nothing as the lsn will
 * already be on disk.  We can't race with setting ili_fsync_fields because that
 * is done under XFS_ILOCK_EXCL, and that can't happen because we hold the lock
 * shared until after the ili_fsync_fields is cleared.
 */
static int
xfs_fsync_flush_log(
	struct xfs_inode	*ip,
	bool			datasync,
	int			*log_flushed)
{
	int			error = 0;
	xfs_csn_t		seq;

	xfs_ilock(ip, XFS_ILOCK_SHARED);
	seq = xfs_fsync_seq(ip, datasync);
	if (seq) {
		error = xfs_log_force_seq(ip->i_mount, seq, XFS_LOG_SYNC,
					  log_flushed);

		spin_lock(&ip->i_itemp->ili_lock);
		ip->i_itemp->ili_fsync_fields = 0;
		spin_unlock(&ip->i_itemp->ili_lock);
	}
	xfs_iunlock(ip, XFS_ILOCK_SHARED);
	return error;
}

STATIC int
xfs_file_fsync(
	struct file		*file,
	loff_t			start,
	loff_t			end,
	int			datasync)
{
	struct xfs_inode	*ip = XFS_I(file->f_mapping->host);
	struct xfs_mount	*mp = ip->i_mount;
	int			error = 0;
	int			log_flushed = 0;

	trace_xfs_file_fsync(ip);

	error = file_write_and_wait_range(file, start, end);
	if (error)
		return error;

	if (xfs_is_shutdown(mp))
		return -EIO;

	xfs_iflags_clear(ip, XFS_ITRUNCATED);

	/*
	 * If we have an RT and/or log subvolume we need to make sure to flush
	 * the write cache of the device used for file data first.  This is to
	 * ensure newly written file data make it to disk before logging the new
	 * inode size in case of an extending write.
	 */
	if (XFS_IS_REALTIME_INODE(ip))
		blkdev_issue_flush(mp->m_rtdev_targp->bt_bdev);
	else if (mp->m_logdev_targp != mp->m_ddev_targp)
		blkdev_issue_flush(mp->m_ddev_targp->bt_bdev);

	/*
	 * Any inode that has dirty modifications in the log is pinned.  The
	 * racy check here for a pinned inode will not catch modifications
	 * that happen concurrently to the fsync call, but fsync semantics
	 * only require us to sync previously completed I/O.
	 */
	if (xfs_ipincount(ip))
		error = xfs_fsync_flush_log(ip, datasync, &log_flushed);

	/*
	 * If we only have a single device, and the log force above was
	 * a no-op we might have to flush the data device cache here.
	 * This can only happen for fdatasync/O_DSYNC if we were overwriting
	 * an already allocated file and thus do not have any metadata to
	 * commit.
	 */
	if (!log_flushed && !XFS_IS_REALTIME_INODE(ip) &&
	    mp->m_logdev_targp == mp->m_ddev_targp)
		blkdev_issue_flush(mp->m_ddev_targp->bt_bdev);

	return error;
}
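
/*
 * A rough summary of the cache flushing in xfs_file_fsync() above
 * (derived from the code, not a normative table):
 *
 *	realtime inode			flush rtdev before the log force
 *	separate log device		flush datadev before the log force
 *	single device, log flushed	the log force flushed the cache
 *	single device, log not flushed	flush datadev afterwards
 */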

static int
xfs_ilock_iocb(
	struct kiocb		*iocb,
	unsigned int		lock_mode)
{
	struct xfs_inode	*ip = XFS_I(file_inode(iocb->ki_filp));

	if (iocb->ki_flags & IOCB_NOWAIT) {
		if (!xfs_ilock_nowait(ip, lock_mode))
			return -EAGAIN;
	} else {
		xfs_ilock(ip, lock_mode);
	}

	return 0;
}
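
/*
 * Sketch of the typical call pattern (it mirrors the read paths below):
 * take the I/O lock honouring IOCB_NOWAIT, issue the I/O, drop the lock.
 *
 *	ret = xfs_ilock_iocb(iocb, XFS_IOLOCK_SHARED);
 *	if (ret)
 *		return ret;
 *	ret = iomap_dio_rw(iocb, to, &xfs_read_iomap_ops, NULL, 0, NULL, 0);
 *	xfs_iunlock(ip, XFS_IOLOCK_SHARED);
 */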

STATIC ssize_t
xfs_file_dio_read(
	struct kiocb		*iocb,
	struct iov_iter		*to)
{
	struct xfs_inode	*ip = XFS_I(file_inode(iocb->ki_filp));
	ssize_t			ret;

	trace_xfs_file_direct_read(iocb, to);

	if (!iov_iter_count(to))
		return 0; /* skip atime */

	file_accessed(iocb->ki_filp);

	ret = xfs_ilock_iocb(iocb, XFS_IOLOCK_SHARED);
	if (ret)
		return ret;
	ret = iomap_dio_rw(iocb, to, &xfs_read_iomap_ops, NULL, 0, NULL, 0);
	xfs_iunlock(ip, XFS_IOLOCK_SHARED);

	return ret;
}

static noinline ssize_t
xfs_file_dax_read(
	struct kiocb		*iocb,
	struct iov_iter		*to)
{
	struct xfs_inode	*ip = XFS_I(iocb->ki_filp->f_mapping->host);
	ssize_t			ret = 0;

	trace_xfs_file_dax_read(iocb, to);

	if (!iov_iter_count(to))
		return 0; /* skip atime */

	ret = xfs_ilock_iocb(iocb, XFS_IOLOCK_SHARED);
	if (ret)
		return ret;
	ret = dax_iomap_rw(iocb, to, &xfs_read_iomap_ops);
	xfs_iunlock(ip, XFS_IOLOCK_SHARED);

	file_accessed(iocb->ki_filp);
	return ret;
}

STATIC ssize_t
xfs_file_buffered_read(
	struct kiocb		*iocb,
	struct iov_iter		*to)
{
	struct xfs_inode	*ip = XFS_I(file_inode(iocb->ki_filp));
	ssize_t			ret;

	trace_xfs_file_buffered_read(iocb, to);

	ret = xfs_ilock_iocb(iocb, XFS_IOLOCK_SHARED);
	if (ret)
		return ret;
	ret = generic_file_read_iter(iocb, to);
	xfs_iunlock(ip, XFS_IOLOCK_SHARED);

	return ret;
}

STATIC ssize_t
xfs_file_read_iter(
	struct kiocb		*iocb,
	struct iov_iter		*to)
{
	struct inode		*inode = file_inode(iocb->ki_filp);
	struct xfs_mount	*mp = XFS_I(inode)->i_mount;
	ssize_t			ret = 0;

	XFS_STATS_INC(mp, xs_read_calls);

	if (xfs_is_shutdown(mp))
		return -EIO;

	if (IS_DAX(inode))
		ret = xfs_file_dax_read(iocb, to);
	else if (iocb->ki_flags & IOCB_DIRECT)
		ret = xfs_file_dio_read(iocb, to);
	else
		ret = xfs_file_buffered_read(iocb, to);

	if (ret > 0)
		XFS_STATS_ADD(mp, xs_read_bytes, ret);
	return ret;
}

/*
 * Common pre-write limit and setup checks.
 *
 * Called with the iolock held either shared or exclusive according to
 * @iolock, and returns with it held.  Might upgrade the iolock to exclusive
 * if called for a direct write beyond i_size.
 */
STATIC ssize_t
xfs_file_write_checks(
	struct kiocb		*iocb,
	struct iov_iter		*from,
	unsigned int		*iolock)
{
	struct file		*file = iocb->ki_filp;
	struct inode		*inode = file->f_mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	ssize_t			error = 0;
	size_t			count = iov_iter_count(from);
	bool			drained_dio = false;
	loff_t			isize;

restart:
	error = generic_write_checks(iocb, from);
	if (error <= 0)
		return error;

	if (iocb->ki_flags & IOCB_NOWAIT) {
		error = break_layout(inode, false);
		if (error == -EWOULDBLOCK)
			error = -EAGAIN;
	} else {
		error = xfs_break_layouts(inode, iolock, BREAK_WRITE);
	}

	if (error)
		return error;

	/*
	 * For changing security info in file_remove_privs() we need i_rwsem
	 * exclusively.
	 */
	if (*iolock == XFS_IOLOCK_SHARED && !IS_NOSEC(inode)) {
		xfs_iunlock(ip, *iolock);
		*iolock = XFS_IOLOCK_EXCL;
		error = xfs_ilock_iocb(iocb, *iolock);
		if (error) {
			*iolock = 0;
			return error;
		}
		goto restart;
	}

	/*
	 * If the offset is beyond the size of the file, we need to zero any
	 * blocks that fall between the existing EOF and the start of this
	 * write.  If zeroing is needed and we are currently holding the iolock
	 * shared, we need to update it to exclusive which implies having to
	 * redo all checks before.
	 *
	 * We need to serialise against EOF updates that occur in IO completions
	 * here. We want to make sure that nobody is changing the size while we
	 * do this check until we have placed an IO barrier (i.e.  hold the
	 * XFS_IOLOCK_EXCL) that prevents new IO from being dispatched.  The
	 * spinlock effectively forms a memory barrier once we have the
	 * XFS_IOLOCK_EXCL so we are guaranteed to see the latest EOF value and
	 * hence be able to correctly determine if we need to run zeroing.
	 *
	 * We can do an unlocked check here safely as IO completion can only
	 * extend EOF. Truncate is locked out at this point, so the EOF can
	 * not move backwards, only forwards. Hence we only need to take the
	 * slow path and spin locks when we are at or beyond the current EOF.
	 */
	if (iocb->ki_pos <= i_size_read(inode))
		goto out;

	spin_lock(&ip->i_flags_lock);
	isize = i_size_read(inode);
	if (iocb->ki_pos > isize) {
		spin_unlock(&ip->i_flags_lock);

		if (iocb->ki_flags & IOCB_NOWAIT)
			return -EAGAIN;

		if (!drained_dio) {
			if (*iolock == XFS_IOLOCK_SHARED) {
				xfs_iunlock(ip, *iolock);
				*iolock = XFS_IOLOCK_EXCL;
				xfs_ilock(ip, *iolock);
				iov_iter_reexpand(from, count);
			}
			/*
			 * We now have an IO submission barrier in place, but
			 * AIO can do EOF updates during IO completion and hence
			 * we now need to wait for all of them to drain. Non-AIO
			 * DIO will have drained before we are given the
			 * XFS_IOLOCK_EXCL, and so for most cases this wait is a
			 * no-op.
			 */
			inode_dio_wait(inode);
			drained_dio = true;
			goto restart;
		}

		trace_xfs_zero_eof(ip, isize, iocb->ki_pos - isize);
		error = xfs_zero_range(ip, isize, iocb->ki_pos - isize, NULL);
		if (error)
			return error;
	} else
		spin_unlock(&ip->i_flags_lock);

out:
	return file_modified(file);
}

static int
xfs_dio_write_end_io(
	struct kiocb		*iocb,
	ssize_t			size,
	int			error,
	unsigned		flags)
{
	struct inode		*inode = file_inode(iocb->ki_filp);
	struct xfs_inode	*ip = XFS_I(inode);
	loff_t			offset = iocb->ki_pos;
	unsigned int		nofs_flag;

	trace_xfs_end_io_direct_write(ip, offset, size);

	if (xfs_is_shutdown(ip->i_mount))
		return -EIO;

	if (error)
		return error;
	if (!size)
		return 0;

	/*
	 * Capture amount written on completion as we can't reliably account
	 * for it on submission.
	 */
	XFS_STATS_ADD(ip->i_mount, xs_write_bytes, size);

	/*
	 * We can allocate memory here while doing writeback on behalf of
	 * memory reclaim.  To avoid memory allocation deadlocks set the
	 * task-wide nofs context for the following operations.
	 */
	nofs_flag = memalloc_nofs_save();

	if (flags & IOMAP_DIO_COW) {
		error = xfs_reflink_end_cow(ip, offset, size);
		if (error)
			goto out;
	}

	/*
	 * Unwritten conversion updates the in-core isize after extent
	 * conversion but before updating the on-disk size. Updating isize any
	 * earlier allows a racing dio read to find unwritten extents before
	 * they are converted.
	 */
	if (flags & IOMAP_DIO_UNWRITTEN) {
		error = xfs_iomap_write_unwritten(ip, offset, size, true);
		goto out;
	}

	/*
	 * We need to update the in-core inode size here so that we don't end up
	 * with the on-disk inode size being outside the in-core inode size. We
	 * have no other method of updating EOF for AIO, so always do it here
	 * if necessary.
	 *
	 * We need to lock the test/set EOF update as we can be racing with
	 * other IO completions here to update the EOF. Failing to serialise
	 * here can result in EOF moving backwards and Bad Things Happen when
	 * that occurs.
	 *
	 * As IO completion only ever extends EOF, we can do an unlocked check
	 * here to avoid taking the spinlock. If we land within the current EOF,
	 * then we do not need to do an extending update at all, and we don't
	 * need to take the lock to check this. If we race with an update moving
	 * EOF, then we'll either still be beyond EOF and need to take the lock,
	 * or we'll be within EOF and we don't need to take it at all.
	 */
	if (offset + size <= i_size_read(inode))
		goto out;

	spin_lock(&ip->i_flags_lock);
	if (offset + size > i_size_read(inode)) {
		i_size_write(inode, offset + size);
		spin_unlock(&ip->i_flags_lock);
		error = xfs_setfilesize(ip, offset, size);
	} else {
		spin_unlock(&ip->i_flags_lock);
	}

out:
	memalloc_nofs_restore(nofs_flag);
	return error;
}

static const struct iomap_dio_ops xfs_dio_write_ops = {
	.end_io		= xfs_dio_write_end_io,
};

/*
 * Handle block aligned direct I/O writes
 */
static noinline ssize_t
xfs_file_dio_write_aligned(
	struct xfs_inode	*ip,
	struct kiocb		*iocb,
	struct iov_iter		*from)
{
	unsigned int		iolock = XFS_IOLOCK_SHARED;
	ssize_t			ret;

	ret = xfs_ilock_iocb(iocb, iolock);
	if (ret)
		return ret;
	ret = xfs_file_write_checks(iocb, from, &iolock);
	if (ret)
		goto out_unlock;

	/*
	 * We don't need to hold the IOLOCK exclusively across the IO, so demote
	 * the iolock back to shared if we had to take the exclusive lock in
	 * xfs_file_write_checks() for other reasons.
	 */
	if (iolock == XFS_IOLOCK_EXCL) {
		xfs_ilock_demote(ip, XFS_IOLOCK_EXCL);
		iolock = XFS_IOLOCK_SHARED;
	}
	trace_xfs_file_direct_write(iocb, from);
	ret = iomap_dio_rw(iocb, from, &xfs_direct_write_iomap_ops,
			   &xfs_dio_write_ops, 0, NULL, 0);
out_unlock:
	if (iolock)
		xfs_iunlock(ip, iolock);
	return ret;
}

/*
 * Handle block unaligned direct I/O writes
 *
 * In most cases direct I/O writes will be done holding IOLOCK_SHARED, allowing
 * them to be done in parallel with reads and other direct I/O writes.  However,
 * if the I/O is not aligned to filesystem blocks, the direct I/O layer may need
 * to do sub-block zeroing and that requires serialisation against other direct
 * I/O to the same block.  In this case we need to serialise the submission of
 * the unaligned I/O so that we don't get racing block zeroing in the dio layer.
 * In the case where sub-block zeroing is not required, we can do concurrent
 * sub-block dios to the same block successfully.
 *
 * Optimistically submit the I/O using the shared lock first, but use the
 * IOMAP_DIO_OVERWRITE_ONLY flag to tell the lower layers to return -EAGAIN
 * if block allocation or partial block zeroing would be required.  In that case
 * we try again with the exclusive lock.
 */
static noinline ssize_t
xfs_file_dio_write_unaligned(
	struct xfs_inode	*ip,
	struct kiocb		*iocb,
	struct iov_iter		*from)
{
	size_t			isize = i_size_read(VFS_I(ip));
	size_t			count = iov_iter_count(from);
	unsigned int		iolock = XFS_IOLOCK_SHARED;
	unsigned int		flags = IOMAP_DIO_OVERWRITE_ONLY;
	ssize_t			ret;

	/*
	 * Extending writes need exclusivity because of the sub-block zeroing
	 * that the DIO code always does for partial tail blocks beyond EOF, so
	 * don't even bother trying the fast path in this case.
	 */
	if (iocb->ki_pos > isize || iocb->ki_pos + count >= isize) {
		if (iocb->ki_flags & IOCB_NOWAIT)
			return -EAGAIN;
retry_exclusive:
		iolock = XFS_IOLOCK_EXCL;
		flags = IOMAP_DIO_FORCE_WAIT;
	}

	ret = xfs_ilock_iocb(iocb, iolock);
	if (ret)
		return ret;

	/*
	 * We can't properly handle unaligned direct I/O to reflink files yet,
	 * as we can't unshare a partial block.
	 */
	if (xfs_is_cow_inode(ip)) {
		trace_xfs_reflink_bounce_dio_write(iocb, from);
		ret = -ENOTBLK;
		goto out_unlock;
	}

	ret = xfs_file_write_checks(iocb, from, &iolock);
	if (ret)
		goto out_unlock;

	/*
	 * If we are doing exclusive unaligned I/O, this must be the only I/O
	 * in-flight.  Otherwise we risk data corruption due to unwritten extent
	 * conversions from the AIO end_io handler.  Wait for all other I/O to
	 * drain first.
	 */
	if (flags & IOMAP_DIO_FORCE_WAIT)
		inode_dio_wait(VFS_I(ip));

	trace_xfs_file_direct_write(iocb, from);
	ret = iomap_dio_rw(iocb, from, &xfs_direct_write_iomap_ops,
			   &xfs_dio_write_ops, flags, NULL, 0);

	/*
	 * Retry unaligned I/O with exclusive blocking semantics if the DIO
	 * layer rejected it for mapping or locking reasons. If we are doing
	 * nonblocking user I/O, propagate the error.
	 */
	if (ret == -EAGAIN && !(iocb->ki_flags & IOCB_NOWAIT)) {
		ASSERT(flags & IOMAP_DIO_OVERWRITE_ONLY);
		xfs_iunlock(ip, iolock);
		goto retry_exclusive;
	}

out_unlock:
	if (iolock)
		xfs_iunlock(ip, iolock);
	return ret;
}

static ssize_t
xfs_file_dio_write(
	struct kiocb		*iocb,
	struct iov_iter		*from)
{
	struct xfs_inode	*ip = XFS_I(file_inode(iocb->ki_filp));
	struct xfs_buftarg      *target = xfs_inode_buftarg(ip);
	size_t			count = iov_iter_count(from);

	/* direct I/O must be aligned to device logical sector size */
	if ((iocb->ki_pos | count) & target->bt_logical_sectormask)
		return -EINVAL;
	if ((iocb->ki_pos | count) & ip->i_mount->m_blockmask)
		return xfs_file_dio_write_unaligned(ip, iocb, from);
	return xfs_file_dio_write_aligned(ip, iocb, from);
}
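
/*
 * Worked example for the dispatch in xfs_file_dio_write() above, assuming
 * 512-byte logical sectors and a 4096-byte block size: a 4096-byte write
 * at offset 8192 is block aligned and takes the aligned path; a 2048-byte
 * write at offset 4096 is sector aligned but sub-block, so it takes the
 * unaligned path; a 100-byte write fails the sector check with -EINVAL.
 */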

static noinline ssize_t
xfs_file_dax_write(
	struct kiocb		*iocb,
	struct iov_iter		*from)
{
	struct inode		*inode = iocb->ki_filp->f_mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	unsigned int		iolock = XFS_IOLOCK_EXCL;
	ssize_t			ret, error = 0;
	loff_t			pos;

	ret = xfs_ilock_iocb(iocb, iolock);
	if (ret)
		return ret;
	ret = xfs_file_write_checks(iocb, from, &iolock);
	if (ret)
		goto out;

	pos = iocb->ki_pos;

	trace_xfs_file_dax_write(iocb, from);
	ret = dax_iomap_rw(iocb, from, &xfs_direct_write_iomap_ops);
	if (ret > 0 && iocb->ki_pos > i_size_read(inode)) {
		i_size_write(inode, iocb->ki_pos);
		error = xfs_setfilesize(ip, pos, ret);
	}
out:
	if (iolock)
		xfs_iunlock(ip, iolock);
	if (error)
		return error;

	if (ret > 0) {
		XFS_STATS_ADD(ip->i_mount, xs_write_bytes, ret);

		/* Handle various SYNC-type writes */
		ret = generic_write_sync(iocb, ret);
	}
	return ret;
}

STATIC ssize_t
xfs_file_buffered_write(
	struct kiocb		*iocb,
	struct iov_iter		*from)
{
	struct inode		*inode = iocb->ki_filp->f_mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	ssize_t			ret;
	bool			cleared_space = false;
	unsigned int		iolock;

	if (iocb->ki_flags & IOCB_NOWAIT)
		return -EOPNOTSUPP;

write_retry:
	iolock = XFS_IOLOCK_EXCL;
	xfs_ilock(ip, iolock);

	ret = xfs_file_write_checks(iocb, from, &iolock);
	if (ret)
		goto out;

	/* We can write back this queue in page reclaim */
	current->backing_dev_info = inode_to_bdi(inode);

	trace_xfs_file_buffered_write(iocb, from);
	ret = iomap_file_buffered_write(iocb, from,
			&xfs_buffered_write_iomap_ops);
	if (likely(ret >= 0))
		iocb->ki_pos += ret;

	/*
	 * If we hit a space limit, try to free up some lingering preallocated
	 * space before returning an error. In the case of ENOSPC, first try to
	 * write back all dirty inodes to free up some of the excess reserved
	 * metadata space. This reduces the chances that the eofblocks scan
	 * waits on dirty mappings. Since xfs_flush_inodes() is serialized, this
	 * also behaves as a filter to prevent too many eofblocks scans from
	 * running at the same time.  Use a synchronous scan to increase the
	 * effectiveness of the scan.
	 */
	if (ret == -EDQUOT && !cleared_space) {
		xfs_iunlock(ip, iolock);
		xfs_blockgc_free_quota(ip, XFS_ICWALK_FLAG_SYNC);
		cleared_space = true;
		goto write_retry;
	} else if (ret == -ENOSPC && !cleared_space) {
		struct xfs_icwalk	icw = {0};

		cleared_space = true;
		xfs_flush_inodes(ip->i_mount);

		xfs_iunlock(ip, iolock);
		icw.icw_flags = XFS_ICWALK_FLAG_SYNC;
		xfs_blockgc_free_space(ip->i_mount, &icw);
		goto write_retry;
	}

	current->backing_dev_info = NULL;
out:
	if (iolock)
		xfs_iunlock(ip, iolock);

	if (ret > 0) {
		XFS_STATS_ADD(ip->i_mount, xs_write_bytes, ret);
		/* Handle various SYNC-type writes */
		ret = generic_write_sync(iocb, ret);
	}
	return ret;
}

STATIC ssize_t
xfs_file_write_iter(
	struct kiocb		*iocb,
	struct iov_iter		*from)
{
	struct inode		*inode = iocb->ki_filp->f_mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	ssize_t			ret;
	size_t			ocount = iov_iter_count(from);

	XFS_STATS_INC(ip->i_mount, xs_write_calls);

	if (ocount == 0)
		return 0;

	if (xfs_is_shutdown(ip->i_mount))
		return -EIO;

	if (IS_DAX(inode))
		return xfs_file_dax_write(iocb, from);

	if (iocb->ki_flags & IOCB_DIRECT) {
		/*
		 * Allow a directio write to fall back to a buffered
		 * write *only* in the case that we're doing a reflink
		 * CoW.  In all other directio scenarios we do not
		 * allow an operation to fall back to buffered mode.
		 */
		ret = xfs_file_dio_write(iocb, from);
		if (ret != -ENOTBLK)
			return ret;
	}

	return xfs_file_buffered_write(iocb, from);
}
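
/*
 * Note on the -ENOTBLK fallback above: that is the direct I/O path's
 * request for a buffered-write restart.  In this file it is raised for
 * unaligned writes to reflink inodes (xfs_file_dio_write_unaligned());
 * the iomap dio layer may also return it, e.g. on page cache
 * invalidation failures.
 */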

static void
xfs_wait_dax_page(
	struct inode		*inode)
{
	struct xfs_inode        *ip = XFS_I(inode);

	xfs_iunlock(ip, XFS_MMAPLOCK_EXCL);
	schedule();
	xfs_ilock(ip, XFS_MMAPLOCK_EXCL);
}

static int
xfs_break_dax_layouts(
	struct inode		*inode,
	bool			*retry)
{
	struct page		*page;

	ASSERT(xfs_isilocked(XFS_I(inode), XFS_MMAPLOCK_EXCL));

	page = dax_layout_busy_page(inode->i_mapping);
	if (!page)
		return 0;

	*retry = true;
	return ___wait_var_event(&page->_refcount,
			atomic_read(&page->_refcount) == 1, TASK_INTERRUPTIBLE,
			0, 0, xfs_wait_dax_page(inode));
}

int
xfs_break_layouts(
	struct inode		*inode,
	uint			*iolock,
	enum layout_break_reason reason)
{
	bool			retry;
	int			error;

	ASSERT(xfs_isilocked(XFS_I(inode), XFS_IOLOCK_SHARED|XFS_IOLOCK_EXCL));

	do {
		retry = false;
		switch (reason) {
		case BREAK_UNMAP:
			error = xfs_break_dax_layouts(inode, &retry);
			if (error || retry)
				break;
			fallthrough;
		case BREAK_WRITE:
			error = xfs_break_leased_layouts(inode, iolock, &retry);
			break;
		default:
			WARN_ON_ONCE(1);
			error = -EINVAL;
		}
	} while (error == 0 && retry);

	return error;
}

/* Does this file, inode, or mount want synchronous writes? */
static inline bool xfs_file_sync_writes(struct file *filp)
{
	struct xfs_inode	*ip = XFS_I(file_inode(filp));

	if (xfs_has_wsync(ip->i_mount))
		return true;
	if (filp->f_flags & (__O_SYNC | O_DSYNC))
		return true;
	if (IS_SYNC(file_inode(filp)))
		return true;

	return false;
}
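
/*
 * For example (illustrative): xfs_file_sync_writes() returns true on a
 * "wsync" mount, for files opened with O_SYNC or O_DSYNC, and for inodes
 * flagged sync (e.g. chattr +S).
 */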

#define	XFS_FALLOC_FL_SUPPORTED						\
		(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |		\
		 FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_ZERO_RANGE |	\
		 FALLOC_FL_INSERT_RANGE | FALLOC_FL_UNSHARE_RANGE)

STATIC long
xfs_file_fallocate(
	struct file		*file,
	int			mode,
	loff_t			offset,
	loff_t			len)
{
	struct inode		*inode = file_inode(file);
	struct xfs_inode	*ip = XFS_I(inode);
	long			error;
	uint			iolock = XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL;
	loff_t			new_size = 0;
	bool			do_file_insert = false;

	if (!S_ISREG(inode->i_mode))
		return -EINVAL;
	if (mode & ~XFS_FALLOC_FL_SUPPORTED)
		return -EOPNOTSUPP;

	xfs_ilock(ip, iolock);
	error = xfs_break_layouts(inode, &iolock, BREAK_UNMAP);
	if (error)
		goto out_unlock;

	/*
	 * Must wait for all AIO to complete before we continue as AIO can
	 * change the file size on completion without holding any locks we
	 * currently hold. We must do this first because AIO can update both
	 * the on disk and in memory inode sizes, and the operations that follow
	 * require the in-memory size to be fully up-to-date.
	 */
	inode_dio_wait(inode);

	/*
	 * Now AIO and DIO have drained we flush and (if necessary) invalidate
	 * the cached range over the first operation we are about to run.
	 *
	 * We care about zero and collapse here because they both run a hole
	 * punch over the range first. Because that can zero data, and the range
	 * of invalidation for the shift operations is much larger, we still do
	 * the required flush for collapse in xfs_prepare_shift().
	 *
	 * Insert has the same range requirements as collapse, and we extend the
	 * file first which can zero data. Hence insert has the same
	 * flush/invalidate requirements as collapse and so they are both
	 * handled at the right time by xfs_prepare_shift().
	 */
	if (mode & (FALLOC_FL_PUNCH_HOLE | FALLOC_FL_ZERO_RANGE |
		    FALLOC_FL_COLLAPSE_RANGE)) {
		error = xfs_flush_unmap_range(ip, offset, len);
		if (error)
			goto out_unlock;
	}

	error = file_modified(file);
	if (error)
		goto out_unlock;

	if (mode & FALLOC_FL_PUNCH_HOLE) {
		error = xfs_free_file_space(ip, offset, len);
		if (error)
			goto out_unlock;
	} else if (mode & FALLOC_FL_COLLAPSE_RANGE) {
		if (!xfs_is_falloc_aligned(ip, offset, len)) {
			error = -EINVAL;
			goto out_unlock;
		}

		/*
		 * There is no need to overlap collapse range with EOF,
		 * in which case it is effectively a truncate operation
		 */
		if (offset + len >= i_size_read(inode)) {
			error = -EINVAL;
			goto out_unlock;
		}

		new_size = i_size_read(inode) - len;

		error = xfs_collapse_file_space(ip, offset, len);
		if (error)
			goto out_unlock;
	} else if (mode & FALLOC_FL_INSERT_RANGE) {
		loff_t		isize = i_size_read(inode);

		if (!xfs_is_falloc_aligned(ip, offset, len)) {
			error = -EINVAL;
			goto out_unlock;
		}

		/*
		 * New inode size must not exceed ->s_maxbytes, accounting for
		 * possible signed overflow.
		 */
		if (inode->i_sb->s_maxbytes - isize < len) {
			error = -EFBIG;
			goto out_unlock;
		}
		new_size = isize + len;

		/* Offset should be less than i_size */
		if (offset >= isize) {
			error = -EINVAL;
			goto out_unlock;
		}
		do_file_insert = true;
	} else {
		if (!(mode & FALLOC_FL_KEEP_SIZE) &&
		    offset + len > i_size_read(inode)) {
			new_size = offset + len;
			error = inode_newsize_ok(inode, new_size);
			if (error)
				goto out_unlock;
		}

		if (mode & FALLOC_FL_ZERO_RANGE) {
			/*
			 * Punch a hole and prealloc the range.  We use a hole
			 * punch rather than unwritten extent conversion for two
			 * reasons:
			 *
			 *   1.) Hole punch handles partial block zeroing for us.
			 *   2.) If prealloc returns ENOSPC, the file range is
			 *       still zero-valued by virtue of the hole punch.
			 */
			unsigned int blksize = i_blocksize(inode);

			trace_xfs_zero_file_space(ip);

			error = xfs_free_file_space(ip, offset, len);
			if (error)
				goto out_unlock;

			len = round_up(offset + len, blksize) -
			      round_down(offset, blksize);
			offset = round_down(offset, blksize);
		} else if (mode & FALLOC_FL_UNSHARE_RANGE) {
			error = xfs_reflink_unshare(ip, offset, len);
			if (error)
				goto out_unlock;
		} else {
			/*
			 * In always_cow mode we can't use preallocations and
			 * thus should not create them.
			 */
			if (xfs_is_always_cow_inode(ip)) {
				error = -EOPNOTSUPP;
				goto out_unlock;
			}
		}

		if (!xfs_is_always_cow_inode(ip)) {
			error = xfs_alloc_file_space(ip, offset, len);
			if (error)
				goto out_unlock;
		}
	}

	/* Change file size if needed */
	if (new_size) {
		struct iattr iattr;

		iattr.ia_valid = ATTR_SIZE;
		iattr.ia_size = new_size;
		error = xfs_vn_setattr_size(file_mnt_user_ns(file),
					    file_dentry(file), &iattr);
		if (error)
			goto out_unlock;
	}

	/*
	 * Perform hole insertion now that the file size has been
	 * updated so that if we crash during the operation we don't
	 * leave shifted extents past EOF and hence lose access to
	 * the data that is contained within them.
	 */
	if (do_file_insert) {
		error = xfs_insert_file_space(ip, offset, len);
		if (error)
			goto out_unlock;
	}

	if (xfs_file_sync_writes(file))
		error = xfs_log_force_inode(ip);

out_unlock:
	xfs_iunlock(ip, iolock);
	return error;
}
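
/*
 * Worked example for the FALLOC_FL_ZERO_RANGE rounding above: with a
 * 4096-byte block size, offset = 1000, len = 3000 first hole punches
 * bytes [1000, 4000), then is rounded to offset = 0 and
 * len = round_up(4000, 4096) - round_down(1000, 4096) = 4096, so the
 * preallocation covers the whole first block.
 */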

STATIC int
xfs_file_fadvise(
	struct file	*file,
	loff_t		start,
	loff_t		end,
	int		advice)
{
	struct xfs_inode *ip = XFS_I(file_inode(file));
	int ret;
	int lockflags = 0;

	/*
	 * Operations creating pages in page cache need protection from hole
	 * punching and similar ops
	 */
	if (advice == POSIX_FADV_WILLNEED) {
		lockflags = XFS_IOLOCK_SHARED;
		xfs_ilock(ip, lockflags);
	}
	ret = generic_fadvise(file, start, end, advice);
	if (lockflags)
		xfs_iunlock(ip, lockflags);
	return ret;
}

STATIC loff_t
xfs_file_remap_range(
	struct file		*file_in,
	loff_t			pos_in,
	struct file		*file_out,
	loff_t			pos_out,
	loff_t			len,
	unsigned int		remap_flags)
{
	struct inode		*inode_in = file_inode(file_in);
	struct xfs_inode	*src = XFS_I(inode_in);
	struct inode		*inode_out = file_inode(file_out);
	struct xfs_inode	*dest = XFS_I(inode_out);
	struct xfs_mount	*mp = src->i_mount;
	loff_t			remapped = 0;
	xfs_extlen_t		cowextsize;
	int			ret;

	if (remap_flags & ~(REMAP_FILE_DEDUP | REMAP_FILE_ADVISORY))
		return -EINVAL;

	if (!xfs_has_reflink(mp))
		return -EOPNOTSUPP;

	if (xfs_is_shutdown(mp))
		return -EIO;

	/* Prepare and then clone file data. */
	ret = xfs_reflink_remap_prep(file_in, pos_in, file_out, pos_out,
			&len, remap_flags);
	if (ret || len == 0)
		return ret;

	trace_xfs_reflink_remap_range(src, pos_in, len, dest, pos_out);

	ret = xfs_reflink_remap_blocks(src, pos_in, dest, pos_out, len,
			&remapped);
	if (ret)
		goto out_unlock;

	/*
	 * Carry the cowextsize hint from src to dest if we're sharing the
	 * entire source file to the entire destination file, the source file
	 * has a cowextsize hint, and the destination file does not.
	 */
	cowextsize = 0;
	if (pos_in == 0 && len == i_size_read(inode_in) &&
	    (src->i_diflags2 & XFS_DIFLAG2_COWEXTSIZE) &&
	    pos_out == 0 && len >= i_size_read(inode_out) &&
	    !(dest->i_diflags2 & XFS_DIFLAG2_COWEXTSIZE))
		cowextsize = src->i_cowextsize;

	ret = xfs_reflink_update_dest(dest, pos_out + len, cowextsize,
			remap_flags);
	if (ret)
		goto out_unlock;

	if (xfs_file_sync_writes(file_in) || xfs_file_sync_writes(file_out))
		xfs_log_force_inode(dest);
out_unlock:
	xfs_iunlock2_io_mmap(src, dest);
	if (ret)
		trace_xfs_reflink_remap_range_error(dest, ret, _RET_IP_);
	return remapped > 0 ? remapped : ret;
}

STATIC int
xfs_file_open(
	struct inode	*inode,
	struct file	*file)
{
	if (xfs_is_shutdown(XFS_M(inode->i_sb)))
		return -EIO;
	file->f_mode |= FMODE_NOWAIT | FMODE_BUF_RASYNC;
	return generic_file_open(inode, file);
}

STATIC int
xfs_dir_open(
	struct inode	*inode,
	struct file	*file)
{
	struct xfs_inode *ip = XFS_I(inode);
	unsigned int	mode;
	int		error;

	error = xfs_file_open(inode, file);
	if (error)
		return error;

	/*
	 * If there are any blocks, read-ahead block 0 as we're almost
	 * certain to have the next operation be a read there.
	 */
	mode = xfs_ilock_data_map_shared(ip);
	if (ip->i_df.if_nextents > 0)
		error = xfs_dir3_data_readahead(ip, 0, 0);
	xfs_iunlock(ip, mode);
	return error;
}

STATIC int
xfs_file_release(
	struct inode	*inode,
	struct file	*filp)
{
	return xfs_release(XFS_I(inode));
}

STATIC int
xfs_file_readdir(
	struct file	*file,
	struct dir_context *ctx)
{
	struct inode	*inode = file_inode(file);
	xfs_inode_t	*ip = XFS_I(inode);
	size_t		bufsize;

	/*
	 * The Linux API doesn't pass the total size of the buffer we read
	 * into down to the filesystem.  With the filldir concept it's not
	 * needed for correct information, but the XFS dir2 leaf code wants
	 * an estimate of the buffer size to calculate its readahead window
	 * and size the buffers used for mapping to physical blocks.
	 *
	 * Try to give it an estimate that's good enough, maybe at some
	 * point we can change the ->readdir prototype to include the
	 * buffer size.  For now we use the current glibc buffer size.
	 */
	bufsize = (size_t)min_t(loff_t, XFS_READDIR_BUFSIZE, ip->i_disk_size);

	return xfs_readdir(NULL, ip, ctx, bufsize);
}

STATIC loff_t
xfs_file_llseek(
	struct file	*file,
	loff_t		offset,
	int		whence)
{
	struct inode		*inode = file->f_mapping->host;

	if (xfs_is_shutdown(XFS_I(inode)->i_mount))
		return -EIO;

	switch (whence) {
	default:
		return generic_file_llseek(file, offset, whence);
	case SEEK_HOLE:
		offset = iomap_seek_hole(inode, offset, &xfs_seek_iomap_ops);
		break;
	case SEEK_DATA:
		offset = iomap_seek_data(inode, offset, &xfs_seek_iomap_ops);
		break;
	}

	if (offset < 0)
		return offset;
	return vfs_setpos(file, offset, inode->i_sb->s_maxbytes);
}
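
/*
 * Illustrative SEEK_HOLE/SEEK_DATA behaviour: for a file with data only
 * in its first 4k block and a hole after it, lseek(fd, 0, SEEK_HOLE)
 * would return 4096 and lseek(fd, 0, SEEK_DATA) would return 0; the
 * iomap seek helpers detect holes at filesystem block granularity.
 */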

/*
 * Locking for serialisation of IO during page faults. This results in a lock
 * ordering of:
 *
 * mmap_lock (MM)
 *   sb_start_pagefault(vfs, freeze)
 *     invalidate_lock (vfs/XFS_MMAPLOCK - truncate serialisation)
 *       page_lock (MM)
 *         i_lock (XFS - extent map serialisation)
 */
static vm_fault_t
__xfs_filemap_fault(
	struct vm_fault		*vmf,
	enum page_entry_size	pe_size,
	bool			write_fault)
{
	struct inode		*inode = file_inode(vmf->vma->vm_file);
	struct xfs_inode	*ip = XFS_I(inode);
	vm_fault_t		ret;

	trace_xfs_filemap_fault(ip, pe_size, write_fault);

	if (write_fault) {
		sb_start_pagefault(inode->i_sb);
		file_update_time(vmf->vma->vm_file);
	}

	if (IS_DAX(inode)) {
		pfn_t pfn;

		xfs_ilock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
		ret = dax_iomap_fault(vmf, pe_size, &pfn, NULL,
				(write_fault && !vmf->cow_page) ?
				 &xfs_direct_write_iomap_ops :
				 &xfs_read_iomap_ops);
		if (ret & VM_FAULT_NEEDDSYNC)
			ret = dax_finish_sync_fault(vmf, pe_size, pfn);
		xfs_iunlock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
	} else {
		if (write_fault) {
			xfs_ilock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
			ret = iomap_page_mkwrite(vmf,
					&xfs_buffered_write_iomap_ops);
			xfs_iunlock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
		} else {
			ret = filemap_fault(vmf);
		}
	}

	if (write_fault)
		sb_end_pagefault(inode->i_sb);
	return ret;
}

static inline bool
xfs_is_write_fault(
	struct vm_fault		*vmf)
{
	return (vmf->flags & FAULT_FLAG_WRITE) &&
	       (vmf->vma->vm_flags & VM_SHARED);
}

static vm_fault_t
xfs_filemap_fault(
	struct vm_fault		*vmf)
{
	/* DAX can shortcut the normal fault path on write faults! */
	return __xfs_filemap_fault(vmf, PE_SIZE_PTE,
			IS_DAX(file_inode(vmf->vma->vm_file)) &&
			xfs_is_write_fault(vmf));
}

static vm_fault_t
xfs_filemap_huge_fault(
	struct vm_fault		*vmf,
	enum page_entry_size	pe_size)
{
	if (!IS_DAX(file_inode(vmf->vma->vm_file)))
		return VM_FAULT_FALLBACK;

	/* DAX can shortcut the normal fault path on write faults! */
	return __xfs_filemap_fault(vmf, pe_size,
			xfs_is_write_fault(vmf));
}

static vm_fault_t
xfs_filemap_page_mkwrite(
	struct vm_fault		*vmf)
{
	return __xfs_filemap_fault(vmf, PE_SIZE_PTE, true);
}

/*
 * pfn_mkwrite was originally intended to ensure we capture time stamp updates
 * on write faults. In reality, it needs to serialise against truncate and
 * prepare memory for writing, so handle it as a standard write fault.
 */
static vm_fault_t
xfs_filemap_pfn_mkwrite(
	struct vm_fault		*vmf)
{
	return __xfs_filemap_fault(vmf, PE_SIZE_PTE, true);
}

static vm_fault_t
xfs_filemap_map_pages(
	struct vm_fault		*vmf,
	pgoff_t			start_pgoff,
	pgoff_t			end_pgoff)
{
	struct inode		*inode = file_inode(vmf->vma->vm_file);
	vm_fault_t ret;

	xfs_ilock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
	ret = filemap_map_pages(vmf, start_pgoff, end_pgoff);
	xfs_iunlock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
	return ret;
}

static const struct vm_operations_struct xfs_file_vm_ops = {
	.fault		= xfs_filemap_fault,
	.huge_fault	= xfs_filemap_huge_fault,
	.map_pages	= xfs_filemap_map_pages,
	.page_mkwrite	= xfs_filemap_page_mkwrite,
	.pfn_mkwrite	= xfs_filemap_pfn_mkwrite,
};

STATIC int
xfs_file_mmap(
	struct file		*file,
	struct vm_area_struct	*vma)
{
	struct inode		*inode = file_inode(file);
	struct xfs_buftarg	*target = xfs_inode_buftarg(XFS_I(inode));

	/*
	 * We don't support synchronous mappings for non-DAX files and
	 * for DAX files if the underlying dax_device is not synchronous.
	 */
	if (!daxdev_mapping_supported(vma, target->bt_daxdev))
		return -EOPNOTSUPP;

	file_accessed(file);
	vma->vm_ops = &xfs_file_vm_ops;
	if (IS_DAX(inode))
		vma->vm_flags |= VM_HUGEPAGE;
	return 0;
}

const struct file_operations xfs_file_operations = {
	.llseek		= xfs_file_llseek,
	.read_iter	= xfs_file_read_iter,
	.write_iter	= xfs_file_write_iter,
	.splice_read	= generic_file_splice_read,
	.splice_write	= iter_file_splice_write,
	.iopoll		= iocb_bio_iopoll,
	.unlocked_ioctl	= xfs_file_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= xfs_file_compat_ioctl,
#endif
	.mmap		= xfs_file_mmap,
	.mmap_supported_flags = MAP_SYNC,
	.open		= xfs_file_open,
	.release	= xfs_file_release,
	.fsync		= xfs_file_fsync,
	.get_unmapped_area = thp_get_unmapped_area,
	.fallocate	= xfs_file_fallocate,
	.fadvise	= xfs_file_fadvise,
	.remap_file_range = xfs_file_remap_range,
};

const struct file_operations xfs_dir_file_operations = {
	.open		= xfs_dir_open,
	.read		= generic_read_dir,
	.iterate_shared	= xfs_file_readdir,
	.llseek		= generic_file_llseek,
	.unlocked_ioctl	= xfs_file_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= xfs_file_compat_ioctl,
#endif
	.fsync		= xfs_dir_fsync,
};