// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_inode_item.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_dir2.h"
#include "xfs_dir2_priv.h"
#include "xfs_ioctl.h"
#include "xfs_trace.h"
#include "xfs_log.h"
#include "xfs_icache.h"
#include "xfs_pnfs.h"
#include "xfs_iomap.h"
#include "xfs_reflink.h"

#include <linux/dax.h>
#include <linux/falloc.h>
#include <linux/backing-dev.h>
#include <linux/mman.h>
#include <linux/fadvise.h>
#include <linux/mount.h>

static const struct vm_operations_struct xfs_file_vm_ops;

/*
 * Decide if the given file range is aligned to the size of the fundamental
 * allocation unit for the file.
 */
static bool
xfs_is_falloc_aligned(
	struct xfs_inode	*ip,
	loff_t			pos,
	long long int		len)
{
	struct xfs_mount	*mp = ip->i_mount;
	uint64_t		mask;

	if (XFS_IS_REALTIME_INODE(ip)) {
		if (!is_power_of_2(mp->m_sb.sb_rextsize)) {
			u64	rextbytes;
			u32	mod;

			rextbytes = XFS_FSB_TO_B(mp, mp->m_sb.sb_rextsize);
			div_u64_rem(pos, rextbytes, &mod);
			if (mod)
				return false;
			div_u64_rem(len, rextbytes, &mod);
			return mod == 0;
		}
		mask = XFS_FSB_TO_B(mp, mp->m_sb.sb_rextsize) - 1;
	} else {
		mask = mp->m_sb.sb_blocksize - 1;
	}

	return !((pos | len) & mask);
}

/*
 * Fsync operations on directories are much simpler than on regular files,
 * as there is no file data to flush, and thus also no need for explicit
 * cache flush operations, and there are no non-transaction metadata updates
 * on directories either.
 */
STATIC int
xfs_dir_fsync(
	struct file		*file,
	loff_t			start,
	loff_t			end,
	int			datasync)
{
	struct xfs_inode	*ip = XFS_I(file->f_mapping->host);

	trace_xfs_dir_fsync(ip);
	return xfs_log_force_inode(ip);
}

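/*
 * Return the CIL commit sequence that a log force must cover for this inode,
 * or zero if no log force is needed: the inode is not pinned, or fdatasync
 * finds only timestamp updates left in the log item.
 */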
static xfs_csn_t
xfs_fsync_seq(
	struct xfs_inode	*ip,
	bool			datasync)
{
	if (!xfs_ipincount(ip))
		return 0;
	if (datasync && !(ip->i_itemp->ili_fsync_fields & ~XFS_ILOG_TIMESTAMP))
		return 0;
	return ip->i_itemp->ili_commit_seq;
}

/*
 * All metadata updates are logged, which means that we just have to flush the
 * log up to the latest LSN that touched the inode.
 *
 * If we have concurrent fsync/fdatasync() calls, we need them to all block on
 * the log force before we clear the ili_fsync_fields field. This ensures that
 * we don't get a racing sync operation that does not wait for the metadata to
 * hit the journal before returning.  If we race with clearing ili_fsync_fields,
 * then all that will happen is the log force will do nothing as the lsn will
 * already be on disk.  We can't race with setting ili_fsync_fields because that
 * is done under XFS_ILOCK_EXCL, and that can't happen because we hold the lock
 * shared until after the ili_fsync_fields is cleared.
 */
static int
xfs_fsync_flush_log(
	struct xfs_inode	*ip,
	bool			datasync,
	int			*log_flushed)
{
	int			error = 0;
	xfs_csn_t		seq;

	xfs_ilock(ip, XFS_ILOCK_SHARED);
	seq = xfs_fsync_seq(ip, datasync);
	if (seq) {
		error = xfs_log_force_seq(ip->i_mount, seq, XFS_LOG_SYNC,
					  log_flushed);

		spin_lock(&ip->i_itemp->ili_lock);
		ip->i_itemp->ili_fsync_fields = 0;
		spin_unlock(&ip->i_itemp->ili_lock);
	}
	xfs_iunlock(ip, XFS_ILOCK_SHARED);
	return error;
}

STATIC int
xfs_file_fsync(
	struct file		*file,
	loff_t			start,
	loff_t			end,
	int			datasync)
{
	struct xfs_inode	*ip = XFS_I(file->f_mapping->host);
	struct xfs_mount	*mp = ip->i_mount;
	int			error, err2;
	int			log_flushed = 0;

	trace_xfs_file_fsync(ip);

	error = file_write_and_wait_range(file, start, end);
	if (error)
		return error;

	if (xfs_is_shutdown(mp))
		return -EIO;

	xfs_iflags_clear(ip, XFS_ITRUNCATED);

	/*
	 * If we have an RT and/or log subvolume we need to make sure to flush
	 * the write cache the device used for file data first.  This is to
	 * ensure newly written file data makes it to disk before logging the
	 * new inode size in case of an extending write.
	 */
	if (XFS_IS_REALTIME_INODE(ip))
		error = blkdev_issue_flush(mp->m_rtdev_targp->bt_bdev);
	else if (mp->m_logdev_targp != mp->m_ddev_targp)
		error = blkdev_issue_flush(mp->m_ddev_targp->bt_bdev);

	/*
	 * Any inode that has dirty modifications in the log is pinned.  The
	 * racy check here for a pinned inode will not catch modifications
	 * that happen concurrently to the fsync call, but fsync semantics
	 * only require syncing previously completed I/O.
	 */
	if (xfs_ipincount(ip)) {
		err2 = xfs_fsync_flush_log(ip, datasync, &log_flushed);
		if (err2 && !error)
			error = err2;
	}

	/*
	 * If we only have a single device, and the log force above was
	 * a no-op we might have to flush the data device cache here.
	 * This can only happen for fdatasync/O_DSYNC if we were overwriting
	 * an already allocated file and thus do not have any metadata to
	 * commit.
	 */
	if (!log_flushed && !XFS_IS_REALTIME_INODE(ip) &&
	    mp->m_logdev_targp == mp->m_ddev_targp) {
		err2 = blkdev_issue_flush(mp->m_ddev_targp->bt_bdev);
		if (err2 && !error)
			error = err2;
	}

	return error;
}

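/*
 * Take an inode lock in the mode given by @lock_mode, honouring IOCB_NOWAIT
 * semantics: trylock and return -EAGAIN instead of sleeping when the iocb
 * requests non-blocking behaviour.
 */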
static int
xfs_ilock_iocb(
	struct kiocb		*iocb,
	unsigned int		lock_mode)
{
	struct xfs_inode	*ip = XFS_I(file_inode(iocb->ki_filp));

	if (iocb->ki_flags & IOCB_NOWAIT) {
		if (!xfs_ilock_nowait(ip, lock_mode))
			return -EAGAIN;
	} else {
		xfs_ilock(ip, lock_mode);
	}

	return 0;
}

STATIC ssize_t
xfs_file_dio_read(
	struct kiocb		*iocb,
	struct iov_iter		*to)
{
	struct xfs_inode	*ip = XFS_I(file_inode(iocb->ki_filp));
	ssize_t			ret;

	trace_xfs_file_direct_read(iocb, to);

	if (!iov_iter_count(to))
		return 0; /* skip atime */

	file_accessed(iocb->ki_filp);

	ret = xfs_ilock_iocb(iocb, XFS_IOLOCK_SHARED);
	if (ret)
		return ret;
	ret = iomap_dio_rw(iocb, to, &xfs_read_iomap_ops, NULL, 0, NULL, 0);
	xfs_iunlock(ip, XFS_IOLOCK_SHARED);

	return ret;
}

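/*
 * Read from a DAX file.  Locking matches the direct I/O path, but the data
 * is copied straight from the backing memory via dax_iomap_rw().
 */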
static noinline ssize_t
xfs_file_dax_read(
	struct kiocb		*iocb,
	struct iov_iter		*to)
{
	struct xfs_inode	*ip = XFS_I(iocb->ki_filp->f_mapping->host);
	ssize_t			ret = 0;

	trace_xfs_file_dax_read(iocb, to);

	if (!iov_iter_count(to))
		return 0; /* skip atime */

	ret = xfs_ilock_iocb(iocb, XFS_IOLOCK_SHARED);
	if (ret)
		return ret;
	ret = dax_iomap_rw(iocb, to, &xfs_read_iomap_ops);
	xfs_iunlock(ip, XFS_IOLOCK_SHARED);

	file_accessed(iocb->ki_filp);
	return ret;
}

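/*
 * Buffered reads go through the generic page cache path, with the iolock
 * held shared to serialise against truncate and other layout changes.
 */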
STATIC ssize_t
xfs_file_buffered_read(
	struct kiocb		*iocb,
	struct iov_iter		*to)
{
	struct xfs_inode	*ip = XFS_I(file_inode(iocb->ki_filp));
	ssize_t			ret;

	trace_xfs_file_buffered_read(iocb, to);

	ret = xfs_ilock_iocb(iocb, XFS_IOLOCK_SHARED);
	if (ret)
		return ret;
	ret = generic_file_read_iter(iocb, to);
	xfs_iunlock(ip, XFS_IOLOCK_SHARED);

	return ret;
}

STATIC ssize_t
xfs_file_read_iter(
	struct kiocb		*iocb,
	struct iov_iter		*to)
{
	struct inode		*inode = file_inode(iocb->ki_filp);
	struct xfs_mount	*mp = XFS_I(inode)->i_mount;
	ssize_t			ret = 0;

	XFS_STATS_INC(mp, xs_read_calls);

	if (xfs_is_shutdown(mp))
		return -EIO;

	if (IS_DAX(inode))
		ret = xfs_file_dax_read(iocb, to);
	else if (iocb->ki_flags & IOCB_DIRECT)
		ret = xfs_file_dio_read(iocb, to);
	else
		ret = xfs_file_buffered_read(iocb, to);

	if (ret > 0)
		XFS_STATS_ADD(mp, xs_read_bytes, ret);
	return ret;
}

/*
 * Common pre-write limit and setup checks.
 *
 * Called with the iolock held either shared or exclusive according to
 * @iolock, and returns with it held.  Might upgrade the iolock to exclusive
 * if called for a direct write beyond i_size.
 */
STATIC ssize_t
xfs_file_write_checks(
	struct kiocb		*iocb,
	struct iov_iter		*from,
	unsigned int		*iolock)
{
	struct file		*file = iocb->ki_filp;
	struct inode		*inode = file->f_mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	ssize_t			error = 0;
	size_t			count = iov_iter_count(from);
	bool			drained_dio = false;
	loff_t			isize;

restart:
	error = generic_write_checks(iocb, from);
	if (error <= 0)
		return error;

	if (iocb->ki_flags & IOCB_NOWAIT) {
		error = break_layout(inode, false);
		if (error == -EWOULDBLOCK)
			error = -EAGAIN;
	} else {
		error = xfs_break_layouts(inode, iolock, BREAK_WRITE);
	}

	if (error)
		return error;

	/*
	 * For changing security info in file_remove_privs() we need i_rwsem
	 * exclusively.
	 */
	if (*iolock == XFS_IOLOCK_SHARED && !IS_NOSEC(inode)) {
		xfs_iunlock(ip, *iolock);
		*iolock = XFS_IOLOCK_EXCL;
		error = xfs_ilock_iocb(iocb, *iolock);
		if (error) {
			*iolock = 0;
			return error;
		}
		goto restart;
	}

	/*
	 * If the offset is beyond the size of the file, we need to zero any
	 * blocks that fall between the existing EOF and the start of this
	 * write.  If zeroing is needed and we are currently holding the iolock
	 * shared, we need to update it to exclusive which implies having to
	 * redo all checks before.
	 *
	 * We need to serialise against EOF updates that occur in IO completions
	 * here. We want to make sure that nobody is changing the size while we
	 * do this check until we have placed an IO barrier (i.e.  hold the
	 * XFS_IOLOCK_EXCL) that prevents new IO from being dispatched.  The
	 * spinlock effectively forms a memory barrier once we have the
	 * XFS_IOLOCK_EXCL so we are guaranteed to see the latest EOF value and
	 * hence be able to correctly determine if we need to run zeroing.
	 *
	 * We can do an unlocked check here safely as IO completion can only
	 * extend EOF. Truncate is locked out at this point, so the EOF can
	 * not move backwards, only forwards. Hence we only need to take the
	 * slow path and spin locks when we are at or beyond the current EOF.
	 */
	if (iocb->ki_pos <= i_size_read(inode))
		goto out;

	spin_lock(&ip->i_flags_lock);
	isize = i_size_read(inode);
	if (iocb->ki_pos > isize) {
		spin_unlock(&ip->i_flags_lock);

		if (iocb->ki_flags & IOCB_NOWAIT)
			return -EAGAIN;

		if (!drained_dio) {
			if (*iolock == XFS_IOLOCK_SHARED) {
				xfs_iunlock(ip, *iolock);
				*iolock = XFS_IOLOCK_EXCL;
				xfs_ilock(ip, *iolock);
				iov_iter_reexpand(from, count);
			}
			/*
			 * We now have an IO submission barrier in place, but
			 * AIO can do EOF updates during IO completion and hence
			 * we now need to wait for all of them to drain. Non-AIO
			 * DIO will have drained before we are given the
			 * XFS_IOLOCK_EXCL, and so for most cases this wait is a
			 * no-op.
			 */
			inode_dio_wait(inode);
			drained_dio = true;
			goto restart;
		}

		trace_xfs_zero_eof(ip, isize, iocb->ki_pos - isize);
		error = xfs_zero_range(ip, isize, iocb->ki_pos - isize, NULL);
		if (error)
			return error;
	} else
		spin_unlock(&ip->i_flags_lock);

out:
	return kiocb_modified(iocb);
}

static int
xfs_dio_write_end_io(
	struct kiocb		*iocb,
	ssize_t			size,
	int			error,
	unsigned		flags)
{
	struct inode		*inode = file_inode(iocb->ki_filp);
	struct xfs_inode	*ip = XFS_I(inode);
	loff_t			offset = iocb->ki_pos;
	unsigned int		nofs_flag;

	trace_xfs_end_io_direct_write(ip, offset, size);

	if (xfs_is_shutdown(ip->i_mount))
		return -EIO;

	if (error)
		return error;
	if (!size)
		return 0;

	/*
	 * Capture amount written on completion as we can't reliably account
	 * for it on submission.
	 */
	XFS_STATS_ADD(ip->i_mount, xs_write_bytes, size);

	/*
	 * We can allocate memory here while doing writeback on behalf of
	 * memory reclaim.  To avoid memory allocation deadlocks set the
	 * task-wide nofs context for the following operations.
	 */
	nofs_flag = memalloc_nofs_save();

	if (flags & IOMAP_DIO_COW) {
		error = xfs_reflink_end_cow(ip, offset, size);
		if (error)
			goto out;
	}

	/*
	 * Unwritten conversion updates the in-core isize after extent
	 * conversion but before updating the on-disk size. Updating isize any
	 * earlier allows a racing dio read to find unwritten extents before
	 * they are converted.
	 */
	if (flags & IOMAP_DIO_UNWRITTEN) {
		error = xfs_iomap_write_unwritten(ip, offset, size, true);
		goto out;
	}

	/*
	 * We need to update the in-core inode size here so that we don't end up
	 * with the on-disk inode size being outside the in-core inode size. We
	 * have no other method of updating EOF for AIO, so always do it here
	 * if necessary.
	 *
	 * We need to lock the test/set EOF update as we can be racing with
	 * other IO completions here to update the EOF. Failing to serialise
	 * here can result in EOF moving backwards and Bad Things Happen when
	 * that occurs.
	 *
	 * As IO completion only ever extends EOF, we can do an unlocked check
	 * here to avoid taking the spinlock. If we land within the current EOF,
	 * then we do not need to do an extending update at all, and we don't
	 * need to take the lock to check this. If we race with an update moving
	 * EOF, then we'll either still be beyond EOF and need to take the lock,
	 * or we'll be within EOF and we don't need to take it at all.
	 */
	if (offset + size <= i_size_read(inode))
		goto out;

	spin_lock(&ip->i_flags_lock);
	if (offset + size > i_size_read(inode)) {
		i_size_write(inode, offset + size);
		spin_unlock(&ip->i_flags_lock);
		error = xfs_setfilesize(ip, offset, size);
	} else {
		spin_unlock(&ip->i_flags_lock);
	}

out:
	memalloc_nofs_restore(nofs_flag);
	return error;
}

static const struct iomap_dio_ops xfs_dio_write_ops = {
	.end_io		= xfs_dio_write_end_io,
};

/*
 * Handle block aligned direct I/O writes
 */
static noinline ssize_t
xfs_file_dio_write_aligned(
	struct xfs_inode	*ip,
	struct kiocb		*iocb,
	struct iov_iter		*from)
{
	unsigned int		iolock = XFS_IOLOCK_SHARED;
	ssize_t			ret;

	ret = xfs_ilock_iocb(iocb, iolock);
	if (ret)
		return ret;
	ret = xfs_file_write_checks(iocb, from, &iolock);
	if (ret)
		goto out_unlock;

	/*
	 * We don't need to hold the IOLOCK exclusively across the IO, so demote
	 * the iolock back to shared if we had to take the exclusive lock in
	 * xfs_file_write_checks() for other reasons.
	 */
	if (iolock == XFS_IOLOCK_EXCL) {
		xfs_ilock_demote(ip, XFS_IOLOCK_EXCL);
		iolock = XFS_IOLOCK_SHARED;
	}
	trace_xfs_file_direct_write(iocb, from);
	ret = iomap_dio_rw(iocb, from, &xfs_direct_write_iomap_ops,
			   &xfs_dio_write_ops, 0, NULL, 0);
out_unlock:
	if (iolock)
		xfs_iunlock(ip, iolock);
	return ret;
}

/*
 * Handle block unaligned direct I/O writes
 *
 * In most cases direct I/O writes will be done holding IOLOCK_SHARED, allowing
 * them to be done in parallel with reads and other direct I/O writes.  However,
 * if the I/O is not aligned to filesystem blocks, the direct I/O layer may need
 * to do sub-block zeroing and that requires serialisation against other direct
 * I/O to the same block.  In this case we need to serialise the submission of
 * the unaligned I/O so that we don't get racing block zeroing in the dio layer.
 * In the case where sub-block zeroing is not required, we can do concurrent
 * sub-block dios to the same block successfully.
 *
 * Optimistically submit the I/O using the shared lock first, but use the
 * IOMAP_DIO_OVERWRITE_ONLY flag to tell the lower layers to return -EAGAIN
 * if block allocation or partial block zeroing would be required.  In that case
 * we try again with the exclusive lock.
 */
static noinline ssize_t
xfs_file_dio_write_unaligned(
	struct xfs_inode	*ip,
	struct kiocb		*iocb,
	struct iov_iter		*from)
{
	size_t			isize = i_size_read(VFS_I(ip));
	size_t			count = iov_iter_count(from);
	unsigned int		iolock = XFS_IOLOCK_SHARED;
	unsigned int		flags = IOMAP_DIO_OVERWRITE_ONLY;
	ssize_t			ret;

	/*
	 * Extending writes need exclusivity because of the sub-block zeroing
	 * that the DIO code always does for partial tail blocks beyond EOF, so
	 * don't even bother trying the fast path in this case.
	 */
	if (iocb->ki_pos > isize || iocb->ki_pos + count >= isize) {
		if (iocb->ki_flags & IOCB_NOWAIT)
			return -EAGAIN;
retry_exclusive:
		iolock = XFS_IOLOCK_EXCL;
		flags = IOMAP_DIO_FORCE_WAIT;
	}

	ret = xfs_ilock_iocb(iocb, iolock);
	if (ret)
		return ret;

	/*
	 * We can't properly handle unaligned direct I/O to reflink files yet,
	 * as we can't unshare a partial block.
	 */
	if (xfs_is_cow_inode(ip)) {
		trace_xfs_reflink_bounce_dio_write(iocb, from);
		ret = -ENOTBLK;
		goto out_unlock;
	}

	ret = xfs_file_write_checks(iocb, from, &iolock);
	if (ret)
		goto out_unlock;

	/*
	 * If we are doing exclusive unaligned I/O, this must be the only I/O
	 * in-flight.  Otherwise we risk data corruption due to unwritten extent
	 * conversions from the AIO end_io handler.  Wait for all other I/O to
	 * drain first.
	 */
	if (flags & IOMAP_DIO_FORCE_WAIT)
		inode_dio_wait(VFS_I(ip));

	trace_xfs_file_direct_write(iocb, from);
	ret = iomap_dio_rw(iocb, from, &xfs_direct_write_iomap_ops,
			   &xfs_dio_write_ops, flags, NULL, 0);

	/*
	 * Retry unaligned I/O with exclusive blocking semantics if the DIO
	 * layer rejected it for mapping or locking reasons. If we are doing
	 * nonblocking user I/O, propagate the error.
	 */
	if (ret == -EAGAIN && !(iocb->ki_flags & IOCB_NOWAIT)) {
		ASSERT(flags & IOMAP_DIO_OVERWRITE_ONLY);
		xfs_iunlock(ip, iolock);
		goto retry_exclusive;
	}

out_unlock:
	if (iolock)
		xfs_iunlock(ip, iolock);
	return ret;
}

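/*
 * Direct I/O write dispatcher: reject I/O that is not aligned to the device
 * logical sector size, send block-unaligned writes down the serialised slow
 * path and fully block-aligned writes down the concurrent fast path.
 */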
static ssize_t
xfs_file_dio_write(
	struct kiocb		*iocb,
	struct iov_iter		*from)
{
	struct xfs_inode	*ip = XFS_I(file_inode(iocb->ki_filp));
	struct xfs_buftarg      *target = xfs_inode_buftarg(ip);
	size_t			count = iov_iter_count(from);

	/* direct I/O must be aligned to device logical sector size */
	if ((iocb->ki_pos | count) & target->bt_logical_sectormask)
		return -EINVAL;
	if ((iocb->ki_pos | count) & ip->i_mount->m_blockmask)
		return xfs_file_dio_write_unaligned(ip, iocb, from);
	return xfs_file_dio_write_aligned(ip, iocb, from);
}

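/*
 * Write to a DAX file.  The iolock is always taken exclusively here;
 * dax_iomap_rw() copies the data synchronously, and an extending write
 * updates the in-core and on-disk inode size directly on completion.
 */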
static noinline ssize_t
xfs_file_dax_write(
	struct kiocb		*iocb,
	struct iov_iter		*from)
{
	struct inode		*inode = iocb->ki_filp->f_mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	unsigned int		iolock = XFS_IOLOCK_EXCL;
	ssize_t			ret, error = 0;
	loff_t			pos;

	ret = xfs_ilock_iocb(iocb, iolock);
	if (ret)
		return ret;
	ret = xfs_file_write_checks(iocb, from, &iolock);
	if (ret)
		goto out;

	pos = iocb->ki_pos;

	trace_xfs_file_dax_write(iocb, from);
	ret = dax_iomap_rw(iocb, from, &xfs_dax_write_iomap_ops);
	if (ret > 0 && iocb->ki_pos > i_size_read(inode)) {
		i_size_write(inode, iocb->ki_pos);
		error = xfs_setfilesize(ip, pos, ret);
	}
out:
	if (iolock)
		xfs_iunlock(ip, iolock);
	if (error)
		return error;

	if (ret > 0) {
		XFS_STATS_ADD(ip->i_mount, xs_write_bytes, ret);

		/* Handle various SYNC-type writes */
		ret = generic_write_sync(iocb, ret);
	}
	return ret;
}

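/*
 * Buffered write path.  On EDQUOT or ENOSPC the write is retried once after
 * freeing lingering speculative preallocations, as the apparent lack of
 * space may be due to excess reserved metadata or quota blocks.
 */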
STATIC ssize_t
xfs_file_buffered_write(
	struct kiocb		*iocb,
	struct iov_iter		*from)
{
	struct inode		*inode = iocb->ki_filp->f_mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	ssize_t			ret;
	bool			cleared_space = false;
	unsigned int		iolock;

write_retry:
	iolock = XFS_IOLOCK_EXCL;
	ret = xfs_ilock_iocb(iocb, iolock);
	if (ret)
		return ret;

	ret = xfs_file_write_checks(iocb, from, &iolock);
	if (ret)
		goto out;

	/* We can write back this queue in page reclaim */
	current->backing_dev_info = inode_to_bdi(inode);

	trace_xfs_file_buffered_write(iocb, from);
	ret = iomap_file_buffered_write(iocb, from,
			&xfs_buffered_write_iomap_ops);
	if (likely(ret >= 0))
		iocb->ki_pos += ret;

	/*
	 * If we hit a space limit, try to free up some lingering preallocated
	 * space before returning an error. In the case of ENOSPC, first try to
	 * write back all dirty inodes to free up some of the excess reserved
	 * metadata space. This reduces the chances that the eofblocks scan
	 * waits on dirty mappings. Since xfs_flush_inodes() is serialized, this
	 * also behaves as a filter to prevent too many eofblocks scans from
	 * running at the same time.  Use a synchronous scan to increase the
	 * effectiveness of the scan.
	 */
	if (ret == -EDQUOT && !cleared_space) {
		xfs_iunlock(ip, iolock);
		xfs_blockgc_free_quota(ip, XFS_ICWALK_FLAG_SYNC);
		cleared_space = true;
		goto write_retry;
	} else if (ret == -ENOSPC && !cleared_space) {
		struct xfs_icwalk	icw = {0};

		cleared_space = true;
		xfs_flush_inodes(ip->i_mount);

		xfs_iunlock(ip, iolock);
		icw.icw_flags = XFS_ICWALK_FLAG_SYNC;
		xfs_blockgc_free_space(ip->i_mount, &icw);
		goto write_retry;
	}

	current->backing_dev_info = NULL;
out:
	if (iolock)
		xfs_iunlock(ip, iolock);

	if (ret > 0) {
		XFS_STATS_ADD(ip->i_mount, xs_write_bytes, ret);
		/* Handle various SYNC-type writes */
		ret = generic_write_sync(iocb, ret);
	}
	return ret;
}

STATIC ssize_t
xfs_file_write_iter(
	struct kiocb		*iocb,
	struct iov_iter		*from)
{
	struct inode		*inode = iocb->ki_filp->f_mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	ssize_t			ret;
	size_t			ocount = iov_iter_count(from);

	XFS_STATS_INC(ip->i_mount, xs_write_calls);

	if (ocount == 0)
		return 0;

	if (xfs_is_shutdown(ip->i_mount))
		return -EIO;

	if (IS_DAX(inode))
		return xfs_file_dax_write(iocb, from);

	if (iocb->ki_flags & IOCB_DIRECT) {
		/*
		 * Allow a directio write to fall back to a buffered
		 * write *only* in the case that we're doing a reflink
		 * CoW.  In all other directio scenarios we do not
		 * allow an operation to fall back to buffered mode.
		 */
		ret = xfs_file_dio_write(iocb, from);
		if (ret != -ENOTBLK)
			return ret;
	}

	return xfs_file_buffered_write(iocb, from);
}

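/*
 * Drop the mmap lock around the schedule() in the DAX-page-idle wait so the
 * task holding the page busy can make progress and release it.
 */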
static void
xfs_wait_dax_page(
	struct inode		*inode)
{
	struct xfs_inode        *ip = XFS_I(inode);

	xfs_iunlock(ip, XFS_MMAPLOCK_EXCL);
	schedule();
	xfs_ilock(ip, XFS_MMAPLOCK_EXCL);
}

int
xfs_break_dax_layouts(
	struct inode		*inode,
	bool			*retry)
{
	struct page		*page;

	ASSERT(xfs_isilocked(XFS_I(inode), XFS_MMAPLOCK_EXCL));

	page = dax_layout_busy_page(inode->i_mapping);
	if (!page)
		return 0;

	*retry = true;
	return ___wait_var_event(&page->_refcount,
			atomic_read(&page->_refcount) == 1, TASK_INTERRUPTIBLE,
			0, 0, xfs_wait_dax_page(inode));
}

int
xfs_break_layouts(
	struct inode		*inode,
	uint			*iolock,
	enum layout_break_reason reason)
{
	bool			retry;
	int			error;

	ASSERT(xfs_isilocked(XFS_I(inode), XFS_IOLOCK_SHARED|XFS_IOLOCK_EXCL));

	do {
		retry = false;
		switch (reason) {
		case BREAK_UNMAP:
			error = xfs_break_dax_layouts(inode, &retry);
			if (error || retry)
				break;
			fallthrough;
		case BREAK_WRITE:
			error = xfs_break_leased_layouts(inode, iolock, &retry);
			break;
		default:
			WARN_ON_ONCE(1);
			error = -EINVAL;
		}
	} while (error == 0 && retry);

	return error;
}

/* Does this file, inode, or mount want synchronous writes? */
static inline bool xfs_file_sync_writes(struct file *filp)
{
	struct xfs_inode	*ip = XFS_I(file_inode(filp));

	if (xfs_has_wsync(ip->i_mount))
		return true;
	if (filp->f_flags & (__O_SYNC | O_DSYNC))
		return true;
	if (IS_SYNC(file_inode(filp)))
		return true;

	return false;
}

#define	XFS_FALLOC_FL_SUPPORTED						\
		(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |		\
		 FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_ZERO_RANGE |	\
		 FALLOC_FL_INSERT_RANGE | FALLOC_FL_UNSHARE_RANGE)

STATIC long
xfs_file_fallocate(
	struct file		*file,
	int			mode,
	loff_t			offset,
	loff_t			len)
{
	struct inode		*inode = file_inode(file);
	struct xfs_inode	*ip = XFS_I(inode);
	long			error;
	uint			iolock = XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL;
	loff_t			new_size = 0;
	bool			do_file_insert = false;

	if (!S_ISREG(inode->i_mode))
		return -EINVAL;
	if (mode & ~XFS_FALLOC_FL_SUPPORTED)
		return -EOPNOTSUPP;

	xfs_ilock(ip, iolock);
	error = xfs_break_layouts(inode, &iolock, BREAK_UNMAP);
	if (error)
		goto out_unlock;

	/*
	 * Must wait for all AIO to complete before we continue as AIO can
	 * change the file size on completion without holding any locks we
	 * currently hold. We must do this first because AIO can update both
	 * the on disk and in memory inode sizes, and the operations that follow
	 * require the in-memory size to be fully up-to-date.
	 */
	inode_dio_wait(inode);

	/*
	 * Now that AIO and DIO have drained, we flush and (if necessary)
	 * invalidate the cached range over the first operation we are about
	 * to run.
	 *
	 * We care about zero and collapse here because they both run a hole
	 * punch over the range first. Because that can zero data, and the range
	 * of invalidation for the shift operations is much larger, we still do
	 * the required flush for collapse in xfs_prepare_shift().
	 *
	 * Insert has the same range requirements as collapse, and we extend the
	 * file first which can zero data. Hence insert has the same
	 * flush/invalidate requirements as collapse and so they are both
	 * handled at the right time by xfs_prepare_shift().
	 */
	if (mode & (FALLOC_FL_PUNCH_HOLE | FALLOC_FL_ZERO_RANGE |
		    FALLOC_FL_COLLAPSE_RANGE)) {
		error = xfs_flush_unmap_range(ip, offset, len);
		if (error)
			goto out_unlock;
	}

	error = file_modified(file);
	if (error)
		goto out_unlock;

	if (mode & FALLOC_FL_PUNCH_HOLE) {
		error = xfs_free_file_space(ip, offset, len);
		if (error)
			goto out_unlock;
	} else if (mode & FALLOC_FL_COLLAPSE_RANGE) {
		if (!xfs_is_falloc_aligned(ip, offset, len)) {
			error = -EINVAL;
			goto out_unlock;
		}

		/*
		 * There is no need to overlap collapse range with EOF,
		 * in which case it is effectively a truncate operation
		 */
		if (offset + len >= i_size_read(inode)) {
			error = -EINVAL;
			goto out_unlock;
		}

		new_size = i_size_read(inode) - len;

		error = xfs_collapse_file_space(ip, offset, len);
		if (error)
			goto out_unlock;
	} else if (mode & FALLOC_FL_INSERT_RANGE) {
		loff_t		isize = i_size_read(inode);

		if (!xfs_is_falloc_aligned(ip, offset, len)) {
			error = -EINVAL;
			goto out_unlock;
		}

		/*
		 * New inode size must not exceed ->s_maxbytes, accounting for
		 * possible signed overflow.
		 */
		if (inode->i_sb->s_maxbytes - isize < len) {
			error = -EFBIG;
			goto out_unlock;
		}
		new_size = isize + len;

		/* Offset should be less than i_size */
		if (offset >= isize) {
			error = -EINVAL;
			goto out_unlock;
		}
		do_file_insert = true;
	} else {
		if (!(mode & FALLOC_FL_KEEP_SIZE) &&
		    offset + len > i_size_read(inode)) {
			new_size = offset + len;
			error = inode_newsize_ok(inode, new_size);
			if (error)
				goto out_unlock;
		}

		if (mode & FALLOC_FL_ZERO_RANGE) {
			/*
			 * Punch a hole and prealloc the range.  We use a hole
			 * punch rather than unwritten extent conversion for two
			 * reasons:
			 *
			 *   1.) Hole punch handles partial block zeroing for us.
			 *   2.) If prealloc returns ENOSPC, the file range is
			 *       still zero-valued by virtue of the hole punch.
			 */
			unsigned int blksize = i_blocksize(inode);

			trace_xfs_zero_file_space(ip);

			error = xfs_free_file_space(ip, offset, len);
			if (error)
				goto out_unlock;

			len = round_up(offset + len, blksize) -
			      round_down(offset, blksize);
			offset = round_down(offset, blksize);
		} else if (mode & FALLOC_FL_UNSHARE_RANGE) {
			error = xfs_reflink_unshare(ip, offset, len);
			if (error)
				goto out_unlock;
		} else {
			/*
			 * If always_cow mode we can't use preallocations and
			 * thus should not create them.
			 */
			if (xfs_is_always_cow_inode(ip)) {
				error = -EOPNOTSUPP;
				goto out_unlock;
			}
		}

		if (!xfs_is_always_cow_inode(ip)) {
			error = xfs_alloc_file_space(ip, offset, len);
			if (error)
				goto out_unlock;
		}
	}

	/* Change file size if needed */
	if (new_size) {
		struct iattr iattr;

		iattr.ia_valid = ATTR_SIZE;
		iattr.ia_size = new_size;
		error = xfs_vn_setattr_size(file_mnt_user_ns(file),
					    file_dentry(file), &iattr);
		if (error)
			goto out_unlock;
	}

	/*
	 * Perform hole insertion now that the file size has been
	 * updated so that if we crash during the operation we don't
	 * leave shifted extents past EOF and hence lose access to
	 * the data that is contained within them.
	 */
	if (do_file_insert) {
		error = xfs_insert_file_space(ip, offset, len);
		if (error)
			goto out_unlock;
	}

	if (xfs_file_sync_writes(file))
		error = xfs_log_force_inode(ip);

out_unlock:
	xfs_iunlock(ip, iolock);
	return error;
}

STATIC int
xfs_file_fadvise(
	struct file	*file,
	loff_t		start,
	loff_t		end,
	int		advice)
{
	struct xfs_inode *ip = XFS_I(file_inode(file));
	int ret;
	int lockflags = 0;

	/*
	 * Operations creating pages in page cache need protection from hole
	 * punching and similar ops
	 */
	if (advice == POSIX_FADV_WILLNEED) {
		lockflags = XFS_IOLOCK_SHARED;
		xfs_ilock(ip, lockflags);
	}
	ret = generic_fadvise(file, start, end, advice);
	if (lockflags)
		xfs_iunlock(ip, lockflags);
	return ret;
}

STATIC loff_t
xfs_file_remap_range(
	struct file		*file_in,
	loff_t			pos_in,
	struct file		*file_out,
	loff_t			pos_out,
	loff_t			len,
	unsigned int		remap_flags)
{
	struct inode		*inode_in = file_inode(file_in);
	struct xfs_inode	*src = XFS_I(inode_in);
	struct inode		*inode_out = file_inode(file_out);
	struct xfs_inode	*dest = XFS_I(inode_out);
	struct xfs_mount	*mp = src->i_mount;
	loff_t			remapped = 0;
	xfs_extlen_t		cowextsize;
	int			ret;

	if (remap_flags & ~(REMAP_FILE_DEDUP | REMAP_FILE_ADVISORY))
		return -EINVAL;

	if (!xfs_has_reflink(mp))
		return -EOPNOTSUPP;

	if (xfs_is_shutdown(mp))
		return -EIO;

	/* Prepare and then clone file data. */
	ret = xfs_reflink_remap_prep(file_in, pos_in, file_out, pos_out,
			&len, remap_flags);
	if (ret || len == 0)
		return ret;

	trace_xfs_reflink_remap_range(src, pos_in, len, dest, pos_out);

	ret = xfs_reflink_remap_blocks(src, pos_in, dest, pos_out, len,
			&remapped);
	if (ret)
		goto out_unlock;

	/*
	 * Carry the cowextsize hint from src to dest if we're sharing the
	 * entire source file to the entire destination file, the source file
	 * has a cowextsize hint, and the destination file does not.
	 */
	cowextsize = 0;
	if (pos_in == 0 && len == i_size_read(inode_in) &&
	    (src->i_diflags2 & XFS_DIFLAG2_COWEXTSIZE) &&
	    pos_out == 0 && len >= i_size_read(inode_out) &&
	    !(dest->i_diflags2 & XFS_DIFLAG2_COWEXTSIZE))
		cowextsize = src->i_cowextsize;

	ret = xfs_reflink_update_dest(dest, pos_out + len, cowextsize,
			remap_flags);
	if (ret)
		goto out_unlock;

	if (xfs_file_sync_writes(file_in) || xfs_file_sync_writes(file_out))
		xfs_log_force_inode(dest);
out_unlock:
	xfs_iunlock2_io_mmap(src, dest);
	if (ret)
		trace_xfs_reflink_remap_range_error(dest, ret, _RET_IP_);
	return remapped > 0 ? remapped : ret;
}

STATIC int
xfs_file_open(
	struct inode	*inode,
	struct file	*file)
{
	if (xfs_is_shutdown(XFS_M(inode->i_sb)))
		return -EIO;
	file->f_mode |= FMODE_NOWAIT | FMODE_BUF_RASYNC | FMODE_BUF_WASYNC;
	return generic_file_open(inode, file);
}

STATIC int
xfs_dir_open(
	struct inode	*inode,
	struct file	*file)
{
	struct xfs_inode *ip = XFS_I(inode);
	unsigned int	mode;
	int		error;

	error = xfs_file_open(inode, file);
	if (error)
		return error;

	/*
	 * If there are any blocks, read-ahead block 0 as we're almost
	 * certain to have the next operation be a read there.
	 */
	mode = xfs_ilock_data_map_shared(ip);
	if (ip->i_df.if_nextents > 0)
		error = xfs_dir3_data_readahead(ip, 0, 0);
	xfs_iunlock(ip, mode);
	return error;
}

STATIC int
xfs_file_release(
	struct inode	*inode,
	struct file	*filp)
{
	return xfs_release(XFS_I(inode));
}

STATIC int
xfs_file_readdir(
	struct file	*file,
	struct dir_context *ctx)
{
	struct inode	*inode = file_inode(file);
	xfs_inode_t	*ip = XFS_I(inode);
	size_t		bufsize;

	/*
	 * The Linux API doesn't pass the total size of the buffer we read
	 * into down to the filesystem.  With the filldir concept it's not
	 * needed for correct information, but the XFS dir2 leaf code wants
	 * an estimate of the buffer size to calculate its readahead window
	 * and size the buffers used for mapping to physical blocks.
	 *
	 * Try to give it an estimate that's good enough, maybe at some
	 * point we can change the ->readdir prototype to include the
	 * buffer size.  For now we use the current glibc buffer size.
	 */
	bufsize = (size_t)min_t(loff_t, XFS_READDIR_BUFSIZE, ip->i_disk_size);

	return xfs_readdir(NULL, ip, ctx, bufsize);
}

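/*
 * llseek: SEEK_HOLE and SEEK_DATA are implemented on top of the iomap
 * extent mapping; all other whence values use the generic code.
 */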
STATIC loff_t
xfs_file_llseek(
	struct file	*file,
	loff_t		offset,
	int		whence)
{
	struct inode		*inode = file->f_mapping->host;

	if (xfs_is_shutdown(XFS_I(inode)->i_mount))
		return -EIO;

	switch (whence) {
	default:
		return generic_file_llseek(file, offset, whence);
	case SEEK_HOLE:
		offset = iomap_seek_hole(inode, offset, &xfs_seek_iomap_ops);
		break;
	case SEEK_DATA:
		offset = iomap_seek_data(inode, offset, &xfs_seek_iomap_ops);
		break;
	}

	if (offset < 0)
		return offset;
	return vfs_setpos(file, offset, inode->i_sb->s_maxbytes);
}

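/*
 * DAX fault handler: write faults not backed by a page cache copy
 * (vmf->cow_page) go through the DAX write iomap ops, everything else
 * through the read ops.  The !CONFIG_FS_DAX stub only keeps the callers
 * compiling; it is never reached since IS_DAX() is false there.
 */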
#ifdef CONFIG_FS_DAX
static int
xfs_dax_fault(
	struct vm_fault		*vmf,
	enum page_entry_size	pe_size,
	bool			write_fault,
	pfn_t			*pfn)
{
	return dax_iomap_fault(vmf, pe_size, pfn, NULL,
			(write_fault && !vmf->cow_page) ?
				&xfs_dax_write_iomap_ops :
				&xfs_read_iomap_ops);
}
#else
static int
xfs_dax_fault(
	struct vm_fault		*vmf,
	enum page_entry_size	pe_size,
	bool			write_fault,
	pfn_t			*pfn)
{
	return 0;
}
#endif

/*
 * Locking for serialisation of IO during page faults. This results in a lock
 * ordering of:
 *
 * mmap_lock (MM)
 *   sb_start_pagefault(vfs, freeze)
 *     invalidate_lock (vfs/XFS_MMAPLOCK - truncate serialisation)
 *       page_lock (MM)
 *         i_lock (XFS - extent map serialisation)
 */
static vm_fault_t
__xfs_filemap_fault(
	struct vm_fault		*vmf,
	enum page_entry_size	pe_size,
	bool			write_fault)
{
	struct inode		*inode = file_inode(vmf->vma->vm_file);
	struct xfs_inode	*ip = XFS_I(inode);
	vm_fault_t		ret;

	trace_xfs_filemap_fault(ip, pe_size, write_fault);

	if (write_fault) {
		sb_start_pagefault(inode->i_sb);
		file_update_time(vmf->vma->vm_file);
	}

	if (IS_DAX(inode)) {
		pfn_t pfn;

		xfs_ilock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
		ret = xfs_dax_fault(vmf, pe_size, write_fault, &pfn);
		if (ret & VM_FAULT_NEEDDSYNC)
			ret = dax_finish_sync_fault(vmf, pe_size, pfn);
		xfs_iunlock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
	} else {
		if (write_fault) {
			xfs_ilock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
			ret = iomap_page_mkwrite(vmf,
					&xfs_buffered_write_iomap_ops);
			xfs_iunlock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
		} else {
			ret = filemap_fault(vmf);
		}
	}

	if (write_fault)
		sb_end_pagefault(inode->i_sb);
	return ret;
}

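/*
 * A fault only counts as a write fault if it is for a write on a shared
 * mapping, i.e. the modification must end up in the backing file.
 */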
static inline bool
xfs_is_write_fault(
	struct vm_fault		*vmf)
{
	return (vmf->flags & FAULT_FLAG_WRITE) &&
	       (vmf->vma->vm_flags & VM_SHARED);
}

static vm_fault_t
xfs_filemap_fault(
	struct vm_fault		*vmf)
{
	/* DAX can shortcut the normal fault path on write faults! */
	return __xfs_filemap_fault(vmf, PE_SIZE_PTE,
			IS_DAX(file_inode(vmf->vma->vm_file)) &&
			xfs_is_write_fault(vmf));
}

static vm_fault_t
xfs_filemap_huge_fault(
	struct vm_fault		*vmf,
	enum page_entry_size	pe_size)
{
	if (!IS_DAX(file_inode(vmf->vma->vm_file)))
		return VM_FAULT_FALLBACK;

	/* DAX can shortcut the normal fault path on write faults! */
	return __xfs_filemap_fault(vmf, pe_size,
			xfs_is_write_fault(vmf));
}

static vm_fault_t
xfs_filemap_page_mkwrite(
	struct vm_fault		*vmf)
{
	return __xfs_filemap_fault(vmf, PE_SIZE_PTE, true);
}

/*
 * pfn_mkwrite was originally intended to ensure we capture time stamp updates
 * on write faults. In reality, it needs to serialise against truncate and
 * prepare memory for writing so handle it as a standard write fault.
 */
static vm_fault_t
xfs_filemap_pfn_mkwrite(
	struct vm_fault		*vmf)
{

	return __xfs_filemap_fault(vmf, PE_SIZE_PTE, true);
}

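/*
 * Map a range of already-uptodate pages around the faulting address while
 * holding XFS_MMAPLOCK_SHARED, so fault-around cannot race with truncate
 * or hole punching.
 */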
static vm_fault_t
xfs_filemap_map_pages(
	struct vm_fault		*vmf,
	pgoff_t			start_pgoff,
	pgoff_t			end_pgoff)
{
	struct inode		*inode = file_inode(vmf->vma->vm_file);
	vm_fault_t ret;

	xfs_ilock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
	ret = filemap_map_pages(vmf, start_pgoff, end_pgoff);
	xfs_iunlock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
	return ret;
}

static const struct vm_operations_struct xfs_file_vm_ops = {
	.fault		= xfs_filemap_fault,
	.huge_fault	= xfs_filemap_huge_fault,
	.map_pages	= xfs_filemap_map_pages,
	.page_mkwrite	= xfs_filemap_page_mkwrite,
	.pfn_mkwrite	= xfs_filemap_pfn_mkwrite,
};

STATIC int
xfs_file_mmap(
	struct file		*file,
	struct vm_area_struct	*vma)
{
	struct inode		*inode = file_inode(file);
	struct xfs_buftarg	*target = xfs_inode_buftarg(XFS_I(inode));

	/*
	 * We don't support synchronous mappings for non-DAX files and
	 * for DAX files if the underlying dax_device is not synchronous.
	 */
	if (!daxdev_mapping_supported(vma, target->bt_daxdev))
		return -EOPNOTSUPP;

	file_accessed(file);
	vma->vm_ops = &xfs_file_vm_ops;
	if (IS_DAX(inode))
		vma->vm_flags |= VM_HUGEPAGE;
	return 0;
}

const struct file_operations xfs_file_operations = {
	.llseek		= xfs_file_llseek,
	.read_iter	= xfs_file_read_iter,
	.write_iter	= xfs_file_write_iter,
	.splice_read	= generic_file_splice_read,
	.splice_write	= iter_file_splice_write,
	.iopoll		= iocb_bio_iopoll,
	.unlocked_ioctl	= xfs_file_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= xfs_file_compat_ioctl,
#endif
	.mmap		= xfs_file_mmap,
	.mmap_supported_flags = MAP_SYNC,
	.open		= xfs_file_open,
	.release	= xfs_file_release,
	.fsync		= xfs_file_fsync,
	.get_unmapped_area = thp_get_unmapped_area,
	.fallocate	= xfs_file_fallocate,
	.fadvise	= xfs_file_fadvise,
	.remap_file_range = xfs_file_remap_range,
};

const struct file_operations xfs_dir_file_operations = {
	.open		= xfs_dir_open,
	.read		= generic_read_dir,
	.iterate_shared	= xfs_file_readdir,
	.llseek		= generic_file_llseek,
	.unlocked_ioctl	= xfs_file_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= xfs_file_compat_ioctl,
#endif
	.fsync		= xfs_dir_fsync,
};