// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_inode_item.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_dir2.h"
#include "xfs_dir2_priv.h"
#include "xfs_ioctl.h"
#include "xfs_trace.h"
#include "xfs_log.h"
#include "xfs_icache.h"
#include "xfs_pnfs.h"
#include "xfs_iomap.h"
#include "xfs_reflink.h"

#include <linux/falloc.h>
#include <linux/backing-dev.h>
#include <linux/mman.h>
#include <linux/fadvise.h>

static const struct vm_operations_struct xfs_file_vm_ops;

/*
 * Decide if the given file range is aligned to the size of the fundamental
 * allocation unit for the file.
 */
static bool
xfs_is_falloc_aligned(
	struct xfs_inode	*ip,
	loff_t			pos,
	long long int		len)
{
	struct xfs_mount	*mp = ip->i_mount;
	uint64_t		mask;

	if (XFS_IS_REALTIME_INODE(ip)) {
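		/*
		 * The realtime extent size need not be a power of two, in
		 * which case a simple mask test does not work and we fall
		 * back to 64-bit division to check the remainders.
		 */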
		if (!is_power_of_2(mp->m_sb.sb_rextsize)) {
			u64	rextbytes;
			u32	mod;

			rextbytes = XFS_FSB_TO_B(mp, mp->m_sb.sb_rextsize);
			div_u64_rem(pos, rextbytes, &mod);
			if (mod)
				return false;
			div_u64_rem(len, rextbytes, &mod);
			return mod == 0;
		}
		mask = XFS_FSB_TO_B(mp, mp->m_sb.sb_rextsize) - 1;
	} else {
		mask = mp->m_sb.sb_blocksize - 1;
	}

	return !((pos | len) & mask);
}

int
xfs_update_prealloc_flags(
	struct xfs_inode	*ip,
	enum xfs_prealloc_flags	flags)
{
	struct xfs_trans	*tp;
	int			error;

	error = xfs_trans_alloc(ip->i_mount, &M_RES(ip->i_mount)->tr_writeid,
			0, 0, 0, &tp);
	if (error)
		return error;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);

	if (!(flags & XFS_PREALLOC_INVISIBLE)) {
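		/*
		 * A visible preallocation counts as content modification:
		 * strip the setuid/setgid bits and bump the timestamps as
		 * a regular write would.
		 */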
		VFS_I(ip)->i_mode &= ~S_ISUID;
		if (VFS_I(ip)->i_mode & S_IXGRP)
			VFS_I(ip)->i_mode &= ~S_ISGID;
		xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
	}

	if (flags & XFS_PREALLOC_SET)
		ip->i_d.di_flags |= XFS_DIFLAG_PREALLOC;
	if (flags & XFS_PREALLOC_CLEAR)
		ip->i_d.di_flags &= ~XFS_DIFLAG_PREALLOC;

	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
	if (flags & XFS_PREALLOC_SYNC)
		xfs_trans_set_sync(tp);
	return xfs_trans_commit(tp);
}

/*
 * Fsync operations on directories are much simpler than on regular files,
 * as there is no file data to flush, and thus also no need for explicit
 * cache flush operations, and there are no non-transaction metadata updates
 * on directories either.
 */
STATIC int
xfs_dir_fsync(
	struct file		*file,
	loff_t			start,
	loff_t			end,
	int			datasync)
{
	struct xfs_inode	*ip = XFS_I(file->f_mapping->host);

	trace_xfs_dir_fsync(ip);
	return xfs_log_force_inode(ip);
}

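/*
 * Return the LSN the log must be forced to in order to make this inode
 * stable: 0 if the inode is not pinned, or if a datasync finds nothing
 * dirty beyond timestamp updates that fsync is not required to push.
 */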
static xfs_lsn_t
xfs_fsync_lsn(
	struct xfs_inode	*ip,
	bool			datasync)
{
	if (!xfs_ipincount(ip))
		return 0;
	if (datasync && !(ip->i_itemp->ili_fsync_fields & ~XFS_ILOG_TIMESTAMP))
		return 0;
	return ip->i_itemp->ili_last_lsn;
}

/*
 * All metadata updates are logged, which means that we just have to flush the
 * log up to the latest LSN that touched the inode.
 *
 * If we have concurrent fsync/fdatasync() calls, we need them to all block on
 * the log force before we clear the ili_fsync_fields field. This ensures that
 * we don't get a racing sync operation that does not wait for the metadata to
 * hit the journal before returning.  If we race with clearing ili_fsync_fields,
 * then all that will happen is the log force will do nothing as the lsn will
 * already be on disk.  We can't race with setting ili_fsync_fields because that
 * is done under XFS_ILOCK_EXCL, and that can't happen because we hold the lock
 * shared until after the ili_fsync_fields is cleared.
 */
static int
xfs_fsync_flush_log(
	struct xfs_inode	*ip,
	bool			datasync,
	int			*log_flushed)
{
	int			error = 0;
	xfs_lsn_t		lsn;

	xfs_ilock(ip, XFS_ILOCK_SHARED);
	lsn = xfs_fsync_lsn(ip, datasync);
	if (lsn) {
		error = xfs_log_force_lsn(ip->i_mount, lsn, XFS_LOG_SYNC,
					  log_flushed);

		spin_lock(&ip->i_itemp->ili_lock);
		ip->i_itemp->ili_fsync_fields = 0;
		spin_unlock(&ip->i_itemp->ili_lock);
	}
	xfs_iunlock(ip, XFS_ILOCK_SHARED);
	return error;
}

STATIC int
xfs_file_fsync(
	struct file		*file,
	loff_t			start,
	loff_t			end,
	int			datasync)
{
	struct xfs_inode	*ip = XFS_I(file->f_mapping->host);
	struct xfs_mount	*mp = ip->i_mount;
	int			error = 0;
	int			log_flushed = 0;

	trace_xfs_file_fsync(ip);

	error = file_write_and_wait_range(file, start, end);
	if (error)
		return error;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	xfs_iflags_clear(ip, XFS_ITRUNCATED);

	/*
	 * If we have an RT and/or log subvolume we need to make sure to flush
	 * the write cache of the device used for file data first.  This is to
	 * ensure newly written file data makes it to disk before logging the
	 * new inode size in case of an extending write.
	 */
	if (XFS_IS_REALTIME_INODE(ip))
		xfs_blkdev_issue_flush(mp->m_rtdev_targp);
	else if (mp->m_logdev_targp != mp->m_ddev_targp)
		xfs_blkdev_issue_flush(mp->m_ddev_targp);

	/*
	 * Any inode that has dirty modifications in the log is pinned.  The
	 * racy check here for a pinned inode will not catch modifications
	 * that happen concurrently to the fsync call, but fsync semantics
	 * only require previously completed I/O to be synced.
	 */
	if (xfs_ipincount(ip))
		error = xfs_fsync_flush_log(ip, datasync, &log_flushed);

	/*
	 * If we only have a single device, and the log force above was
	 * a no-op we might have to flush the data device cache here.
	 * This can only happen for fdatasync/O_DSYNC if we were overwriting
	 * an already allocated file and thus do not have any metadata to
	 * commit.
	 */
	if (!log_flushed && !XFS_IS_REALTIME_INODE(ip) &&
	    mp->m_logdev_targp == mp->m_ddev_targp)
		xfs_blkdev_issue_flush(mp->m_ddev_targp);

	return error;
}

STATIC ssize_t
xfs_file_dio_aio_read(
	struct kiocb		*iocb,
	struct iov_iter		*to)
{
	struct xfs_inode	*ip = XFS_I(file_inode(iocb->ki_filp));
	size_t			count = iov_iter_count(to);
	ssize_t			ret;

	trace_xfs_file_direct_read(ip, count, iocb->ki_pos);

	if (!count)
		return 0; /* skip atime */

	file_accessed(iocb->ki_filp);

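	/*
	 * Take the iolock shared; with IOCB_NOWAIT only trylock, so that
	 * RWF_NOWAIT submitters never sleep on the lock.
	 */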
	if (iocb->ki_flags & IOCB_NOWAIT) {
		if (!xfs_ilock_nowait(ip, XFS_IOLOCK_SHARED))
			return -EAGAIN;
	} else {
		xfs_ilock(ip, XFS_IOLOCK_SHARED);
	}
	ret = iomap_dio_rw(iocb, to, &xfs_read_iomap_ops, NULL,
			is_sync_kiocb(iocb));
	xfs_iunlock(ip, XFS_IOLOCK_SHARED);

	return ret;
}

static noinline ssize_t
xfs_file_dax_read(
	struct kiocb		*iocb,
	struct iov_iter		*to)
{
	struct xfs_inode	*ip = XFS_I(iocb->ki_filp->f_mapping->host);
	size_t			count = iov_iter_count(to);
	ssize_t			ret = 0;

	trace_xfs_file_dax_read(ip, count, iocb->ki_pos);

	if (!count)
		return 0; /* skip atime */

	if (iocb->ki_flags & IOCB_NOWAIT) {
		if (!xfs_ilock_nowait(ip, XFS_IOLOCK_SHARED))
			return -EAGAIN;
	} else {
		xfs_ilock(ip, XFS_IOLOCK_SHARED);
	}

	ret = dax_iomap_rw(iocb, to, &xfs_read_iomap_ops);
	xfs_iunlock(ip, XFS_IOLOCK_SHARED);

	file_accessed(iocb->ki_filp);
	return ret;
}

STATIC ssize_t
xfs_file_buffered_aio_read(
	struct kiocb		*iocb,
	struct iov_iter		*to)
{
	struct xfs_inode	*ip = XFS_I(file_inode(iocb->ki_filp));
	ssize_t			ret;

	trace_xfs_file_buffered_read(ip, iov_iter_count(to), iocb->ki_pos);

	if (iocb->ki_flags & IOCB_NOWAIT) {
		if (!xfs_ilock_nowait(ip, XFS_IOLOCK_SHARED))
			return -EAGAIN;
	} else {
		xfs_ilock(ip, XFS_IOLOCK_SHARED);
	}
	ret = generic_file_read_iter(iocb, to);
	xfs_iunlock(ip, XFS_IOLOCK_SHARED);

	return ret;
}

STATIC ssize_t
xfs_file_read_iter(
	struct kiocb		*iocb,
	struct iov_iter		*to)
{
	struct inode		*inode = file_inode(iocb->ki_filp);
	struct xfs_mount	*mp = XFS_I(inode)->i_mount;
	ssize_t			ret = 0;

	XFS_STATS_INC(mp, xs_read_calls);

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	if (IS_DAX(inode))
		ret = xfs_file_dax_read(iocb, to);
	else if (iocb->ki_flags & IOCB_DIRECT)
		ret = xfs_file_dio_aio_read(iocb, to);
	else
		ret = xfs_file_buffered_aio_read(iocb, to);

	if (ret > 0)
		XFS_STATS_ADD(mp, xs_read_bytes, ret);
	return ret;
}

/*
 * Common pre-write limit and setup checks.
 *
 * Called with the iolock held either shared or exclusive according to
 * @iolock, and returns with it held.  Might upgrade the iolock to exclusive
 * if called for a direct write beyond i_size.
 */
STATIC ssize_t
xfs_file_aio_write_checks(
	struct kiocb		*iocb,
	struct iov_iter		*from,
	int			*iolock)
{
	struct file		*file = iocb->ki_filp;
	struct inode		*inode = file->f_mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	ssize_t			error = 0;
	size_t			count = iov_iter_count(from);
	bool			drained_dio = false;
	loff_t			isize;

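	/*
	 * The checks below may drop and retake the iolock; each time the
	 * lock is cycled we must restart from the top.
	 */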
restart:
	error = generic_write_checks(iocb, from);
	if (error <= 0)
		return error;

	error = xfs_break_layouts(inode, iolock, BREAK_WRITE);
	if (error)
		return error;

	/*
	 * For changing security info in file_remove_privs() we need i_rwsem
	 * exclusively.
	 */
	if (*iolock == XFS_IOLOCK_SHARED && !IS_NOSEC(inode)) {
		xfs_iunlock(ip, *iolock);
		*iolock = XFS_IOLOCK_EXCL;
		xfs_ilock(ip, *iolock);
		goto restart;
	}
	/*
	 * If the offset is beyond the size of the file, we need to zero any
	 * blocks that fall between the existing EOF and the start of this
	 * write.  If zeroing is needed and we are currently holding the
	 * iolock shared, we need to update it to exclusive which implies
	 * having to redo all checks before.
	 *
	 * We need to serialise against EOF updates that occur in IO
	 * completions here. We want to make sure that nobody is changing the
	 * size while we do this check until we have placed an IO barrier (i.e.
	 * hold the XFS_IOLOCK_EXCL) that prevents new IO from being dispatched.
	 * The spinlock effectively forms a memory barrier once we have the
	 * XFS_IOLOCK_EXCL so we are guaranteed to see the latest EOF value
	 * and hence be able to correctly determine if we need to run zeroing.
	 */
	spin_lock(&ip->i_flags_lock);
	isize = i_size_read(inode);
	if (iocb->ki_pos > isize) {
		spin_unlock(&ip->i_flags_lock);
		if (!drained_dio) {
			if (*iolock == XFS_IOLOCK_SHARED) {
				xfs_iunlock(ip, *iolock);
				*iolock = XFS_IOLOCK_EXCL;
				xfs_ilock(ip, *iolock);
				iov_iter_reexpand(from, count);
			}
			/*
			 * We now have an IO submission barrier in place, but
			 * AIO can do EOF updates during IO completion and hence
			 * we now need to wait for all of them to drain. Non-AIO
			 * DIO will have drained before we are given the
			 * XFS_IOLOCK_EXCL, and so for most cases this wait is a
			 * no-op.
			 */
			inode_dio_wait(inode);
			drained_dio = true;
			goto restart;
		}

		trace_xfs_zero_eof(ip, isize, iocb->ki_pos - isize);
		error = iomap_zero_range(inode, isize, iocb->ki_pos - isize,
				NULL, &xfs_buffered_write_iomap_ops);
		if (error)
			return error;
	} else
		spin_unlock(&ip->i_flags_lock);

	return file_modified(file);
}

static int
xfs_dio_write_end_io(
	struct kiocb		*iocb,
	ssize_t			size,
	int			error,
	unsigned		flags)
{
	struct inode		*inode = file_inode(iocb->ki_filp);
	struct xfs_inode	*ip = XFS_I(inode);
	loff_t			offset = iocb->ki_pos;
	unsigned int		nofs_flag;

	trace_xfs_end_io_direct_write(ip, offset, size);

	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
		return -EIO;

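	/*
	 * Nothing further to do for a failed or zero-length write; the
	 * completion work below only applies to data that reached disk.
	 */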
	if (error)
		return error;
	if (!size)
		return 0;

	/*
	 * Capture amount written on completion as we can't reliably account
	 * for it on submission.
	 */
	XFS_STATS_ADD(ip->i_mount, xs_write_bytes, size);

	/*
	 * We can allocate memory here while doing writeback on behalf of
	 * memory reclaim.  To avoid memory allocation deadlocks set the
	 * task-wide nofs context for the following operations.
	 */
	nofs_flag = memalloc_nofs_save();

	if (flags & IOMAP_DIO_COW) {
		error = xfs_reflink_end_cow(ip, offset, size);
		if (error)
			goto out;
	}

	/*
	 * Unwritten conversion updates the in-core isize after extent
	 * conversion but before updating the on-disk size. Updating isize any
	 * earlier allows a racing dio read to find unwritten extents before
	 * they are converted.
	 */
	if (flags & IOMAP_DIO_UNWRITTEN) {
		error = xfs_iomap_write_unwritten(ip, offset, size, true);
		goto out;
	}

	/*
	 * We need to update the in-core inode size here so that we don't end up
	 * with the on-disk inode size being outside the in-core inode size. We
	 * have no other method of updating EOF for AIO, so always do it here
	 * if necessary.
	 *
	 * We need to lock the test/set EOF update as we can be racing with
	 * other IO completions here to update the EOF. Failing to serialise
	 * here can result in EOF moving backwards and Bad Things Happen when
	 * that occurs.
	 */
	spin_lock(&ip->i_flags_lock);
	if (offset + size > i_size_read(inode)) {
		i_size_write(inode, offset + size);
		spin_unlock(&ip->i_flags_lock);
		error = xfs_setfilesize(ip, offset, size);
	} else {
		spin_unlock(&ip->i_flags_lock);
	}

out:
	memalloc_nofs_restore(nofs_flag);
	return error;
}

static const struct iomap_dio_ops xfs_dio_write_ops = {
	.end_io		= xfs_dio_write_end_io,
};

/*
 * xfs_file_dio_aio_write - handle direct IO writes
 *
 * Lock the inode appropriately to prepare for and issue a direct IO write.
 * By separating it from the buffered write path we remove all the
 * tricky-to-follow locking changes and looping.
 *
 * If there are cached pages or we're extending the file, we need IOLOCK_EXCL
 * until we're sure the bytes at the new EOF have been zeroed and/or the cached
 * pages are flushed out.
 *
 * In most cases the direct IO writes will be done holding IOLOCK_SHARED
 * allowing them to be done in parallel with reads and other direct IO writes.
 * However, if the IO is not aligned to filesystem blocks, the direct IO layer
 * needs to do sub-block zeroing and that requires serialisation against other
 * direct IOs to the same block. In this case we need to serialise the
 * submission of the unaligned IOs so that we don't get racing block zeroing in
 * the dio layer.  To avoid the problem with aio, we also need to wait for
 * outstanding IOs to complete so that unwritten extent conversion is completed
 * before we try to map the overlapping block. This is currently implemented by
 * hitting it with a big hammer (i.e. inode_dio_wait()).
 *
 * Returns with locks held indicated by @iolock and errors indicated by
 * negative return values.
 */
STATIC ssize_t
xfs_file_dio_aio_write(
	struct kiocb		*iocb,
	struct iov_iter		*from)
{
	struct file		*file = iocb->ki_filp;
	struct address_space	*mapping = file->f_mapping;
	struct inode		*inode = mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	ssize_t			ret = 0;
	int			unaligned_io = 0;
	int			iolock;
	size_t			count = iov_iter_count(from);
	struct xfs_buftarg      *target = xfs_inode_buftarg(ip);

	/* DIO must be aligned to device logical sector size */
	if ((iocb->ki_pos | count) & target->bt_logical_sectormask)
		return -EINVAL;

	/*
	 * Don't take the exclusive iolock here unless the I/O is unaligned to
	 * the file system block size.  We don't need to consider the EOF
	 * extension case here because xfs_file_aio_write_checks() will relock
	 * the inode as necessary for EOF zeroing cases and fill out the new
	 * inode size as appropriate.
	 */
	if ((iocb->ki_pos & mp->m_blockmask) ||
	    ((iocb->ki_pos + count) & mp->m_blockmask)) {
		unaligned_io = 1;

		/*
		 * We can't properly handle unaligned direct I/O to reflink
		 * files yet, as we can't unshare a partial block.
		 */
		if (xfs_is_cow_inode(ip)) {
			trace_xfs_reflink_bounce_dio_write(ip, iocb->ki_pos, count);
			return -ENOTBLK;
		}
		iolock = XFS_IOLOCK_EXCL;
	} else {
		iolock = XFS_IOLOCK_SHARED;
	}

	if (iocb->ki_flags & IOCB_NOWAIT) {
		/* unaligned dio always waits, bail */
		if (unaligned_io)
			return -EAGAIN;
		if (!xfs_ilock_nowait(ip, iolock))
			return -EAGAIN;
	} else {
		xfs_ilock(ip, iolock);
	}

	ret = xfs_file_aio_write_checks(iocb, from, &iolock);
	if (ret)
		goto out;
	count = iov_iter_count(from);

	/*
	 * If we are doing unaligned IO, we can't allow any other overlapping IO
	 * in-flight at the same time or we risk data corruption. Wait for all
	 * other IO to drain before we submit. If the IO is aligned, demote the
	 * iolock if we had to take the exclusive lock in
	 * xfs_file_aio_write_checks() for other reasons.
	 */
	if (unaligned_io) {
		inode_dio_wait(inode);
	} else if (iolock == XFS_IOLOCK_EXCL) {
		xfs_ilock_demote(ip, XFS_IOLOCK_EXCL);
		iolock = XFS_IOLOCK_SHARED;
	}

	trace_xfs_file_direct_write(ip, count, iocb->ki_pos);
	/*
	 * If unaligned, this is the only IO in-flight. Wait on it before we
	 * release the iolock to prevent subsequent overlapping IO.
	 */
	ret = iomap_dio_rw(iocb, from, &xfs_direct_write_iomap_ops,
			   &xfs_dio_write_ops,
			   is_sync_kiocb(iocb) || unaligned_io);
out:
	xfs_iunlock(ip, iolock);

	/*
	 * No fallback to buffered IO after short writes for XFS, direct I/O
	 * will either complete fully or return an error.
	 */
	ASSERT(ret < 0 || ret == count);
	return ret;
}

static noinline ssize_t
xfs_file_dax_write(
	struct kiocb		*iocb,
	struct iov_iter		*from)
{
	struct inode		*inode = iocb->ki_filp->f_mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	int			iolock = XFS_IOLOCK_EXCL;
	ssize_t			ret, error = 0;
	size_t			count;
	loff_t			pos;

	if (iocb->ki_flags & IOCB_NOWAIT) {
		if (!xfs_ilock_nowait(ip, iolock))
			return -EAGAIN;
	} else {
		xfs_ilock(ip, iolock);
	}

	ret = xfs_file_aio_write_checks(iocb, from, &iolock);
	if (ret)
		goto out;

	pos = iocb->ki_pos;
	count = iov_iter_count(from);

	trace_xfs_file_dax_write(ip, count, pos);
	ret = dax_iomap_rw(iocb, from, &xfs_direct_write_iomap_ops);
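	/*
	 * If the write extended the file, move the in-core EOF out to
	 * match and log the new on-disk size.
	 */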
	if (ret > 0 && iocb->ki_pos > i_size_read(inode)) {
		i_size_write(inode, iocb->ki_pos);
		error = xfs_setfilesize(ip, pos, ret);
	}
out:
	xfs_iunlock(ip, iolock);
	if (error)
		return error;

	if (ret > 0) {
		XFS_STATS_ADD(ip->i_mount, xs_write_bytes, ret);

		/* Handle various SYNC-type writes */
		ret = generic_write_sync(iocb, ret);
	}
	return ret;
}

STATIC ssize_t
xfs_file_buffered_aio_write(
	struct kiocb		*iocb,
	struct iov_iter		*from)
{
	struct file		*file = iocb->ki_filp;
	struct address_space	*mapping = file->f_mapping;
	struct inode		*inode = mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	ssize_t			ret;
	bool			cleared_space = false;
	int			iolock;

	if (iocb->ki_flags & IOCB_NOWAIT)
		return -EOPNOTSUPP;

write_retry:
	iolock = XFS_IOLOCK_EXCL;
	xfs_ilock(ip, iolock);

	ret = xfs_file_aio_write_checks(iocb, from, &iolock);
	if (ret)
		goto out;

	/* We can write back this queue in page reclaim */
	current->backing_dev_info = inode_to_bdi(inode);

	trace_xfs_file_buffered_write(ip, iov_iter_count(from), iocb->ki_pos);
	ret = iomap_file_buffered_write(iocb, from,
			&xfs_buffered_write_iomap_ops);
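	/* iomap returns bytes written; advance the file position on success. */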
	if (likely(ret >= 0))
		iocb->ki_pos += ret;

	/*
	 * If we hit a space limit, try to free up some lingering preallocated
	 * space before returning an error. In the case of ENOSPC, first try to
	 * write back all dirty inodes to free up some of the excess reserved
	 * metadata space. This reduces the chances that the eofblocks scan
	 * waits on dirty mappings. Since xfs_flush_inodes() is serialized, this
	 * also behaves as a filter to prevent too many eofblocks scans from
	 * running at the same time.  Use a synchronous scan to increase the
	 * effectiveness of the scan.
	 */
	if (ret == -EDQUOT && !cleared_space) {
		xfs_iunlock(ip, iolock);
		xfs_blockgc_free_quota(ip, XFS_EOF_FLAGS_SYNC);
		cleared_space = true;
		goto write_retry;
	} else if (ret == -ENOSPC && !cleared_space) {
		struct xfs_eofblocks eofb = {0};

		cleared_space = true;
		xfs_flush_inodes(ip->i_mount);

		xfs_iunlock(ip, iolock);
		eofb.eof_flags = XFS_EOF_FLAGS_SYNC;
		xfs_icache_free_eofblocks(ip->i_mount, &eofb);
		xfs_icache_free_cowblocks(ip->i_mount, &eofb);
		goto write_retry;
	}

	current->backing_dev_info = NULL;
out:
	if (iolock)
		xfs_iunlock(ip, iolock);

	if (ret > 0) {
		XFS_STATS_ADD(ip->i_mount, xs_write_bytes, ret);
		/* Handle various SYNC-type writes */
		ret = generic_write_sync(iocb, ret);
	}
	return ret;
}

STATIC ssize_t
xfs_file_write_iter(
	struct kiocb		*iocb,
	struct iov_iter		*from)
{
	struct file		*file = iocb->ki_filp;
	struct address_space	*mapping = file->f_mapping;
	struct inode		*inode = mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	ssize_t			ret;
	size_t			ocount = iov_iter_count(from);

	XFS_STATS_INC(ip->i_mount, xs_write_calls);

	if (ocount == 0)
		return 0;

	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
		return -EIO;

	if (IS_DAX(inode))
		return xfs_file_dax_write(iocb, from);

	if (iocb->ki_flags & IOCB_DIRECT) {
		/*
		 * Allow a directio write to fall back to a buffered
		 * write *only* in the case that we're doing a reflink
		 * CoW.  In all other directio scenarios we do not
		 * allow an operation to fall back to buffered mode.
		 */
		ret = xfs_file_dio_aio_write(iocb, from);
		if (ret != -ENOTBLK)
			return ret;
	}

	return xfs_file_buffered_aio_write(iocb, from);
}

static void
xfs_wait_dax_page(
	struct inode		*inode)
{
	struct xfs_inode        *ip = XFS_I(inode);

	xfs_iunlock(ip, XFS_MMAPLOCK_EXCL);
	schedule();
	xfs_ilock(ip, XFS_MMAPLOCK_EXCL);
}

static int
xfs_break_dax_layouts(
	struct inode		*inode,
	bool			*retry)
{
	struct page		*page;

	ASSERT(xfs_isilocked(XFS_I(inode), XFS_MMAPLOCK_EXCL));

	page = dax_layout_busy_page(inode->i_mapping);
	if (!page)
		return 0;

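	/*
	 * The page is pinned by get_user_pages()/DMA. Sleep until its
	 * refcount drops back to one (idle), dropping and retaking the
	 * MMAPLOCK around each wait via xfs_wait_dax_page().
	 */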
	*retry = true;
	return ___wait_var_event(&page->_refcount,
			atomic_read(&page->_refcount) == 1, TASK_INTERRUPTIBLE,
			0, 0, xfs_wait_dax_page(inode));
}

int
xfs_break_layouts(
	struct inode		*inode,
	uint			*iolock,
	enum layout_break_reason reason)
{
	bool			retry;
	int			error;

	ASSERT(xfs_isilocked(XFS_I(inode), XFS_IOLOCK_SHARED|XFS_IOLOCK_EXCL));

	do {
		retry = false;
		switch (reason) {
		case BREAK_UNMAP:
			error = xfs_break_dax_layouts(inode, &retry);
			if (error || retry)
				break;
			/* fall through */
		case BREAK_WRITE:
			error = xfs_break_leased_layouts(inode, iolock, &retry);
			break;
		default:
			WARN_ON_ONCE(1);
			error = -EINVAL;
		}
	} while (error == 0 && retry);

	return error;
}

#define	XFS_FALLOC_FL_SUPPORTED						\
		(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |		\
		 FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_ZERO_RANGE |	\
		 FALLOC_FL_INSERT_RANGE | FALLOC_FL_UNSHARE_RANGE)

STATIC long
xfs_file_fallocate(
	struct file		*file,
	int			mode,
	loff_t			offset,
	loff_t			len)
{
	struct inode		*inode = file_inode(file);
	struct xfs_inode	*ip = XFS_I(inode);
	long			error;
	enum xfs_prealloc_flags	flags = 0;
	uint			iolock = XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL;
	loff_t			new_size = 0;
	bool			do_file_insert = false;

	if (!S_ISREG(inode->i_mode))
		return -EINVAL;
	if (mode & ~XFS_FALLOC_FL_SUPPORTED)
		return -EOPNOTSUPP;

	xfs_ilock(ip, iolock);
	error = xfs_break_layouts(inode, &iolock, BREAK_UNMAP);
	if (error)
		goto out_unlock;

	/*
	 * Must wait for all AIO to complete before we continue as AIO can
	 * change the file size on completion without holding any locks we
	 * currently hold. We must do this first because AIO can update both
	 * the on disk and in memory inode sizes, and the operations that follow
	 * require the in-memory size to be fully up-to-date.
	 */
	inode_dio_wait(inode);

	/*
	 * Now AIO and DIO has drained we flush and (if necessary) invalidate
	 * the cached range over the first operation we are about to run.
	 *
	 * We care about zero and collapse here because they both run a hole
	 * punch over the range first. Because that can zero data, and the range
	 * of invalidation for the shift operations is much larger, we still do
	 * the required flush for collapse in xfs_prepare_shift().
	 *
	 * Insert has the same range requirements as collapse, and we extend the
	 * file first which can zero data. Hence insert has the same
	 * flush/invalidate requirements as collapse and so they are both
	 * handled at the right time by xfs_prepare_shift().
	 */
	if (mode & (FALLOC_FL_PUNCH_HOLE | FALLOC_FL_ZERO_RANGE |
		    FALLOC_FL_COLLAPSE_RANGE)) {
		error = xfs_flush_unmap_range(ip, offset, len);
		if (error)
			goto out_unlock;
	}

	if (mode & FALLOC_FL_PUNCH_HOLE) {
		error = xfs_free_file_space(ip, offset, len);
		if (error)
			goto out_unlock;
	} else if (mode & FALLOC_FL_COLLAPSE_RANGE) {
		if (!xfs_is_falloc_aligned(ip, offset, len)) {
			error = -EINVAL;
			goto out_unlock;
		}

		/*
		 * There is no need to overlap collapse range with EOF,
		 * in which case it is effectively a truncate operation
		 */
		if (offset + len >= i_size_read(inode)) {
			error = -EINVAL;
			goto out_unlock;
		}

		new_size = i_size_read(inode) - len;

		error = xfs_collapse_file_space(ip, offset, len);
		if (error)
			goto out_unlock;
	} else if (mode & FALLOC_FL_INSERT_RANGE) {
		loff_t		isize = i_size_read(inode);

		if (!xfs_is_falloc_aligned(ip, offset, len)) {
			error = -EINVAL;
			goto out_unlock;
		}

		/*
		 * New inode size must not exceed ->s_maxbytes, accounting for
		 * possible signed overflow.
		 */
		if (inode->i_sb->s_maxbytes - isize < len) {
			error = -EFBIG;
			goto out_unlock;
		}
		new_size = isize + len;

		/* Offset should be less than i_size */
		if (offset >= isize) {
			error = -EINVAL;
			goto out_unlock;
		}
		do_file_insert = true;
	} else {
		flags |= XFS_PREALLOC_SET;

		if (!(mode & FALLOC_FL_KEEP_SIZE) &&
		    offset + len > i_size_read(inode)) {
			new_size = offset + len;
			error = inode_newsize_ok(inode, new_size);
			if (error)
				goto out_unlock;
		}

		if (mode & FALLOC_FL_ZERO_RANGE) {
			/*
			 * Punch a hole and prealloc the range.  We use a hole
			 * punch rather than unwritten extent conversion for two
			 * reasons:
			 *
			 *   1.) Hole punch handles partial block zeroing for us.
			 *   2.) If prealloc returns ENOSPC, the file range is
			 *       still zero-valued by virtue of the hole punch.
			 */
			unsigned int blksize = i_blocksize(inode);

			trace_xfs_zero_file_space(ip);

			error = xfs_free_file_space(ip, offset, len);
			if (error)
				goto out_unlock;

			len = round_up(offset + len, blksize) -
			      round_down(offset, blksize);
			offset = round_down(offset, blksize);
		} else if (mode & FALLOC_FL_UNSHARE_RANGE) {
			error = xfs_reflink_unshare(ip, offset, len);
			if (error)
				goto out_unlock;
		} else {
			/*
			 * If always_cow mode we can't use preallocations and
			 * thus should not create them.
			 */
			if (xfs_is_always_cow_inode(ip)) {
				error = -EOPNOTSUPP;
				goto out_unlock;
			}
		}
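
		/*
		 * Preallocate the range unless this is an always_cow inode,
		 * which cannot carry preallocations.
		 */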
		if (!xfs_is_always_cow_inode(ip)) {
			error = xfs_alloc_file_space(ip, offset, len,
						     XFS_BMAPI_PREALLOC);
			if (error)
				goto out_unlock;
		}
	}

	if (file->f_flags & O_DSYNC)
		flags |= XFS_PREALLOC_SYNC;

	error = xfs_update_prealloc_flags(ip, flags);
	if (error)
		goto out_unlock;

	/* Change file size if needed */
	if (new_size) {
		struct iattr iattr;

		iattr.ia_valid = ATTR_SIZE;
		iattr.ia_size = new_size;
		error = xfs_vn_setattr_size(file_dentry(file), &iattr);
		if (error)
			goto out_unlock;
	}

	/*
	 * Perform hole insertion now that the file size has been
	 * updated so that if we crash during the operation we don't
	 * leave shifted extents past EOF and hence losing access to
	 * the data that is contained within them.
	 */
	if (do_file_insert)
		error = xfs_insert_file_space(ip, offset, len);

out_unlock:
	xfs_iunlock(ip, iolock);
	return error;
}

STATIC int
xfs_file_fadvise(
	struct file	*file,
	loff_t		start,
	loff_t		end,
	int		advice)
{
	struct xfs_inode *ip = XFS_I(file_inode(file));
	int ret;
	int lockflags = 0;

	/*
	 * Operations creating pages in page cache need protection from hole
	 * punching and similar ops
	 */
	if (advice == POSIX_FADV_WILLNEED) {
		lockflags = XFS_IOLOCK_SHARED;
		xfs_ilock(ip, lockflags);
	}
	ret = generic_fadvise(file, start, end, advice);
	if (lockflags)
		xfs_iunlock(ip, lockflags);
	return ret;
}

/* Does this file, inode, or mount want synchronous writes? */
static inline bool xfs_file_sync_writes(struct file *filp)
{
	struct xfs_inode	*ip = XFS_I(file_inode(filp));

	if (ip->i_mount->m_flags & XFS_MOUNT_WSYNC)
		return true;
	if (filp->f_flags & (__O_SYNC | O_DSYNC))
		return true;
	if (IS_SYNC(file_inode(filp)))
		return true;

	return false;
}

STATIC loff_t
xfs_file_remap_range(
	struct file		*file_in,
	loff_t			pos_in,
	struct file		*file_out,
	loff_t			pos_out,
	loff_t			len,
	unsigned int		remap_flags)
{
	struct inode		*inode_in = file_inode(file_in);
	struct xfs_inode	*src = XFS_I(inode_in);
	struct inode		*inode_out = file_inode(file_out);
	struct xfs_inode	*dest = XFS_I(inode_out);
	struct xfs_mount	*mp = src->i_mount;
	loff_t			remapped = 0;
	xfs_extlen_t		cowextsize;
	int			ret;

	if (remap_flags & ~(REMAP_FILE_DEDUP | REMAP_FILE_ADVISORY))
		return -EINVAL;

	if (!xfs_sb_version_hasreflink(&mp->m_sb))
		return -EOPNOTSUPP;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	/* Prepare and then clone file data. */
	ret = xfs_reflink_remap_prep(file_in, pos_in, file_out, pos_out,
			&len, remap_flags);
	if (ret || len == 0)
		return ret;

	trace_xfs_reflink_remap_range(src, pos_in, len, dest, pos_out);

	ret = xfs_reflink_remap_blocks(src, pos_in, dest, pos_out, len,
			&remapped);
	if (ret)
		goto out_unlock;

	/*
	 * Carry the cowextsize hint from src to dest if we're sharing the
	 * entire source file to the entire destination file, the source file
	 * has a cowextsize hint, and the destination file does not.
	 */
	cowextsize = 0;
	if (pos_in == 0 && len == i_size_read(inode_in) &&
	    (src->i_d.di_flags2 & XFS_DIFLAG2_COWEXTSIZE) &&
	    pos_out == 0 && len >= i_size_read(inode_out) &&
	    !(dest->i_d.di_flags2 & XFS_DIFLAG2_COWEXTSIZE))
		cowextsize = src->i_d.di_cowextsize;

	ret = xfs_reflink_update_dest(dest, pos_out + len, cowextsize,
			remap_flags);
	if (ret)
		goto out_unlock;

	if (xfs_file_sync_writes(file_in) || xfs_file_sync_writes(file_out))
		xfs_log_force_inode(dest);
out_unlock:
	xfs_iunlock2_io_mmap(src, dest);
	if (ret)
		trace_xfs_reflink_remap_range_error(dest, ret, _RET_IP_);
	return remapped > 0 ? remapped : ret;
}

STATIC int
xfs_file_open(
	struct inode	*inode,
	struct file	*file)
{
	if (!(file->f_flags & O_LARGEFILE) && i_size_read(inode) > MAX_NON_LFS)
		return -EFBIG;
	if (XFS_FORCED_SHUTDOWN(XFS_M(inode->i_sb)))
		return -EIO;
	file->f_mode |= FMODE_NOWAIT | FMODE_BUF_RASYNC;
	return 0;
}

STATIC int
xfs_dir_open(
	struct inode	*inode,
	struct file	*file)
{
	struct xfs_inode *ip = XFS_I(inode);
	int		mode;
	int		error;

	error = xfs_file_open(inode, file);
	if (error)
		return error;

	/*
	 * If there are any blocks, read-ahead block 0 as we're almost
	 * certain to have the next operation be a read there.
	 */
	mode = xfs_ilock_data_map_shared(ip);
	if (ip->i_df.if_nextents > 0)
		error = xfs_dir3_data_readahead(ip, 0, 0);
	xfs_iunlock(ip, mode);
	return error;
}

STATIC int
xfs_file_release(
	struct inode	*inode,
	struct file	*filp)
{
	return xfs_release(XFS_I(inode));
}

STATIC int
xfs_file_readdir(
	struct file	*file,
	struct dir_context *ctx)
{
	struct inode	*inode = file_inode(file);
	xfs_inode_t	*ip = XFS_I(inode);
	size_t		bufsize;

	/*
	 * The Linux API doesn't pass the total size of the buffer
	 * we read into down to the filesystem.  With the filldir concept
	 * it's not needed for correct information, but the XFS dir2 leaf
	 * code wants an estimate of the buffer size to calculate its
	 * readahead window and size the buffers used for mapping to
	 * physical blocks.
	 *
	 * Try to give it an estimate that's good enough, maybe at some
	 * point we can change the ->readdir prototype to include the
	 * buffer size.  For now we use the current glibc buffer size.
	 */
	bufsize = (size_t)min_t(loff_t, XFS_READDIR_BUFSIZE, ip->i_d.di_size);

	return xfs_readdir(NULL, ip, ctx, bufsize);
}

STATIC loff_t
xfs_file_llseek(
	struct file	*file,
	loff_t		offset,
	int		whence)
{
	struct inode		*inode = file->f_mapping->host;

	if (XFS_FORCED_SHUTDOWN(XFS_I(inode)->i_mount))
		return -EIO;

	switch (whence) {
	default:
		return generic_file_llseek(file, offset, whence);
	case SEEK_HOLE:
		offset = iomap_seek_hole(inode, offset, &xfs_seek_iomap_ops);
		break;
	case SEEK_DATA:
		offset = iomap_seek_data(inode, offset, &xfs_seek_iomap_ops);
		break;
	}

	if (offset < 0)
		return offset;
	return vfs_setpos(file, offset, inode->i_sb->s_maxbytes);
}

/*
 * Locking for serialisation of IO during page faults. This results in a lock
 * ordering of:
 *
 * mmap_lock (MM)
 *   sb_start_pagefault(vfs, freeze)
 *     i_mmaplock (XFS - truncate serialisation)
 *       page_lock (MM)
 *         i_lock (XFS - extent map serialisation)
 */
static vm_fault_t
__xfs_filemap_fault(
	struct vm_fault		*vmf,
	enum page_entry_size	pe_size,
	bool			write_fault)
{
	struct inode		*inode = file_inode(vmf->vma->vm_file);
	struct xfs_inode	*ip = XFS_I(inode);
	vm_fault_t		ret;

	trace_xfs_filemap_fault(ip, pe_size, write_fault);

	if (write_fault) {
		sb_start_pagefault(inode->i_sb);
		file_update_time(vmf->vma->vm_file);
	}

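	/*
	 * Hold the MMAPLOCK shared across the fault to serialise against
	 * truncate and hole punching.
	 */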
	xfs_ilock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
	if (IS_DAX(inode)) {
		pfn_t pfn;

		ret = dax_iomap_fault(vmf, pe_size, &pfn, NULL,
				(write_fault && !vmf->cow_page) ?
				 &xfs_direct_write_iomap_ops :
				 &xfs_read_iomap_ops);
		if (ret & VM_FAULT_NEEDDSYNC)
			ret = dax_finish_sync_fault(vmf, pe_size, pfn);
	} else {
		if (write_fault)
			ret = iomap_page_mkwrite(vmf,
					&xfs_buffered_write_iomap_ops);
		else
			ret = filemap_fault(vmf);
	}
	xfs_iunlock(XFS_I(inode), XFS_MMAPLOCK_SHARED);

	if (write_fault)
		sb_end_pagefault(inode->i_sb);
	return ret;
}

static inline bool
xfs_is_write_fault(
	struct vm_fault		*vmf)
{
	return (vmf->flags & FAULT_FLAG_WRITE) &&
	       (vmf->vma->vm_flags & VM_SHARED);
}

static vm_fault_t
xfs_filemap_fault(
	struct vm_fault		*vmf)
{
	/* DAX can shortcut the normal fault path on write faults! */
	return __xfs_filemap_fault(vmf, PE_SIZE_PTE,
			IS_DAX(file_inode(vmf->vma->vm_file)) &&
			xfs_is_write_fault(vmf));
}

static vm_fault_t
xfs_filemap_huge_fault(
	struct vm_fault		*vmf,
	enum page_entry_size	pe_size)
{
	if (!IS_DAX(file_inode(vmf->vma->vm_file)))
		return VM_FAULT_FALLBACK;

	/* DAX can shortcut the normal fault path on write faults! */
	return __xfs_filemap_fault(vmf, pe_size,
			xfs_is_write_fault(vmf));
}

static vm_fault_t
xfs_filemap_page_mkwrite(
	struct vm_fault		*vmf)
{
	return __xfs_filemap_fault(vmf, PE_SIZE_PTE, true);
}

/*
 * pfn_mkwrite was originally intended to ensure we capture time stamp updates
 * on write faults. In reality, it needs to serialise against truncate and
 * prepare memory for writing, so handle it as a standard write fault.
 */
static vm_fault_t
xfs_filemap_pfn_mkwrite(
	struct vm_fault		*vmf)
{
	return __xfs_filemap_fault(vmf, PE_SIZE_PTE, true);
}

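/*
 * Take the MMAPLOCK shared around the batched fault-around so the extent
 * map cannot be changed by truncate or hole punch while pages are mapped in.
 */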
static void
xfs_filemap_map_pages(
	struct vm_fault		*vmf,
	pgoff_t			start_pgoff,
	pgoff_t			end_pgoff)
{
	struct inode		*inode = file_inode(vmf->vma->vm_file);

	xfs_ilock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
	filemap_map_pages(vmf, start_pgoff, end_pgoff);
	xfs_iunlock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
}

static const struct vm_operations_struct xfs_file_vm_ops = {
	.fault		= xfs_filemap_fault,
	.huge_fault	= xfs_filemap_huge_fault,
	.map_pages	= xfs_filemap_map_pages,
	.page_mkwrite	= xfs_filemap_page_mkwrite,
	.pfn_mkwrite	= xfs_filemap_pfn_mkwrite,
};

STATIC int
xfs_file_mmap(
	struct file		*file,
	struct vm_area_struct	*vma)
{
	struct inode		*inode = file_inode(file);
	struct xfs_buftarg	*target = xfs_inode_buftarg(XFS_I(inode));

	/*
	 * We don't support synchronous mappings for non-DAX files and
	 * for DAX files if underneath dax_device is not synchronous.
	 */
	if (!daxdev_mapping_supported(vma, target->bt_daxdev))
		return -EOPNOTSUPP;

	file_accessed(file);
	vma->vm_ops = &xfs_file_vm_ops;
	if (IS_DAX(inode))
		vma->vm_flags |= VM_HUGEPAGE;
	return 0;
}

const struct file_operations xfs_file_operations = {
	.llseek		= xfs_file_llseek,
	.read_iter	= xfs_file_read_iter,
	.write_iter	= xfs_file_write_iter,
	.splice_read	= generic_file_splice_read,
	.splice_write	= iter_file_splice_write,
	.iopoll		= iomap_dio_iopoll,
	.unlocked_ioctl	= xfs_file_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= xfs_file_compat_ioctl,
#endif
	.mmap		= xfs_file_mmap,
	.mmap_supported_flags = MAP_SYNC,
	.open		= xfs_file_open,
	.release	= xfs_file_release,
	.fsync		= xfs_file_fsync,
	.get_unmapped_area = thp_get_unmapped_area,
	.fallocate	= xfs_file_fallocate,
	.fadvise	= xfs_file_fadvise,
	.remap_file_range = xfs_file_remap_range,
};

const struct file_operations xfs_dir_file_operations = {
	.open		= xfs_dir_open,
	.read		= generic_read_dir,
	.iterate_shared	= xfs_file_readdir,
	.llseek		= generic_file_llseek,
	.unlocked_ioctl	= xfs_file_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= xfs_file_compat_ioctl,
#endif
	.fsync		= xfs_dir_fsync,
};