// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_inode_item.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_dir2.h"
#include "xfs_dir2_priv.h"
#include "xfs_ioctl.h"
#include "xfs_trace.h"
#include "xfs_log.h"
#include "xfs_icache.h"
#include "xfs_pnfs.h"
#include "xfs_iomap.h"
#include "xfs_reflink.h"

#include <linux/falloc.h>
#include <linux/backing-dev.h>
#include <linux/mman.h>
#include <linux/fadvise.h>

static const struct vm_operations_struct xfs_file_vm_ops;

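/*
 * Update the preallocation state of an inode under a transaction: set or
 * clear XFS_DIFLAG_PREALLOC as requested, and unless the update is marked
 * invisible, strip the setuid/setgid bits and bump the timestamps as a
 * normal write would.
 */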
int
xfs_update_prealloc_flags(
	struct xfs_inode	*ip,
	enum xfs_prealloc_flags	flags)
{
	struct xfs_trans	*tp;
	int			error;

	error = xfs_trans_alloc(ip->i_mount, &M_RES(ip->i_mount)->tr_writeid,
			0, 0, 0, &tp);
	if (error)
		return error;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);

	if (!(flags & XFS_PREALLOC_INVISIBLE)) {
		VFS_I(ip)->i_mode &= ~S_ISUID;
		if (VFS_I(ip)->i_mode & S_IXGRP)
			VFS_I(ip)->i_mode &= ~S_ISGID;
		xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
	}

	if (flags & XFS_PREALLOC_SET)
		ip->i_d.di_flags |= XFS_DIFLAG_PREALLOC;
	if (flags & XFS_PREALLOC_CLEAR)
		ip->i_d.di_flags &= ~XFS_DIFLAG_PREALLOC;

	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
	if (flags & XFS_PREALLOC_SYNC)
		xfs_trans_set_sync(tp);
	return xfs_trans_commit(tp);
}

/*
 * Fsync operations on directories are much simpler than on regular files,
 * as there is no file data to flush, and thus also no need for explicit
 * cache flush operations, and there are no non-transaction metadata updates
 * on directories either.
 */
STATIC int
xfs_dir_fsync(
	struct file		*file,
	loff_t			start,
	loff_t			end,
	int			datasync)
{
	struct xfs_inode	*ip = XFS_I(file->f_mapping->host);
	struct xfs_mount	*mp = ip->i_mount;
	xfs_lsn_t		lsn = 0;

	trace_xfs_dir_fsync(ip);

	xfs_ilock(ip, XFS_ILOCK_SHARED);
	if (xfs_ipincount(ip))
		lsn = ip->i_itemp->ili_last_lsn;
	xfs_iunlock(ip, XFS_ILOCK_SHARED);

	if (!lsn)
		return 0;
	return xfs_log_force_lsn(mp, lsn, XFS_LOG_SYNC, NULL);
}

STATIC int
xfs_file_fsync(
	struct file		*file,
	loff_t			start,
	loff_t			end,
	int			datasync)
{
	struct inode		*inode = file->f_mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	int			error = 0;
	int			log_flushed = 0;
	xfs_lsn_t		lsn = 0;

	trace_xfs_file_fsync(ip);

	error = file_write_and_wait_range(file, start, end);
	if (error)
		return error;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	xfs_iflags_clear(ip, XFS_ITRUNCATED);

	/*
	 * If we have an RT and/or log subvolume we need to make sure to flush
	 * the write cache of the device used for file data first.  This is to
	 * ensure newly written file data make it to disk before logging the new
	 * inode size in case of an extending write.
	 */
	if (XFS_IS_REALTIME_INODE(ip))
		xfs_blkdev_issue_flush(mp->m_rtdev_targp);
	else if (mp->m_logdev_targp != mp->m_ddev_targp)
		xfs_blkdev_issue_flush(mp->m_ddev_targp);

	/*
	 * All metadata updates are logged, which means that we just have to
	 * flush the log up to the latest LSN that touched the inode. If we have
	 * concurrent fsync/fdatasync() calls, we need them to all block on the
	 * log force before we clear the ili_fsync_fields field. This ensures
	 * that we don't get a racing sync operation that does not wait for the
	 * metadata to hit the journal before returning. If we race with
	 * clearing the ili_fsync_fields, then all that will happen is the log
	 * force will do nothing as the lsn will already be on disk. We can't
	 * race with setting ili_fsync_fields because that is done under
	 * XFS_ILOCK_EXCL, and that can't happen because we hold the lock shared
	 * until after the ili_fsync_fields is cleared.
	 */
	xfs_ilock(ip, XFS_ILOCK_SHARED);
	if (xfs_ipincount(ip)) {
		if (!datasync ||
		    (ip->i_itemp->ili_fsync_fields & ~XFS_ILOG_TIMESTAMP))
			lsn = ip->i_itemp->ili_last_lsn;
	}

	if (lsn) {
		error = xfs_log_force_lsn(mp, lsn, XFS_LOG_SYNC, &log_flushed);
		ip->i_itemp->ili_fsync_fields = 0;
	}
	xfs_iunlock(ip, XFS_ILOCK_SHARED);

	/*
	 * If we only have a single device, and the log force above was
	 * a no-op, we might have to flush the data device cache here.
	 * This can only happen for fdatasync/O_DSYNC if we were overwriting
	 * an already allocated file and thus do not have any metadata to
	 * commit.
	 */
	if (!log_flushed && !XFS_IS_REALTIME_INODE(ip) &&
	    mp->m_logdev_targp == mp->m_ddev_targp)
		xfs_blkdev_issue_flush(mp->m_ddev_targp);

	return error;
}

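/*
 * Direct I/O reads: take the IOLOCK shared to serialise against truncate
 * and other layout changes, then hand the iov_iter to the iomap direct
 * I/O code.
 */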
STATIC ssize_t
xfs_file_dio_aio_read(
	struct kiocb		*iocb,
	struct iov_iter		*to)
{
	struct xfs_inode	*ip = XFS_I(file_inode(iocb->ki_filp));
	size_t			count = iov_iter_count(to);
	ssize_t			ret;

	trace_xfs_file_direct_read(ip, count, iocb->ki_pos);

	if (!count)
		return 0; /* skip atime */

	file_accessed(iocb->ki_filp);

	xfs_ilock(ip, XFS_IOLOCK_SHARED);
	ret = iomap_dio_rw(iocb, to, &xfs_read_iomap_ops, NULL,
			is_sync_kiocb(iocb));
	xfs_iunlock(ip, XFS_IOLOCK_SHARED);

	return ret;
}

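/*
 * DAX reads copy directly from the backing pmem, so there is no bio to
 * submit; we still take the IOLOCK shared (trylock for IOCB_NOWAIT
 * callers) before calling into the DAX iomap code.
 */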
static noinline ssize_t
xfs_file_dax_read(
	struct kiocb		*iocb,
	struct iov_iter		*to)
{
	struct xfs_inode	*ip = XFS_I(iocb->ki_filp->f_mapping->host);
	size_t			count = iov_iter_count(to);
	ssize_t			ret = 0;

	trace_xfs_file_dax_read(ip, count, iocb->ki_pos);

	if (!count)
		return 0; /* skip atime */

	if (iocb->ki_flags & IOCB_NOWAIT) {
		if (!xfs_ilock_nowait(ip, XFS_IOLOCK_SHARED))
			return -EAGAIN;
	} else {
		xfs_ilock(ip, XFS_IOLOCK_SHARED);
	}

	ret = dax_iomap_rw(iocb, to, &xfs_read_iomap_ops);
	xfs_iunlock(ip, XFS_IOLOCK_SHARED);

	file_accessed(iocb->ki_filp);
	return ret;
}

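/*
 * Buffered reads go through the generic page cache path under the shared
 * IOLOCK; IOCB_NOWAIT callers back off with -EAGAIN rather than sleep on
 * the lock.
 */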
STATIC ssize_t
xfs_file_buffered_aio_read(
	struct kiocb		*iocb,
	struct iov_iter		*to)
{
	struct xfs_inode	*ip = XFS_I(file_inode(iocb->ki_filp));
	ssize_t			ret;

	trace_xfs_file_buffered_read(ip, iov_iter_count(to), iocb->ki_pos);

	if (iocb->ki_flags & IOCB_NOWAIT) {
		if (!xfs_ilock_nowait(ip, XFS_IOLOCK_SHARED))
			return -EAGAIN;
	} else {
		xfs_ilock(ip, XFS_IOLOCK_SHARED);
	}
	ret = generic_file_read_iter(iocb, to);
	xfs_iunlock(ip, XFS_IOLOCK_SHARED);

	return ret;
}

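/*
 * Top-level read dispatcher: pick the DAX, direct or buffered read path
 * and account the bytes read on success.
 */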
STATIC ssize_t
xfs_file_read_iter(
	struct kiocb		*iocb,
	struct iov_iter		*to)
{
	struct inode		*inode = file_inode(iocb->ki_filp);
	struct xfs_mount	*mp = XFS_I(inode)->i_mount;
	ssize_t			ret = 0;

	XFS_STATS_INC(mp, xs_read_calls);

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	if (IS_DAX(inode))
		ret = xfs_file_dax_read(iocb, to);
	else if (iocb->ki_flags & IOCB_DIRECT)
		ret = xfs_file_dio_aio_read(iocb, to);
	else
		ret = xfs_file_buffered_aio_read(iocb, to);

	if (ret > 0)
		XFS_STATS_ADD(mp, xs_read_bytes, ret);
	return ret;
}

/*
 * Common pre-write limit and setup checks.
 *
 * Called with the iolock held either shared or exclusive according to
 * @iolock, and returns with it held.  Might upgrade the iolock to exclusive
 * if called for a direct write beyond i_size.
 */
STATIC ssize_t
xfs_file_aio_write_checks(
	struct kiocb		*iocb,
	struct iov_iter		*from,
	int			*iolock)
{
	struct file		*file = iocb->ki_filp;
	struct inode		*inode = file->f_mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	ssize_t			error = 0;
	size_t			count = iov_iter_count(from);
	bool			drained_dio = false;
	loff_t			isize;

restart:
	error = generic_write_checks(iocb, from);
	if (error <= 0)
		return error;

	error = xfs_break_layouts(inode, iolock, BREAK_WRITE);
	if (error)
		return error;

	/*
	 * For changing security info in file_remove_privs() we need i_rwsem
	 * exclusively.
	 */
	if (*iolock == XFS_IOLOCK_SHARED && !IS_NOSEC(inode)) {
		xfs_iunlock(ip, *iolock);
		*iolock = XFS_IOLOCK_EXCL;
		xfs_ilock(ip, *iolock);
		goto restart;
	}
	/*
	 * If the offset is beyond the size of the file, we need to zero any
	 * blocks that fall between the existing EOF and the start of this
	 * write.  If zeroing is needed and we are currently holding the
	 * iolock shared, we need to update it to exclusive which implies
	 * having to redo all checks before.
	 *
	 * We need to serialise against EOF updates that occur in IO
	 * completions here. We want to make sure that nobody is changing the
	 * size while we do this check until we have placed an IO barrier (i.e.
	 * hold the XFS_IOLOCK_EXCL) that prevents new IO from being dispatched.
	 * The spinlock effectively forms a memory barrier once we have the
	 * XFS_IOLOCK_EXCL so we are guaranteed to see the latest EOF value
	 * and hence be able to correctly determine if we need to run zeroing.
	 */
	spin_lock(&ip->i_flags_lock);
	isize = i_size_read(inode);
	if (iocb->ki_pos > isize) {
		spin_unlock(&ip->i_flags_lock);
		if (!drained_dio) {
			if (*iolock == XFS_IOLOCK_SHARED) {
				xfs_iunlock(ip, *iolock);
				*iolock = XFS_IOLOCK_EXCL;
				xfs_ilock(ip, *iolock);
				iov_iter_reexpand(from, count);
			}
			/*
			 * We now have an IO submission barrier in place, but
			 * AIO can do EOF updates during IO completion and hence
			 * we now need to wait for all of them to drain. Non-AIO
			 * DIO will have drained before we are given the
			 * XFS_IOLOCK_EXCL, and so for most cases this wait is a
			 * no-op.
			 */
			inode_dio_wait(inode);
			drained_dio = true;
			goto restart;
		}
	
		trace_xfs_zero_eof(ip, isize, iocb->ki_pos - isize);
		error = iomap_zero_range(inode, isize, iocb->ki_pos - isize,
				NULL, &xfs_buffered_write_iomap_ops);
		if (error)
			return error;
	} else
		spin_unlock(&ip->i_flags_lock);

	/*
	 * Updating the timestamps will grab the ilock again from
	 * xfs_fs_dirty_inode, so we have to call it after dropping the
	 * lock above.  Eventually we should look into a way to avoid
	 * the pointless lock roundtrip.
	 */
	return file_modified(file);
}

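/*
 * Direct I/O write completion handler, called from the iomap dio code once
 * all bios have completed. This is where CoW extents are remapped,
 * unwritten extents are converted and the in-core/on-disk file sizes are
 * updated for extending writes.
 */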
static int
xfs_dio_write_end_io(
	struct kiocb		*iocb,
	ssize_t			size,
	int			error,
	unsigned		flags)
{
	struct inode		*inode = file_inode(iocb->ki_filp);
	struct xfs_inode	*ip = XFS_I(inode);
	loff_t			offset = iocb->ki_pos;
	unsigned int		nofs_flag;

	trace_xfs_end_io_direct_write(ip, offset, size);

	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
		return -EIO;

	if (error)
		return error;
	if (!size)
		return 0;

	/*
	 * Capture amount written on completion as we can't reliably account
	 * for it on submission.
	 */
	XFS_STATS_ADD(ip->i_mount, xs_write_bytes, size);

	/*
	 * We can allocate memory here while doing writeback on behalf of
	 * memory reclaim.  To avoid memory allocation deadlocks set the
	 * task-wide nofs context for the following operations.
	 */
	nofs_flag = memalloc_nofs_save();

	if (flags & IOMAP_DIO_COW) {
		error = xfs_reflink_end_cow(ip, offset, size);
		if (error)
			goto out;
	}

	/*
	 * Unwritten conversion updates the in-core isize after extent
	 * conversion but before updating the on-disk size. Updating isize any
	 * earlier allows a racing dio read to find unwritten extents before
	 * they are converted.
	 */
	if (flags & IOMAP_DIO_UNWRITTEN) {
		error = xfs_iomap_write_unwritten(ip, offset, size, true);
		goto out;
	}

	/*
	 * We need to update the in-core inode size here so that we don't end up
	 * with the on-disk inode size being outside the in-core inode size. We
	 * have no other method of updating EOF for AIO, so always do it here
	 * if necessary.
	 *
	 * We need to lock the test/set EOF update as we can be racing with
	 * other IO completions here to update the EOF. Failing to serialise
	 * here can result in EOF moving backwards and Bad Things Happen when
	 * that occurs.
	 */
	spin_lock(&ip->i_flags_lock);
	if (offset + size > i_size_read(inode)) {
		i_size_write(inode, offset + size);
		spin_unlock(&ip->i_flags_lock);
		error = xfs_setfilesize(ip, offset, size);
	} else {
		spin_unlock(&ip->i_flags_lock);
	}

out:
	memalloc_nofs_restore(nofs_flag);
	return error;
}

static const struct iomap_dio_ops xfs_dio_write_ops = {
	.end_io		= xfs_dio_write_end_io,
};

/*
 * xfs_file_dio_aio_write - handle direct IO writes
 *
 * Lock the inode appropriately to prepare for and issue a direct IO write.
 * By separating it from the buffered write path we remove all the
 * tricky-to-follow locking changes and looping.
 *
 * If there are cached pages or we're extending the file, we need IOLOCK_EXCL
 * until we're sure the bytes at the new EOF have been zeroed and/or the cached
 * pages are flushed out.
 *
 * In most cases the direct IO writes will be done holding IOLOCK_SHARED
 * allowing them to be done in parallel with reads and other direct IO writes.
 * However, if the IO is not aligned to filesystem blocks, the direct IO layer
 * needs to do sub-block zeroing and that requires serialisation against other
 * direct IOs to the same block. In this case we need to serialise the
 * submission of the unaligned IOs so that we don't get racing block zeroing in
 * the dio layer.  To avoid the problem with aio, we also need to wait for
 * outstanding IOs to complete so that unwritten extent conversion is completed
 * before we try to map the overlapping block. This is currently implemented by
 * hitting it with a big hammer (i.e. inode_dio_wait()).
 *
 * Returns with locks held indicated by @iolock and errors indicated by
 * negative return values.
 */
STATIC ssize_t
xfs_file_dio_aio_write(
	struct kiocb		*iocb,
	struct iov_iter		*from)
{
	struct file		*file = iocb->ki_filp;
	struct address_space	*mapping = file->f_mapping;
	struct inode		*inode = mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	ssize_t			ret = 0;
	int			unaligned_io = 0;
	int			iolock;
	size_t			count = iov_iter_count(from);
	struct xfs_buftarg      *target = XFS_IS_REALTIME_INODE(ip) ?
					mp->m_rtdev_targp : mp->m_ddev_targp;

	/* DIO must be aligned to device logical sector size */
	if ((iocb->ki_pos | count) & target->bt_logical_sectormask)
		return -EINVAL;

	/*
	 * Don't take the exclusive iolock here unless the I/O is unaligned to
	 * the file system block size.  We don't need to consider the EOF
	 * extension case here because xfs_file_aio_write_checks() will relock
	 * the inode as necessary for EOF zeroing cases and fill out the new
	 * inode size as appropriate.
	 */
	if ((iocb->ki_pos & mp->m_blockmask) ||
	    ((iocb->ki_pos + count) & mp->m_blockmask)) {
		unaligned_io = 1;

		/*
		 * We can't properly handle unaligned direct I/O to reflink
		 * files yet, as we can't unshare a partial block.
		 */
		if (xfs_is_cow_inode(ip)) {
			trace_xfs_reflink_bounce_dio_write(ip, iocb->ki_pos, count);
			return -EREMCHG;
		}
		iolock = XFS_IOLOCK_EXCL;
	} else {
		iolock = XFS_IOLOCK_SHARED;
	}

	if (iocb->ki_flags & IOCB_NOWAIT) {
		/* unaligned dio always waits, bail */
		if (unaligned_io)
			return -EAGAIN;
		if (!xfs_ilock_nowait(ip, iolock))
			return -EAGAIN;
	} else {
		xfs_ilock(ip, iolock);
	}

	ret = xfs_file_aio_write_checks(iocb, from, &iolock);
	if (ret)
		goto out;
	count = iov_iter_count(from);

	/*
	 * If we are doing unaligned IO, we can't allow any other overlapping IO
	 * in-flight at the same time or we risk data corruption. Wait for all
	 * other IO to drain before we submit. If the IO is aligned, demote the
	 * iolock if we had to take the exclusive lock in
	 * xfs_file_aio_write_checks() for other reasons.
	 */
	if (unaligned_io) {
		inode_dio_wait(inode);
	} else if (iolock == XFS_IOLOCK_EXCL) {
		xfs_ilock_demote(ip, XFS_IOLOCK_EXCL);
		iolock = XFS_IOLOCK_SHARED;
	}

	trace_xfs_file_direct_write(ip, count, iocb->ki_pos);
	/*
	 * If unaligned, this is the only IO in-flight. Wait on it before we
	 * release the iolock to prevent subsequent overlapping IO.
	 */
	ret = iomap_dio_rw(iocb, from, &xfs_direct_write_iomap_ops,
			   &xfs_dio_write_ops,
			   is_sync_kiocb(iocb) || unaligned_io);
out:
	xfs_iunlock(ip, iolock);

	/*
	 * No fallback to buffered IO on errors for XFS, direct IO will either
	 * complete fully or fail.
	 */
	ASSERT(ret < 0 || ret == count);
	return ret;
}

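/*
 * DAX writes are synchronous copies into pmem and always run under the
 * exclusive IOLOCK; any size update for an extending write is completed
 * before the lock is dropped.
 */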
static noinline ssize_t
xfs_file_dax_write(
	struct kiocb		*iocb,
	struct iov_iter		*from)
{
	struct inode		*inode = iocb->ki_filp->f_mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	int			iolock = XFS_IOLOCK_EXCL;
	ssize_t			ret, error = 0;
	size_t			count;
	loff_t			pos;

	if (iocb->ki_flags & IOCB_NOWAIT) {
		if (!xfs_ilock_nowait(ip, iolock))
			return -EAGAIN;
	} else {
		xfs_ilock(ip, iolock);
	}

	ret = xfs_file_aio_write_checks(iocb, from, &iolock);
	if (ret)
		goto out;

	pos = iocb->ki_pos;
	count = iov_iter_count(from);

	trace_xfs_file_dax_write(ip, count, pos);
	ret = dax_iomap_rw(iocb, from, &xfs_direct_write_iomap_ops);
	if (ret > 0 && iocb->ki_pos > i_size_read(inode)) {
		i_size_write(inode, iocb->ki_pos);
		error = xfs_setfilesize(ip, pos, ret);
	}
out:
	xfs_iunlock(ip, iolock);
	if (error)
		return error;

	if (ret > 0) {
		XFS_STATS_ADD(ip->i_mount, xs_write_bytes, ret);

		/* Handle various SYNC-type writes */
		ret = generic_write_sync(iocb, ret);
	}
	return ret;
}

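/*
 * Buffered writes take the exclusive IOLOCK and go through the iomap page
 * cache code. On EDQUOT or ENOSPC we try to free speculative
 * preallocations and CoW blocks and retry the write before giving up.
 */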
STATIC ssize_t
xfs_file_buffered_aio_write(
	struct kiocb		*iocb,
	struct iov_iter		*from)
{
	struct file		*file = iocb->ki_filp;
	struct address_space	*mapping = file->f_mapping;
	struct inode		*inode = mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	ssize_t			ret;
	int			enospc = 0;
	int			iolock;

	if (iocb->ki_flags & IOCB_NOWAIT)
		return -EOPNOTSUPP;

write_retry:
	iolock = XFS_IOLOCK_EXCL;
	xfs_ilock(ip, iolock);

	ret = xfs_file_aio_write_checks(iocb, from, &iolock);
	if (ret)
		goto out;

	/* We can write back this queue in page reclaim */
	current->backing_dev_info = inode_to_bdi(inode);

	trace_xfs_file_buffered_write(ip, iov_iter_count(from), iocb->ki_pos);
	ret = iomap_file_buffered_write(iocb, from,
			&xfs_buffered_write_iomap_ops);
	if (likely(ret >= 0))
		iocb->ki_pos += ret;

	/*
	 * If we hit a space limit, try to free up some lingering preallocated
	 * space before returning an error. In the case of ENOSPC, first try to
	 * write back all dirty inodes to free up some of the excess reserved
	 * metadata space. This reduces the chances that the eofblocks scan
	 * waits on dirty mappings. Since xfs_flush_inodes() is serialized, this
	 * also behaves as a filter to prevent too many eofblocks scans from
	 * running at the same time.
	 */
	if (ret == -EDQUOT && !enospc) {
		xfs_iunlock(ip, iolock);
		enospc = xfs_inode_free_quota_eofblocks(ip);
		if (enospc)
			goto write_retry;
		enospc = xfs_inode_free_quota_cowblocks(ip);
		if (enospc)
			goto write_retry;
		iolock = 0;
	} else if (ret == -ENOSPC && !enospc) {
		struct xfs_eofblocks eofb = {0};

		enospc = 1;
		xfs_flush_inodes(ip->i_mount);

		xfs_iunlock(ip, iolock);
		eofb.eof_flags = XFS_EOF_FLAGS_SYNC;
		xfs_icache_free_eofblocks(ip->i_mount, &eofb);
		xfs_icache_free_cowblocks(ip->i_mount, &eofb);
		goto write_retry;
	}

	current->backing_dev_info = NULL;
out:
	if (iolock)
		xfs_iunlock(ip, iolock);

	if (ret > 0) {
		XFS_STATS_ADD(ip->i_mount, xs_write_bytes, ret);
		/* Handle various SYNC-type writes */
		ret = generic_write_sync(iocb, ret);
	}
	return ret;
}

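/*
 * Top-level write dispatcher: DAX, direct and buffered writes each have
 * their own locking rules, so fan out here. A direct write may fall back
 * to the buffered path, but only for unaligned writes to reflink files.
 */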
STATIC ssize_t
xfs_file_write_iter(
	struct kiocb		*iocb,
	struct iov_iter		*from)
{
	struct file		*file = iocb->ki_filp;
	struct address_space	*mapping = file->f_mapping;
	struct inode		*inode = mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	ssize_t			ret;
	size_t			ocount = iov_iter_count(from);

	XFS_STATS_INC(ip->i_mount, xs_write_calls);

	if (ocount == 0)
		return 0;

	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
		return -EIO;

	if (IS_DAX(inode))
		return xfs_file_dax_write(iocb, from);

	if (iocb->ki_flags & IOCB_DIRECT) {
		/*
		 * Allow a directio write to fall back to a buffered
		 * write *only* in the case that we're doing a reflink
		 * CoW.  In all other directio scenarios we do not
		 * allow an operation to fall back to buffered mode.
		 */
		ret = xfs_file_dio_aio_write(iocb, from);
		if (ret != -EREMCHG)
			return ret;
	}

	return xfs_file_buffered_aio_write(iocb, from);
}

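/*
 * Truncate and hole punch on DAX files must wait until all references to
 * the affected pages (e.g. from DMA) have been dropped. We wait for the
 * page refcount to fall to one, cycling the MMAPLOCK while we sleep.
 */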
static void
xfs_wait_dax_page(
	struct inode		*inode)
{
	struct xfs_inode        *ip = XFS_I(inode);

	xfs_iunlock(ip, XFS_MMAPLOCK_EXCL);
	schedule();
	xfs_ilock(ip, XFS_MMAPLOCK_EXCL);
}

static int
xfs_break_dax_layouts(
	struct inode		*inode,
	bool			*retry)
{
	struct page		*page;

	ASSERT(xfs_isilocked(XFS_I(inode), XFS_MMAPLOCK_EXCL));

	page = dax_layout_busy_page(inode->i_mapping);
	if (!page)
		return 0;

	*retry = true;
	return ___wait_var_event(&page->_refcount,
			atomic_read(&page->_refcount) == 1, TASK_INTERRUPTIBLE,
			0, 0, xfs_wait_dax_page(inode));
}

int
xfs_break_layouts(
	struct inode		*inode,
	uint			*iolock,
	enum layout_break_reason reason)
{
	bool			retry;
	int			error;

	ASSERT(xfs_isilocked(XFS_I(inode), XFS_IOLOCK_SHARED|XFS_IOLOCK_EXCL));

	do {
		retry = false;
		switch (reason) {
		case BREAK_UNMAP:
			error = xfs_break_dax_layouts(inode, &retry);
			if (error || retry)
				break;
			/* fall through */
		case BREAK_WRITE:
			error = xfs_break_leased_layouts(inode, iolock, &retry);
			break;
		default:
			WARN_ON_ONCE(1);
			error = -EINVAL;
		}
	} while (error == 0 && retry);

	return error;
}

#define	XFS_FALLOC_FL_SUPPORTED						\
		(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |		\
		 FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_ZERO_RANGE |	\
		 FALLOC_FL_INSERT_RANGE | FALLOC_FL_UNSHARE_RANGE)

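/*
 * fallocate() dispatcher: hole punch, collapse/insert range, zero range,
 * unshare and preallocation all funnel through here under the exclusive
 * IOLOCK and MMAPLOCK, after breaking leases and DAX layouts.
 */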
STATIC long
xfs_file_fallocate(
	struct file		*file,
	int			mode,
	loff_t			offset,
	loff_t			len)
{
	struct inode		*inode = file_inode(file);
	struct xfs_inode	*ip = XFS_I(inode);
	long			error;
	enum xfs_prealloc_flags	flags = 0;
	uint			iolock = XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL;
	loff_t			new_size = 0;
	bool			do_file_insert = false;

	if (!S_ISREG(inode->i_mode))
		return -EINVAL;
	if (mode & ~XFS_FALLOC_FL_SUPPORTED)
		return -EOPNOTSUPP;

	xfs_ilock(ip, iolock);
	error = xfs_break_layouts(inode, &iolock, BREAK_UNMAP);
	if (error)
		goto out_unlock;

	if (mode & FALLOC_FL_PUNCH_HOLE) {
		error = xfs_free_file_space(ip, offset, len);
		if (error)
			goto out_unlock;
	} else if (mode & FALLOC_FL_COLLAPSE_RANGE) {
		unsigned int blksize_mask = i_blocksize(inode) - 1;

		if (offset & blksize_mask || len & blksize_mask) {
			error = -EINVAL;
			goto out_unlock;
		}

		/*
		 * There is no need to overlap collapse range with EOF,
		 * in which case it is effectively a truncate operation
		 */
		if (offset + len >= i_size_read(inode)) {
			error = -EINVAL;
			goto out_unlock;
		}

		new_size = i_size_read(inode) - len;

		error = xfs_collapse_file_space(ip, offset, len);
		if (error)
			goto out_unlock;
	} else if (mode & FALLOC_FL_INSERT_RANGE) {
		unsigned int	blksize_mask = i_blocksize(inode) - 1;
		loff_t		isize = i_size_read(inode);

		if (offset & blksize_mask || len & blksize_mask) {
			error = -EINVAL;
			goto out_unlock;
		}

		/*
		 * New inode size must not exceed ->s_maxbytes, accounting for
		 * possible signed overflow.
		 */
		if (inode->i_sb->s_maxbytes - isize < len) {
			error = -EFBIG;
			goto out_unlock;
		}
		new_size = isize + len;

		/* Offset should be less than i_size */
		if (offset >= isize) {
			error = -EINVAL;
			goto out_unlock;
		}
		do_file_insert = true;
	} else {
		flags |= XFS_PREALLOC_SET;

		if (!(mode & FALLOC_FL_KEEP_SIZE) &&
		    offset + len > i_size_read(inode)) {
			new_size = offset + len;
			error = inode_newsize_ok(inode, new_size);
			if (error)
				goto out_unlock;
		}

		if (mode & FALLOC_FL_ZERO_RANGE) {
			error = xfs_zero_file_space(ip, offset, len);
		} else if (mode & FALLOC_FL_UNSHARE_RANGE) {
			error = xfs_reflink_unshare(ip, offset, len);
			if (error)
				goto out_unlock;

			if (!xfs_is_always_cow_inode(ip)) {
				error = xfs_alloc_file_space(ip, offset, len,
						XFS_BMAPI_PREALLOC);
			}
		} else {
			/*
			 * If always_cow mode we can't use preallocations and
			 * thus should not create them.
			 */
			if (xfs_is_always_cow_inode(ip)) {
				error = -EOPNOTSUPP;
				goto out_unlock;
			}

			error = xfs_alloc_file_space(ip, offset, len,
						     XFS_BMAPI_PREALLOC);
		}
		if (error)
			goto out_unlock;
	}

	if (file->f_flags & O_DSYNC)
		flags |= XFS_PREALLOC_SYNC;

	error = xfs_update_prealloc_flags(ip, flags);
	if (error)
		goto out_unlock;

	/* Change file size if needed */
	if (new_size) {
		struct iattr iattr;

		iattr.ia_valid = ATTR_SIZE;
		iattr.ia_size = new_size;
		error = xfs_vn_setattr_size(file_dentry(file), &iattr);
		if (error)
			goto out_unlock;
	}

	/*
	 * Perform hole insertion now that the file size has been
	 * updated so that if we crash during the operation we don't
	 * leave shifted extents past EOF and hence losing access to
	 * the data that is contained within them.
	 */
	if (do_file_insert)
		error = xfs_insert_file_space(ip, offset, len);

out_unlock:
	xfs_iunlock(ip, iolock);
	return error;
}

STATIC int
xfs_file_fadvise(
	struct file	*file,
	loff_t		start,
	loff_t		end,
	int		advice)
{
	struct xfs_inode *ip = XFS_I(file_inode(file));
	int ret;
	int lockflags = 0;

	/*
	 * Operations creating pages in page cache need protection from hole
	 * punching and similar ops
	 */
	if (advice == POSIX_FADV_WILLNEED) {
		lockflags = XFS_IOLOCK_SHARED;
		xfs_ilock(ip, lockflags);
	}
	ret = generic_fadvise(file, start, end, advice);
	if (lockflags)
		xfs_iunlock(ip, lockflags);
	return ret;
}

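/*
 * Remap (clone/dedupe) blocks from one file to another via reflink,
 * carrying the CoW extent size hint over when an entire file is shared.
 */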
STATIC loff_t
xfs_file_remap_range(
	struct file		*file_in,
	loff_t			pos_in,
	struct file		*file_out,
	loff_t			pos_out,
	loff_t			len,
	unsigned int		remap_flags)
{
	struct inode		*inode_in = file_inode(file_in);
	struct xfs_inode	*src = XFS_I(inode_in);
	struct inode		*inode_out = file_inode(file_out);
	struct xfs_inode	*dest = XFS_I(inode_out);
	struct xfs_mount	*mp = src->i_mount;
	loff_t			remapped = 0;
	xfs_extlen_t		cowextsize;
	int			ret;

	if (remap_flags & ~(REMAP_FILE_DEDUP | REMAP_FILE_ADVISORY))
		return -EINVAL;

	if (!xfs_sb_version_hasreflink(&mp->m_sb))
		return -EOPNOTSUPP;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	/* Prepare and then clone file data. */
	ret = xfs_reflink_remap_prep(file_in, pos_in, file_out, pos_out,
			&len, remap_flags);
	if (ret < 0 || len == 0)
		return ret;

	trace_xfs_reflink_remap_range(src, pos_in, len, dest, pos_out);

	ret = xfs_reflink_remap_blocks(src, pos_in, dest, pos_out, len,
			&remapped);
	if (ret)
		goto out_unlock;

	/*
	 * Carry the cowextsize hint from src to dest if we're sharing the
	 * entire source file to the entire destination file, the source file
	 * has a cowextsize hint, and the destination file does not.
	 */
	cowextsize = 0;
	if (pos_in == 0 && len == i_size_read(inode_in) &&
	    (src->i_d.di_flags2 & XFS_DIFLAG2_COWEXTSIZE) &&
	    pos_out == 0 && len >= i_size_read(inode_out) &&
	    !(dest->i_d.di_flags2 & XFS_DIFLAG2_COWEXTSIZE))
		cowextsize = src->i_d.di_cowextsize;

	ret = xfs_reflink_update_dest(dest, pos_out + len, cowextsize,
			remap_flags);

out_unlock:
	xfs_reflink_remap_unlock(file_in, file_out);
	if (ret)
		trace_xfs_reflink_remap_range_error(dest, ret, _RET_IP_);
	return remapped > 0 ? remapped : ret;
}

STATIC int
xfs_file_open(
	struct inode	*inode,
	struct file	*file)
{
	if (!(file->f_flags & O_LARGEFILE) && i_size_read(inode) > MAX_NON_LFS)
		return -EFBIG;
	if (XFS_FORCED_SHUTDOWN(XFS_M(inode->i_sb)))
		return -EIO;
	file->f_mode |= FMODE_NOWAIT;
	return 0;
}

STATIC int
xfs_dir_open(
	struct inode	*inode,
	struct file	*file)
{
	struct xfs_inode *ip = XFS_I(inode);
	int		mode;
	int		error;

	error = xfs_file_open(inode, file);
	if (error)
		return error;

	/*
	 * If there are any blocks, read-ahead block 0 as we're almost
	 * certain to have the next operation be a read there.
	 */
	mode = xfs_ilock_data_map_shared(ip);
	if (ip->i_d.di_nextents > 0)
		error = xfs_dir3_data_readahead(ip, 0, -1);
	xfs_iunlock(ip, mode);
	return error;
}

STATIC int
xfs_file_release(
	struct inode	*inode,
	struct file	*filp)
{
	return xfs_release(XFS_I(inode));
}

STATIC int
xfs_file_readdir(
	struct file	*file,
	struct dir_context *ctx)
{
	struct inode	*inode = file_inode(file);
	xfs_inode_t	*ip = XFS_I(inode);
	size_t		bufsize;

	/*
	 * The Linux API doesn't pass the total size of the buffer we read
	 * into down to the filesystem.  With the filldir concept it's not
	 * needed for correct information, but the XFS dir2 leaf code wants
	 * an estimate of the buffer size to calculate its
	 * readahead window and size the buffers used for mapping to
	 * physical blocks.
	 *
	 * Try to give it an estimate that's good enough, maybe at some
	 * point we can change the ->readdir prototype to include the
	 * buffer size.  For now we use the current glibc buffer size.
	 */
	bufsize = (size_t)min_t(loff_t, XFS_READDIR_BUFSIZE, ip->i_d.di_size);

	return xfs_readdir(NULL, ip, ctx, bufsize);
}

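/*
 * SEEK_HOLE and SEEK_DATA are implemented on top of the iomap seek
 * helpers; everything else is handled by generic_file_llseek().
 */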
STATIC loff_t
xfs_file_llseek(
	struct file	*file,
	loff_t		offset,
	int		whence)
{
	struct inode		*inode = file->f_mapping->host;

	if (XFS_FORCED_SHUTDOWN(XFS_I(inode)->i_mount))
		return -EIO;

	switch (whence) {
	default:
		return generic_file_llseek(file, offset, whence);
	case SEEK_HOLE:
		offset = iomap_seek_hole(inode, offset, &xfs_seek_iomap_ops);
		break;
	case SEEK_DATA:
		offset = iomap_seek_data(inode, offset, &xfs_seek_iomap_ops);
		break;
	}

	if (offset < 0)
		return offset;
	return vfs_setpos(file, offset, inode->i_sb->s_maxbytes);
}

/*
 * Locking for serialisation of IO during page faults. This results in a lock
 * ordering of:
 *
 * mmap_sem (MM)
 *   sb_start_pagefault(vfs, freeze)
 *     i_mmaplock (XFS - truncate serialisation)
 *       page_lock (MM)
 *         i_lock (XFS - extent map serialisation)
 */
static vm_fault_t
__xfs_filemap_fault(
	struct vm_fault		*vmf,
	enum page_entry_size	pe_size,
	bool			write_fault)
{
	struct inode		*inode = file_inode(vmf->vma->vm_file);
	struct xfs_inode	*ip = XFS_I(inode);
	vm_fault_t		ret;

	trace_xfs_filemap_fault(ip, pe_size, write_fault);

	if (write_fault) {
		sb_start_pagefault(inode->i_sb);
		file_update_time(vmf->vma->vm_file);
	}

	xfs_ilock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
	if (IS_DAX(inode)) {
		pfn_t pfn;

		ret = dax_iomap_fault(vmf, pe_size, &pfn, NULL,
				(write_fault && !vmf->cow_page) ?
				 &xfs_direct_write_iomap_ops :
				 &xfs_read_iomap_ops);
		if (ret & VM_FAULT_NEEDDSYNC)
			ret = dax_finish_sync_fault(vmf, pe_size, pfn);
	} else {
		if (write_fault)
			ret = iomap_page_mkwrite(vmf,
					&xfs_buffered_write_iomap_ops);
		else
			ret = filemap_fault(vmf);
	}
	xfs_iunlock(XFS_I(inode), XFS_MMAPLOCK_SHARED);

	if (write_fault)
		sb_end_pagefault(inode->i_sb);
	return ret;
}

static vm_fault_t
xfs_filemap_fault(
	struct vm_fault		*vmf)
{
	/* DAX can shortcut the normal fault path on write faults! */
	return __xfs_filemap_fault(vmf, PE_SIZE_PTE,
			IS_DAX(file_inode(vmf->vma->vm_file)) &&
			(vmf->flags & FAULT_FLAG_WRITE));
}

static vm_fault_t
xfs_filemap_huge_fault(
	struct vm_fault		*vmf,
	enum page_entry_size	pe_size)
{
	if (!IS_DAX(file_inode(vmf->vma->vm_file)))
		return VM_FAULT_FALLBACK;

	/* DAX can shortcut the normal fault path on write faults! */
	return __xfs_filemap_fault(vmf, pe_size,
			(vmf->flags & FAULT_FLAG_WRITE));
}

static vm_fault_t
xfs_filemap_page_mkwrite(
	struct vm_fault		*vmf)
{
	return __xfs_filemap_fault(vmf, PE_SIZE_PTE, true);
}

/*
 * pfn_mkwrite was originally intended to ensure we capture time stamp updates
 * on write faults. In reality, it needs to serialise against truncate and
 * prepare memory for writing, so handle it as a standard write fault.
 */
static vm_fault_t
xfs_filemap_pfn_mkwrite(
	struct vm_fault		*vmf)
{
	return __xfs_filemap_fault(vmf, PE_SIZE_PTE, true);
}

static const struct vm_operations_struct xfs_file_vm_ops = {
	.fault		= xfs_filemap_fault,
	.huge_fault	= xfs_filemap_huge_fault,
	.map_pages	= filemap_map_pages,
	.page_mkwrite	= xfs_filemap_page_mkwrite,
	.pfn_mkwrite	= xfs_filemap_pfn_mkwrite,
};

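/*
 * mmap setup: reject MAP_SYNC mappings that the backing device cannot
 * support, then install xfs_file_vm_ops so that faults serialise against
 * truncate via the MMAPLOCK.
 */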
STATIC int
xfs_file_mmap(
	struct file	*filp,
	struct vm_area_struct *vma)
{
	struct dax_device 	*dax_dev;

	dax_dev = xfs_find_daxdev_for_inode(file_inode(filp));
	/*
	 * We don't support synchronous mappings for non-DAX files and
	 * for DAX files if underneath dax_device is not synchronous.
	 */
	if (!daxdev_mapping_supported(vma, dax_dev))
		return -EOPNOTSUPP;

	file_accessed(filp);
	vma->vm_ops = &xfs_file_vm_ops;
	if (IS_DAX(file_inode(filp)))
		vma->vm_flags |= VM_HUGEPAGE;
	return 0;
}

const struct file_operations xfs_file_operations = {
	.llseek		= xfs_file_llseek,
	.read_iter	= xfs_file_read_iter,
	.write_iter	= xfs_file_write_iter,
	.splice_read	= generic_file_splice_read,
	.splice_write	= iter_file_splice_write,
	.iopoll		= iomap_dio_iopoll,
	.unlocked_ioctl	= xfs_file_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= xfs_file_compat_ioctl,
#endif
	.mmap		= xfs_file_mmap,
	.mmap_supported_flags = MAP_SYNC,
	.open		= xfs_file_open,
	.release	= xfs_file_release,
	.fsync		= xfs_file_fsync,
	.get_unmapped_area = thp_get_unmapped_area,
	.fallocate	= xfs_file_fallocate,
	.fadvise	= xfs_file_fadvise,
	.remap_file_range = xfs_file_remap_range,
};

const struct file_operations xfs_dir_file_operations = {
	.open		= xfs_dir_open,
	.read		= generic_read_dir,
	.iterate_shared	= xfs_file_readdir,
	.llseek		= generic_file_llseek,
	.unlocked_ioctl	= xfs_file_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= xfs_file_compat_ioctl,
#endif
	.fsync		= xfs_dir_fsync,
};