// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_inode_item.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_dir2.h"
#include "xfs_dir2_priv.h"
#include "xfs_ioctl.h"
#include "xfs_trace.h"
#include "xfs_log.h"
#include "xfs_icache.h"
#include "xfs_pnfs.h"
#include "xfs_iomap.h"
#include "xfs_reflink.h"

#include <linux/falloc.h>
#include <linux/backing-dev.h>
#include <linux/mman.h>
#include <linux/fadvise.h>

static const struct vm_operations_struct xfs_file_vm_ops;

int
xfs_update_prealloc_flags(
	struct xfs_inode	*ip,
	enum xfs_prealloc_flags	flags)
{
	struct xfs_trans	*tp;
	int			error;

	error = xfs_trans_alloc(ip->i_mount, &M_RES(ip->i_mount)->tr_writeid,
			0, 0, 0, &tp);
	if (error)
		return error;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);

	if (!(flags & XFS_PREALLOC_INVISIBLE)) {
		VFS_I(ip)->i_mode &= ~S_ISUID;
		if (VFS_I(ip)->i_mode & S_IXGRP)
			VFS_I(ip)->i_mode &= ~S_ISGID;
		xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
	}

	if (flags & XFS_PREALLOC_SET)
		ip->i_d.di_flags |= XFS_DIFLAG_PREALLOC;
	if (flags & XFS_PREALLOC_CLEAR)
		ip->i_d.di_flags &= ~XFS_DIFLAG_PREALLOC;

	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
	if (flags & XFS_PREALLOC_SYNC)
		xfs_trans_set_sync(tp);
	return xfs_trans_commit(tp);
}
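
/*
 * Usage sketch (userspace, not part of this file): the prealloc flag logic
 * above is ultimately driven by fallocate(2).  A plain fallocate() with
 * mode 0 reaches xfs_file_fallocate() below, which passes XFS_PREALLOC_SET
 * into xfs_update_prealloc_flags().  Path and sizes are made up; error
 * handling trimmed.
 *
 *	#include <fcntl.h>
 *
 *	int fd = open("/mnt/xfs/file", O_WRONLY | O_CREAT, 0644);
 *	fallocate(fd, 0, 0, 16 << 20);	// preallocate 16MiB, sets
 *					// XFS_DIFLAG_PREALLOC on the inode
 */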

/*
 * Fsync operations on directories are much simpler than on regular files,
 * as there is no file data to flush, and thus also no need for explicit
 * cache flush operations, and there are no non-transaction metadata updates
 * on directories either.
 */
STATIC int
xfs_dir_fsync(
	struct file		*file,
	loff_t			start,
	loff_t			end,
	int			datasync)
{
	struct xfs_inode	*ip = XFS_I(file->f_mapping->host);
	struct xfs_mount	*mp = ip->i_mount;
	xfs_lsn_t		lsn = 0;

	trace_xfs_dir_fsync(ip);

	xfs_ilock(ip, XFS_ILOCK_SHARED);
	if (xfs_ipincount(ip))
		lsn = ip->i_itemp->ili_last_lsn;
	xfs_iunlock(ip, XFS_ILOCK_SHARED);

	if (!lsn)
		return 0;
	return xfs_log_force_lsn(mp, lsn, XFS_LOG_SYNC, NULL);
}
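
/*
 * Usage sketch (userspace, not part of this file): this function is what an
 * application reaches when it fsyncs a directory to persist a namespace
 * operation such as rename(2).  Path is made up; error handling trimmed.
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	rename("/mnt/xfs/file.tmp", "/mnt/xfs/file");
 *	int dfd = open("/mnt/xfs", O_RDONLY | O_DIRECTORY);
 *	fsync(dfd);	// forces the log covering the rename to disk
 *	close(dfd);
 */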

STATIC int
xfs_file_fsync(
	struct file		*file,
	loff_t			start,
	loff_t			end,
	int			datasync)
{
	struct inode		*inode = file->f_mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	int			error = 0;
	int			log_flushed = 0;
	xfs_lsn_t		lsn = 0;

	trace_xfs_file_fsync(ip);

	error = file_write_and_wait_range(file, start, end);
	if (error)
		return error;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	xfs_iflags_clear(ip, XFS_ITRUNCATED);

	/*
	 * If we have an RT and/or log subvolume we need to make sure to flush
	 * the write cache of the device used for file data first.  This is to
	 * ensure newly written file data makes it to disk before logging the
	 * new inode size in case of an extending write.
	 */
	if (XFS_IS_REALTIME_INODE(ip))
		xfs_blkdev_issue_flush(mp->m_rtdev_targp);
	else if (mp->m_logdev_targp != mp->m_ddev_targp)
		xfs_blkdev_issue_flush(mp->m_ddev_targp);

	/*
	 * All metadata updates are logged, which means that we just have to
	 * flush the log up to the latest LSN that touched the inode. If we have
	 * concurrent fsync/fdatasync() calls, we need them to all block on the
	 * log force before we clear the ili_fsync_fields field. This ensures
	 * that we don't get a racing sync operation that does not wait for the
	 * metadata to hit the journal before returning. If we race with
	 * clearing the ili_fsync_fields, then all that will happen is the log
	 * force will do nothing as the lsn will already be on disk. We can't
	 * race with setting ili_fsync_fields because that is done under
	 * XFS_ILOCK_EXCL, and that can't happen because we hold the lock shared
	 * until after the ili_fsync_fields is cleared.
	 */
	xfs_ilock(ip, XFS_ILOCK_SHARED);
	if (xfs_ipincount(ip)) {
		if (!datasync ||
		    (ip->i_itemp->ili_fsync_fields & ~XFS_ILOG_TIMESTAMP))
			lsn = ip->i_itemp->ili_last_lsn;
	}

	if (lsn) {
		error = xfs_log_force_lsn(mp, lsn, XFS_LOG_SYNC, &log_flushed);
		ip->i_itemp->ili_fsync_fields = 0;
	}
	xfs_iunlock(ip, XFS_ILOCK_SHARED);

	/*
	 * If we only have a single device, and the log force above was
	 * a no-op we might have to flush the data device cache here.
	 * This can only happen for fdatasync/O_DSYNC if we were overwriting
	 * an already allocated file and thus do not have any metadata to
	 * commit.
	 */
	if (!log_flushed && !XFS_IS_REALTIME_INODE(ip) &&
	    mp->m_logdev_targp == mp->m_ddev_targp)
		xfs_blkdev_issue_flush(mp->m_ddev_targp);

	return error;
}
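
/*
 * Usage sketch (userspace, not part of this file): the @datasync branch
 * above is the difference between fsync(2) and fdatasync(2) - a pure
 * overwrite with only timestamps pending can skip the log force and at most
 * needs a data device cache flush.
 *
 *	#include <unistd.h>
 *
 *	pwrite(fd, buf, len, 0);	// overwrite of already allocated blocks
 *	fdatasync(fd);			// cheaper than fsync(fd) here
 */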

STATIC ssize_t
xfs_file_dio_aio_read(
	struct kiocb		*iocb,
	struct iov_iter		*to)
{
	struct xfs_inode	*ip = XFS_I(file_inode(iocb->ki_filp));
	size_t			count = iov_iter_count(to);
	ssize_t			ret;

	trace_xfs_file_direct_read(ip, count, iocb->ki_pos);

	if (!count)
		return 0; /* skip atime */

	file_accessed(iocb->ki_filp);

	xfs_ilock(ip, XFS_IOLOCK_SHARED);
	ret = iomap_dio_rw(iocb, to, &xfs_iomap_ops, NULL);
	xfs_iunlock(ip, XFS_IOLOCK_SHARED);

	return ret;
}
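
/*
 * Usage sketch (userspace, not part of this file): a read that takes the
 * direct IO path above.  The buffer, offset and length must be aligned to
 * the device logical sector size; 4096 is used as a conservative choice.
 * Build with -D_GNU_SOURCE for O_DIRECT; error handling trimmed.
 *
 *	#include <fcntl.h>
 *	#include <stdlib.h>
 *	#include <unistd.h>
 *
 *	void *buf;
 *	posix_memalign(&buf, 4096, 4096);
 *	int fd = open("/mnt/xfs/file", O_RDONLY | O_DIRECT);
 *	ssize_t n = pread(fd, buf, 4096, 0);
 */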

static noinline ssize_t
xfs_file_dax_read(
	struct kiocb		*iocb,
	struct iov_iter		*to)
{
	struct xfs_inode	*ip = XFS_I(iocb->ki_filp->f_mapping->host);
	size_t			count = iov_iter_count(to);
	ssize_t			ret = 0;

	trace_xfs_file_dax_read(ip, count, iocb->ki_pos);

	if (!count)
		return 0; /* skip atime */

	if (iocb->ki_flags & IOCB_NOWAIT) {
		if (!xfs_ilock_nowait(ip, XFS_IOLOCK_SHARED))
			return -EAGAIN;
	} else {
		xfs_ilock(ip, XFS_IOLOCK_SHARED);
	}

	ret = dax_iomap_rw(iocb, to, &xfs_iomap_ops);
	xfs_iunlock(ip, XFS_IOLOCK_SHARED);

	file_accessed(iocb->ki_filp);
	return ret;
}

STATIC ssize_t
xfs_file_buffered_aio_read(
	struct kiocb		*iocb,
	struct iov_iter		*to)
{
	struct xfs_inode	*ip = XFS_I(file_inode(iocb->ki_filp));
	ssize_t			ret;

	trace_xfs_file_buffered_read(ip, iov_iter_count(to), iocb->ki_pos);

	if (iocb->ki_flags & IOCB_NOWAIT) {
		if (!xfs_ilock_nowait(ip, XFS_IOLOCK_SHARED))
			return -EAGAIN;
	} else {
		xfs_ilock(ip, XFS_IOLOCK_SHARED);
	}
	ret = generic_file_read_iter(iocb, to);
	xfs_iunlock(ip, XFS_IOLOCK_SHARED);

	return ret;
}

STATIC ssize_t
xfs_file_read_iter(
	struct kiocb		*iocb,
	struct iov_iter		*to)
{
	struct inode		*inode = file_inode(iocb->ki_filp);
	struct xfs_mount	*mp = XFS_I(inode)->i_mount;
	ssize_t			ret = 0;

	XFS_STATS_INC(mp, xs_read_calls);

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	if (IS_DAX(inode))
		ret = xfs_file_dax_read(iocb, to);
	else if (iocb->ki_flags & IOCB_DIRECT)
		ret = xfs_file_dio_aio_read(iocb, to);
	else
		ret = xfs_file_buffered_aio_read(iocb, to);

	if (ret > 0)
		XFS_STATS_ADD(mp, xs_read_bytes, ret);
	return ret;
}

/*
 * Common pre-write limit and setup checks.
 *
 * Called with the iolock held either shared or exclusive according to
 * @iolock, and returns with it held.  Might upgrade the iolock to exclusive
 * if called for a direct write beyond i_size.
 */
STATIC ssize_t
xfs_file_aio_write_checks(
	struct kiocb		*iocb,
	struct iov_iter		*from,
	int			*iolock)
{
	struct file		*file = iocb->ki_filp;
	struct inode		*inode = file->f_mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	ssize_t			error = 0;
	size_t			count = iov_iter_count(from);
	bool			drained_dio = false;
	loff_t			isize;

restart:
	error = generic_write_checks(iocb, from);
	if (error <= 0)
		return error;

	error = xfs_break_layouts(inode, iolock, BREAK_WRITE);
	if (error)
		return error;

	/*
	 * For changing security info in file_remove_privs() we need i_rwsem
	 * exclusively.
	 */
	if (*iolock == XFS_IOLOCK_SHARED && !IS_NOSEC(inode)) {
		xfs_iunlock(ip, *iolock);
		*iolock = XFS_IOLOCK_EXCL;
		xfs_ilock(ip, *iolock);
		goto restart;
	}
	/*
	 * If the offset is beyond the size of the file, we need to zero any
	 * blocks that fall between the existing EOF and the start of this
	 * write.  If zeroing is needed and we are currently holding the
	 * iolock shared, we need to update it to exclusive which implies
	 * having to redo all checks before.
	 *
	 * We need to serialise against EOF updates that occur in IO
	 * completions here. We want to make sure that nobody is changing the
	 * size while we do this check until we have placed an IO barrier (i.e.
	 * hold the XFS_IOLOCK_EXCL) that prevents new IO from being dispatched.
	 * The spinlock effectively forms a memory barrier once we have the
	 * XFS_IOLOCK_EXCL so we are guaranteed to see the latest EOF value
	 * and hence be able to correctly determine if we need to run zeroing.
	 */
	spin_lock(&ip->i_flags_lock);
	isize = i_size_read(inode);
	if (iocb->ki_pos > isize) {
		spin_unlock(&ip->i_flags_lock);
		if (!drained_dio) {
			if (*iolock == XFS_IOLOCK_SHARED) {
				xfs_iunlock(ip, *iolock);
				*iolock = XFS_IOLOCK_EXCL;
				xfs_ilock(ip, *iolock);
				iov_iter_reexpand(from, count);
			}
			/*
			 * We now have an IO submission barrier in place, but
			 * AIO can do EOF updates during IO completion and hence
			 * we now need to wait for all of them to drain. Non-AIO
			 * DIO will have drained before we are given the
			 * XFS_IOLOCK_EXCL, and so for most cases this wait is a
			 * no-op.
			 */
			inode_dio_wait(inode);
			drained_dio = true;
			goto restart;
		}

		trace_xfs_zero_eof(ip, isize, iocb->ki_pos - isize);
		error = iomap_zero_range(inode, isize, iocb->ki_pos - isize,
				NULL, &xfs_iomap_ops);
		if (error)
			return error;
	} else
		spin_unlock(&ip->i_flags_lock);

	/*
	 * Updating the timestamps will grab the ilock again from
	 * xfs_fs_dirty_inode, so we have to call it after dropping the
	 * lock above.  Eventually we should look into a way to avoid
	 * the pointless lock roundtrip.
	 */
	return file_modified(file);
}

static int
xfs_dio_write_end_io(
	struct kiocb		*iocb,
	ssize_t			size,
	unsigned		flags)
{
	struct inode		*inode = file_inode(iocb->ki_filp);
	struct xfs_inode	*ip = XFS_I(inode);
	loff_t			offset = iocb->ki_pos;
	unsigned int		nofs_flag;
	int			error = 0;

	trace_xfs_end_io_direct_write(ip, offset, size);

	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
		return -EIO;

	if (size <= 0)
		return size;

	/*
	 * Capture amount written on completion as we can't reliably account
	 * for it on submission.
	 */
	XFS_STATS_ADD(ip->i_mount, xs_write_bytes, size);

	/*
	 * We can allocate memory here while doing writeback on behalf of
	 * memory reclaim.  To avoid memory allocation deadlocks set the
	 * task-wide nofs context for the following operations.
	 */
	nofs_flag = memalloc_nofs_save();

	if (flags & IOMAP_DIO_COW) {
		error = xfs_reflink_end_cow(ip, offset, size);
		if (error)
			goto out;
	}

	/*
	 * Unwritten conversion updates the in-core isize after extent
	 * conversion but before updating the on-disk size. Updating isize any
	 * earlier allows a racing dio read to find unwritten extents before
	 * they are converted.
	 */
	if (flags & IOMAP_DIO_UNWRITTEN) {
		error = xfs_iomap_write_unwritten(ip, offset, size, true);
		goto out;
	}

	/*
	 * We need to update the in-core inode size here so that we don't end up
	 * with the on-disk inode size being outside the in-core inode size. We
	 * have no other method of updating EOF for AIO, so always do it here
	 * if necessary.
	 *
	 * We need to lock the test/set EOF update as we can be racing with
	 * other IO completions here to update the EOF. Failing to serialise
	 * here can result in EOF moving backwards and Bad Things Happen when
	 * that occurs.
	 */
	spin_lock(&ip->i_flags_lock);
	if (offset + size > i_size_read(inode)) {
		i_size_write(inode, offset + size);
		spin_unlock(&ip->i_flags_lock);
		error = xfs_setfilesize(ip, offset, size);
	} else {
		spin_unlock(&ip->i_flags_lock);
	}

out:
	memalloc_nofs_restore(nofs_flag);
	return error;
}

/*
 * xfs_file_dio_aio_write - handle direct IO writes
 *
 * Lock the inode appropriately to prepare for and issue a direct IO write.
 * By separating it from the buffered write path we remove all the tricky to
 * follow locking changes and looping.
 *
 * If there are cached pages or we're extending the file, we need IOLOCK_EXCL
 * until we're sure the bytes at the new EOF have been zeroed and/or the cached
 * pages are flushed out.
 *
 * In most cases the direct IO writes will be done holding IOLOCK_SHARED
 * allowing them to be done in parallel with reads and other direct IO writes.
 * However, if the IO is not aligned to filesystem blocks, the direct IO layer
 * needs to do sub-block zeroing and that requires serialisation against other
 * direct IOs to the same block. In this case we need to serialise the
 * submission of the unaligned IOs so that we don't get racing block zeroing in
 * the dio layer.  To avoid the problem with aio, we also need to wait for
 * outstanding IOs to complete so that unwritten extent conversion is completed
 * before we try to map the overlapping block. This is currently implemented by
 * hitting it with a big hammer (i.e. inode_dio_wait()).
 *
 * Returns with locks held indicated by @iolock and errors indicated by
 * negative return values.
 */
STATIC ssize_t
xfs_file_dio_aio_write(
	struct kiocb		*iocb,
	struct iov_iter		*from)
{
	struct file		*file = iocb->ki_filp;
	struct address_space	*mapping = file->f_mapping;
	struct inode		*inode = mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	ssize_t			ret = 0;
	int			unaligned_io = 0;
	int			iolock;
	size_t			count = iov_iter_count(from);
	struct xfs_buftarg	*target = XFS_IS_REALTIME_INODE(ip) ?
					mp->m_rtdev_targp : mp->m_ddev_targp;

	/* DIO must be aligned to device logical sector size */
	if ((iocb->ki_pos | count) & target->bt_logical_sectormask)
		return -EINVAL;

	/*
	 * Don't take the exclusive iolock here unless the I/O is unaligned to
	 * the file system block size.  We don't need to consider the EOF
	 * extension case here because xfs_file_aio_write_checks() will relock
	 * the inode as necessary for EOF zeroing cases and fill out the new
	 * inode size as appropriate.
	 */
	if ((iocb->ki_pos & mp->m_blockmask) ||
	    ((iocb->ki_pos + count) & mp->m_blockmask)) {
		unaligned_io = 1;

		/*
		 * We can't properly handle unaligned direct I/O to reflink
		 * files yet, as we can't unshare a partial block.
		 */
		if (xfs_is_cow_inode(ip)) {
			trace_xfs_reflink_bounce_dio_write(ip, iocb->ki_pos, count);
			return -EREMCHG;
		}
		iolock = XFS_IOLOCK_EXCL;
	} else {
		iolock = XFS_IOLOCK_SHARED;
	}

	if (iocb->ki_flags & IOCB_NOWAIT) {
		/* unaligned dio always waits, bail */
		if (unaligned_io)
			return -EAGAIN;
		if (!xfs_ilock_nowait(ip, iolock))
			return -EAGAIN;
	} else {
		xfs_ilock(ip, iolock);
	}

	ret = xfs_file_aio_write_checks(iocb, from, &iolock);
	if (ret)
		goto out;
	count = iov_iter_count(from);

	/*
	 * If we are doing unaligned IO, we can't allow any other overlapping IO
	 * in-flight at the same time or we risk data corruption. Wait for all
	 * other IO to drain before we submit. If the IO is aligned, demote the
	 * iolock if we had to take the exclusive lock in
	 * xfs_file_aio_write_checks() for other reasons.
	 */
	if (unaligned_io) {
		inode_dio_wait(inode);
	} else if (iolock == XFS_IOLOCK_EXCL) {
		xfs_ilock_demote(ip, XFS_IOLOCK_EXCL);
		iolock = XFS_IOLOCK_SHARED;
	}

	trace_xfs_file_direct_write(ip, count, iocb->ki_pos);
	ret = iomap_dio_rw(iocb, from, &xfs_iomap_ops, xfs_dio_write_end_io);

	/*
	 * If unaligned, this is the only IO in-flight. If it has not yet
	 * completed, wait on it before we release the iolock to prevent
	 * subsequent overlapping IO.
	 */
	if (ret == -EIOCBQUEUED && unaligned_io)
		inode_dio_wait(inode);
out:
	xfs_iunlock(ip, iolock);

	/*
	 * No fallback to buffered IO on errors for XFS, direct IO will either
	 * complete fully or fail.
	 */
	ASSERT(ret < 0 || ret == count);
	return ret;
}
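
/*
 * Usage sketch (userspace, not part of this file): how the aligned vs
 * unaligned split above looks from an application, assuming 512-byte
 * logical sectors and 4096-byte filesystem blocks.  Illustrative only.
 *
 *	void *buf;
 *	posix_memalign(&buf, 4096, 8192);
 *	int fd = open("/mnt/xfs/file", O_WRONLY | O_DIRECT);
 *	pwrite(fd, buf, 8192, 0);	// block aligned: IOLOCK_SHARED,
 *					// runs in parallel with other DIO
 *	pwrite(fd, buf, 512, 8192);	// sector but not block aligned:
 *					// exclusive, drains in-flight IO
 *	pwrite(fd, buf, 100, 3);	// not sector aligned: -EINVAL
 */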

static noinline ssize_t
xfs_file_dax_write(
	struct kiocb		*iocb,
	struct iov_iter		*from)
{
	struct inode		*inode = iocb->ki_filp->f_mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	int			iolock = XFS_IOLOCK_EXCL;
	ssize_t			ret, error = 0;
	size_t			count;
	loff_t			pos;

	if (iocb->ki_flags & IOCB_NOWAIT) {
		if (!xfs_ilock_nowait(ip, iolock))
			return -EAGAIN;
	} else {
		xfs_ilock(ip, iolock);
	}

	ret = xfs_file_aio_write_checks(iocb, from, &iolock);
	if (ret)
		goto out;

	pos = iocb->ki_pos;
	count = iov_iter_count(from);

	trace_xfs_file_dax_write(ip, count, pos);
	ret = dax_iomap_rw(iocb, from, &xfs_iomap_ops);
	if (ret > 0 && iocb->ki_pos > i_size_read(inode)) {
		i_size_write(inode, iocb->ki_pos);
		error = xfs_setfilesize(ip, pos, ret);
	}
out:
	xfs_iunlock(ip, iolock);
	if (error)
		return error;

	if (ret > 0) {
		XFS_STATS_ADD(ip->i_mount, xs_write_bytes, ret);

		/* Handle various SYNC-type writes */
		ret = generic_write_sync(iocb, ret);
	}
	return ret;
}

STATIC ssize_t
xfs_file_buffered_aio_write(
	struct kiocb		*iocb,
	struct iov_iter		*from)
{
	struct file		*file = iocb->ki_filp;
	struct address_space	*mapping = file->f_mapping;
	struct inode		*inode = mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	ssize_t			ret;
	int			enospc = 0;
	int			iolock;

	if (iocb->ki_flags & IOCB_NOWAIT)
		return -EOPNOTSUPP;

write_retry:
	iolock = XFS_IOLOCK_EXCL;
	xfs_ilock(ip, iolock);

	ret = xfs_file_aio_write_checks(iocb, from, &iolock);
	if (ret)
		goto out;

	/* We can write back this queue in page reclaim */
	current->backing_dev_info = inode_to_bdi(inode);

	trace_xfs_file_buffered_write(ip, iov_iter_count(from), iocb->ki_pos);
	ret = iomap_file_buffered_write(iocb, from, &xfs_iomap_ops);
	if (likely(ret >= 0))
		iocb->ki_pos += ret;

	/*
	 * If we hit a space limit, try to free up some lingering preallocated
	 * space before returning an error. In the case of ENOSPC, first try to
	 * write back all dirty inodes to free up some of the excess reserved
	 * metadata space. This reduces the chances that the eofblocks scan
	 * waits on dirty mappings. Since xfs_flush_inodes() is serialized, this
	 * also behaves as a filter to prevent too many eofblocks scans from
	 * running at the same time.
	 */
	if (ret == -EDQUOT && !enospc) {
		xfs_iunlock(ip, iolock);
		enospc = xfs_inode_free_quota_eofblocks(ip);
		if (enospc)
			goto write_retry;
		enospc = xfs_inode_free_quota_cowblocks(ip);
		if (enospc)
			goto write_retry;
		iolock = 0;
	} else if (ret == -ENOSPC && !enospc) {
		struct xfs_eofblocks eofb = {0};

		enospc = 1;
		xfs_flush_inodes(ip->i_mount);

		xfs_iunlock(ip, iolock);
		eofb.eof_flags = XFS_EOF_FLAGS_SYNC;
		xfs_icache_free_eofblocks(ip->i_mount, &eofb);
		xfs_icache_free_cowblocks(ip->i_mount, &eofb);
		goto write_retry;
	}

	current->backing_dev_info = NULL;
out:
	if (iolock)
		xfs_iunlock(ip, iolock);

	if (ret > 0) {
		XFS_STATS_ADD(ip->i_mount, xs_write_bytes, ret);
		/* Handle various SYNC-type writes */
		ret = generic_write_sync(iocb, ret);
	}
	return ret;
}

STATIC ssize_t
xfs_file_write_iter(
	struct kiocb		*iocb,
	struct iov_iter		*from)
{
	struct file		*file = iocb->ki_filp;
	struct address_space	*mapping = file->f_mapping;
	struct inode		*inode = mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	ssize_t			ret;
	size_t			ocount = iov_iter_count(from);

	XFS_STATS_INC(ip->i_mount, xs_write_calls);

	if (ocount == 0)
		return 0;

	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
		return -EIO;

	if (IS_DAX(inode))
		return xfs_file_dax_write(iocb, from);

	if (iocb->ki_flags & IOCB_DIRECT) {
		/*
		 * Allow a directio write to fall back to a buffered
		 * write *only* in the case that we're doing a reflink
		 * CoW.  In all other directio scenarios we do not
		 * allow an operation to fall back to buffered mode.
		 */
		ret = xfs_file_dio_aio_write(iocb, from);
		if (ret != -EREMCHG)
			return ret;
	}

	return xfs_file_buffered_aio_write(iocb, from);
}
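
/*
 * Usage sketch (userspace, not part of this file): the IOCB_NOWAIT checks
 * in the write paths above back RWF_NOWAIT from pwritev2(2).  On this code
 * a buffered NOWAIT write fails with EOPNOTSUPP and a direct one returns
 * EAGAIN rather than blocking on locks or draining unaligned IO.
 *
 *	#include <errno.h>
 *	#include <sys/uio.h>
 *
 *	struct iovec iov = { .iov_base = buf, .iov_len = 4096 };
 *	ssize_t n = pwritev2(fd, &iov, 1, 0, RWF_NOWAIT);
 *	if (n < 0 && errno == EAGAIN)
 *		;	// would block - resubmit from a worker thread
 */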

static void
xfs_wait_dax_page(
	struct inode		*inode)
{
	struct xfs_inode	*ip = XFS_I(inode);

	xfs_iunlock(ip, XFS_MMAPLOCK_EXCL);
	schedule();
	xfs_ilock(ip, XFS_MMAPLOCK_EXCL);
}

static int
xfs_break_dax_layouts(
	struct inode		*inode,
	bool			*retry)
{
	struct page		*page;

	ASSERT(xfs_isilocked(XFS_I(inode), XFS_MMAPLOCK_EXCL));

	page = dax_layout_busy_page(inode->i_mapping);
	if (!page)
		return 0;

	*retry = true;
	return ___wait_var_event(&page->_refcount,
			atomic_read(&page->_refcount) == 1, TASK_INTERRUPTIBLE,
			0, 0, xfs_wait_dax_page(inode));
}

int
xfs_break_layouts(
	struct inode		*inode,
	uint			*iolock,
	enum layout_break_reason reason)
{
	bool			retry;
	int			error;

	ASSERT(xfs_isilocked(XFS_I(inode), XFS_IOLOCK_SHARED|XFS_IOLOCK_EXCL));

	do {
		retry = false;
		switch (reason) {
		case BREAK_UNMAP:
			error = xfs_break_dax_layouts(inode, &retry);
			if (error || retry)
				break;
			/* fall through */
		case BREAK_WRITE:
			error = xfs_break_leased_layouts(inode, iolock, &retry);
			break;
		default:
			WARN_ON_ONCE(1);
			error = -EINVAL;
		}
	} while (error == 0 && retry);

	return error;
}

#define	XFS_FALLOC_FL_SUPPORTED						\
		(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |		\
		 FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_ZERO_RANGE |	\
		 FALLOC_FL_INSERT_RANGE | FALLOC_FL_UNSHARE_RANGE)

STATIC long
xfs_file_fallocate(
	struct file		*file,
	int			mode,
	loff_t			offset,
	loff_t			len)
{
	struct inode		*inode = file_inode(file);
	struct xfs_inode	*ip = XFS_I(inode);
	long			error;
	enum xfs_prealloc_flags	flags = 0;
	uint			iolock = XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL;
	loff_t			new_size = 0;
	bool			do_file_insert = false;

	if (!S_ISREG(inode->i_mode))
		return -EINVAL;
	if (mode & ~XFS_FALLOC_FL_SUPPORTED)
		return -EOPNOTSUPP;

	xfs_ilock(ip, iolock);
	error = xfs_break_layouts(inode, &iolock, BREAK_UNMAP);
	if (error)
		goto out_unlock;

	if (mode & FALLOC_FL_PUNCH_HOLE) {
		error = xfs_free_file_space(ip, offset, len);
		if (error)
			goto out_unlock;
	} else if (mode & FALLOC_FL_COLLAPSE_RANGE) {
		unsigned int blksize_mask = i_blocksize(inode) - 1;

		if (offset & blksize_mask || len & blksize_mask) {
			error = -EINVAL;
			goto out_unlock;
		}

		/*
		 * There is no need to overlap collapse range with EOF,
		 * in which case it is effectively a truncate operation
		 */
		if (offset + len >= i_size_read(inode)) {
			error = -EINVAL;
			goto out_unlock;
		}

		new_size = i_size_read(inode) - len;

		error = xfs_collapse_file_space(ip, offset, len);
		if (error)
			goto out_unlock;
	} else if (mode & FALLOC_FL_INSERT_RANGE) {
		unsigned int	blksize_mask = i_blocksize(inode) - 1;
		loff_t		isize = i_size_read(inode);

		if (offset & blksize_mask || len & blksize_mask) {
			error = -EINVAL;
			goto out_unlock;
		}

		/*
		 * New inode size must not exceed ->s_maxbytes, accounting for
		 * possible signed overflow.
		 */
		if (inode->i_sb->s_maxbytes - isize < len) {
			error = -EFBIG;
			goto out_unlock;
		}
		new_size = isize + len;

		/* Offset should be less than i_size */
		if (offset >= isize) {
			error = -EINVAL;
			goto out_unlock;
		}
		do_file_insert = true;
	} else {
		flags |= XFS_PREALLOC_SET;

		if (!(mode & FALLOC_FL_KEEP_SIZE) &&
		    offset + len > i_size_read(inode)) {
			new_size = offset + len;
			error = inode_newsize_ok(inode, new_size);
			if (error)
				goto out_unlock;
		}

		if (mode & FALLOC_FL_ZERO_RANGE) {
			error = xfs_zero_file_space(ip, offset, len);
		} else if (mode & FALLOC_FL_UNSHARE_RANGE) {
			error = xfs_reflink_unshare(ip, offset, len);
			if (error)
				goto out_unlock;

			if (!xfs_is_always_cow_inode(ip)) {
				error = xfs_alloc_file_space(ip, offset, len,
						XFS_BMAPI_PREALLOC);
			}
		} else {
			/*
			 * If always_cow mode we can't use preallocations and
			 * thus should not create them.
			 */
			if (xfs_is_always_cow_inode(ip)) {
				error = -EOPNOTSUPP;
				goto out_unlock;
			}

			error = xfs_alloc_file_space(ip, offset, len,
						     XFS_BMAPI_PREALLOC);
		}
		if (error)
			goto out_unlock;
	}

	if (file->f_flags & O_DSYNC)
		flags |= XFS_PREALLOC_SYNC;

	error = xfs_update_prealloc_flags(ip, flags);
	if (error)
		goto out_unlock;

	/* Change file size if needed */
	if (new_size) {
		struct iattr iattr;

		iattr.ia_valid = ATTR_SIZE;
		iattr.ia_size = new_size;
		error = xfs_vn_setattr_size(file_dentry(file), &iattr);
		if (error)
			goto out_unlock;
	}

	/*
	 * Perform hole insertion now that the file size has been
	 * updated so that if we crash during the operation we don't
	 * leave shifted extents past EOF and hence losing access to
	 * the data that is contained within them.
	 */
	if (do_file_insert)
		error = xfs_insert_file_space(ip, offset, len);

out_unlock:
	xfs_iunlock(ip, iolock);
	return error;
}
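
/*
 * Usage sketch (userspace, not part of this file): the mode bits handled
 * above map directly onto fallocate(2) flags.  Offsets and lengths for the
 * range-shifting modes must be filesystem block aligned, as checked above.
 *
 *	#include <fcntl.h>
 *	#include <linux/falloc.h>
 *
 *	// Punch a hole without changing i_size (KEEP_SIZE is mandatory).
 *	fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE, 0, 1 << 20);
 *
 *	// Cut out one megabyte and shift the tail of the file down.
 *	fallocate(fd, FALLOC_FL_COLLAPSE_RANGE, 0, 1 << 20);
 *
 *	// Zero a range by allocating unwritten extents.
 *	fallocate(fd, FALLOC_FL_ZERO_RANGE, 0, 1 << 20);
 */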

STATIC int
xfs_file_fadvise(
	struct file	*file,
	loff_t		start,
	loff_t		end,
	int		advice)
{
	struct xfs_inode *ip = XFS_I(file_inode(file));
	int ret;
	int lockflags = 0;

	/*
	 * Operations creating pages in page cache need protection from hole
	 * punching and similar ops
	 */
	if (advice == POSIX_FADV_WILLNEED) {
		lockflags = XFS_IOLOCK_SHARED;
		xfs_ilock(ip, lockflags);
	}
	ret = generic_fadvise(file, start, end, advice);
	if (lockflags)
		xfs_iunlock(ip, lockflags);
	return ret;
}
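
/*
 * Usage sketch (userspace, not part of this file): the WILLNEED case above
 * is reached through posix_fadvise(2); the IOLOCK taken here keeps the
 * readahead from racing with a concurrent hole punch.
 *
 *	#include <fcntl.h>
 *
 *	posix_fadvise(fd, 0, 16 << 20, POSIX_FADV_WILLNEED);
 */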

STATIC loff_t
xfs_file_remap_range(
	struct file		*file_in,
	loff_t			pos_in,
	struct file		*file_out,
	loff_t			pos_out,
	loff_t			len,
	unsigned int		remap_flags)
{
	struct inode		*inode_in = file_inode(file_in);
	struct xfs_inode	*src = XFS_I(inode_in);
	struct inode		*inode_out = file_inode(file_out);
	struct xfs_inode	*dest = XFS_I(inode_out);
	struct xfs_mount	*mp = src->i_mount;
	loff_t			remapped = 0;
	xfs_extlen_t		cowextsize;
	int			ret;

	if (remap_flags & ~(REMAP_FILE_DEDUP | REMAP_FILE_ADVISORY))
		return -EINVAL;

	if (!xfs_sb_version_hasreflink(&mp->m_sb))
		return -EOPNOTSUPP;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	/* Prepare and then clone file data. */
	ret = xfs_reflink_remap_prep(file_in, pos_in, file_out, pos_out,
			&len, remap_flags);
	if (ret < 0 || len == 0)
		return ret;

	trace_xfs_reflink_remap_range(src, pos_in, len, dest, pos_out);

	ret = xfs_reflink_remap_blocks(src, pos_in, dest, pos_out, len,
			&remapped);
	if (ret)
		goto out_unlock;

	/*
	 * Carry the cowextsize hint from src to dest if we're sharing the
	 * entire source file to the entire destination file, the source file
	 * has a cowextsize hint, and the destination file does not.
	 */
	cowextsize = 0;
	if (pos_in == 0 && len == i_size_read(inode_in) &&
	    (src->i_d.di_flags2 & XFS_DIFLAG2_COWEXTSIZE) &&
	    pos_out == 0 && len >= i_size_read(inode_out) &&
	    !(dest->i_d.di_flags2 & XFS_DIFLAG2_COWEXTSIZE))
		cowextsize = src->i_d.di_cowextsize;

	ret = xfs_reflink_update_dest(dest, pos_out + len, cowextsize,
			remap_flags);

out_unlock:
	xfs_reflink_remap_unlock(file_in, file_out);
	if (ret)
		trace_xfs_reflink_remap_range_error(dest, ret, _RET_IP_);
	return remapped > 0 ? remapped : ret;
}
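
/*
 * Usage sketch (userspace, not part of this file): this is the backend for
 * the FICLONE/FICLONERANGE and FIDEDUPERANGE ioctls.  Paths are made up;
 * on a filesystem without reflink the clone fails with EOPNOTSUPP,
 * matching the check above.
 *
 *	#include <linux/fs.h>
 *	#include <sys/ioctl.h>
 *
 *	int src = open("/mnt/xfs/a", O_RDONLY);
 *	int dst = open("/mnt/xfs/b", O_WRONLY | O_CREAT, 0644);
 *	ioctl(dst, FICLONE, src);	// share all of a's extents with b
 */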

STATIC int
xfs_file_open(
	struct inode	*inode,
	struct file	*file)
{
	if (!(file->f_flags & O_LARGEFILE) && i_size_read(inode) > MAX_NON_LFS)
		return -EFBIG;
	if (XFS_FORCED_SHUTDOWN(XFS_M(inode->i_sb)))
		return -EIO;
	file->f_mode |= FMODE_NOWAIT;
	return 0;
}

STATIC int
xfs_dir_open(
	struct inode	*inode,
	struct file	*file)
{
	struct xfs_inode *ip = XFS_I(inode);
	int		mode;
	int		error;

	error = xfs_file_open(inode, file);
	if (error)
		return error;

	/*
	 * If there are any blocks, read-ahead block 0 as we're almost
	 * certain to have the next operation be a read there.
	 */
	mode = xfs_ilock_data_map_shared(ip);
	if (ip->i_d.di_nextents > 0)
		error = xfs_dir3_data_readahead(ip, 0, -1);
	xfs_iunlock(ip, mode);
	return error;
}

STATIC int
xfs_file_release(
	struct inode	*inode,
	struct file	*filp)
{
	return xfs_release(XFS_I(inode));
}

STATIC int
xfs_file_readdir(
	struct file	*file,
	struct dir_context *ctx)
{
	struct inode	*inode = file_inode(file);
	xfs_inode_t	*ip = XFS_I(inode);
	size_t		bufsize;

	/*
	 * The Linux API doesn't pass the total size of the buffer we read
	 * into down to the filesystem.  With the filldir concept it's not
	 * needed for correct information, but the XFS dir2 leaf code wants
	 * an estimate of the buffer size to calculate its readahead window
	 * and size the buffers used for mapping to physical blocks.
	 *
	 * Try to give it an estimate that's good enough, maybe at some
	 * point we can change the ->readdir prototype to include the
	 * buffer size.  For now we use the current glibc buffer size.
	 */
	bufsize = (size_t)min_t(loff_t, XFS_READDIR_BUFSIZE, ip->i_d.di_size);

	return xfs_readdir(NULL, ip, ctx, bufsize);
}

STATIC loff_t
xfs_file_llseek(
	struct file	*file,
	loff_t		offset,
	int		whence)
{
	struct inode		*inode = file->f_mapping->host;

	if (XFS_FORCED_SHUTDOWN(XFS_I(inode)->i_mount))
		return -EIO;

	switch (whence) {
	default:
		return generic_file_llseek(file, offset, whence);
	case SEEK_HOLE:
		offset = iomap_seek_hole(inode, offset, &xfs_seek_iomap_ops);
		break;
	case SEEK_DATA:
		offset = iomap_seek_data(inode, offset, &xfs_seek_iomap_ops);
		break;
	}

	if (offset < 0)
		return offset;
	return vfs_setpos(file, offset, inode->i_sb->s_maxbytes);
}
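
/*
 * Usage sketch (userspace, not part of this file): SEEK_HOLE/SEEK_DATA as
 * handled above let tools walk the mapped ranges of a sparse file.  Build
 * with -D_GNU_SOURCE; lseek(2) sets errno to ENXIO past the last data.
 *
 *	#include <unistd.h>
 *
 *	off_t data = 0, hole;
 *	while ((data = lseek(fd, data, SEEK_DATA)) >= 0) {
 *		hole = lseek(fd, data, SEEK_HOLE);
 *		// [data, hole) is a data extent
 *		data = hole;
 *	}
 */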

/*
 * Locking for serialisation of IO during page faults. This results in a lock
 * ordering of:
 *
 * mmap_sem (MM)
 *   sb_start_pagefault(vfs, freeze)
 *     i_mmaplock (XFS - truncate serialisation)
 *       page_lock (MM)
 *         i_lock (XFS - extent map serialisation)
 */
static vm_fault_t
__xfs_filemap_fault(
	struct vm_fault		*vmf,
	enum page_entry_size	pe_size,
	bool			write_fault)
{
	struct inode		*inode = file_inode(vmf->vma->vm_file);
	struct xfs_inode	*ip = XFS_I(inode);
	vm_fault_t		ret;

	trace_xfs_filemap_fault(ip, pe_size, write_fault);

	if (write_fault) {
		sb_start_pagefault(inode->i_sb);
		file_update_time(vmf->vma->vm_file);
	}

	xfs_ilock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
	if (IS_DAX(inode)) {
		pfn_t pfn;

		ret = dax_iomap_fault(vmf, pe_size, &pfn, NULL, &xfs_iomap_ops);
		if (ret & VM_FAULT_NEEDDSYNC)
			ret = dax_finish_sync_fault(vmf, pe_size, pfn);
	} else {
		if (write_fault)
			ret = iomap_page_mkwrite(vmf, &xfs_iomap_ops);
		else
			ret = filemap_fault(vmf);
	}
	xfs_iunlock(XFS_I(inode), XFS_MMAPLOCK_SHARED);

	if (write_fault)
		sb_end_pagefault(inode->i_sb);
	return ret;
}

static vm_fault_t
xfs_filemap_fault(
	struct vm_fault		*vmf)
{
	/* DAX can shortcut the normal fault path on write faults! */
	return __xfs_filemap_fault(vmf, PE_SIZE_PTE,
			IS_DAX(file_inode(vmf->vma->vm_file)) &&
			(vmf->flags & FAULT_FLAG_WRITE));
}

static vm_fault_t
xfs_filemap_huge_fault(
	struct vm_fault		*vmf,
	enum page_entry_size	pe_size)
{
	if (!IS_DAX(file_inode(vmf->vma->vm_file)))
		return VM_FAULT_FALLBACK;

	/* DAX can shortcut the normal fault path on write faults! */
	return __xfs_filemap_fault(vmf, pe_size,
			(vmf->flags & FAULT_FLAG_WRITE));
}

static vm_fault_t
xfs_filemap_page_mkwrite(
	struct vm_fault		*vmf)
{
	return __xfs_filemap_fault(vmf, PE_SIZE_PTE, true);
}

/*
 * pfn_mkwrite was originally intended to ensure we capture time stamp updates
 * on write faults. In reality, it needs to serialise against truncate and
 * prepare memory for writing, so handle it as a standard write fault.
 */
static vm_fault_t
xfs_filemap_pfn_mkwrite(
	struct vm_fault		*vmf)
{
	return __xfs_filemap_fault(vmf, PE_SIZE_PTE, true);
}

static const struct vm_operations_struct xfs_file_vm_ops = {
	.fault		= xfs_filemap_fault,
	.huge_fault	= xfs_filemap_huge_fault,
	.map_pages	= filemap_map_pages,
	.page_mkwrite	= xfs_filemap_page_mkwrite,
	.pfn_mkwrite	= xfs_filemap_pfn_mkwrite,
};

STATIC int
xfs_file_mmap(
	struct file	*filp,
	struct vm_area_struct *vma)
{
	struct dax_device	*dax_dev;

	dax_dev = xfs_find_daxdev_for_inode(file_inode(filp));
	/*
	 * We don't support synchronous mappings for non-DAX files, nor for
	 * DAX files whose underlying dax_device is not synchronous.
	 */
	if (!daxdev_mapping_supported(vma, dax_dev))
		return -EOPNOTSUPP;

	file_accessed(filp);
	vma->vm_ops = &xfs_file_vm_ops;
	if (IS_DAX(file_inode(filp)))
		vma->vm_flags |= VM_HUGEPAGE;
	return 0;
}
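
/*
 * Usage sketch (userspace, not part of this file): the
 * daxdev_mapping_supported() check above is what gates MAP_SYNC.  On a
 * synchronous DAX device a MAP_SYNC mapping lets stores be persisted with
 * CPU cache flushes alone, no msync(2)/fsync(2) needed; otherwise mmap(2)
 * fails with EOPNOTSUPP as returned above.
 *
 *	#include <sys/mman.h>
 *
 *	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *		       MAP_SHARED_VALIDATE | MAP_SYNC, fd, 0);
 */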

const struct file_operations xfs_file_operations = {
	.llseek		= xfs_file_llseek,
	.read_iter	= xfs_file_read_iter,
	.write_iter	= xfs_file_write_iter,
	.splice_read	= generic_file_splice_read,
	.splice_write	= iter_file_splice_write,
	.iopoll		= iomap_dio_iopoll,
	.unlocked_ioctl	= xfs_file_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= xfs_file_compat_ioctl,
#endif
	.mmap		= xfs_file_mmap,
	.mmap_supported_flags = MAP_SYNC,
	.open		= xfs_file_open,
	.release	= xfs_file_release,
	.fsync		= xfs_file_fsync,
	.get_unmapped_area = thp_get_unmapped_area,
	.fallocate	= xfs_file_fallocate,
	.fadvise	= xfs_file_fadvise,
	.remap_file_range = xfs_file_remap_range,
};

const struct file_operations xfs_dir_file_operations = {
	.open		= xfs_dir_open,
	.read		= generic_read_dir,
	.iterate_shared	= xfs_file_readdir,
	.llseek		= generic_file_llseek,
	.unlocked_ioctl	= xfs_file_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= xfs_file_compat_ioctl,
#endif
	.fsync		= xfs_dir_fsync,
};