// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_inode_item.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_dir2.h"
#include "xfs_dir2_priv.h"
#include "xfs_ioctl.h"
#include "xfs_trace.h"
#include "xfs_log.h"
#include "xfs_icache.h"
#include "xfs_pnfs.h"
#include "xfs_iomap.h"
#include "xfs_reflink.h"

#include <linux/falloc.h>
#include <linux/backing-dev.h>
#include <linux/mman.h>

static const struct vm_operations_struct xfs_file_vm_ops;

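/*
 * Set or clear the preallocation state (XFS_DIFLAG_PREALLOC) on an inode in
 * its own small transaction.  Unless the caller asks for an "invisible"
 * update, this also strips the setuid/setgid bits and bumps the timestamps,
 * as a normal write to the file would.
 */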
int
xfs_update_prealloc_flags(
	struct xfs_inode	*ip,
	enum xfs_prealloc_flags	flags)
{
	struct xfs_trans	*tp;
	int			error;

	error = xfs_trans_alloc(ip->i_mount, &M_RES(ip->i_mount)->tr_writeid,
			0, 0, 0, &tp);
	if (error)
		return error;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);

	if (!(flags & XFS_PREALLOC_INVISIBLE)) {
		VFS_I(ip)->i_mode &= ~S_ISUID;
		if (VFS_I(ip)->i_mode & S_IXGRP)
			VFS_I(ip)->i_mode &= ~S_ISGID;
		xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
	}

	if (flags & XFS_PREALLOC_SET)
		ip->i_d.di_flags |= XFS_DIFLAG_PREALLOC;
	if (flags & XFS_PREALLOC_CLEAR)
		ip->i_d.di_flags &= ~XFS_DIFLAG_PREALLOC;

	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
	if (flags & XFS_PREALLOC_SYNC)
		xfs_trans_set_sync(tp);
	return xfs_trans_commit(tp);
}

/*
 * Fsync operations on directories are much simpler than on regular files,
 * as there is no file data to flush, and thus also no need for explicit
 * cache flush operations, and there are no non-transaction metadata updates
 * on directories either.
 */
STATIC int
xfs_dir_fsync(
	struct file		*file,
	loff_t			start,
	loff_t			end,
	int			datasync)
{
	struct xfs_inode	*ip = XFS_I(file->f_mapping->host);
	struct xfs_mount	*mp = ip->i_mount;
	xfs_lsn_t		lsn = 0;

	trace_xfs_dir_fsync(ip);

	xfs_ilock(ip, XFS_ILOCK_SHARED);
	if (xfs_ipincount(ip))
		lsn = ip->i_itemp->ili_last_lsn;
	xfs_iunlock(ip, XFS_ILOCK_SHARED);

	if (!lsn)
		return 0;
	return xfs_log_force_lsn(mp, lsn, XFS_LOG_SYNC, NULL);
}

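/*
 * Regular file fsync: flush the data device write cache where needed, then
 * force the log up to the last LSN that touched the inode.  The detailed
 * cache flushing rules are spelled out in the comments below.
 */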
STATIC int
xfs_file_fsync(
	struct file		*file,
	loff_t			start,
	loff_t			end,
	int			datasync)
{
	struct inode		*inode = file->f_mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	int			error = 0;
	int			log_flushed = 0;
	xfs_lsn_t		lsn = 0;

	trace_xfs_file_fsync(ip);

	error = file_write_and_wait_range(file, start, end);
	if (error)
		return error;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	xfs_iflags_clear(ip, XFS_ITRUNCATED);

	/*
	 * If we have an RT and/or log subvolume we need to make sure to flush
	 * the write cache of the device used for file data first.  This is to
	 * ensure newly written file data make it to disk before logging the new
	 * inode size in case of an extending write.
	 */
	if (XFS_IS_REALTIME_INODE(ip))
		xfs_blkdev_issue_flush(mp->m_rtdev_targp);
	else if (mp->m_logdev_targp != mp->m_ddev_targp)
		xfs_blkdev_issue_flush(mp->m_ddev_targp);

	/*
	 * All metadata updates are logged, which means that we just have to
	 * flush the log up to the latest LSN that touched the inode. If we have
	 * concurrent fsync/fdatasync() calls, we need them to all block on the
	 * log force before we clear the ili_fsync_fields field. This ensures
	 * that we don't get a racing sync operation that does not wait for the
	 * metadata to hit the journal before returning. If we race with
	 * clearing the ili_fsync_fields, then all that will happen is the log
	 * force will do nothing as the lsn will already be on disk. We can't
	 * race with setting ili_fsync_fields because that is done under
	 * XFS_ILOCK_EXCL, and that can't happen because we hold the lock shared
	 * until after the ili_fsync_fields is cleared.
	 */
	xfs_ilock(ip, XFS_ILOCK_SHARED);
	if (xfs_ipincount(ip)) {
		if (!datasync ||
		    (ip->i_itemp->ili_fsync_fields & ~XFS_ILOG_TIMESTAMP))
			lsn = ip->i_itemp->ili_last_lsn;
	}

	if (lsn) {
		error = xfs_log_force_lsn(mp, lsn, XFS_LOG_SYNC, &log_flushed);
		ip->i_itemp->ili_fsync_fields = 0;
	}
	xfs_iunlock(ip, XFS_ILOCK_SHARED);

	/*
	 * If we only have a single device, and the log force above was
	 * a no-op we might have to flush the data device cache here.
	 * This can only happen for fdatasync/O_DSYNC if we were overwriting
	 * an already allocated file and thus do not have any metadata to
	 * commit.
	 */
	if (!log_flushed && !XFS_IS_REALTIME_INODE(ip) &&
	    mp->m_logdev_targp == mp->m_ddev_targp)
		xfs_blkdev_issue_flush(mp->m_ddev_targp);

	return error;
}

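/*
 * Read paths.  Note that all three variants below (direct IO, DAX and
 * buffered) take XFS_IOLOCK_SHARED around the actual read, which serialises
 * them against operations that take the iolock exclusively, such as truncate
 * and the buffered write path.
 */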
STATIC ssize_t
xfs_file_dio_aio_read(
	struct kiocb		*iocb,
	struct iov_iter		*to)
{
	struct xfs_inode	*ip = XFS_I(file_inode(iocb->ki_filp));
	size_t			count = iov_iter_count(to);
	ssize_t			ret;

	trace_xfs_file_direct_read(ip, count, iocb->ki_pos);

	if (!count)
		return 0; /* skip atime */

	file_accessed(iocb->ki_filp);

	xfs_ilock(ip, XFS_IOLOCK_SHARED);
	ret = iomap_dio_rw(iocb, to, &xfs_iomap_ops, NULL);
	xfs_iunlock(ip, XFS_IOLOCK_SHARED);

	return ret;
}

static noinline ssize_t
xfs_file_dax_read(
	struct kiocb		*iocb,
	struct iov_iter		*to)
{
	struct xfs_inode	*ip = XFS_I(iocb->ki_filp->f_mapping->host);
	size_t			count = iov_iter_count(to);
	ssize_t			ret = 0;

	trace_xfs_file_dax_read(ip, count, iocb->ki_pos);

	if (!count)
		return 0; /* skip atime */

	if (iocb->ki_flags & IOCB_NOWAIT) {
		if (!xfs_ilock_nowait(ip, XFS_IOLOCK_SHARED))
			return -EAGAIN;
	} else {
		xfs_ilock(ip, XFS_IOLOCK_SHARED);
	}

	ret = dax_iomap_rw(iocb, to, &xfs_iomap_ops);
	xfs_iunlock(ip, XFS_IOLOCK_SHARED);

	file_accessed(iocb->ki_filp);
	return ret;
}

STATIC ssize_t
xfs_file_buffered_aio_read(
	struct kiocb		*iocb,
	struct iov_iter		*to)
{
	struct xfs_inode	*ip = XFS_I(file_inode(iocb->ki_filp));
	ssize_t			ret;

	trace_xfs_file_buffered_read(ip, iov_iter_count(to), iocb->ki_pos);

	if (iocb->ki_flags & IOCB_NOWAIT) {
		if (!xfs_ilock_nowait(ip, XFS_IOLOCK_SHARED))
			return -EAGAIN;
	} else {
		xfs_ilock(ip, XFS_IOLOCK_SHARED);
	}
	ret = generic_file_read_iter(iocb, to);
	xfs_iunlock(ip, XFS_IOLOCK_SHARED);

	return ret;
}

STATIC ssize_t
xfs_file_read_iter(
	struct kiocb		*iocb,
	struct iov_iter		*to)
{
	struct inode		*inode = file_inode(iocb->ki_filp);
	struct xfs_mount	*mp = XFS_I(inode)->i_mount;
	ssize_t			ret = 0;

	XFS_STATS_INC(mp, xs_read_calls);

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	if (IS_DAX(inode))
		ret = xfs_file_dax_read(iocb, to);
	else if (iocb->ki_flags & IOCB_DIRECT)
		ret = xfs_file_dio_aio_read(iocb, to);
	else
		ret = xfs_file_buffered_aio_read(iocb, to);

	if (ret > 0)
		XFS_STATS_ADD(mp, xs_read_bytes, ret);
	return ret;
}

/*
 * Common pre-write limit and setup checks.
 *
 * Called with the iolock held either shared or exclusive according to
 * @iolock, and returns with it held.  Might upgrade the iolock to exclusive
 * if called for a direct write beyond i_size.
 */
STATIC ssize_t
xfs_file_aio_write_checks(
	struct kiocb		*iocb,
	struct iov_iter		*from,
	int			*iolock)
{
	struct file		*file = iocb->ki_filp;
	struct inode		*inode = file->f_mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	ssize_t			error = 0;
	size_t			count = iov_iter_count(from);
	bool			drained_dio = false;
	loff_t			isize;

restart:
	error = generic_write_checks(iocb, from);
	if (error <= 0)
		return error;

	error = xfs_break_layouts(inode, iolock, BREAK_WRITE);
	if (error)
		return error;

	/*
	 * For changing security info in file_remove_privs() we need i_rwsem
	 * exclusively.
	 */
	if (*iolock == XFS_IOLOCK_SHARED && !IS_NOSEC(inode)) {
		xfs_iunlock(ip, *iolock);
		*iolock = XFS_IOLOCK_EXCL;
		xfs_ilock(ip, *iolock);
		goto restart;
	}
	/*
	 * If the offset is beyond the size of the file, we need to zero any
	 * blocks that fall between the existing EOF and the start of this
	 * write.  If zeroing is needed and we are currently holding the
	 * iolock shared, we need to update it to exclusive which implies
	 * having to redo all checks before.
	 *
	 * We need to serialise against EOF updates that occur in IO
	 * completions here. We want to make sure that nobody is changing the
	 * size while we do this check until we have placed an IO barrier (i.e.
	 * hold the XFS_IOLOCK_EXCL) that prevents new IO from being dispatched.
	 * The spinlock effectively forms a memory barrier once we have the
	 * XFS_IOLOCK_EXCL so we are guaranteed to see the latest EOF value
	 * and hence be able to correctly determine if we need to run zeroing.
	 */
	spin_lock(&ip->i_flags_lock);
	isize = i_size_read(inode);
	if (iocb->ki_pos > isize) {
		spin_unlock(&ip->i_flags_lock);
		if (!drained_dio) {
			if (*iolock == XFS_IOLOCK_SHARED) {
				xfs_iunlock(ip, *iolock);
				*iolock = XFS_IOLOCK_EXCL;
				xfs_ilock(ip, *iolock);
				iov_iter_reexpand(from, count);
			}
			/*
			 * We now have an IO submission barrier in place, but
			 * AIO can do EOF updates during IO completion and hence
			 * we now need to wait for all of them to drain. Non-AIO
			 * DIO will have drained before we are given the
			 * XFS_IOLOCK_EXCL, and so for most cases this wait is a
			 * no-op.
			 */
			inode_dio_wait(inode);
			drained_dio = true;
			goto restart;
		}

		trace_xfs_zero_eof(ip, isize, iocb->ki_pos - isize);
		error = iomap_zero_range(inode, isize, iocb->ki_pos - isize,
				NULL, &xfs_iomap_ops);
		if (error)
			return error;
	} else
		spin_unlock(&ip->i_flags_lock);

	/*
	 * Updating the timestamps will grab the ilock again from
	 * xfs_fs_dirty_inode, so we have to call it after dropping the
	 * lock above.  Eventually we should look into a way to avoid
	 * the pointless lock roundtrip.
	 */
	return file_modified(file);
}

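/*
 * Completion handler for direct IO writes, run by the iomap dio code once the
 * write (or each AIO write) finishes.  It ends any pending copy-on-write
 * remapping, converts unwritten extents, and pushes out the new file size
 * when the write extended the file.
 */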
static int
xfs_dio_write_end_io(
	struct kiocb		*iocb,
	ssize_t			size,
	unsigned		flags)
{
	struct inode		*inode = file_inode(iocb->ki_filp);
	struct xfs_inode	*ip = XFS_I(inode);
	loff_t			offset = iocb->ki_pos;
	unsigned int		nofs_flag;
	int			error = 0;

	trace_xfs_end_io_direct_write(ip, offset, size);

	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
		return -EIO;

	if (size <= 0)
		return size;

	/*
	 * Capture amount written on completion as we can't reliably account
	 * for it on submission.
	 */
	XFS_STATS_ADD(ip->i_mount, xs_write_bytes, size);

	/*
	 * We can allocate memory here while doing writeback on behalf of
	 * memory reclaim.  To avoid memory allocation deadlocks set the
	 * task-wide nofs context for the following operations.
	 */
	nofs_flag = memalloc_nofs_save();

	if (flags & IOMAP_DIO_COW) {
		error = xfs_reflink_end_cow(ip, offset, size);
		if (error)
			goto out;
	}

	/*
	 * Unwritten conversion updates the in-core isize after extent
	 * conversion but before updating the on-disk size. Updating isize any
	 * earlier allows a racing dio read to find unwritten extents before
	 * they are converted.
	 */
	if (flags & IOMAP_DIO_UNWRITTEN) {
		error = xfs_iomap_write_unwritten(ip, offset, size, true);
		goto out;
	}

	/*
	 * We need to update the in-core inode size here so that we don't end up
	 * with the on-disk inode size being outside the in-core inode size. We
	 * have no other method of updating EOF for AIO, so always do it here
	 * if necessary.
	 *
	 * We need to lock the test/set EOF update as we can be racing with
	 * other IO completions here to update the EOF. Failing to serialise
	 * here can result in EOF moving backwards and Bad Things Happen when
	 * that occurs.
	 */
	spin_lock(&ip->i_flags_lock);
	if (offset + size > i_size_read(inode)) {
		i_size_write(inode, offset + size);
		spin_unlock(&ip->i_flags_lock);
		error = xfs_setfilesize(ip, offset, size);
	} else {
		spin_unlock(&ip->i_flags_lock);
	}

out:
	memalloc_nofs_restore(nofs_flag);
	return error;
}

/*
 * xfs_file_dio_aio_write - handle direct IO writes
 *
 * Lock the inode appropriately to prepare for and issue a direct IO write.
 * By separating it from the buffered write path we remove all the
 * tricky-to-follow locking changes and looping.
 *
 * If there are cached pages or we're extending the file, we need IOLOCK_EXCL
 * until we're sure the bytes at the new EOF have been zeroed and/or the cached
 * pages are flushed out.
 *
 * In most cases the direct IO writes will be done holding IOLOCK_SHARED
 * allowing them to be done in parallel with reads and other direct IO writes.
 * However, if the IO is not aligned to filesystem blocks, the direct IO layer
 * needs to do sub-block zeroing and that requires serialisation against other
 * direct IOs to the same block. In this case we need to serialise the
 * submission of the unaligned IOs so that we don't get racing block zeroing in
 * the dio layer.  To avoid the problem with aio, we also need to wait for
 * outstanding IOs to complete so that unwritten extent conversion is completed
 * before we try to map the overlapping block. This is currently implemented by
 * hitting it with a big hammer (i.e. inode_dio_wait()).
 *
 * Returns with locks held indicated by @iolock and errors indicated by
 * negative return values.
 */
STATIC ssize_t
xfs_file_dio_aio_write(
	struct kiocb		*iocb,
	struct iov_iter		*from)
{
	struct file		*file = iocb->ki_filp;
	struct address_space	*mapping = file->f_mapping;
	struct inode		*inode = mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	ssize_t			ret = 0;
	int			unaligned_io = 0;
	int			iolock;
	size_t			count = iov_iter_count(from);
	struct xfs_buftarg	*target = XFS_IS_REALTIME_INODE(ip) ?
					mp->m_rtdev_targp : mp->m_ddev_targp;

	/* DIO must be aligned to device logical sector size */
	if ((iocb->ki_pos | count) & target->bt_logical_sectormask)
		return -EINVAL;

	/*
	 * Don't take the exclusive iolock here unless the I/O is unaligned to
	 * the file system block size.  We don't need to consider the EOF
	 * extension case here because xfs_file_aio_write_checks() will relock
	 * the inode as necessary for EOF zeroing cases and fill out the new
	 * inode size as appropriate.
	 */
	if ((iocb->ki_pos & mp->m_blockmask) ||
	    ((iocb->ki_pos + count) & mp->m_blockmask)) {
		unaligned_io = 1;

		/*
		 * We can't properly handle unaligned direct I/O to reflink
		 * files yet, as we can't unshare a partial block.
		 */
		if (xfs_is_cow_inode(ip)) {
			trace_xfs_reflink_bounce_dio_write(ip, iocb->ki_pos, count);
			return -EREMCHG;
		}
		iolock = XFS_IOLOCK_EXCL;
	} else {
		iolock = XFS_IOLOCK_SHARED;
	}

	if (iocb->ki_flags & IOCB_NOWAIT) {
		/* unaligned dio always waits, bail */
		if (unaligned_io)
			return -EAGAIN;
		if (!xfs_ilock_nowait(ip, iolock))
			return -EAGAIN;
	} else {
		xfs_ilock(ip, iolock);
	}

	ret = xfs_file_aio_write_checks(iocb, from, &iolock);
	if (ret)
		goto out;
	count = iov_iter_count(from);

	/*
	 * If we are doing unaligned IO, we can't allow any other overlapping IO
	 * in-flight at the same time or we risk data corruption. Wait for all
	 * other IO to drain before we submit. If the IO is aligned, demote the
	 * iolock if we had to take the exclusive lock in
	 * xfs_file_aio_write_checks() for other reasons.
	 */
	if (unaligned_io) {
		inode_dio_wait(inode);
	} else if (iolock == XFS_IOLOCK_EXCL) {
		xfs_ilock_demote(ip, XFS_IOLOCK_EXCL);
		iolock = XFS_IOLOCK_SHARED;
	}

	trace_xfs_file_direct_write(ip, count, iocb->ki_pos);
	ret = iomap_dio_rw(iocb, from, &xfs_iomap_ops, xfs_dio_write_end_io);

	/*
	 * If unaligned, this is the only IO in-flight. If it has not yet
	 * completed, wait on it before we release the iolock to prevent
	 * subsequent overlapping IO.
	 */
	if (ret == -EIOCBQUEUED && unaligned_io)
		inode_dio_wait(inode);
out:
	xfs_iunlock(ip, iolock);

	/*
	 * No fallback to buffered IO on errors for XFS, direct IO will either
	 * complete fully or fail.
	 */
	ASSERT(ret < 0 || ret == count);
	return ret;
}

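/*
 * DAX writes copy data synchronously to the backing device through
 * dax_iomap_rw() and always hold the iolock exclusively across the copy.
 */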
static noinline ssize_t
xfs_file_dax_write(
	struct kiocb		*iocb,
	struct iov_iter		*from)
{
	struct inode		*inode = iocb->ki_filp->f_mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	int			iolock = XFS_IOLOCK_EXCL;
	ssize_t			ret, error = 0;
	size_t			count;
	loff_t			pos;

	if (iocb->ki_flags & IOCB_NOWAIT) {
		if (!xfs_ilock_nowait(ip, iolock))
			return -EAGAIN;
	} else {
		xfs_ilock(ip, iolock);
	}

	ret = xfs_file_aio_write_checks(iocb, from, &iolock);
	if (ret)
		goto out;

	pos = iocb->ki_pos;
	count = iov_iter_count(from);

	trace_xfs_file_dax_write(ip, count, pos);
	ret = dax_iomap_rw(iocb, from, &xfs_iomap_ops);
	if (ret > 0 && iocb->ki_pos > i_size_read(inode)) {
		i_size_write(inode, iocb->ki_pos);
		error = xfs_setfilesize(ip, pos, ret);
	}
out:
	xfs_iunlock(ip, iolock);
	if (error)
		return error;

	if (ret > 0) {
		XFS_STATS_ADD(ip->i_mount, xs_write_bytes, ret);

		/* Handle various SYNC-type writes */
		ret = generic_write_sync(iocb, ret);
	}
	return ret;
}

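/*
 * Buffered writes take the iolock exclusively.  On EDQUOT or ENOSPC the write
 * is retried after trying to free up speculatively preallocated blocks, as
 * described in the comment in the function body.
 */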
STATIC ssize_t
xfs_file_buffered_aio_write(
	struct kiocb		*iocb,
	struct iov_iter		*from)
{
	struct file		*file = iocb->ki_filp;
	struct address_space	*mapping = file->f_mapping;
	struct inode		*inode = mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	ssize_t			ret;
	int			enospc = 0;
	int			iolock;

	if (iocb->ki_flags & IOCB_NOWAIT)
		return -EOPNOTSUPP;

write_retry:
	iolock = XFS_IOLOCK_EXCL;
	xfs_ilock(ip, iolock);

	ret = xfs_file_aio_write_checks(iocb, from, &iolock);
	if (ret)
		goto out;

	/* We can write back this queue in page reclaim */
	current->backing_dev_info = inode_to_bdi(inode);

	trace_xfs_file_buffered_write(ip, iov_iter_count(from), iocb->ki_pos);
	ret = iomap_file_buffered_write(iocb, from, &xfs_iomap_ops);
	if (likely(ret >= 0))
		iocb->ki_pos += ret;

	/*
	 * If we hit a space limit, try to free up some lingering preallocated
	 * space before returning an error. In the case of ENOSPC, first try to
	 * write back all dirty inodes to free up some of the excess reserved
	 * metadata space. This reduces the chances that the eofblocks scan
	 * waits on dirty mappings. Since xfs_flush_inodes() is serialized, this
	 * also behaves as a filter to prevent too many eofblocks scans from
	 * running at the same time.
	 */
	if (ret == -EDQUOT && !enospc) {
		xfs_iunlock(ip, iolock);
		enospc = xfs_inode_free_quota_eofblocks(ip);
		if (enospc)
			goto write_retry;
		enospc = xfs_inode_free_quota_cowblocks(ip);
		if (enospc)
			goto write_retry;
		iolock = 0;
	} else if (ret == -ENOSPC && !enospc) {
		struct xfs_eofblocks eofb = {0};

		enospc = 1;
		xfs_flush_inodes(ip->i_mount);

		xfs_iunlock(ip, iolock);
		eofb.eof_flags = XFS_EOF_FLAGS_SYNC;
		xfs_icache_free_eofblocks(ip->i_mount, &eofb);
		xfs_icache_free_cowblocks(ip->i_mount, &eofb);
		goto write_retry;
	}

	current->backing_dev_info = NULL;
out:
	if (iolock)
		xfs_iunlock(ip, iolock);

	if (ret > 0) {
		XFS_STATS_ADD(ip->i_mount, xs_write_bytes, ret);
		/* Handle various SYNC-type writes */
		ret = generic_write_sync(iocb, ret);
	}
	return ret;
}

STATIC ssize_t
xfs_file_write_iter(
	struct kiocb		*iocb,
	struct iov_iter		*from)
{
	struct file		*file = iocb->ki_filp;
	struct address_space	*mapping = file->f_mapping;
	struct inode		*inode = mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	ssize_t			ret;
	size_t			ocount = iov_iter_count(from);

	XFS_STATS_INC(ip->i_mount, xs_write_calls);

	if (ocount == 0)
		return 0;

	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
		return -EIO;

	if (IS_DAX(inode))
		return xfs_file_dax_write(iocb, from);

	if (iocb->ki_flags & IOCB_DIRECT) {
		/*
		 * Allow a directio write to fall back to a buffered
		 * write *only* in the case that we're doing a reflink
		 * CoW.  In all other directio scenarios we do not
		 * allow an operation to fall back to buffered mode.
		 */
		ret = xfs_file_dio_aio_write(iocb, from);
		if (ret != -EREMCHG)
			return ret;
	}

	return xfs_file_buffered_aio_write(iocb, from);
}

static void
xfs_wait_dax_page(
	struct inode		*inode)
{
	struct xfs_inode	*ip = XFS_I(inode);

	xfs_iunlock(ip, XFS_MMAPLOCK_EXCL);
	schedule();
	xfs_ilock(ip, XFS_MMAPLOCK_EXCL);
}

static int
xfs_break_dax_layouts(
	struct inode		*inode,
	bool			*retry)
{
	struct page		*page;

	ASSERT(xfs_isilocked(XFS_I(inode), XFS_MMAPLOCK_EXCL));

	page = dax_layout_busy_page(inode->i_mapping);
	if (!page)
		return 0;

	*retry = true;
	return ___wait_var_event(&page->_refcount,
			atomic_read(&page->_refcount) == 1, TASK_INTERRUPTIBLE,
			0, 0, xfs_wait_dax_page(inode));
}

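/*
 * Break any layouts that would conflict with the operation described by
 * @reason: for BREAK_UNMAP wait for busy DAX pages to go idle as well as
 * breaking leased layouts, for BREAK_WRITE only break the leases.  Loops
 * until no more retries are needed.
 */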
int
xfs_break_layouts(
	struct inode		*inode,
	uint			*iolock,
	enum layout_break_reason reason)
{
	bool			retry;
	int			error;

	ASSERT(xfs_isilocked(XFS_I(inode), XFS_IOLOCK_SHARED|XFS_IOLOCK_EXCL));

	do {
		retry = false;
		switch (reason) {
		case BREAK_UNMAP:
			error = xfs_break_dax_layouts(inode, &retry);
			if (error || retry)
				break;
			/* fall through */
		case BREAK_WRITE:
			error = xfs_break_leased_layouts(inode, iolock, &retry);
			break;
		default:
			WARN_ON_ONCE(1);
			error = -EINVAL;
		}
	} while (error == 0 && retry);

	return error;
}

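/*
 * fallocate() modes implemented by XFS; anything outside this set is rejected
 * with -EOPNOTSUPP before any locks are taken.
 */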
#define	XFS_FALLOC_FL_SUPPORTED						\
		(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |		\
		 FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_ZERO_RANGE |	\
		 FALLOC_FL_INSERT_RANGE | FALLOC_FL_UNSHARE_RANGE)

STATIC long
xfs_file_fallocate(
	struct file		*file,
	int			mode,
	loff_t			offset,
	loff_t			len)
{
	struct inode		*inode = file_inode(file);
	struct xfs_inode	*ip = XFS_I(inode);
	long			error;
	enum xfs_prealloc_flags	flags = 0;
	uint			iolock = XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL;
	loff_t			new_size = 0;
	bool			do_file_insert = false;

	if (!S_ISREG(inode->i_mode))
		return -EINVAL;
	if (mode & ~XFS_FALLOC_FL_SUPPORTED)
		return -EOPNOTSUPP;

	xfs_ilock(ip, iolock);
	error = xfs_break_layouts(inode, &iolock, BREAK_UNMAP);
	if (error)
		goto out_unlock;

	if (mode & FALLOC_FL_PUNCH_HOLE) {
		error = xfs_free_file_space(ip, offset, len);
		if (error)
			goto out_unlock;
	} else if (mode & FALLOC_FL_COLLAPSE_RANGE) {
		unsigned int blksize_mask = i_blocksize(inode) - 1;

		if (offset & blksize_mask || len & blksize_mask) {
			error = -EINVAL;
			goto out_unlock;
		}

		/*
		 * There is no need for a collapse range to overlap EOF;
		 * in that case it is effectively a truncate operation.
		 */
		if (offset + len >= i_size_read(inode)) {
			error = -EINVAL;
			goto out_unlock;
		}

		new_size = i_size_read(inode) - len;

		error = xfs_collapse_file_space(ip, offset, len);
		if (error)
			goto out_unlock;
	} else if (mode & FALLOC_FL_INSERT_RANGE) {
		unsigned int	blksize_mask = i_blocksize(inode) - 1;
		loff_t		isize = i_size_read(inode);

		if (offset & blksize_mask || len & blksize_mask) {
			error = -EINVAL;
			goto out_unlock;
		}

		/*
		 * New inode size must not exceed ->s_maxbytes, accounting for
		 * possible signed overflow.
		 */
		if (inode->i_sb->s_maxbytes - isize < len) {
			error = -EFBIG;
			goto out_unlock;
		}
		new_size = isize + len;

		/* Offset should be less than i_size */
		if (offset >= isize) {
			error = -EINVAL;
			goto out_unlock;
		}
		do_file_insert = true;
	} else {
		flags |= XFS_PREALLOC_SET;

		if (!(mode & FALLOC_FL_KEEP_SIZE) &&
		    offset + len > i_size_read(inode)) {
			new_size = offset + len;
			error = inode_newsize_ok(inode, new_size);
			if (error)
				goto out_unlock;
		}

		if (mode & FALLOC_FL_ZERO_RANGE) {
			error = xfs_zero_file_space(ip, offset, len);
		} else if (mode & FALLOC_FL_UNSHARE_RANGE) {
			error = xfs_reflink_unshare(ip, offset, len);
			if (error)
				goto out_unlock;

			if (!xfs_is_always_cow_inode(ip)) {
				error = xfs_alloc_file_space(ip, offset, len,
						XFS_BMAPI_PREALLOC);
			}
		} else {
			/*
			 * If always_cow mode we can't use preallocations and
			 * thus should not create them.
			 */
			if (xfs_is_always_cow_inode(ip)) {
				error = -EOPNOTSUPP;
				goto out_unlock;
			}

			error = xfs_alloc_file_space(ip, offset, len,
						     XFS_BMAPI_PREALLOC);
		}
		if (error)
			goto out_unlock;
	}

	if (file->f_flags & O_DSYNC)
		flags |= XFS_PREALLOC_SYNC;

	error = xfs_update_prealloc_flags(ip, flags);
	if (error)
		goto out_unlock;

	/* Change file size if needed */
	if (new_size) {
		struct iattr iattr;

		iattr.ia_valid = ATTR_SIZE;
		iattr.ia_size = new_size;
		error = xfs_vn_setattr_size(file_dentry(file), &iattr);
		if (error)
			goto out_unlock;
	}

	/*
	 * Perform hole insertion now that the file size has been
	 * updated so that if we crash during the operation we don't
	 * leave shifted extents past EOF and hence lose access to
	 * the data that is contained within them.
	 */
	if (do_file_insert)
		error = xfs_insert_file_space(ip, offset, len);

out_unlock:
	xfs_iunlock(ip, iolock);
	return error;
}

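/*
 * Remap (reflink/dedupe) a range of blocks from @file_in to @file_out,
 * carrying the copy-on-write extent size hint across when the whole source
 * file is shared into the whole destination file.
 */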
STATIC loff_t
xfs_file_remap_range(
	struct file		*file_in,
	loff_t			pos_in,
	struct file		*file_out,
	loff_t			pos_out,
	loff_t			len,
	unsigned int		remap_flags)
{
	struct inode		*inode_in = file_inode(file_in);
	struct xfs_inode	*src = XFS_I(inode_in);
	struct inode		*inode_out = file_inode(file_out);
	struct xfs_inode	*dest = XFS_I(inode_out);
	struct xfs_mount	*mp = src->i_mount;
	loff_t			remapped = 0;
	xfs_extlen_t		cowextsize;
	int			ret;

	if (remap_flags & ~(REMAP_FILE_DEDUP | REMAP_FILE_ADVISORY))
		return -EINVAL;

	if (!xfs_sb_version_hasreflink(&mp->m_sb))
		return -EOPNOTSUPP;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	/* Prepare and then clone file data. */
	ret = xfs_reflink_remap_prep(file_in, pos_in, file_out, pos_out,
			&len, remap_flags);
	if (ret < 0 || len == 0)
		return ret;

	trace_xfs_reflink_remap_range(src, pos_in, len, dest, pos_out);

	ret = xfs_reflink_remap_blocks(src, pos_in, dest, pos_out, len,
			&remapped);
	if (ret)
		goto out_unlock;

	/*
	 * Carry the cowextsize hint from src to dest if we're sharing the
	 * entire source file to the entire destination file, the source file
	 * has a cowextsize hint, and the destination file does not.
	 */
	cowextsize = 0;
	if (pos_in == 0 && len == i_size_read(inode_in) &&
	    (src->i_d.di_flags2 & XFS_DIFLAG2_COWEXTSIZE) &&
	    pos_out == 0 && len >= i_size_read(inode_out) &&
	    !(dest->i_d.di_flags2 & XFS_DIFLAG2_COWEXTSIZE))
		cowextsize = src->i_d.di_cowextsize;

	ret = xfs_reflink_update_dest(dest, pos_out + len, cowextsize,
			remap_flags);

out_unlock:
	xfs_reflink_remap_unlock(file_in, file_out);
	if (ret)
		trace_xfs_reflink_remap_range_error(dest, ret, _RET_IP_);
	return remapped > 0 ? remapped : ret;
}

STATIC int
xfs_file_open(
	struct inode	*inode,
	struct file	*file)
{
	if (!(file->f_flags & O_LARGEFILE) && i_size_read(inode) > MAX_NON_LFS)
		return -EFBIG;
	if (XFS_FORCED_SHUTDOWN(XFS_M(inode->i_sb)))
		return -EIO;
	file->f_mode |= FMODE_NOWAIT;
	return 0;
}

STATIC int
xfs_dir_open(
	struct inode	*inode,
	struct file	*file)
{
	struct xfs_inode *ip = XFS_I(inode);
	int		mode;
	int		error;

	error = xfs_file_open(inode, file);
	if (error)
		return error;

	/*
	 * If there are any blocks, read-ahead block 0 as we're almost
	 * certain to have the next operation be a read there.
	 */
	mode = xfs_ilock_data_map_shared(ip);
	if (ip->i_d.di_nextents > 0)
		error = xfs_dir3_data_readahead(ip, 0, -1);
	xfs_iunlock(ip, mode);
	return error;
}

STATIC int
xfs_file_release(
	struct inode	*inode,
	struct file	*filp)
{
	return xfs_release(XFS_I(inode));
}

STATIC int
xfs_file_readdir(
	struct file	*file,
	struct dir_context *ctx)
{
	struct inode	*inode = file_inode(file);
	xfs_inode_t	*ip = XFS_I(inode);
	size_t		bufsize;

	/*
	 * The Linux API doesn't pass the total size of the buffer we read
	 * into down to the filesystem.  With the filldir concept it's not
	 * needed for correct information, but the XFS dir2 leaf code wants
	 * an estimate of the buffer size to calculate its readahead window
	 * and size the buffers used for mapping to physical blocks.
	 *
	 * Try to give it an estimate that's good enough, maybe at some
	 * point we can change the ->readdir prototype to include the
	 * buffer size.  For now we use the current glibc buffer size.
	 */
	bufsize = (size_t)min_t(loff_t, XFS_READDIR_BUFSIZE, ip->i_d.di_size);

	return xfs_readdir(NULL, ip, ctx, bufsize);
}

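/*
 * SEEK_HOLE and SEEK_DATA are answered from the extent map via the iomap
 * seek helpers; all other whence values get the generic behaviour.
 */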
STATIC loff_t
xfs_file_llseek(
	struct file	*file,
	loff_t		offset,
	int		whence)
{
	struct inode		*inode = file->f_mapping->host;

	if (XFS_FORCED_SHUTDOWN(XFS_I(inode)->i_mount))
		return -EIO;

	switch (whence) {
	default:
		return generic_file_llseek(file, offset, whence);
	case SEEK_HOLE:
		offset = iomap_seek_hole(inode, offset, &xfs_seek_iomap_ops);
		break;
	case SEEK_DATA:
		offset = iomap_seek_data(inode, offset, &xfs_seek_iomap_ops);
		break;
	}

	if (offset < 0)
		return offset;
	return vfs_setpos(file, offset, inode->i_sb->s_maxbytes);
}

/*
 * Locking for serialisation of IO during page faults. This results in a lock
 * ordering of:
 *
 * mmap_sem (MM)
 *   sb_start_pagefault(vfs, freeze)
 *     i_mmaplock (XFS - truncate serialisation)
 *       page_lock (MM)
 *         i_lock (XFS - extent map serialisation)
 */
static vm_fault_t
__xfs_filemap_fault(
	struct vm_fault		*vmf,
	enum page_entry_size	pe_size,
	bool			write_fault)
{
	struct inode		*inode = file_inode(vmf->vma->vm_file);
	struct xfs_inode	*ip = XFS_I(inode);
	vm_fault_t		ret;

	trace_xfs_filemap_fault(ip, pe_size, write_fault);

	if (write_fault) {
		sb_start_pagefault(inode->i_sb);
		file_update_time(vmf->vma->vm_file);
	}

	xfs_ilock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
	if (IS_DAX(inode)) {
		pfn_t pfn;

		ret = dax_iomap_fault(vmf, pe_size, &pfn, NULL, &xfs_iomap_ops);
		if (ret & VM_FAULT_NEEDDSYNC)
			ret = dax_finish_sync_fault(vmf, pe_size, pfn);
	} else {
		if (write_fault)
			ret = iomap_page_mkwrite(vmf, &xfs_iomap_ops);
		else
			ret = filemap_fault(vmf);
	}
	xfs_iunlock(XFS_I(inode), XFS_MMAPLOCK_SHARED);

	if (write_fault)
		sb_end_pagefault(inode->i_sb);
	return ret;
}

static vm_fault_t
xfs_filemap_fault(
	struct vm_fault		*vmf)
{
	/* DAX can shortcut the normal fault path on write faults! */
	return __xfs_filemap_fault(vmf, PE_SIZE_PTE,
			IS_DAX(file_inode(vmf->vma->vm_file)) &&
			(vmf->flags & FAULT_FLAG_WRITE));
}

static vm_fault_t
xfs_filemap_huge_fault(
	struct vm_fault		*vmf,
	enum page_entry_size	pe_size)
{
	if (!IS_DAX(file_inode(vmf->vma->vm_file)))
		return VM_FAULT_FALLBACK;

	/* DAX can shortcut the normal fault path on write faults! */
	return __xfs_filemap_fault(vmf, pe_size,
			(vmf->flags & FAULT_FLAG_WRITE));
}

static vm_fault_t
xfs_filemap_page_mkwrite(
	struct vm_fault		*vmf)
{
	return __xfs_filemap_fault(vmf, PE_SIZE_PTE, true);
}

/*
 * pfn_mkwrite was originally intended to ensure we capture time stamp updates
 * on write faults. In reality, it needs to serialise against truncate and
 * prepare memory for writing, so handle it as a standard write fault.
 */
static vm_fault_t
xfs_filemap_pfn_mkwrite(
	struct vm_fault		*vmf)
{
	return __xfs_filemap_fault(vmf, PE_SIZE_PTE, true);
}

static const struct vm_operations_struct xfs_file_vm_ops = {
	.fault		= xfs_filemap_fault,
	.huge_fault	= xfs_filemap_huge_fault,
	.map_pages	= filemap_map_pages,
	.page_mkwrite	= xfs_filemap_page_mkwrite,
	.pfn_mkwrite	= xfs_filemap_pfn_mkwrite,
};

STATIC int
xfs_file_mmap(
	struct file	*filp,
	struct vm_area_struct *vma)
{
	struct dax_device	*dax_dev;

	dax_dev = xfs_find_daxdev_for_inode(file_inode(filp));
	/*
	 * We don't support synchronous mappings for non-DAX files and
	 * for DAX files if the underlying dax_device is not synchronous.
	 */
	if (!daxdev_mapping_supported(vma, dax_dev))
		return -EOPNOTSUPP;

	file_accessed(filp);
	vma->vm_ops = &xfs_file_vm_ops;
	if (IS_DAX(file_inode(filp)))
		vma->vm_flags |= VM_HUGEPAGE;
	return 0;
}

const struct file_operations xfs_file_operations = {
	.llseek		= xfs_file_llseek,
	.read_iter	= xfs_file_read_iter,
	.write_iter	= xfs_file_write_iter,
	.splice_read	= generic_file_splice_read,
	.splice_write	= iter_file_splice_write,
	.iopoll		= iomap_dio_iopoll,
	.unlocked_ioctl	= xfs_file_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= xfs_file_compat_ioctl,
#endif
	.mmap		= xfs_file_mmap,
	.mmap_supported_flags = MAP_SYNC,
	.open		= xfs_file_open,
	.release	= xfs_file_release,
	.fsync		= xfs_file_fsync,
	.get_unmapped_area = thp_get_unmapped_area,
	.fallocate	= xfs_file_fallocate,
	.remap_file_range = xfs_file_remap_range,
};

const struct file_operations xfs_dir_file_operations = {
	.open		= xfs_dir_open,
	.read		= generic_read_dir,
	.iterate_shared	= xfs_file_readdir,
	.llseek		= generic_file_llseek,
	.unlocked_ioctl	= xfs_file_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= xfs_file_compat_ioctl,
#endif
	.fsync		= xfs_dir_fsync,
};