// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_inode_item.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_dir2.h"
#include "xfs_dir2_priv.h"
#include "xfs_ioctl.h"
#include "xfs_trace.h"
#include "xfs_log.h"
#include "xfs_icache.h"
#include "xfs_pnfs.h"
#include "xfs_iomap.h"
#include "xfs_reflink.h"

#include <linux/falloc.h>
#include <linux/backing-dev.h>
#include <linux/mman.h>
#include <linux/fadvise.h>
#include <linux/mount.h>

static const struct vm_operations_struct xfs_file_vm_ops;

/*
 * Decide if the given file range is aligned to the size of the fundamental
 * allocation unit for the file.
 */
static bool
xfs_is_falloc_aligned(
	struct xfs_inode	*ip,
	loff_t			pos,
	long long int		len)
{
	struct xfs_mount	*mp = ip->i_mount;
	uint64_t		mask;

	if (XFS_IS_REALTIME_INODE(ip)) {
		if (!is_power_of_2(mp->m_sb.sb_rextsize)) {
			u64	rextbytes;
			u32	mod;

			rextbytes = XFS_FSB_TO_B(mp, mp->m_sb.sb_rextsize);
			div_u64_rem(pos, rextbytes, &mod);
			if (mod)
				return false;
			div_u64_rem(len, rextbytes, &mod);
			return mod == 0;
		}
		mask = XFS_FSB_TO_B(mp, mp->m_sb.sb_rextsize) - 1;
	} else {
		mask = mp->m_sb.sb_blocksize - 1;
	}

	return !((pos | len) & mask);
}

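/*
 * Update the XFS_DIFLAG_PREALLOC flag on the inode in its own transaction.
 * Unless the caller asks for an invisible update, this also strips the
 * suid/sgid bits and bumps the timestamps, and the commit is made
 * synchronous when XFS_PREALLOC_SYNC is set.
 */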
int
xfs_update_prealloc_flags(
	struct xfs_inode	*ip,
	enum xfs_prealloc_flags	flags)
{
	struct xfs_trans	*tp;
	int			error;

	error = xfs_trans_alloc(ip->i_mount, &M_RES(ip->i_mount)->tr_writeid,
			0, 0, 0, &tp);
	if (error)
		return error;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);

	if (!(flags & XFS_PREALLOC_INVISIBLE)) {
		VFS_I(ip)->i_mode &= ~S_ISUID;
		if (VFS_I(ip)->i_mode & S_IXGRP)
			VFS_I(ip)->i_mode &= ~S_ISGID;
		xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
	}

	if (flags & XFS_PREALLOC_SET)
		ip->i_diflags |= XFS_DIFLAG_PREALLOC;
	if (flags & XFS_PREALLOC_CLEAR)
		ip->i_diflags &= ~XFS_DIFLAG_PREALLOC;

	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
	if (flags & XFS_PREALLOC_SYNC)
		xfs_trans_set_sync(tp);
	return xfs_trans_commit(tp);
}

/*
 * Fsync operations on directories are much simpler than on regular files,
 * as there is no file data to flush, and thus also no need for explicit
 * cache flush operations, and there are no non-transaction metadata updates
 * on directories either.
 */
STATIC int
xfs_dir_fsync(
	struct file		*file,
	loff_t			start,
	loff_t			end,
	int			datasync)
{
	struct xfs_inode	*ip = XFS_I(file->f_mapping->host);

	trace_xfs_dir_fsync(ip);
	return xfs_log_force_inode(ip);
}

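/*
 * Return the LSN that the log needs to be forced to for this fsync, or 0 if
 * the inode is not pinned or, for fdatasync, if only the timestamps have
 * been modified in the log.
 */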
static xfs_lsn_t
xfs_fsync_lsn(
	struct xfs_inode	*ip,
	bool			datasync)
{
	if (!xfs_ipincount(ip))
		return 0;
	if (datasync && !(ip->i_itemp->ili_fsync_fields & ~XFS_ILOG_TIMESTAMP))
		return 0;
	return ip->i_itemp->ili_last_lsn;
}

/*
 * All metadata updates are logged, which means that we just have to flush the
 * log up to the latest LSN that touched the inode.
 *
 * If we have concurrent fsync/fdatasync() calls, we need them to all block on
 * the log force before we clear the ili_fsync_fields field. This ensures that
 * we don't get a racing sync operation that does not wait for the metadata to
 * hit the journal before returning.  If we race with clearing ili_fsync_fields,
 * then all that will happen is the log force will do nothing as the lsn will
 * already be on disk.  We can't race with setting ili_fsync_fields because that
 * is done under XFS_ILOCK_EXCL, and that can't happen because we hold the lock
 * shared until after the ili_fsync_fields is cleared.
 */
static  int
xfs_fsync_flush_log(
	struct xfs_inode	*ip,
	bool			datasync,
	int			*log_flushed)
{
	int			error = 0;
	xfs_lsn_t		lsn;

	xfs_ilock(ip, XFS_ILOCK_SHARED);
	lsn = xfs_fsync_lsn(ip, datasync);
	if (lsn) {
		error = xfs_log_force_lsn(ip->i_mount, lsn, XFS_LOG_SYNC,
					  log_flushed);

		spin_lock(&ip->i_itemp->ili_lock);
		ip->i_itemp->ili_fsync_fields = 0;
		spin_unlock(&ip->i_itemp->ili_lock);
	}
	xfs_iunlock(ip, XFS_ILOCK_SHARED);
	return error;
}

STATIC int
xfs_file_fsync(
	struct file		*file,
	loff_t			start,
	loff_t			end,
	int			datasync)
{
	struct xfs_inode	*ip = XFS_I(file->f_mapping->host);
	struct xfs_mount	*mp = ip->i_mount;
	int			error = 0;
	int			log_flushed = 0;

	trace_xfs_file_fsync(ip);

	error = file_write_and_wait_range(file, start, end);
	if (error)
		return error;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	xfs_iflags_clear(ip, XFS_ITRUNCATED);

	/*
	 * If we have an RT and/or log subvolume we need to make sure to flush
	 * the write cache the device used for file data first.  This is to
	 * ensure newly written file data make it to disk before logging the new
	 * inode size in case of an extending write.
	 */
	if (XFS_IS_REALTIME_INODE(ip))
		xfs_blkdev_issue_flush(mp->m_rtdev_targp);
	else if (mp->m_logdev_targp != mp->m_ddev_targp)
		xfs_blkdev_issue_flush(mp->m_ddev_targp);

	/*
	 * Any inode that has dirty modifications in the log is pinned.  The
	 * racy check here for a pinned inode will not catch modifications
	 * that happen concurrently to the fsync call, but fsync semantics
	 * only require syncing previously completed I/O.
	 */
	if (xfs_ipincount(ip))
		error = xfs_fsync_flush_log(ip, datasync, &log_flushed);

	/*
	 * If we only have a single device, and the log force above was
	 * a no-op we might have to flush the data device cache here.
	 * This can only happen for fdatasync/O_DSYNC if we were overwriting
	 * an already allocated file and thus do not have any metadata to
	 * commit.
	 */
	if (!log_flushed && !XFS_IS_REALTIME_INODE(ip) &&
	    mp->m_logdev_targp == mp->m_ddev_targp)
		xfs_blkdev_issue_flush(mp->m_ddev_targp);

	return error;
}

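/*
 * Take the inode lock for an iocb, honouring IOCB_NOWAIT by trylocking and
 * returning -EAGAIN rather than sleeping on the lock.
 */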
static int
xfs_ilock_iocb(
	struct kiocb		*iocb,
	unsigned int		lock_mode)
{
	struct xfs_inode	*ip = XFS_I(file_inode(iocb->ki_filp));

	if (iocb->ki_flags & IOCB_NOWAIT) {
		if (!xfs_ilock_nowait(ip, lock_mode))
			return -EAGAIN;
	} else {
		xfs_ilock(ip, lock_mode);
	}

	return 0;
}

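/*
 * Direct I/O reads only need the IOLOCK held shared to serialise against
 * truncate and other layout changes while the read is in flight.
 */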
STATIC ssize_t
xfs_file_dio_read(
	struct kiocb		*iocb,
	struct iov_iter		*to)
{
	struct xfs_inode	*ip = XFS_I(file_inode(iocb->ki_filp));
	ssize_t			ret;

	trace_xfs_file_direct_read(iocb, to);

	if (!iov_iter_count(to))
		return 0; /* skip atime */

	file_accessed(iocb->ki_filp);

	ret = xfs_ilock_iocb(iocb, XFS_IOLOCK_SHARED);
	if (ret)
		return ret;
	ret = iomap_dio_rw(iocb, to, &xfs_read_iomap_ops, NULL, 0);
	xfs_iunlock(ip, XFS_IOLOCK_SHARED);

	return ret;
}

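/*
 * DAX reads copy straight from the backing storage under the shared IOLOCK,
 * with no page cache involved.
 */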
static noinline ssize_t
xfs_file_dax_read(
	struct kiocb		*iocb,
	struct iov_iter		*to)
{
	struct xfs_inode	*ip = XFS_I(iocb->ki_filp->f_mapping->host);
	ssize_t			ret = 0;

	trace_xfs_file_dax_read(iocb, to);

	if (!iov_iter_count(to))
		return 0; /* skip atime */

	ret = xfs_ilock_iocb(iocb, XFS_IOLOCK_SHARED);
	if (ret)
		return ret;
	ret = dax_iomap_rw(iocb, to, &xfs_read_iomap_ops);
	xfs_iunlock(ip, XFS_IOLOCK_SHARED);

	file_accessed(iocb->ki_filp);
	return ret;
}

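/*
 * Buffered reads go through the generic page cache read path, taking the
 * IOLOCK shared around it to serialise against truncate.
 */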
STATIC ssize_t
xfs_file_buffered_read(
	struct kiocb		*iocb,
	struct iov_iter		*to)
{
	struct xfs_inode	*ip = XFS_I(file_inode(iocb->ki_filp));
	ssize_t			ret;

	trace_xfs_file_buffered_read(iocb, to);

	ret = xfs_ilock_iocb(iocb, XFS_IOLOCK_SHARED);
	if (ret)
		return ret;
	ret = generic_file_read_iter(iocb, to);
	xfs_iunlock(ip, XFS_IOLOCK_SHARED);

	return ret;
}

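/*
 * Top of the read path: dispatch to the DAX, direct or buffered read
 * implementation based on the inode and iocb flags.
 */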
STATIC ssize_t
xfs_file_read_iter(
	struct kiocb		*iocb,
	struct iov_iter		*to)
{
	struct inode		*inode = file_inode(iocb->ki_filp);
	struct xfs_mount	*mp = XFS_I(inode)->i_mount;
	ssize_t			ret = 0;

	XFS_STATS_INC(mp, xs_read_calls);

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	if (IS_DAX(inode))
		ret = xfs_file_dax_read(iocb, to);
	else if (iocb->ki_flags & IOCB_DIRECT)
		ret = xfs_file_dio_read(iocb, to);
	else
		ret = xfs_file_buffered_read(iocb, to);

	if (ret > 0)
		XFS_STATS_ADD(mp, xs_read_bytes, ret);
	return ret;
}

/*
 * Common pre-write limit and setup checks.
 *
 * Called with the iolock held either shared or exclusive according to
 * @iolock, and returns with it held.  Might upgrade the iolock to exclusive
 * if called for a direct write beyond i_size.
 */
STATIC ssize_t
xfs_file_write_checks(
	struct kiocb		*iocb,
	struct iov_iter		*from,
	int			*iolock)
{
	struct file		*file = iocb->ki_filp;
	struct inode		*inode = file->f_mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	ssize_t			error = 0;
	size_t			count = iov_iter_count(from);
	bool			drained_dio = false;
	loff_t			isize;

restart:
	error = generic_write_checks(iocb, from);
	if (error <= 0)
		return error;

	if (iocb->ki_flags & IOCB_NOWAIT) {
		error = break_layout(inode, false);
		if (error == -EWOULDBLOCK)
			error = -EAGAIN;
	} else {
		error = xfs_break_layouts(inode, iolock, BREAK_WRITE);
	}

	if (error)
		return error;

	/*
	 * For changing security info in file_remove_privs() we need i_rwsem
	 * exclusively.
	 */
	if (*iolock == XFS_IOLOCK_SHARED && !IS_NOSEC(inode)) {
		xfs_iunlock(ip, *iolock);
		*iolock = XFS_IOLOCK_EXCL;
		error = xfs_ilock_iocb(iocb, *iolock);
		if (error) {
			*iolock = 0;
			return error;
		}
		goto restart;
	}

	/*
	 * If the offset is beyond the size of the file, we need to zero any
	 * blocks that fall between the existing EOF and the start of this
	 * write.  If zeroing is needed and we are currently holding the iolock
	 * shared, we need to update it to exclusive which implies having to
	 * redo all checks before.
	 *
	 * We need to serialise against EOF updates that occur in IO completions
	 * here. We want to make sure that nobody is changing the size while we
	 * do this check until we have placed an IO barrier (i.e.  hold the
	 * XFS_IOLOCK_EXCL) that prevents new IO from being dispatched.  The
	 * spinlock effectively forms a memory barrier once we have the
	 * XFS_IOLOCK_EXCL so we are guaranteed to see the latest EOF value and
	 * hence be able to correctly determine if we need to run zeroing.
	 *
	 * We can do an unlocked check here safely as IO completion can only
	 * extend EOF. Truncate is locked out at this point, so the EOF can
	 * not move backwards, only forwards. Hence we only need to take the
	 * slow path and spin locks when we are at or beyond the current EOF.
	 */
	if (iocb->ki_pos <= i_size_read(inode))
		goto out;

	spin_lock(&ip->i_flags_lock);
	isize = i_size_read(inode);
	if (iocb->ki_pos > isize) {
		spin_unlock(&ip->i_flags_lock);

		if (iocb->ki_flags & IOCB_NOWAIT)
			return -EAGAIN;

		if (!drained_dio) {
			if (*iolock == XFS_IOLOCK_SHARED) {
				xfs_iunlock(ip, *iolock);
				*iolock = XFS_IOLOCK_EXCL;
				xfs_ilock(ip, *iolock);
				iov_iter_reexpand(from, count);
			}
			/*
			 * We now have an IO submission barrier in place, but
			 * AIO can do EOF updates during IO completion and hence
			 * we now need to wait for all of them to drain. Non-AIO
			 * DIO will have drained before we are given the
			 * XFS_IOLOCK_EXCL, and so for most cases this wait is a
			 * no-op.
			 */
			inode_dio_wait(inode);
			drained_dio = true;
			goto restart;
		}

		trace_xfs_zero_eof(ip, isize, iocb->ki_pos - isize);
		error = iomap_zero_range(inode, isize, iocb->ki_pos - isize,
				NULL, &xfs_buffered_write_iomap_ops);
		if (error)
			return error;
	} else
		spin_unlock(&ip->i_flags_lock);

out:
	return file_modified(file);
}

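/*
 * Direct write completion: finish any COW remapping, convert unwritten
 * extents, and update the in-core and on-disk inode sizes for extending
 * writes.
 */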
static int
xfs_dio_write_end_io(
	struct kiocb		*iocb,
	ssize_t			size,
	int			error,
	unsigned		flags)
{
	struct inode		*inode = file_inode(iocb->ki_filp);
	struct xfs_inode	*ip = XFS_I(inode);
	loff_t			offset = iocb->ki_pos;
	unsigned int		nofs_flag;

	trace_xfs_end_io_direct_write(ip, offset, size);

	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
		return -EIO;

	if (error)
		return error;
	if (!size)
		return 0;

	/*
	 * Capture amount written on completion as we can't reliably account
	 * for it on submission.
	 */
	XFS_STATS_ADD(ip->i_mount, xs_write_bytes, size);

	/*
	 * We can allocate memory here while doing writeback on behalf of
	 * memory reclaim.  To avoid memory allocation deadlocks set the
	 * task-wide nofs context for the following operations.
	 */
	nofs_flag = memalloc_nofs_save();

	if (flags & IOMAP_DIO_COW) {
		error = xfs_reflink_end_cow(ip, offset, size);
		if (error)
			goto out;
	}

	/*
	 * Unwritten conversion updates the in-core isize after extent
	 * conversion but before updating the on-disk size. Updating isize any
	 * earlier allows a racing dio read to find unwritten extents before
	 * they are converted.
	 */
	if (flags & IOMAP_DIO_UNWRITTEN) {
		error = xfs_iomap_write_unwritten(ip, offset, size, true);
		goto out;
	}

	/*
	 * We need to update the in-core inode size here so that we don't end up
	 * with the on-disk inode size being outside the in-core inode size. We
	 * have no other method of updating EOF for AIO, so always do it here
	 * if necessary.
	 *
	 * We need to lock the test/set EOF update as we can be racing with
	 * other IO completions here to update the EOF. Failing to serialise
	 * here can result in EOF moving backwards and Bad Things Happen when
	 * that occurs.
	 *
	 * As IO completion only ever extends EOF, we can do an unlocked check
	 * here to avoid taking the spinlock. If we land within the current EOF,
	 * then we do not need to do an extending update at all, and we don't
	 * need to take the lock to check this. If we race with an update moving
	 * EOF, then we'll either still be beyond EOF and need to take the lock,
	 * or we'll be within EOF and we don't need to take it at all.
	 */
	if (offset + size <= i_size_read(inode))
		goto out;

	spin_lock(&ip->i_flags_lock);
	if (offset + size > i_size_read(inode)) {
		i_size_write(inode, offset + size);
		spin_unlock(&ip->i_flags_lock);
		error = xfs_setfilesize(ip, offset, size);
	} else {
		spin_unlock(&ip->i_flags_lock);
	}

out:
	memalloc_nofs_restore(nofs_flag);
	return error;
}

static const struct iomap_dio_ops xfs_dio_write_ops = {
	.end_io		= xfs_dio_write_end_io,
};

/*
 * Handle block aligned direct I/O writes
 */
static noinline ssize_t
xfs_file_dio_write_aligned(
	struct xfs_inode	*ip,
	struct kiocb		*iocb,
	struct iov_iter		*from)
{
	int			iolock = XFS_IOLOCK_SHARED;
	ssize_t			ret;

	ret = xfs_ilock_iocb(iocb, iolock);
	if (ret)
		return ret;
	ret = xfs_file_write_checks(iocb, from, &iolock);
	if (ret)
		goto out_unlock;

	/*
	 * We don't need to hold the IOLOCK exclusively across the IO, so demote
	 * the iolock back to shared if we had to take the exclusive lock in
	 * xfs_file_write_checks() for other reasons.
	 */
	if (iolock == XFS_IOLOCK_EXCL) {
		xfs_ilock_demote(ip, XFS_IOLOCK_EXCL);
		iolock = XFS_IOLOCK_SHARED;
	}
	trace_xfs_file_direct_write(iocb, from);
	ret = iomap_dio_rw(iocb, from, &xfs_direct_write_iomap_ops,
			   &xfs_dio_write_ops, 0);
out_unlock:
	if (iolock)
		xfs_iunlock(ip, iolock);
	return ret;
}

/*
 * Handle block unaligned direct I/O writes
 *
 * In most cases direct I/O writes will be done holding IOLOCK_SHARED, allowing
 * them to be done in parallel with reads and other direct I/O writes.  However,
 * if the I/O is not aligned to filesystem blocks, the direct I/O layer may need
 * to do sub-block zeroing and that requires serialisation against other direct
 * I/O to the same block.  In this case we need to serialise the submission of
 * the unaligned I/O so that we don't get racing block zeroing in the dio layer.
 * In the case where sub-block zeroing is not required, we can do concurrent
 * sub-block dios to the same block successfully.
 *
 * Optimistically submit the I/O using the shared lock first, but use the
 * IOMAP_DIO_OVERWRITE_ONLY flag to tell the lower layers to return -EAGAIN
 * if block allocation or partial block zeroing would be required.  In that case
 * we try again with the exclusive lock.
 */
static noinline ssize_t
xfs_file_dio_write_unaligned(
	struct xfs_inode	*ip,
	struct kiocb		*iocb,
	struct iov_iter		*from)
{
	size_t			isize = i_size_read(VFS_I(ip));
	size_t			count = iov_iter_count(from);
	int			iolock = XFS_IOLOCK_SHARED;
	unsigned int		flags = IOMAP_DIO_OVERWRITE_ONLY;
	ssize_t			ret;

	/*
	 * Extending writes need exclusivity because of the sub-block zeroing
	 * that the DIO code always does for partial tail blocks beyond EOF, so
	 * don't even bother trying the fast path in this case.
	 */
	if (iocb->ki_pos > isize || iocb->ki_pos + count >= isize) {
retry_exclusive:
		if (iocb->ki_flags & IOCB_NOWAIT)
			return -EAGAIN;
		iolock = XFS_IOLOCK_EXCL;
		flags = IOMAP_DIO_FORCE_WAIT;
	}

	ret = xfs_ilock_iocb(iocb, iolock);
	if (ret)
		return ret;

	/*
	 * We can't properly handle unaligned direct I/O to reflink files yet,
	 * as we can't unshare a partial block.
	 */
	if (xfs_is_cow_inode(ip)) {
		trace_xfs_reflink_bounce_dio_write(iocb, from);
		ret = -ENOTBLK;
		goto out_unlock;
	}

	ret = xfs_file_write_checks(iocb, from, &iolock);
	if (ret)
		goto out_unlock;

	/*
	 * If we are doing exclusive unaligned I/O, this must be the only I/O
	 * in-flight.  Otherwise we risk data corruption due to unwritten extent
	 * conversions from the AIO end_io handler.  Wait for all other I/O to
	 * drain first.
	 */
	if (flags & IOMAP_DIO_FORCE_WAIT)
		inode_dio_wait(VFS_I(ip));

	trace_xfs_file_direct_write(iocb, from);
	ret = iomap_dio_rw(iocb, from, &xfs_direct_write_iomap_ops,
			   &xfs_dio_write_ops, flags);

	/*
	 * Retry unaligned I/O with exclusive blocking semantics if the DIO
	 * layer rejected it for mapping or locking reasons. If we are doing
	 * nonblocking user I/O, propagate the error.
	 */
	if (ret == -EAGAIN && !(iocb->ki_flags & IOCB_NOWAIT)) {
		ASSERT(flags & IOMAP_DIO_OVERWRITE_ONLY);
		xfs_iunlock(ip, iolock);
		goto retry_exclusive;
	}

out_unlock:
	if (iolock)
		xfs_iunlock(ip, iolock);
	return ret;
}

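/*
 * Top of the direct write path: reject I/O that is not aligned to the
 * device logical sector size, then dispatch to the aligned or unaligned
 * submission path based on filesystem block alignment.
 */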
static ssize_t
xfs_file_dio_write(
	struct kiocb		*iocb,
	struct iov_iter		*from)
{
	struct xfs_inode	*ip = XFS_I(file_inode(iocb->ki_filp));
	struct xfs_buftarg      *target = xfs_inode_buftarg(ip);
	size_t			count = iov_iter_count(from);

	/* direct I/O must be aligned to device logical sector size */
	if ((iocb->ki_pos | count) & target->bt_logical_sectormask)
		return -EINVAL;
	if ((iocb->ki_pos | count) & ip->i_mount->m_blockmask)
		return xfs_file_dio_write_unaligned(ip, iocb, from);
	return xfs_file_dio_write_aligned(ip, iocb, from);
}

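/*
 * DAX writes are serialised with the exclusive IOLOCK and update the file
 * size inline, as there is no I/O completion to do it for us.
 */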
static noinline ssize_t
xfs_file_dax_write(
	struct kiocb		*iocb,
	struct iov_iter		*from)
{
	struct inode		*inode = iocb->ki_filp->f_mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	int			iolock = XFS_IOLOCK_EXCL;
	ssize_t			ret, error = 0;
	loff_t			pos;

	ret = xfs_ilock_iocb(iocb, iolock);
	if (ret)
		return ret;
	ret = xfs_file_write_checks(iocb, from, &iolock);
	if (ret)
		goto out;

	pos = iocb->ki_pos;

	trace_xfs_file_dax_write(iocb, from);
	ret = dax_iomap_rw(iocb, from, &xfs_direct_write_iomap_ops);
	if (ret > 0 && iocb->ki_pos > i_size_read(inode)) {
		i_size_write(inode, iocb->ki_pos);
		error = xfs_setfilesize(ip, pos, ret);
	}
out:
	if (iolock)
		xfs_iunlock(ip, iolock);
	if (error)
		return error;

	if (ret > 0) {
		XFS_STATS_ADD(ip->i_mount, xs_write_bytes, ret);

		/* Handle various SYNC-type writes */
		ret = generic_write_sync(iocb, ret);
	}
	return ret;
}

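/*
 * Buffered writes take the IOLOCK exclusive and, on EDQUOT or ENOSPC, retry
 * once after trying to reclaim preallocated space.
 */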
STATIC ssize_t
xfs_file_buffered_write(
	struct kiocb		*iocb,
	struct iov_iter		*from)
{
	struct file		*file = iocb->ki_filp;
	struct address_space	*mapping = file->f_mapping;
	struct inode		*inode = mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	ssize_t			ret;
	bool			cleared_space = false;
	int			iolock;

	if (iocb->ki_flags & IOCB_NOWAIT)
		return -EOPNOTSUPP;

write_retry:
	iolock = XFS_IOLOCK_EXCL;
	xfs_ilock(ip, iolock);

	ret = xfs_file_write_checks(iocb, from, &iolock);
	if (ret)
		goto out;

	/* We can write back this queue in page reclaim */
	current->backing_dev_info = inode_to_bdi(inode);

	trace_xfs_file_buffered_write(iocb, from);
	ret = iomap_file_buffered_write(iocb, from,
			&xfs_buffered_write_iomap_ops);
	if (likely(ret >= 0))
		iocb->ki_pos += ret;

	/*
	 * If we hit a space limit, try to free up some lingering preallocated
	 * space before returning an error. In the case of ENOSPC, first try to
	 * write back all dirty inodes to free up some of the excess reserved
	 * metadata space. This reduces the chances that the eofblocks scan
	 * waits on dirty mappings. Since xfs_flush_inodes() is serialized, this
	 * also behaves as a filter to prevent too many eofblocks scans from
	 * running at the same time.  Use a synchronous scan to increase the
	 * effectiveness of the scan.
	 */
	if (ret == -EDQUOT && !cleared_space) {
		xfs_iunlock(ip, iolock);
		xfs_blockgc_free_quota(ip, XFS_ICWALK_FLAG_SYNC);
		cleared_space = true;
		goto write_retry;
	} else if (ret == -ENOSPC && !cleared_space) {
		struct xfs_eofblocks eofb = {0};

		cleared_space = true;
		xfs_flush_inodes(ip->i_mount);

		xfs_iunlock(ip, iolock);
		eofb.eof_flags = XFS_ICWALK_FLAG_SYNC;
		xfs_blockgc_free_space(ip->i_mount, &eofb);
		goto write_retry;
	}

	current->backing_dev_info = NULL;
out:
	if (iolock)
		xfs_iunlock(ip, iolock);

	if (ret > 0) {
		XFS_STATS_ADD(ip->i_mount, xs_write_bytes, ret);
		/* Handle various SYNC-type writes */
		ret = generic_write_sync(iocb, ret);
	}
	return ret;
}

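/*
 * Top of the write path: dispatch to the DAX, direct or buffered write
 * implementation.  Direct writes may fall back to buffered only for
 * reflink COW.
 */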
STATIC ssize_t
xfs_file_write_iter(
	struct kiocb		*iocb,
	struct iov_iter		*from)
{
	struct file		*file = iocb->ki_filp;
	struct address_space	*mapping = file->f_mapping;
	struct inode		*inode = mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	ssize_t			ret;
	size_t			ocount = iov_iter_count(from);

	XFS_STATS_INC(ip->i_mount, xs_write_calls);

	if (ocount == 0)
		return 0;

	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
		return -EIO;

	if (IS_DAX(inode))
		return xfs_file_dax_write(iocb, from);

	if (iocb->ki_flags & IOCB_DIRECT) {
		/*
		 * Allow a directio write to fall back to a buffered
		 * write *only* in the case that we're doing a reflink
		 * CoW.  In all other directio scenarios we do not
		 * allow an operation to fall back to buffered mode.
		 */
		ret = xfs_file_dio_write(iocb, from);
		if (ret != -ENOTBLK)
			return ret;
	}

	return xfs_file_buffered_write(iocb, from);
}

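/*
 * Drop the MMAPLOCK while we wait for a busy DAX page to be released so the
 * holder of the page reference can make progress.
 */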
static void
xfs_wait_dax_page(
	struct inode		*inode)
{
	struct xfs_inode        *ip = XFS_I(inode);

	xfs_iunlock(ip, XFS_MMAPLOCK_EXCL);
	schedule();
	xfs_ilock(ip, XFS_MMAPLOCK_EXCL);
}

static int
xfs_break_dax_layouts(
	struct inode		*inode,
	bool			*retry)
{
	struct page		*page;

	ASSERT(xfs_isilocked(XFS_I(inode), XFS_MMAPLOCK_EXCL));

	page = dax_layout_busy_page(inode->i_mapping);
	if (!page)
		return 0;

	*retry = true;
	return ___wait_var_event(&page->_refcount,
			atomic_read(&page->_refcount) == 1, TASK_INTERRUPTIBLE,
			0, 0, xfs_wait_dax_page(inode));
}

int
xfs_break_layouts(
	struct inode		*inode,
	uint			*iolock,
	enum layout_break_reason reason)
{
	bool			retry;
	int			error;

	ASSERT(xfs_isilocked(XFS_I(inode), XFS_IOLOCK_SHARED|XFS_IOLOCK_EXCL));

	do {
		retry = false;
		switch (reason) {
		case BREAK_UNMAP:
			error = xfs_break_dax_layouts(inode, &retry);
			if (error || retry)
				break;
			/* fall through */
		case BREAK_WRITE:
			error = xfs_break_leased_layouts(inode, iolock, &retry);
			break;
		default:
			WARN_ON_ONCE(1);
			error = -EINVAL;
		}
	} while (error == 0 && retry);

	return error;
}

#define	XFS_FALLOC_FL_SUPPORTED						\
		(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |		\
		 FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_ZERO_RANGE |	\
		 FALLOC_FL_INSERT_RANGE | FALLOC_FL_UNSHARE_RANGE)

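/*
 * Implement the fallocate() space manipulation modes: preallocation, hole
 * punching, zeroing, unsharing, and collapse/insert range.
 */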
STATIC long
xfs_file_fallocate(
	struct file		*file,
	int			mode,
	loff_t			offset,
	loff_t			len)
{
	struct inode		*inode = file_inode(file);
	struct xfs_inode	*ip = XFS_I(inode);
	long			error;
	enum xfs_prealloc_flags	flags = 0;
	uint			iolock = XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL;
	loff_t			new_size = 0;
	bool			do_file_insert = false;

	if (!S_ISREG(inode->i_mode))
		return -EINVAL;
	if (mode & ~XFS_FALLOC_FL_SUPPORTED)
		return -EOPNOTSUPP;

	xfs_ilock(ip, iolock);
	error = xfs_break_layouts(inode, &iolock, BREAK_UNMAP);
	if (error)
		goto out_unlock;

	/*
	 * Must wait for all AIO to complete before we continue as AIO can
	 * change the file size on completion without holding any locks we
	 * currently hold. We must do this first because AIO can update both
	 * the on disk and in memory inode sizes, and the operations that follow
	 * require the in-memory size to be fully up-to-date.
	 */
	inode_dio_wait(inode);

	/*
	 * Now AIO and DIO have drained we flush and (if necessary) invalidate
	 * the cached range over the first operation we are about to run.
	 *
	 * We care about zero and collapse here because they both run a hole
	 * punch over the range first. Because that can zero data, and the range
	 * of invalidation for the shift operations is much larger, we still do
	 * the required flush for collapse in xfs_prepare_shift().
	 *
	 * Insert has the same range requirements as collapse, and we extend the
	 * file first which can zero data. Hence insert has the same
	 * flush/invalidate requirements as collapse and so they are both
	 * handled at the right time by xfs_prepare_shift().
	 */
	if (mode & (FALLOC_FL_PUNCH_HOLE | FALLOC_FL_ZERO_RANGE |
		    FALLOC_FL_COLLAPSE_RANGE)) {
		error = xfs_flush_unmap_range(ip, offset, len);
		if (error)
			goto out_unlock;
	}

	if (mode & FALLOC_FL_PUNCH_HOLE) {
		error = xfs_free_file_space(ip, offset, len);
		if (error)
			goto out_unlock;
	} else if (mode & FALLOC_FL_COLLAPSE_RANGE) {
		if (!xfs_is_falloc_aligned(ip, offset, len)) {
			error = -EINVAL;
			goto out_unlock;
		}

		/*
		 * There is no need to overlap collapse range with EOF,
		 * in which case it is effectively a truncate operation
		 */
		if (offset + len >= i_size_read(inode)) {
			error = -EINVAL;
			goto out_unlock;
		}

		new_size = i_size_read(inode) - len;

		error = xfs_collapse_file_space(ip, offset, len);
		if (error)
			goto out_unlock;
	} else if (mode & FALLOC_FL_INSERT_RANGE) {
		loff_t		isize = i_size_read(inode);

		if (!xfs_is_falloc_aligned(ip, offset, len)) {
			error = -EINVAL;
			goto out_unlock;
		}

		/*
		 * New inode size must not exceed ->s_maxbytes, accounting for
		 * possible signed overflow.
		 */
		if (inode->i_sb->s_maxbytes - isize < len) {
			error = -EFBIG;
			goto out_unlock;
		}
		new_size = isize + len;

		/* Offset should be less than i_size */
		if (offset >= isize) {
			error = -EINVAL;
			goto out_unlock;
		}
		do_file_insert = true;
	} else {
		flags |= XFS_PREALLOC_SET;

		if (!(mode & FALLOC_FL_KEEP_SIZE) &&
		    offset + len > i_size_read(inode)) {
			new_size = offset + len;
			error = inode_newsize_ok(inode, new_size);
			if (error)
				goto out_unlock;
		}

		if (mode & FALLOC_FL_ZERO_RANGE) {
			/*
			 * Punch a hole and prealloc the range.  We use a hole
			 * punch rather than unwritten extent conversion for two
			 * reasons:
			 *
			 *   1.) Hole punch handles partial block zeroing for us.
			 *   2.) If prealloc returns ENOSPC, the file range is
			 *       still zero-valued by virtue of the hole punch.
			 */
			unsigned int blksize = i_blocksize(inode);

			trace_xfs_zero_file_space(ip);

			error = xfs_free_file_space(ip, offset, len);
			if (error)
				goto out_unlock;

			len = round_up(offset + len, blksize) -
			      round_down(offset, blksize);
			offset = round_down(offset, blksize);
		} else if (mode & FALLOC_FL_UNSHARE_RANGE) {
			error = xfs_reflink_unshare(ip, offset, len);
			if (error)
				goto out_unlock;
		} else {
			/*
			 * In always_cow mode we can't use preallocations and
			 * thus should not create them.
			 */
			if (xfs_is_always_cow_inode(ip)) {
				error = -EOPNOTSUPP;
				goto out_unlock;
			}
		}

		if (!xfs_is_always_cow_inode(ip)) {
			error = xfs_alloc_file_space(ip, offset, len,
						     XFS_BMAPI_PREALLOC);
			if (error)
				goto out_unlock;
		}
	}

	if (file->f_flags & O_DSYNC)
		flags |= XFS_PREALLOC_SYNC;

	error = xfs_update_prealloc_flags(ip, flags);
	if (error)
		goto out_unlock;

	/* Change file size if needed */
	if (new_size) {
		struct iattr iattr;

		iattr.ia_valid = ATTR_SIZE;
		iattr.ia_size = new_size;
		error = xfs_vn_setattr_size(file_mnt_user_ns(file),
					    file_dentry(file), &iattr);
		if (error)
			goto out_unlock;
	}

	/*
	 * Perform hole insertion now that the file size has been
	 * updated so that if we crash during the operation we don't
	 * leave shifted extents past EOF and hence lose access to
	 * the data that is contained within them.
	 */
	if (do_file_insert)
		error = xfs_insert_file_space(ip, offset, len);

out_unlock:
	xfs_iunlock(ip, iolock);
	return error;
}

STATIC int
xfs_file_fadvise(
	struct file	*file,
	loff_t		start,
	loff_t		end,
	int		advice)
{
	struct xfs_inode *ip = XFS_I(file_inode(file));
	int ret;
	int lockflags = 0;

	/*
	 * Operations creating pages in page cache need protection from hole
	 * punching and similar ops
	 */
	if (advice == POSIX_FADV_WILLNEED) {
		lockflags = XFS_IOLOCK_SHARED;
		xfs_ilock(ip, lockflags);
	}
	ret = generic_fadvise(file, start, end, advice);
	if (lockflags)
		xfs_iunlock(ip, lockflags);
	return ret;
}

/* Does this file, inode, or mount want synchronous writes? */
static inline bool xfs_file_sync_writes(struct file *filp)
{
	struct xfs_inode	*ip = XFS_I(file_inode(filp));

	if (ip->i_mount->m_flags & XFS_MOUNT_WSYNC)
		return true;
	if (filp->f_flags & (__O_SYNC | O_DSYNC))
		return true;
	if (IS_SYNC(file_inode(filp)))
		return true;

	return false;
}

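/*
 * Remap (reflink or dedupe) a range of blocks from one file to another,
 * copying the COW extent size hint to the destination when the whole of
 * both files is remapped.
 */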
STATIC loff_t
xfs_file_remap_range(
	struct file		*file_in,
	loff_t			pos_in,
	struct file		*file_out,
	loff_t			pos_out,
	loff_t			len,
	unsigned int		remap_flags)
{
	struct inode		*inode_in = file_inode(file_in);
	struct xfs_inode	*src = XFS_I(inode_in);
	struct inode		*inode_out = file_inode(file_out);
	struct xfs_inode	*dest = XFS_I(inode_out);
	struct xfs_mount	*mp = src->i_mount;
	loff_t			remapped = 0;
	xfs_extlen_t		cowextsize;
	int			ret;

	if (remap_flags & ~(REMAP_FILE_DEDUP | REMAP_FILE_ADVISORY))
		return -EINVAL;

	if (!xfs_sb_version_hasreflink(&mp->m_sb))
		return -EOPNOTSUPP;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	/* Prepare and then clone file data. */
	ret = xfs_reflink_remap_prep(file_in, pos_in, file_out, pos_out,
			&len, remap_flags);
	if (ret || len == 0)
		return ret;

	trace_xfs_reflink_remap_range(src, pos_in, len, dest, pos_out);

	ret = xfs_reflink_remap_blocks(src, pos_in, dest, pos_out, len,
			&remapped);
	if (ret)
		goto out_unlock;

	/*
	 * Carry the cowextsize hint from src to dest if we're sharing the
	 * entire source file to the entire destination file, the source file
	 * has a cowextsize hint, and the destination file does not.
	 */
	cowextsize = 0;
	if (pos_in == 0 && len == i_size_read(inode_in) &&
	    (src->i_diflags2 & XFS_DIFLAG2_COWEXTSIZE) &&
	    pos_out == 0 && len >= i_size_read(inode_out) &&
	    !(dest->i_diflags2 & XFS_DIFLAG2_COWEXTSIZE))
		cowextsize = src->i_cowextsize;

	ret = xfs_reflink_update_dest(dest, pos_out + len, cowextsize,
			remap_flags);
	if (ret)
		goto out_unlock;

	if (xfs_file_sync_writes(file_in) || xfs_file_sync_writes(file_out))
		xfs_log_force_inode(dest);
out_unlock:
	xfs_iunlock2_io_mmap(src, dest);
	if (ret)
		trace_xfs_reflink_remap_range_error(dest, ret, _RET_IP_);
	return remapped > 0 ? remapped : ret;
}

STATIC int
xfs_file_open(
	struct inode	*inode,
	struct file	*file)
{
	if (!(file->f_flags & O_LARGEFILE) && i_size_read(inode) > MAX_NON_LFS)
		return -EFBIG;
	if (XFS_FORCED_SHUTDOWN(XFS_M(inode->i_sb)))
		return -EIO;
	file->f_mode |= FMODE_NOWAIT | FMODE_BUF_RASYNC;
	return 0;
}

STATIC int
xfs_dir_open(
	struct inode	*inode,
	struct file	*file)
{
	struct xfs_inode *ip = XFS_I(inode);
	int		mode;
	int		error;

	error = xfs_file_open(inode, file);
	if (error)
		return error;

	/*
	 * If there are any blocks, read-ahead block 0 as we're almost
	 * certain to have the next operation be a read there.
	 */
	mode = xfs_ilock_data_map_shared(ip);
	if (ip->i_df.if_nextents > 0)
		error = xfs_dir3_data_readahead(ip, 0, 0);
	xfs_iunlock(ip, mode);
	return error;
}

STATIC int
xfs_file_release(
	struct inode	*inode,
	struct file	*filp)
{
	return xfs_release(XFS_I(inode));
}

STATIC int
xfs_file_readdir(
	struct file	*file,
	struct dir_context *ctx)
{
	struct inode	*inode = file_inode(file);
	xfs_inode_t	*ip = XFS_I(inode);
	size_t		bufsize;

	/*
	 * The Linux API doesn't pass the total size of the buffer we read
	 * into down to the filesystem.  With the filldir concept it's not
	 * needed for correct information, but the XFS dir2 leaf code wants
	 * an estimate of the buffer size to calculate its readahead window
	 * and size the buffers used for mapping to physical blocks.
	 *
	 * Try to give it an estimate that's good enough, maybe at some
	 * point we can change the ->readdir prototype to include the
	 * buffer size.  For now we use the current glibc buffer size.
	 */
	bufsize = (size_t)min_t(loff_t, XFS_READDIR_BUFSIZE, ip->i_disk_size);

	return xfs_readdir(NULL, ip, ctx, bufsize);
}

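/*
 * Seek, with SEEK_HOLE and SEEK_DATA implemented on top of the iomap seek
 * operations.
 */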
STATIC loff_t
xfs_file_llseek(
	struct file	*file,
	loff_t		offset,
	int		whence)
{
	struct inode		*inode = file->f_mapping->host;

	if (XFS_FORCED_SHUTDOWN(XFS_I(inode)->i_mount))
		return -EIO;

	switch (whence) {
	default:
		return generic_file_llseek(file, offset, whence);
	case SEEK_HOLE:
		offset = iomap_seek_hole(inode, offset, &xfs_seek_iomap_ops);
		break;
	case SEEK_DATA:
		offset = iomap_seek_data(inode, offset, &xfs_seek_iomap_ops);
		break;
	}

	if (offset < 0)
		return offset;
	return vfs_setpos(file, offset, inode->i_sb->s_maxbytes);
}

/*
 * Locking for serialisation of IO during page faults. This results in a lock
 * ordering of:
 *
 * mmap_lock (MM)
 *   sb_start_pagefault(vfs, freeze)
 *     i_mmaplock (XFS - truncate serialisation)
 *       page_lock (MM)
 *         i_lock (XFS - extent map serialisation)
 */
static vm_fault_t
__xfs_filemap_fault(
	struct vm_fault		*vmf,
	enum page_entry_size	pe_size,
	bool			write_fault)
{
	struct inode		*inode = file_inode(vmf->vma->vm_file);
	struct xfs_inode	*ip = XFS_I(inode);
	vm_fault_t		ret;

	trace_xfs_filemap_fault(ip, pe_size, write_fault);

	if (write_fault) {
		sb_start_pagefault(inode->i_sb);
		file_update_time(vmf->vma->vm_file);
	}

	xfs_ilock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
	if (IS_DAX(inode)) {
		pfn_t pfn;

		ret = dax_iomap_fault(vmf, pe_size, &pfn, NULL,
				(write_fault && !vmf->cow_page) ?
				 &xfs_direct_write_iomap_ops :
				 &xfs_read_iomap_ops);
		if (ret & VM_FAULT_NEEDDSYNC)
			ret = dax_finish_sync_fault(vmf, pe_size, pfn);
	} else {
		if (write_fault)
			ret = iomap_page_mkwrite(vmf,
					&xfs_buffered_write_iomap_ops);
		else
			ret = filemap_fault(vmf);
	}
	xfs_iunlock(XFS_I(inode), XFS_MMAPLOCK_SHARED);

	if (write_fault)
		sb_end_pagefault(inode->i_sb);
	return ret;
}

static inline bool
xfs_is_write_fault(
	struct vm_fault		*vmf)
{
	return (vmf->flags & FAULT_FLAG_WRITE) &&
	       (vmf->vma->vm_flags & VM_SHARED);
}

static vm_fault_t
xfs_filemap_fault(
	struct vm_fault		*vmf)
{
	/* DAX can shortcut the normal fault path on write faults! */
	return __xfs_filemap_fault(vmf, PE_SIZE_PTE,
			IS_DAX(file_inode(vmf->vma->vm_file)) &&
			xfs_is_write_fault(vmf));
}

static vm_fault_t
xfs_filemap_huge_fault(
	struct vm_fault		*vmf,
	enum page_entry_size	pe_size)
{
	if (!IS_DAX(file_inode(vmf->vma->vm_file)))
		return VM_FAULT_FALLBACK;

	/* DAX can shortcut the normal fault path on write faults! */
	return __xfs_filemap_fault(vmf, pe_size,
			xfs_is_write_fault(vmf));
}

static vm_fault_t
xfs_filemap_page_mkwrite(
	struct vm_fault		*vmf)
{
	return __xfs_filemap_fault(vmf, PE_SIZE_PTE, true);
}

/*
 * pfn_mkwrite was originally intended to ensure we capture time stamp updates
 * on write faults. In reality, it needs to serialise against truncate and
 * prepare memory for writing, so handle it as a standard write fault.
 */
static vm_fault_t
xfs_filemap_pfn_mkwrite(
	struct vm_fault		*vmf)
{
	return __xfs_filemap_fault(vmf, PE_SIZE_PTE, true);
}

static vm_fault_t
xfs_filemap_map_pages(
	struct vm_fault		*vmf,
	pgoff_t			start_pgoff,
	pgoff_t			end_pgoff)
{
	struct inode		*inode = file_inode(vmf->vma->vm_file);
	vm_fault_t ret;

	xfs_ilock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
	ret = filemap_map_pages(vmf, start_pgoff, end_pgoff);
	xfs_iunlock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
	return ret;
}

static const struct vm_operations_struct xfs_file_vm_ops = {
	.fault		= xfs_filemap_fault,
	.huge_fault	= xfs_filemap_huge_fault,
	.map_pages	= xfs_filemap_map_pages,
	.page_mkwrite	= xfs_filemap_page_mkwrite,
	.pfn_mkwrite	= xfs_filemap_pfn_mkwrite,
};

STATIC int
xfs_file_mmap(
	struct file		*file,
	struct vm_area_struct	*vma)
{
	struct inode		*inode = file_inode(file);
	struct xfs_buftarg	*target = xfs_inode_buftarg(XFS_I(inode));

	/*
	 * We don't support synchronous mappings for non-DAX files, nor for
	 * DAX files if the underlying dax_device is not synchronous.
	 */
	if (!daxdev_mapping_supported(vma, target->bt_daxdev))
		return -EOPNOTSUPP;

	file_accessed(file);
	vma->vm_ops = &xfs_file_vm_ops;
	if (IS_DAX(inode))
		vma->vm_flags |= VM_HUGEPAGE;
	return 0;
}

const struct file_operations xfs_file_operations = {
	.llseek		= xfs_file_llseek,
	.read_iter	= xfs_file_read_iter,
	.write_iter	= xfs_file_write_iter,
	.splice_read	= generic_file_splice_read,
	.splice_write	= iter_file_splice_write,
	.iopoll		= iomap_dio_iopoll,
	.unlocked_ioctl	= xfs_file_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= xfs_file_compat_ioctl,
#endif
	.mmap		= xfs_file_mmap,
	.mmap_supported_flags = MAP_SYNC,
	.open		= xfs_file_open,
	.release	= xfs_file_release,
	.fsync		= xfs_file_fsync,
	.get_unmapped_area = thp_get_unmapped_area,
	.fallocate	= xfs_file_fallocate,
	.fadvise	= xfs_file_fadvise,
	.remap_file_range = xfs_file_remap_range,
};

const struct file_operations xfs_dir_file_operations = {
	.open		= xfs_dir_open,
	.read		= generic_read_dir,
	.iterate_shared	= xfs_file_readdir,
	.llseek		= generic_file_llseek,
	.unlocked_ioctl	= xfs_file_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= xfs_file_compat_ioctl,
#endif
	.fsync		= xfs_dir_fsync,
};