/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
19
#include "xfs_fs.h"
20
#include "xfs_shared.h"
21
#include "xfs_format.h"
22 23
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
L
Linus Torvalds 已提交
24
#include "xfs_mount.h"
25 26
#include "xfs_da_format.h"
#include "xfs_da_btree.h"
L
Linus Torvalds 已提交
27
#include "xfs_inode.h"
28
#include "xfs_trans.h"
29
#include "xfs_inode_item.h"
30
#include "xfs_bmap.h"
D
Dave Chinner 已提交
31
#include "xfs_bmap_util.h"
L
Linus Torvalds 已提交
32
#include "xfs_error.h"
33
#include "xfs_dir2.h"
D
Dave Chinner 已提交
34
#include "xfs_dir2_priv.h"
35
#include "xfs_ioctl.h"
36
#include "xfs_trace.h"
37
#include "xfs_log.h"
38
#include "xfs_icache.h"
39
#include "xfs_pnfs.h"
40
#include "xfs_iomap.h"
41
#include "xfs_reflink.h"
L
Linus Torvalds 已提交
42 43

#include <linux/dcache.h>
44
#include <linux/falloc.h>
45
#include <linux/pagevec.h>
46
#include <linux/backing-dev.h>

static const struct vm_operations_struct xfs_file_vm_ops;

/*
 * Zero the specified byte range through either the pagecache or DAX.
 * Holes and unwritten extents are left as-is, as they are already zeroed.
 */
int
xfs_zero_range(
	struct xfs_inode	*ip,
	xfs_off_t		pos,
	xfs_off_t		count,
	bool			*did_zero)
{
	return iomap_zero_range(VFS_I(ip), pos, count, NULL, &xfs_iomap_ops);
}
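
/*
 * Note: this is just the XFS entry point; the heavy lifting (walking the
 * extent map and zeroing only written blocks, while skipping holes and
 * unwritten extents) is done by the generic iomap code through the
 * xfs_iomap_ops callbacks.
 */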

int
xfs_update_prealloc_flags(
	struct xfs_inode	*ip,
	enum xfs_prealloc_flags	flags)
{
	struct xfs_trans	*tp;
	int			error;

	error = xfs_trans_alloc(ip->i_mount, &M_RES(ip->i_mount)->tr_writeid,
			0, 0, 0, &tp);
	if (error)
		return error;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);

	if (!(flags & XFS_PREALLOC_INVISIBLE)) {
		VFS_I(ip)->i_mode &= ~S_ISUID;
		if (VFS_I(ip)->i_mode & S_IXGRP)
			VFS_I(ip)->i_mode &= ~S_ISGID;
		xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
	}

	if (flags & XFS_PREALLOC_SET)
		ip->i_d.di_flags |= XFS_DIFLAG_PREALLOC;
	if (flags & XFS_PREALLOC_CLEAR)
		ip->i_d.di_flags &= ~XFS_DIFLAG_PREALLOC;

	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
	if (flags & XFS_PREALLOC_SYNC)
		xfs_trans_set_sync(tp);
	return xfs_trans_commit(tp);
}
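
/*
 * A typical invocation, as in the fallocate path later in this file, after
 * a successful preallocation:
 *
 *	flags |= XFS_PREALLOC_SET;
 *	if (file->f_flags & O_DSYNC)
 *		flags |= XFS_PREALLOC_SYNC;
 *	error = xfs_update_prealloc_flags(ip, flags);
 */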

/*
 * Fsync operations on directories are much simpler than on regular files,
 * as there is no file data to flush and no need for explicit cache flush
 * operations; nor are there any non-transactional metadata updates on
 * directories.
 */
STATIC int
xfs_dir_fsync(
	struct file		*file,
	loff_t			start,
	loff_t			end,
	int			datasync)
{
	struct xfs_inode	*ip = XFS_I(file->f_mapping->host);
	struct xfs_mount	*mp = ip->i_mount;
	xfs_lsn_t		lsn = 0;

	trace_xfs_dir_fsync(ip);

	xfs_ilock(ip, XFS_ILOCK_SHARED);
	if (xfs_ipincount(ip))
		lsn = ip->i_itemp->ili_last_lsn;
	xfs_iunlock(ip, XFS_ILOCK_SHARED);

	if (!lsn)
		return 0;
	return _xfs_log_force_lsn(mp, lsn, XFS_LOG_SYNC, NULL);
}

STATIC int
xfs_file_fsync(
	struct file		*file,
	loff_t			start,
	loff_t			end,
	int			datasync)
{
	struct inode		*inode = file->f_mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	int			error = 0;
	int			log_flushed = 0;
	xfs_lsn_t		lsn = 0;

	trace_xfs_file_fsync(ip);

	error = filemap_write_and_wait_range(inode->i_mapping, start, end);
	if (error)
		return error;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	xfs_iflags_clear(ip, XFS_ITRUNCATED);

	/*
	 * If we have an RT and/or log subvolume we need to make sure to flush
	 * the write cache of the device used for file data first.  This is to
	 * ensure newly written file data makes it to disk before logging the
	 * new inode size in case of an extending write.
	 */
	if (XFS_IS_REALTIME_INODE(ip))
		xfs_blkdev_issue_flush(mp->m_rtdev_targp);
	else if (mp->m_logdev_targp != mp->m_ddev_targp)
		xfs_blkdev_issue_flush(mp->m_ddev_targp);

	/*
	 * All metadata updates are logged, which means that we just have to
	 * flush the log up to the latest LSN that touched the inode. If we have
	 * concurrent fsync/fdatasync() calls, we need them to all block on the
	 * log force before we clear the ili_fsync_fields field. This ensures
	 * that we don't get a racing sync operation that does not wait for the
	 * metadata to hit the journal before returning. If we race with
	 * clearing the ili_fsync_fields, then all that will happen is the log
	 * force will do nothing as the lsn will already be on disk. We can't
	 * race with setting ili_fsync_fields because that is done under
	 * XFS_ILOCK_EXCL, and that can't happen because we hold the lock shared
	 * until after ili_fsync_fields is cleared.
	 */
	xfs_ilock(ip, XFS_ILOCK_SHARED);
	if (xfs_ipincount(ip)) {
		if (!datasync ||
		    (ip->i_itemp->ili_fsync_fields & ~XFS_ILOG_TIMESTAMP))
			lsn = ip->i_itemp->ili_last_lsn;
	}

	if (lsn) {
		error = _xfs_log_force_lsn(mp, lsn, XFS_LOG_SYNC, &log_flushed);
		ip->i_itemp->ili_fsync_fields = 0;
	}
	xfs_iunlock(ip, XFS_ILOCK_SHARED);

	/*
	 * If we only have a single device, and the log force above was
	 * a no-op, we might have to flush the data device cache here.
	 * This can only happen for fdatasync/O_DSYNC if we were overwriting
	 * an already allocated file and thus do not have any metadata to
	 * commit.
	 */
	if (!log_flushed && !XFS_IS_REALTIME_INODE(ip) &&
	    mp->m_logdev_targp == mp->m_ddev_targp)
		xfs_blkdev_issue_flush(mp->m_ddev_targp);

	return error;
}
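
/*
 * Worked example: fdatasync() of an overwrite to an already-allocated file
 * on a single-device filesystem dirties no metadata that matters here, so
 * lsn stays zero and the log force is skipped; the final
 * xfs_blkdev_issue_flush() above is then the only thing that pushes the
 * data through the device write cache.
 */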

STATIC ssize_t
xfs_file_dio_aio_read(
	struct kiocb		*iocb,
	struct iov_iter		*to)
{
	struct xfs_inode	*ip = XFS_I(file_inode(iocb->ki_filp));
	size_t			count = iov_iter_count(to);
	ssize_t			ret;

	trace_xfs_file_direct_read(ip, count, iocb->ki_pos);

	if (!count)
		return 0; /* skip atime */

	file_accessed(iocb->ki_filp);

	xfs_ilock(ip, XFS_IOLOCK_SHARED);
	ret = iomap_dio_rw(iocb, to, &xfs_iomap_ops, NULL);
	xfs_iunlock(ip, XFS_IOLOCK_SHARED);

	return ret;
}

static noinline ssize_t
xfs_file_dax_read(
	struct kiocb		*iocb,
	struct iov_iter		*to)
{
	struct xfs_inode	*ip = XFS_I(iocb->ki_filp->f_mapping->host);
	size_t			count = iov_iter_count(to);
	ssize_t			ret = 0;

	trace_xfs_file_dax_read(ip, count, iocb->ki_pos);

	if (!count)
		return 0; /* skip atime */

	xfs_ilock(ip, XFS_IOLOCK_SHARED);
	ret = dax_iomap_rw(iocb, to, &xfs_iomap_ops);
	xfs_iunlock(ip, XFS_IOLOCK_SHARED);

	file_accessed(iocb->ki_filp);
	return ret;
}

STATIC ssize_t
xfs_file_buffered_aio_read(
	struct kiocb		*iocb,
	struct iov_iter		*to)
{
	struct xfs_inode	*ip = XFS_I(file_inode(iocb->ki_filp));
	ssize_t			ret;

	trace_xfs_file_buffered_read(ip, iov_iter_count(to), iocb->ki_pos);

	xfs_ilock(ip, XFS_IOLOCK_SHARED);
	ret = generic_file_read_iter(iocb, to);
	xfs_iunlock(ip, XFS_IOLOCK_SHARED);

	return ret;
}

STATIC ssize_t
xfs_file_read_iter(
	struct kiocb		*iocb,
	struct iov_iter		*to)
{
	struct inode		*inode = file_inode(iocb->ki_filp);
	struct xfs_mount	*mp = XFS_I(inode)->i_mount;
	ssize_t			ret = 0;

	XFS_STATS_INC(mp, xs_read_calls);

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	if (IS_DAX(inode))
		ret = xfs_file_dax_read(iocb, to);
	else if (iocb->ki_flags & IOCB_DIRECT)
		ret = xfs_file_dio_aio_read(iocb, to);
	else
		ret = xfs_file_buffered_aio_read(iocb, to);

	if (ret > 0)
		XFS_STATS_ADD(mp, xs_read_bytes, ret);
	return ret;
}
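
/*
 * Note the dispatch order above: IS_DAX() is checked before IOCB_DIRECT,
 * so O_DIRECT reads of DAX files take the DAX path rather than the regular
 * direct IO path.
 */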

/*
 * Zero any on-disk space between the current EOF and the new, larger EOF.
 *
 * This handles the normal case of zeroing the remainder of the last block in
 * the file and the unusual case of zeroing blocks out beyond the size of the
 * file.  This second case only happens with fixed size extents and when the
 * system crashes before the inode size was updated but after blocks were
 * allocated.
 *
 * Expects the iolock to be held exclusive, and will take the ilock internally.
 */
int					/* error (positive) */
xfs_zero_eof(
	struct xfs_inode	*ip,
	xfs_off_t		offset,		/* starting I/O offset */
	xfs_fsize_t		isize,		/* current inode size */
	bool			*did_zeroing)
{
	ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
	ASSERT(offset > isize);

	trace_xfs_zero_eof(ip, isize, offset - isize);
	return xfs_zero_range(ip, isize, offset - isize, did_zeroing);
}

/*
 * Common pre-write limit and setup checks.
 *
 * Called with the iolock held either shared or exclusive according to
 * @iolock, and returns with it held.  Might upgrade the iolock to exclusive
 * if called for a direct write beyond i_size.
 */
STATIC ssize_t
xfs_file_aio_write_checks(
	struct kiocb		*iocb,
	struct iov_iter		*from,
	int			*iolock)
{
	struct file		*file = iocb->ki_filp;
	struct inode		*inode = file->f_mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	ssize_t			error = 0;
	size_t			count = iov_iter_count(from);
	bool			drained_dio = false;

restart:
	error = generic_write_checks(iocb, from);
	if (error <= 0)
		return error;

	error = xfs_break_layouts(inode, iolock);
	if (error)
		return error;

	/*
	 * For changing security info in file_remove_privs() we need i_rwsem
	 * exclusively.
	 */
	if (*iolock == XFS_IOLOCK_SHARED && !IS_NOSEC(inode)) {
		xfs_iunlock(ip, *iolock);
		*iolock = XFS_IOLOCK_EXCL;
		xfs_ilock(ip, *iolock);
		goto restart;
	}
	/*
	 * If the offset is beyond the size of the file, we need to zero any
	 * blocks that fall between the existing EOF and the start of this
	 * write.  If zeroing is needed and we are currently holding the
	 * iolock shared, we need to update it to exclusive, which implies
	 * having to redo all the checks made so far.
	 *
	 * We need to serialise against EOF updates that occur in IO
	 * completions here. We want to make sure that nobody is changing the
	 * size while we do this check until we have placed an IO barrier (i.e.
	 * hold the XFS_IOLOCK_EXCL) that prevents new IO from being dispatched.
	 * The spinlock effectively forms a memory barrier once we have the
	 * XFS_IOLOCK_EXCL so we are guaranteed to see the latest EOF value
	 * and hence be able to correctly determine if we need to run zeroing.
	 */
	spin_lock(&ip->i_flags_lock);
	if (iocb->ki_pos > i_size_read(inode)) {
		bool	zero = false;

		spin_unlock(&ip->i_flags_lock);
		if (!drained_dio) {
			if (*iolock == XFS_IOLOCK_SHARED) {
				xfs_iunlock(ip, *iolock);
				*iolock = XFS_IOLOCK_EXCL;
				xfs_ilock(ip, *iolock);
				iov_iter_reexpand(from, count);
			}
			/*
			 * We now have an IO submission barrier in place, but
			 * AIO can do EOF updates during IO completion and hence
			 * we now need to wait for all of them to drain. Non-AIO
			 * DIO will have drained before we are given the
			 * XFS_IOLOCK_EXCL, and so for most cases this wait is a
			 * no-op.
			 */
			inode_dio_wait(inode);
			drained_dio = true;
			goto restart;
		}
		error = xfs_zero_eof(ip, iocb->ki_pos, i_size_read(inode), &zero);
		if (error)
			return error;
	} else
		spin_unlock(&ip->i_flags_lock);

	/*
	 * Updating the timestamps will grab the ilock again from
	 * xfs_fs_dirty_inode, so we have to call it after dropping the
	 * lock above.  Eventually we should look into a way to avoid
	 * the pointless lock roundtrip.
	 */
	if (likely(!(file->f_mode & FMODE_NOCMTIME))) {
		error = file_update_time(file);
		if (error)
			return error;
	}

	/*
	 * If we're writing the file then make sure to clear the setuid and
	 * setgid bits if the process is not being run by root.  This keeps
	 * people from modifying setuid and setgid binaries.
	 */
	if (!IS_NOSEC(inode))
		return file_remove_privs(file);
	return 0;
}
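
/*
 * Example of the restart dance above: a write beyond EOF arrives holding
 * IOLOCK_SHARED.  The i_size check fires, the lock is upgraded to
 * IOLOCK_EXCL, any in-flight AIO is drained, and we jump back to "restart"
 * so that generic_write_checks() and the layout break are redone under the
 * stronger lock before xfs_zero_eof() finally runs.
 */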

static int
xfs_dio_write_end_io(
	struct kiocb		*iocb,
	ssize_t			size,
	unsigned		flags)
{
	struct inode		*inode = file_inode(iocb->ki_filp);
	struct xfs_inode	*ip = XFS_I(inode);
	loff_t			offset = iocb->ki_pos;
	bool			update_size = false;
	int			error = 0;

	trace_xfs_end_io_direct_write(ip, offset, size);

	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
		return -EIO;

	if (size <= 0)
		return size;

	/*
	 * We need to update the in-core inode size here so that we don't end up
	 * with the on-disk inode size being outside the in-core inode size. We
	 * have no other method of updating EOF for AIO, so always do it here
	 * if necessary.
	 *
	 * We need to lock the test/set EOF update as we can be racing with
	 * other IO completions here to update the EOF. Failing to serialise
	 * here can result in EOF moving backwards and Bad Things Happen when
	 * that occurs.
	 */
	spin_lock(&ip->i_flags_lock);
	if (offset + size > i_size_read(inode)) {
		i_size_write(inode, offset + size);
		update_size = true;
	}
	spin_unlock(&ip->i_flags_lock);

	if (flags & IOMAP_DIO_COW) {
		error = xfs_reflink_end_cow(ip, offset, size);
		if (error)
			return error;
	}

	if (flags & IOMAP_DIO_UNWRITTEN)
		error = xfs_iomap_write_unwritten(ip, offset, size);
	else if (update_size)
		error = xfs_setfilesize(ip, offset, size);

	return error;
}
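
/*
 * Note: this completion handler is passed as the end_io callback to
 * iomap_dio_rw() in xfs_file_dio_aio_write() below, so it runs for both
 * synchronous and AIO direct write completions.
 */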

/*
 * xfs_file_dio_aio_write - handle direct IO writes
 *
 * Lock the inode appropriately to prepare for and issue a direct IO write.
 * By separating it from the buffered write path we remove all the
 * tricky-to-follow locking changes and looping.
 *
 * If there are cached pages or we're extending the file, we need IOLOCK_EXCL
 * until we're sure the bytes at the new EOF have been zeroed and/or the cached
 * pages are flushed out.
 *
 * In most cases the direct IO writes will be done holding IOLOCK_SHARED
 * allowing them to be done in parallel with reads and other direct IO writes.
 * However, if the IO is not aligned to filesystem blocks, the direct IO layer
 * needs to do sub-block zeroing and that requires serialisation against other
 * direct IOs to the same block. In this case we need to serialise the
 * submission of the unaligned IOs so that we don't get racing block zeroing in
 * the dio layer.  To avoid the problem with aio, we also need to wait for
 * outstanding IOs to complete so that unwritten extent conversion is completed
 * before we try to map the overlapping block. This is currently implemented by
 * hitting it with a big hammer (i.e. inode_dio_wait()).
 *
 * Returns with locks held indicated by @iolock and errors indicated by
 * negative return values.
 */
STATIC ssize_t
xfs_file_dio_aio_write(
	struct kiocb		*iocb,
	struct iov_iter		*from)
{
	struct file		*file = iocb->ki_filp;
	struct address_space	*mapping = file->f_mapping;
	struct inode		*inode = mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	ssize_t			ret = 0;
	int			unaligned_io = 0;
	int			iolock;
	size_t			count = iov_iter_count(from);
	struct xfs_buftarg	*target = XFS_IS_REALTIME_INODE(ip) ?
					mp->m_rtdev_targp : mp->m_ddev_targp;

	/* DIO must be aligned to device logical sector size */
	if ((iocb->ki_pos | count) & target->bt_logical_sectormask)
		return -EINVAL;

	/*
	 * Don't take the exclusive iolock here unless the I/O is unaligned to
	 * the file system block size.  We don't need to consider the EOF
	 * extension case here because xfs_file_aio_write_checks() will relock
	 * the inode as necessary for EOF zeroing cases and fill out the new
	 * inode size as appropriate.
	 */
	if ((iocb->ki_pos & mp->m_blockmask) ||
	    ((iocb->ki_pos + count) & mp->m_blockmask)) {
		unaligned_io = 1;

		/*
		 * We can't properly handle unaligned direct I/O to reflink
		 * files yet, as we can't unshare a partial block.
		 */
		if (xfs_is_reflink_inode(ip)) {
			trace_xfs_reflink_bounce_dio_write(ip, iocb->ki_pos, count);
			return -EREMCHG;
		}
		iolock = XFS_IOLOCK_EXCL;
	} else {
		iolock = XFS_IOLOCK_SHARED;
	}

	xfs_ilock(ip, iolock);

	ret = xfs_file_aio_write_checks(iocb, from, &iolock);
	if (ret)
		goto out;
	count = iov_iter_count(from);

	/*
	 * If we are doing unaligned IO, wait for all other IO to drain,
	 * otherwise demote the lock if we had to take the exclusive lock
	 * for other reasons in xfs_file_aio_write_checks.
	 */
	if (unaligned_io)
		inode_dio_wait(inode);
	else if (iolock == XFS_IOLOCK_EXCL) {
		xfs_ilock_demote(ip, XFS_IOLOCK_EXCL);
		iolock = XFS_IOLOCK_SHARED;
	}

	trace_xfs_file_direct_write(ip, count, iocb->ki_pos);
	ret = iomap_dio_rw(iocb, from, &xfs_iomap_ops, xfs_dio_write_end_io);
out:
	xfs_iunlock(ip, iolock);

	/*
	 * No fallback to buffered IO on errors for XFS, direct IO will either
	 * complete fully or fail.
	 */
	ASSERT(ret < 0 || ret == count);
	return ret;
}

static noinline ssize_t
xfs_file_dax_write(
	struct kiocb		*iocb,
	struct iov_iter		*from)
{
	struct inode		*inode = iocb->ki_filp->f_mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	int			iolock = XFS_IOLOCK_EXCL;
	ssize_t			ret, error = 0;
	size_t			count;
	loff_t			pos;

	xfs_ilock(ip, iolock);
	ret = xfs_file_aio_write_checks(iocb, from, &iolock);
	if (ret)
		goto out;

	pos = iocb->ki_pos;
	count = iov_iter_count(from);

	trace_xfs_file_dax_write(ip, count, pos);
	ret = dax_iomap_rw(iocb, from, &xfs_iomap_ops);
	if (ret > 0 && iocb->ki_pos > i_size_read(inode)) {
		i_size_write(inode, iocb->ki_pos);
		error = xfs_setfilesize(ip, pos, ret);
	}
out:
	xfs_iunlock(ip, iolock);
	return error ? error : ret;
}

STATIC ssize_t
xfs_file_buffered_aio_write(
	struct kiocb		*iocb,
	struct iov_iter		*from)
{
	struct file		*file = iocb->ki_filp;
	struct address_space	*mapping = file->f_mapping;
	struct inode		*inode = mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	ssize_t			ret;
	int			enospc = 0;
	int			iolock;

write_retry:
	iolock = XFS_IOLOCK_EXCL;
	xfs_ilock(ip, iolock);

	ret = xfs_file_aio_write_checks(iocb, from, &iolock);
	if (ret)
		goto out;

	/* We can write back this queue in page reclaim */
	current->backing_dev_info = inode_to_bdi(inode);

	trace_xfs_file_buffered_write(ip, iov_iter_count(from), iocb->ki_pos);
	ret = iomap_file_buffered_write(iocb, from, &xfs_iomap_ops);
	if (likely(ret >= 0))
		iocb->ki_pos += ret;

	/*
	 * If we hit a space limit, try to free up some lingering preallocated
	 * space before returning an error. In the case of ENOSPC, first try to
	 * write back all dirty inodes to free up some of the excess reserved
	 * metadata space. This reduces the chances that the eofblocks scan
	 * waits on dirty mappings. Since xfs_flush_inodes() is serialized, this
	 * also behaves as a filter to prevent too many eofblocks scans from
	 * running at the same time.
	 */
	if (ret == -EDQUOT && !enospc) {
		xfs_iunlock(ip, iolock);
		enospc = xfs_inode_free_quota_eofblocks(ip);
		if (enospc)
			goto write_retry;
		enospc = xfs_inode_free_quota_cowblocks(ip);
		if (enospc)
			goto write_retry;
		iolock = 0;
	} else if (ret == -ENOSPC && !enospc) {
		struct xfs_eofblocks eofb = {0};

		enospc = 1;
		xfs_flush_inodes(ip->i_mount);

		xfs_iunlock(ip, iolock);
		eofb.eof_flags = XFS_EOF_FLAGS_SYNC;
		xfs_icache_free_eofblocks(ip->i_mount, &eofb);
		xfs_icache_free_cowblocks(ip->i_mount, &eofb);
		goto write_retry;
	}

	current->backing_dev_info = NULL;
out:
	if (iolock)
		xfs_iunlock(ip, iolock);
	return ret;
}

STATIC ssize_t
xfs_file_write_iter(
	struct kiocb		*iocb,
	struct iov_iter		*from)
{
	struct file		*file = iocb->ki_filp;
	struct address_space	*mapping = file->f_mapping;
	struct inode		*inode = mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	ssize_t			ret;
	size_t			ocount = iov_iter_count(from);

	XFS_STATS_INC(ip->i_mount, xs_write_calls);

	if (ocount == 0)
		return 0;

	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
		return -EIO;

	if (IS_DAX(inode))
		ret = xfs_file_dax_write(iocb, from);
	else if (iocb->ki_flags & IOCB_DIRECT) {
		/*
		 * Allow a directio write to fall back to a buffered
		 * write *only* in the case that we're doing a reflink
		 * CoW.  In all other directio scenarios we do not
		 * allow an operation to fall back to buffered mode.
		 */
		ret = xfs_file_dio_aio_write(iocb, from);
		if (ret == -EREMCHG)
			goto buffered;
	} else {
buffered:
		ret = xfs_file_buffered_aio_write(iocb, from);
	}

	if (ret > 0) {
		XFS_STATS_ADD(ip->i_mount, xs_write_bytes, ret);

		/* Handle various SYNC-type writes */
		ret = generic_write_sync(iocb, ret);
	}
	return ret;
}

#define	XFS_FALLOC_FL_SUPPORTED						\
		(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |		\
		 FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_ZERO_RANGE |	\
		 FALLOC_FL_INSERT_RANGE | FALLOC_FL_UNSHARE_RANGE)
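
/*
 * Userspace reaches xfs_file_fallocate() below via the fallocate(2)
 * syscall.  An illustrative call that punches a 1 MiB hole at offset 4096
 * without changing the file size:
 *
 *	fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
 *		  4096, 1048576);
 *
 * Mode bits outside XFS_FALLOC_FL_SUPPORTED are rejected with -EOPNOTSUPP.
 */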

STATIC long
xfs_file_fallocate(
	struct file		*file,
	int			mode,
	loff_t			offset,
	loff_t			len)
{
	struct inode		*inode = file_inode(file);
	struct xfs_inode	*ip = XFS_I(inode);
	long			error;
	enum xfs_prealloc_flags	flags = 0;
	uint			iolock = XFS_IOLOCK_EXCL;
	loff_t			new_size = 0;
	bool			do_file_insert = false;

	if (!S_ISREG(inode->i_mode))
		return -EINVAL;
	if (mode & ~XFS_FALLOC_FL_SUPPORTED)
		return -EOPNOTSUPP;

	xfs_ilock(ip, iolock);
	error = xfs_break_layouts(inode, &iolock);
	if (error)
		goto out_unlock;

	xfs_ilock(ip, XFS_MMAPLOCK_EXCL);
	iolock |= XFS_MMAPLOCK_EXCL;

	if (mode & FALLOC_FL_PUNCH_HOLE) {
		error = xfs_free_file_space(ip, offset, len);
		if (error)
			goto out_unlock;
	} else if (mode & FALLOC_FL_COLLAPSE_RANGE) {
		unsigned int blksize_mask = i_blocksize(inode) - 1;

		if (offset & blksize_mask || len & blksize_mask) {
			error = -EINVAL;
			goto out_unlock;
		}

		/*
		 * There is no need to overlap collapse range with EOF,
		 * in which case it is effectively a truncate operation
		 */
		if (offset + len >= i_size_read(inode)) {
			error = -EINVAL;
			goto out_unlock;
		}

		new_size = i_size_read(inode) - len;

		error = xfs_collapse_file_space(ip, offset, len);
		if (error)
			goto out_unlock;
	} else if (mode & FALLOC_FL_INSERT_RANGE) {
		unsigned int blksize_mask = i_blocksize(inode) - 1;

		new_size = i_size_read(inode) + len;
		if (offset & blksize_mask || len & blksize_mask) {
			error = -EINVAL;
			goto out_unlock;
		}

		/* check the new inode size does not wrap through zero */
		if (new_size > inode->i_sb->s_maxbytes) {
			error = -EFBIG;
			goto out_unlock;
		}

		/* Offset should be less than i_size */
		if (offset >= i_size_read(inode)) {
			error = -EINVAL;
			goto out_unlock;
		}
		do_file_insert = true;
	} else {
		flags |= XFS_PREALLOC_SET;

		if (!(mode & FALLOC_FL_KEEP_SIZE) &&
		    offset + len > i_size_read(inode)) {
			new_size = offset + len;
			error = inode_newsize_ok(inode, new_size);
			if (error)
				goto out_unlock;
		}

		if (mode & FALLOC_FL_ZERO_RANGE)
			error = xfs_zero_file_space(ip, offset, len);
		else {
			if (mode & FALLOC_FL_UNSHARE_RANGE) {
				error = xfs_reflink_unshare(ip, offset, len);
				if (error)
					goto out_unlock;
			}
			error = xfs_alloc_file_space(ip, offset, len,
						     XFS_BMAPI_PREALLOC);
		}
		if (error)
			goto out_unlock;
	}

	if (file->f_flags & O_DSYNC)
		flags |= XFS_PREALLOC_SYNC;

	error = xfs_update_prealloc_flags(ip, flags);
	if (error)
		goto out_unlock;

	/* Change file size if needed */
	if (new_size) {
		struct iattr iattr;

		iattr.ia_valid = ATTR_SIZE;
		iattr.ia_size = new_size;
		error = xfs_vn_setattr_size(file_dentry(file), &iattr);
		if (error)
			goto out_unlock;
	}

	/*
	 * Perform hole insertion now that the file size has been
	 * updated so that if we crash during the operation we don't
	 * leave shifted extents past EOF and hence lose access to
	 * the data that is contained within them.
	 */
	if (do_file_insert)
		error = xfs_insert_file_space(ip, offset, len);

out_unlock:
	xfs_iunlock(ip, iolock);
	return error;
}
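
/*
 * Worked example for FALLOC_FL_COLLAPSE_RANGE above: collapsing 64k at
 * offset 0 of a 1 MiB file shifts the data formerly at 64k..1M down to
 * offset 0, so new_size = i_size - len and the xfs_vn_setattr_size() call
 * shrinks the file to 960k.
 */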

STATIC int
xfs_file_clone_range(
	struct file	*file_in,
	loff_t		pos_in,
	struct file	*file_out,
	loff_t		pos_out,
	u64		len)
{
	return xfs_reflink_remap_range(file_in, pos_in, file_out, pos_out,
				     len, false);
}

STATIC ssize_t
xfs_file_dedupe_range(
	struct file	*src_file,
	u64		loff,
	u64		len,
	struct file	*dst_file,
	u64		dst_loff)
{
	int		error;

	error = xfs_reflink_remap_range(src_file, loff, dst_file, dst_loff,
				     len, true);
	if (error)
		return error;
	return len;
}
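
/*
 * Both remap entry points share xfs_reflink_remap_range(); the final
 * boolean selects dedupe mode (true), where the two ranges must already
 * contain identical data, versus clone mode (false).
 */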

STATIC int
xfs_file_open(
	struct inode	*inode,
	struct file	*file)
{
	if (!(file->f_flags & O_LARGEFILE) && i_size_read(inode) > MAX_NON_LFS)
		return -EFBIG;
	if (XFS_FORCED_SHUTDOWN(XFS_M(inode->i_sb)))
		return -EIO;
	return 0;
}

STATIC int
xfs_dir_open(
	struct inode	*inode,
	struct file	*file)
{
	struct xfs_inode *ip = XFS_I(inode);
	int		mode;
	int		error;

	error = xfs_file_open(inode, file);
	if (error)
		return error;

	/*
	 * If there are any blocks, read-ahead block 0 as we're almost
	 * certain to have the next operation be a read there.
	 */
	mode = xfs_ilock_data_map_shared(ip);
	if (ip->i_d.di_nextents > 0)
		error = xfs_dir3_data_readahead(ip, 0, -1);
	xfs_iunlock(ip, mode);
	return error;
}

STATIC int
xfs_file_release(
	struct inode	*inode,
	struct file	*filp)
{
	return xfs_release(XFS_I(inode));
}

STATIC int
xfs_file_readdir(
	struct file	*file,
	struct dir_context *ctx)
{
	struct inode	*inode = file_inode(file);
	xfs_inode_t	*ip = XFS_I(inode);
	size_t		bufsize;

	/*
	 * The Linux API doesn't pass the total size of the buffer we read
	 * into down to the filesystem.  With the filldir concept it's not
	 * needed for correct information, but the XFS dir2 leaf code wants
	 * an estimate of the buffer size to calculate its readahead window
	 * and size the buffers used for mapping to physical blocks.
	 *
	 * Try to give it an estimate that's good enough, maybe at some
	 * point we can change the ->readdir prototype to include the
	 * buffer size.  For now we use the current glibc buffer size.
	 */
	bufsize = (size_t)min_t(loff_t, 32768, ip->i_d.di_size);

	return xfs_readdir(NULL, ip, ctx, bufsize);
}

/*
 * This type indicates the kind of offset we want to search for in the
 * page cache for xfs_seek_hole_data().
 */
enum {
	HOLE_OFF = 0,
	DATA_OFF,
};

/*
 * Lookup the desired type of offset from the given page.
 *
 * On success, return true and the offset argument will point to the
 * start of the region that was found.  Otherwise this function will
 * return false and keep the offset argument unchanged.
 */
STATIC bool
xfs_lookup_buffer_offset(
	struct page		*page,
	loff_t			*offset,
	unsigned int		type)
{
	loff_t			lastoff = page_offset(page);
	bool			found = false;
	struct buffer_head	*bh, *head;

	bh = head = page_buffers(page);
	do {
		/*
		 * Unwritten extents that have data in the page
		 * cache covering them can be identified by the
		 * BH_Unwritten state flag.  Pages with multiple
		 * buffers might have a mix of holes, data and
		 * unwritten extents - any buffer with valid
		 * data in it should have BH_Uptodate flag set
		 * on it.
		 */
		if (buffer_unwritten(bh) ||
		    buffer_uptodate(bh)) {
			if (type == DATA_OFF)
				found = true;
		} else {
			if (type == HOLE_OFF)
				found = true;
		}

		if (found) {
			*offset = lastoff;
			break;
		}
		lastoff += bh->b_size;
	} while ((bh = bh->b_this_page) != head);

	return found;
}

/*
 * This routine is called to find and return a data or hole offset from the
 * page cache for unwritten extents according to the desired type for
 * xfs_seek_hole_data().
 *
 * The argument offset is used to tell where we start to search from the
 * page cache.  Map is used to figure out the end points of the range to
 * lookup pages.
 *
 * Return true if the desired type of offset was found, and the argument
 * offset is filled with that address.  Otherwise, return false and keep
 * offset unchanged.
 */
STATIC bool
xfs_find_get_desired_pgoff(
	struct inode		*inode,
	struct xfs_bmbt_irec	*map,
	unsigned int		type,
	loff_t			*offset)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	struct pagevec		pvec;
	pgoff_t			index;
	pgoff_t			end;
	loff_t			endoff;
	loff_t			startoff = *offset;
	loff_t			lastoff = startoff;
	bool			found = false;

	pagevec_init(&pvec, 0);

	index = startoff >> PAGE_SHIFT;
	endoff = XFS_FSB_TO_B(mp, map->br_startoff + map->br_blockcount);
	end = (endoff - 1) >> PAGE_SHIFT;
	do {
		int		want;
		unsigned	nr_pages;
		unsigned int	i;

		want = min_t(pgoff_t, end - index, PAGEVEC_SIZE - 1) + 1;
		nr_pages = pagevec_lookup(&pvec, inode->i_mapping, index,
					  want);
		if (nr_pages == 0)
			break;

		for (i = 0; i < nr_pages; i++) {
			struct page	*page = pvec.pages[i];
			loff_t		b_offset;

			/*
			 * At this point, the page may be truncated or
			 * invalidated (changing page->mapping to NULL),
			 * or even swizzled back from swapper_space to tmpfs
			 * file mapping. However, page->index will not change
			 * because we have a reference on the page.
			 *
			 * If current page offset is beyond where we've ended,
			 * we've found a hole.
			 */
			if (type == HOLE_OFF && lastoff < endoff &&
			    lastoff < page_offset(pvec.pages[i])) {
				found = true;
				*offset = lastoff;
				goto out;
			}
			/* Searching done if the page index is out of range. */
			if (page->index > end)
				goto out;

			lock_page(page);
			/*
			 * Page truncated or invalidated (page->mapping == NULL).
			 * We can freely skip it and proceed to check the next
			 * page.
			 */
			if (unlikely(page->mapping != inode->i_mapping)) {
				unlock_page(page);
				continue;
			}

			if (!page_has_buffers(page)) {
				unlock_page(page);
				continue;
			}

			found = xfs_lookup_buffer_offset(page, &b_offset, type);
			if (found) {
				/*
				 * The found offset may be less than the start
				 * point to search if this is the first time to
				 * come here.
				 */
				*offset = max_t(loff_t, startoff, b_offset);
				unlock_page(page);
				goto out;
			}

			/*
			 * We are either searching for data but found nothing,
			 * or searching for a hole but found a data buffer.
			 * In either case, the next page probably contains
			 * what we want, so update the last offset to point
			 * at it.
			 */
			lastoff = page_offset(page) + PAGE_SIZE;
			unlock_page(page);
		}

		/*
		 * If fewer pages were returned than we asked for, the
		 * search is done.
		 */
		if (nr_pages < want)
			break;

		index = pvec.pages[i - 1]->index + 1;
		pagevec_release(&pvec);
	} while (index <= end);

	/* No page at lastoff and we are not done - we found a hole. */
	if (type == HOLE_OFF && lastoff < endoff) {
		*offset = lastoff;
		found = true;
	}
out:
	pagevec_release(&pvec);
	return found;
}

/*
 * The caller must lock the inode with xfs_ilock_data_map_shared();
 * can we craft an appropriate ASSERT?
 *
 * The end argument exists because the VFS-level lseek interface is defined
 * such that any offset past i_size shall return -ENXIO, but we use this for
 * the quota code, which does not maintain i_size, and we want to SEEK_DATA
 * past i_size there.
 */
loff_t
__xfs_seek_hole_data(
	struct inode		*inode,
	loff_t			start,
	loff_t			end,
	int			whence)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	loff_t			uninitialized_var(offset);
	xfs_fileoff_t		fsbno;
	xfs_filblks_t		lastbno;
	int			error;

	if (start >= end) {
		error = -ENXIO;
		goto out_error;
	}

	/*
	 * Try to read extents from the first block indicated
	 * by fsbno to the end block of the file.
	 */
	fsbno = XFS_B_TO_FSBT(mp, start);
	lastbno = XFS_B_TO_FSB(mp, end);

	for (;;) {
		struct xfs_bmbt_irec	map[2];
		int			nmap = 2;
		unsigned int		i;

		error = xfs_bmapi_read(ip, fsbno, lastbno - fsbno, map, &nmap,
				       XFS_BMAPI_ENTIRE);
		if (error)
			goto out_error;

		/* No extents at given offset, must be beyond EOF */
		if (nmap == 0) {
			error = -ENXIO;
			goto out_error;
		}

		for (i = 0; i < nmap; i++) {
			offset = max_t(loff_t, start,
				       XFS_FSB_TO_B(mp, map[i].br_startoff));

			/* Landed in the hole we wanted? */
			if (whence == SEEK_HOLE &&
			    map[i].br_startblock == HOLESTARTBLOCK)
				goto out;

			/* Landed in the data extent we wanted? */
			if (whence == SEEK_DATA &&
			    (map[i].br_startblock == DELAYSTARTBLOCK ||
			     (map[i].br_state == XFS_EXT_NORM &&
			      !isnullstartblock(map[i].br_startblock))))
				goto out;

			/*
			 * Landed in an unwritten extent, try to search
			 * for hole or data from page cache.
			 */
			if (map[i].br_state == XFS_EXT_UNWRITTEN) {
				if (xfs_find_get_desired_pgoff(inode, &map[i],
				      whence == SEEK_HOLE ? HOLE_OFF : DATA_OFF,
							&offset))
					goto out;
			}
		}

		/*
		 * We only received one extent out of the two requested. This
		 * means we've hit EOF and didn't find what we are looking for.
		 */
		if (nmap == 1) {
			/*
			 * If we were looking for a hole, set offset to
			 * the end of the file (i.e., there is an implicit
			 * hole at the end of any file).
			 */
			if (whence == SEEK_HOLE) {
				offset = end;
				break;
			}
			/*
			 * If we were looking for data, it's nowhere to be
			 * found.
			 */
			ASSERT(whence == SEEK_DATA);
			error = -ENXIO;
			goto out_error;
		}

		ASSERT(i > 1);

		/*
		 * Nothing was found, proceed to the next round of search
		 * if the next reading offset is not at or beyond EOF.
		 */
		fsbno = map[i - 1].br_startoff + map[i - 1].br_blockcount;
		start = XFS_FSB_TO_B(mp, fsbno);
		if (start >= end) {
			if (whence == SEEK_HOLE) {
				offset = end;
				break;
			}
			ASSERT(whence == SEEK_DATA);
			error = -ENXIO;
			goto out_error;
		}
	}

out:
	/*
	 * If at this point we have found the hole we wanted, the returned
	 * offset may be bigger than the file size as it may be aligned to
	 * a page boundary for unwritten extents.  We need to deal with this
	 * situation in particular.
	 */
	if (whence == SEEK_HOLE)
		offset = min_t(loff_t, offset, end);

	return offset;

out_error:
	return error;
}

STATIC loff_t
xfs_seek_hole_data(
	struct file		*file,
	loff_t			start,
	int			whence)
{
	struct inode		*inode = file->f_mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	uint			lock;
	loff_t			offset, end;
	int			error = 0;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	lock = xfs_ilock_data_map_shared(ip);

	end = i_size_read(inode);
	offset = __xfs_seek_hole_data(inode, start, end, whence);
	if (offset < 0) {
		error = offset;
		goto out_unlock;
	}

	offset = vfs_setpos(file, offset, inode->i_sb->s_maxbytes);

out_unlock:
	xfs_iunlock(ip, lock);

	if (error)
		return error;
	return offset;
}

STATIC loff_t
xfs_file_llseek(
	struct file	*file,
	loff_t		offset,
	int		whence)
{
	switch (whence) {
	case SEEK_END:
	case SEEK_CUR:
	case SEEK_SET:
		return generic_file_llseek(file, offset, whence);
	case SEEK_HOLE:
	case SEEK_DATA:
		return xfs_seek_hole_data(file, offset, whence);
	default:
		return -EINVAL;
	}
}

/*
 * Locking for serialisation of IO during page faults. This results in a lock
 * ordering of:
 *
 * mmap_sem (MM)
 *   sb_start_pagefault(vfs, freeze)
 *     i_mmaplock (XFS - truncate serialisation)
 *       page_lock (MM)
 *         i_lock (XFS - extent map serialisation)
 */

/*
 * An mmap()d file has taken a write protection fault and is being made
 * writable. We can set the page state up correctly for a writable page,
 * which means we can do correct delalloc accounting (ENOSPC checking!) and
 * unwritten extent mapping.
 */
STATIC int
xfs_filemap_page_mkwrite(
	struct vm_fault		*vmf)
{
	struct inode		*inode = file_inode(vmf->vma->vm_file);
	int			ret;

	trace_xfs_filemap_page_mkwrite(XFS_I(inode));

	sb_start_pagefault(inode->i_sb);
	file_update_time(vmf->vma->vm_file);
	xfs_ilock(XFS_I(inode), XFS_MMAPLOCK_SHARED);

	if (IS_DAX(inode)) {
		ret = dax_iomap_fault(vmf, PE_SIZE_PTE, &xfs_iomap_ops);
	} else {
		ret = iomap_page_mkwrite(vmf, &xfs_iomap_ops);
		ret = block_page_mkwrite_return(ret);
	}

	xfs_iunlock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
	sb_end_pagefault(inode->i_sb);

	return ret;
}
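
/*
 * Note: sb_start_pagefault()/sb_end_pagefault() bracket the fault for
 * filesystem freeze protection, so a freeze in progress waits for faults
 * like this one to drain before the filesystem is declared quiesced.
 */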

STATIC int
xfs_filemap_fault(
	struct vm_fault		*vmf)
{
	struct inode		*inode = file_inode(vmf->vma->vm_file);
	int			ret;

	trace_xfs_filemap_fault(XFS_I(inode));

	/* DAX can shortcut the normal fault path on write faults! */
	if ((vmf->flags & FAULT_FLAG_WRITE) && IS_DAX(inode))
		return xfs_filemap_page_mkwrite(vmf);

	xfs_ilock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
	if (IS_DAX(inode))
		ret = dax_iomap_fault(vmf, PE_SIZE_PTE, &xfs_iomap_ops);
	else
		ret = filemap_fault(vmf);
	xfs_iunlock(XFS_I(inode), XFS_MMAPLOCK_SHARED);

	return ret;
}

/*
 * Similar to xfs_filemap_fault(), the DAX fault path can call into here on
 * both read and write faults. Hence we need to handle both cases. There is
 * no ->huge_mkwrite callout for huge pages, so we have a single function
 * here to handle both cases. @flags carries the information on the type of
 * fault occurring.
 */
STATIC int
xfs_filemap_huge_fault(
	struct vm_fault		*vmf,
	enum page_entry_size	pe_size)
{
	struct inode		*inode = file_inode(vmf->vma->vm_file);
	struct xfs_inode	*ip = XFS_I(inode);
	int			ret;

	if (!IS_DAX(inode))
		return VM_FAULT_FALLBACK;

	trace_xfs_filemap_huge_fault(ip);

	if (vmf->flags & FAULT_FLAG_WRITE) {
		sb_start_pagefault(inode->i_sb);
		file_update_time(vmf->vma->vm_file);
	}

	xfs_ilock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
	ret = dax_iomap_fault(vmf, pe_size, &xfs_iomap_ops);
	xfs_iunlock(XFS_I(inode), XFS_MMAPLOCK_SHARED);

	if (vmf->flags & FAULT_FLAG_WRITE)
		sb_end_pagefault(inode->i_sb);

	return ret;
}

/*
 * pfn_mkwrite was originally intended to ensure we capture time stamp
 * updates on write faults. In reality, it needs to serialise against
 * truncate, similar to page_mkwrite. Hence we cycle the XFS_MMAPLOCK_SHARED
 * to ensure we serialise the fault barrier in place.
 */
static int
xfs_filemap_pfn_mkwrite(
	struct vm_fault		*vmf)
{
	struct inode		*inode = file_inode(vmf->vma->vm_file);
	struct xfs_inode	*ip = XFS_I(inode);
	int			ret = VM_FAULT_NOPAGE;
	loff_t			size;

	trace_xfs_filemap_pfn_mkwrite(ip);

	sb_start_pagefault(inode->i_sb);
	file_update_time(vmf->vma->vm_file);

	/* check if the faulting page hasn't raced with truncate */
	xfs_ilock(ip, XFS_MMAPLOCK_SHARED);
	size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (vmf->pgoff >= size)
		ret = VM_FAULT_SIGBUS;
	else if (IS_DAX(inode))
		ret = dax_pfn_mkwrite(vmf);
	xfs_iunlock(ip, XFS_MMAPLOCK_SHARED);
	sb_end_pagefault(inode->i_sb);
	return ret;
}

static const struct vm_operations_struct xfs_file_vm_ops = {
	.fault		= xfs_filemap_fault,
	.huge_fault	= xfs_filemap_huge_fault,
	.map_pages	= filemap_map_pages,
	.page_mkwrite	= xfs_filemap_page_mkwrite,
	.pfn_mkwrite	= xfs_filemap_pfn_mkwrite,
};

STATIC int
xfs_file_mmap(
	struct file	*filp,
	struct vm_area_struct *vma)
{
	file_accessed(filp);
	vma->vm_ops = &xfs_file_vm_ops;
	if (IS_DAX(file_inode(filp)))
		vma->vm_flags |= VM_MIXEDMAP | VM_HUGEPAGE;
	return 0;
}
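
/*
 * Note: DAX mappings get VM_MIXEDMAP because they map storage that may
 * have no struct page, and VM_HUGEPAGE to mark the VMA as eligible for
 * the PMD-sized faults handled by xfs_filemap_huge_fault() above.
 */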

const struct file_operations xfs_file_operations = {
	.llseek		= xfs_file_llseek,
	.read_iter	= xfs_file_read_iter,
	.write_iter	= xfs_file_write_iter,
	.splice_read	= generic_file_splice_read,
	.splice_write	= iter_file_splice_write,
	.unlocked_ioctl	= xfs_file_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= xfs_file_compat_ioctl,
#endif
	.mmap		= xfs_file_mmap,
	.open		= xfs_file_open,
	.release	= xfs_file_release,
	.fsync		= xfs_file_fsync,
	.get_unmapped_area = thp_get_unmapped_area,
	.fallocate	= xfs_file_fallocate,
	.clone_file_range = xfs_file_clone_range,
	.dedupe_file_range = xfs_file_dedupe_range,
};

const struct file_operations xfs_dir_file_operations = {
	.open		= xfs_dir_open,
	.read		= generic_read_dir,
	.iterate_shared	= xfs_file_readdir,
	.llseek		= generic_file_llseek,
	.unlocked_ioctl	= xfs_file_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= xfs_file_compat_ioctl,
#endif
	.fsync		= xfs_dir_fsync,
};