/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_trans.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_inode_item.h"
#include "xfs_bmap.h"
#include "xfs_error.h"
#include "xfs_vnodeops.h"
#include "xfs_da_btree.h"
#include "xfs_ioctl.h"
#include "xfs_trace.h"

#include <linux/dcache.h>
#include <linux/falloc.h>

static const struct vm_operations_struct xfs_file_vm_ops;

/*
 * Locking primitives for read and write IO paths to ensure we consistently use
 * and order the inode->i_mutex, ip->i_lock and ip->i_iolock.
 */
static inline void
xfs_rw_ilock(
	struct xfs_inode	*ip,
	int			type)
{
	if (type & XFS_IOLOCK_EXCL)
		mutex_lock(&VFS_I(ip)->i_mutex);
	xfs_ilock(ip, type);
}

static inline void
xfs_rw_iunlock(
	struct xfs_inode	*ip,
	int			type)
{
	xfs_iunlock(ip, type);
	if (type & XFS_IOLOCK_EXCL)
		mutex_unlock(&VFS_I(ip)->i_mutex);
}

static inline void
xfs_rw_ilock_demote(
	struct xfs_inode	*ip,
	int			type)
{
	xfs_ilock_demote(ip, type);
	if (type & XFS_IOLOCK_EXCL)
		mutex_unlock(&VFS_I(ip)->i_mutex);
}
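
/*
 * Illustrative only: the direct read path uses these helpers roughly as
 *
 *	xfs_rw_ilock(ip, XFS_IOLOCK_EXCL);	   i_mutex, then iolock
 *	... flush and invalidate the page cache ...
 *	xfs_rw_ilock_demote(ip, XFS_IOLOCK_EXCL);  drop i_mutex, keep iolock
 *	... issue the IO with the iolock held shared ...
 *	xfs_rw_iunlock(ip, XFS_IOLOCK_SHARED);
 *
 * i.e. the i_mutex is acquired before the iolock and released only after
 * the iolock operation completes, keeping the lock order consistent.
 */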

/*
 *	xfs_iozero
 *
 *	xfs_iozero clears the specified range of buffer supplied,
 *	and marks all the affected blocks as valid and modified.  If
 *	an affected block is not allocated, it will be allocated.  If
 *	an affected block is not completely overwritten, and is not
 *	valid before the operation, it will be read from disk before
 *	being partially zeroed.
 */
STATIC int
xfs_iozero(
	struct xfs_inode	*ip,	/* inode			*/
	loff_t			pos,	/* offset in file		*/
	size_t			count)	/* size of data to zero		*/
{
	struct page		*page;
	struct address_space	*mapping;
	int			status;

	mapping = VFS_I(ip)->i_mapping;
	do {
		unsigned offset, bytes;
		void *fsdata;

		offset = (pos & (PAGE_CACHE_SIZE - 1)); /* Within page */
		bytes = PAGE_CACHE_SIZE - offset;
		if (bytes > count)
			bytes = count;

		status = pagecache_write_begin(NULL, mapping, pos, bytes,
					AOP_FLAG_UNINTERRUPTIBLE,
					&page, &fsdata);
		if (status)
			break;

		zero_user(page, offset, bytes);

		status = pagecache_write_end(NULL, mapping, pos, bytes, bytes,
					page, fsdata);
		WARN_ON(status <= 0); /* can't return less than zero! */
		pos += bytes;
		count -= bytes;
		status = 0;
	} while (count);

	return (-status);
}
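
/*
 * Worked example (illustrative, 4k pages): xfs_iozero(ip, 4094, 100)
 * zeroes bytes [4094,4194) in two passes.  The first pass has offset
 * 4094 within the first page and so zeroes only 2 bytes; the second
 * starts at offset 0 of the next page and zeroes the remaining 98.
 * Each pass goes through pagecache_write_begin()/pagecache_write_end(),
 * so partially covered, not-uptodate blocks are read in first.
 */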

/*
 * Fsync operations on directories are much simpler than on regular files,
 * as there is no file data to flush, and thus also no need for explicit
 * cache flush operations, and there are no non-transaction metadata updates
 * on directories either.
 */
STATIC int
xfs_dir_fsync(
	struct file		*file,
	loff_t			start,
	loff_t			end,
	int			datasync)
{
	struct xfs_inode	*ip = XFS_I(file->f_mapping->host);
	struct xfs_mount	*mp = ip->i_mount;
	xfs_lsn_t		lsn = 0;

	trace_xfs_dir_fsync(ip);

	xfs_ilock(ip, XFS_ILOCK_SHARED);
	if (xfs_ipincount(ip))
		lsn = ip->i_itemp->ili_last_lsn;
	xfs_iunlock(ip, XFS_ILOCK_SHARED);

	if (!lsn)
		return 0;
	return _xfs_log_force_lsn(mp, lsn, XFS_LOG_SYNC, NULL);
}

STATIC int
xfs_file_fsync(
	struct file		*file,
	loff_t			start,
	loff_t			end,
	int			datasync)
{
	struct inode		*inode = file->f_mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	int			error = 0;
	int			log_flushed = 0;
	xfs_lsn_t		lsn = 0;

	trace_xfs_file_fsync(ip);

	error = filemap_write_and_wait_range(inode->i_mapping, start, end);
	if (error)
		return error;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -XFS_ERROR(EIO);

	xfs_iflags_clear(ip, XFS_ITRUNCATED);

	if (mp->m_flags & XFS_MOUNT_BARRIER) {
		/*
		 * If we have an RT and/or log subvolume we need to make sure
		 * to flush the write cache of the device used for file data
		 * first.  This is to ensure newly written file data makes
		 * it to disk before logging the new inode size in case of
		 * an extending write.
		 */
		if (XFS_IS_REALTIME_INODE(ip))
			xfs_blkdev_issue_flush(mp->m_rtdev_targp);
		else if (mp->m_logdev_targp != mp->m_ddev_targp)
			xfs_blkdev_issue_flush(mp->m_ddev_targp);
	}

	/*
	 * All metadata updates are logged, which means that we just have
	 * to flush the log up to the latest LSN that touched the inode.
	 */
	xfs_ilock(ip, XFS_ILOCK_SHARED);
	if (xfs_ipincount(ip)) {
		if (!datasync ||
		    (ip->i_itemp->ili_fields & ~XFS_ILOG_TIMESTAMP))
			lsn = ip->i_itemp->ili_last_lsn;
	}
	xfs_iunlock(ip, XFS_ILOCK_SHARED);

	if (lsn)
		error = _xfs_log_force_lsn(mp, lsn, XFS_LOG_SYNC, &log_flushed);

	/*
	 * If we only have a single device, and the log force above was
	 * a no-op we might have to flush the data device cache here.
	 * This can only happen for fdatasync/O_DSYNC if we were overwriting
	 * an already allocated file and thus do not have any metadata to
	 * commit.
	 */
	if ((mp->m_flags & XFS_MOUNT_BARRIER) &&
	    mp->m_logdev_targp == mp->m_ddev_targp &&
	    !XFS_IS_REALTIME_INODE(ip) &&
	    !log_flushed)
		xfs_blkdev_issue_flush(mp->m_ddev_targp);

	return -error;
}

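/*
 * A summary of the cache flush decisions above, assuming barriers
 * (XFS_MOUNT_BARRIER) are enabled: with a separate RT or log device the
 * device holding file data is flushed before the log force; with a
 * single combined device it is flushed afterwards, and only when the
 * log force did not already issue a flush (!log_flushed).
 */
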
STATIC ssize_t
xfs_file_aio_read(
	struct kiocb		*iocb,
	const struct iovec	*iovp,
	unsigned long		nr_segs,
	loff_t			pos)
{
	struct file		*file = iocb->ki_filp;
	struct inode		*inode = file->f_mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	size_t			size = 0;
	ssize_t			ret = 0;
	int			ioflags = 0;
	xfs_fsize_t		n;
	unsigned long		seg;

	XFS_STATS_INC(xs_read_calls);

	BUG_ON(iocb->ki_pos != pos);

	if (unlikely(file->f_flags & O_DIRECT))
		ioflags |= IO_ISDIRECT;
	if (file->f_mode & FMODE_NOCMTIME)
		ioflags |= IO_INVIS;

	/* START copy & waste from filemap.c */
	for (seg = 0; seg < nr_segs; seg++) {
		const struct iovec *iv = &iovp[seg];

		/*
		 * If any segment has a negative length, or the cumulative
		 * length ever wraps negative then return -EINVAL.
		 */
		size += iv->iov_len;
		if (unlikely((ssize_t)(size|iv->iov_len) < 0))
			return -XFS_ERROR(EINVAL);
	}
	/* END copy & waste from filemap.c */

	if (unlikely(ioflags & IO_ISDIRECT)) {
		xfs_buftarg_t	*target =
			XFS_IS_REALTIME_INODE(ip) ?
				mp->m_rtdev_targp : mp->m_ddev_targp;
		if ((iocb->ki_pos & target->bt_smask) ||
		    (size & target->bt_smask)) {
			if (iocb->ki_pos == i_size_read(inode))
				return 0;
			return -XFS_ERROR(EINVAL);
		}
	}

	n = XFS_MAXIOFFSET(mp) - iocb->ki_pos;
	if (n <= 0 || size == 0)
		return 0;

	if (n < size)
		size = n;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	/*
	 * Locking is a bit tricky here. If we take an exclusive lock
	 * for direct IO, we effectively serialise all new concurrent
	 * read IO to this file and block it behind IO that is currently in
	 * progress because IO in progress holds the IO lock shared. We only
	 * need to hold the lock exclusive to blow away the page cache, so
	 * only take lock exclusively if the page cache needs invalidation.
	 * This allows the normal direct IO case of no page cache pages to
	 * proceed concurrently without serialisation.
	 */
	xfs_rw_ilock(ip, XFS_IOLOCK_SHARED);
	if ((ioflags & IO_ISDIRECT) && inode->i_mapping->nrpages) {
		xfs_rw_iunlock(ip, XFS_IOLOCK_SHARED);
		xfs_rw_ilock(ip, XFS_IOLOCK_EXCL);

		if (inode->i_mapping->nrpages) {
			ret = -xfs_flushinval_pages(ip,
					(iocb->ki_pos & PAGE_CACHE_MASK),
					-1, FI_REMAPF_LOCKED);
			if (ret) {
				xfs_rw_iunlock(ip, XFS_IOLOCK_EXCL);
				return ret;
			}
		}
		xfs_rw_ilock_demote(ip, XFS_IOLOCK_EXCL);
	}

	trace_xfs_file_read(ip, size, iocb->ki_pos, ioflags);

	ret = generic_file_aio_read(iocb, iovp, nr_segs, iocb->ki_pos);
	if (ret > 0)
		XFS_STATS_ADD(xs_read_bytes, ret);

	xfs_rw_iunlock(ip, XFS_IOLOCK_SHARED);
	return ret;
}

STATIC ssize_t
xfs_file_splice_read(
	struct file		*infilp,
	loff_t			*ppos,
	struct pipe_inode_info	*pipe,
	size_t			count,
	unsigned int		flags)
{
	struct xfs_inode	*ip = XFS_I(infilp->f_mapping->host);
	int			ioflags = 0;
	ssize_t			ret;

	XFS_STATS_INC(xs_read_calls);

	if (infilp->f_mode & FMODE_NOCMTIME)
		ioflags |= IO_INVIS;

	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
		return -EIO;

	xfs_rw_ilock(ip, XFS_IOLOCK_SHARED);

	trace_xfs_file_splice_read(ip, count, *ppos, ioflags);

	ret = generic_file_splice_read(infilp, ppos, pipe, count, flags);
	if (ret > 0)
		XFS_STATS_ADD(xs_read_bytes, ret);

	xfs_rw_iunlock(ip, XFS_IOLOCK_SHARED);
	return ret;
}

/*
 * xfs_file_splice_write() does not use xfs_rw_ilock() because
 * generic_file_splice_write() takes the i_mutex itself. This, in theory,
 * could cause lock inversions between the aio_write path and the splice path
 * if someone is doing concurrent splice(2) based writes and write(2) based
 * writes to the same inode. The only real way to fix this is to re-implement
 * the generic code here with correct locking orders.
 */
STATIC ssize_t
xfs_file_splice_write(
	struct pipe_inode_info	*pipe,
	struct file		*outfilp,
	loff_t			*ppos,
	size_t			count,
	unsigned int		flags)
{
	struct inode		*inode = outfilp->f_mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	int			ioflags = 0;
	ssize_t			ret;

	XFS_STATS_INC(xs_write_calls);

	if (outfilp->f_mode & FMODE_NOCMTIME)
		ioflags |= IO_INVIS;

	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
		return -EIO;

	xfs_ilock(ip, XFS_IOLOCK_EXCL);

	trace_xfs_file_splice_write(ip, count, *ppos, ioflags);

	ret = generic_file_splice_write(pipe, outfilp, ppos, count, flags);
	if (ret > 0)
		XFS_STATS_ADD(xs_write_bytes, ret);

	xfs_iunlock(ip, XFS_IOLOCK_EXCL);
	return ret;
}

/*
 * This routine is called to handle zeroing any space in the last
 * block of the file that is beyond the EOF.  We do this since the
 * size is being increased without writing anything to that block
 * and we don't want anyone to read the garbage on the disk.
 */
STATIC int				/* error (positive) */
xfs_zero_last_block(
	xfs_inode_t	*ip,
	xfs_fsize_t	offset,
	xfs_fsize_t	isize)
{
	xfs_fileoff_t	last_fsb;
	xfs_mount_t	*mp = ip->i_mount;
	int		nimaps;
	int		zero_offset;
	int		zero_len;
	int		error = 0;
	xfs_bmbt_irec_t	imap;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));

	zero_offset = XFS_B_FSB_OFFSET(mp, isize);
	if (zero_offset == 0) {
		/*
		 * There are no extra bytes in the last block on disk to
		 * zero, so return.
		 */
		return 0;
	}

	last_fsb = XFS_B_TO_FSBT(mp, isize);
	nimaps = 1;
	error = xfs_bmapi_read(ip, last_fsb, 1, &imap, &nimaps, 0);
	if (error)
		return error;
	ASSERT(nimaps > 0);
	/*
	 * If the block underlying isize is just a hole, then there
	 * is nothing to zero.
	 */
	if (imap.br_startblock == HOLESTARTBLOCK) {
		return 0;
	}
	/*
	 * Zero the part of the last block beyond the EOF, and write it
	 * out sync.  We need to drop the ilock while we do this so we
	 * don't deadlock when the buffer cache calls back to us.
	 */
	xfs_iunlock(ip, XFS_ILOCK_EXCL);

	zero_len = mp->m_sb.sb_blocksize - zero_offset;
	if (isize + zero_len > offset)
		zero_len = offset - isize;
	error = xfs_iozero(ip, isize, zero_len);

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	ASSERT(error >= 0);
	return error;
}

/*
 * Zero any on disk space between the current EOF and the new,
 * larger EOF.  This handles the normal case of zeroing the remainder
 * of the last block in the file and the unusual case of zeroing blocks
 * out beyond the size of the file.  This second case only happens
 * with fixed size extents and when the system crashes before the inode
 * size was updated but after blocks were allocated.  If fill is set,
 * then any holes in the range are filled and zeroed.  If not, the holes
 * are left alone as holes.
 */

int					/* error (positive) */
xfs_zero_eof(
	xfs_inode_t	*ip,
	xfs_off_t	offset,		/* starting I/O offset */
	xfs_fsize_t	isize)		/* current inode size */
{
	xfs_mount_t	*mp = ip->i_mount;
	xfs_fileoff_t	start_zero_fsb;
	xfs_fileoff_t	end_zero_fsb;
	xfs_fileoff_t	zero_count_fsb;
	xfs_fileoff_t	last_fsb;
	xfs_fileoff_t	zero_off;
	xfs_fsize_t	zero_len;
	int		nimaps;
	int		error = 0;
	xfs_bmbt_irec_t	imap;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_IOLOCK_EXCL));
	ASSERT(offset > isize);

	/*
	 * First handle zeroing the block on which isize resides.
	 * We only zero a part of that block so it is handled specially.
	 */
	error = xfs_zero_last_block(ip, offset, isize);
	if (error) {
		ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_IOLOCK_EXCL));
		return error;
	}

	/*
	 * Calculate the range between the new size and the old
	 * where blocks needing to be zeroed may exist.  To get the
	 * block where the last byte in the file currently resides,
	 * we need to subtract one from the size and truncate back
	 * to a block boundary.  We subtract 1 in case the size is
	 * exactly on a block boundary.
	 */
	last_fsb = isize ? XFS_B_TO_FSBT(mp, isize - 1) : (xfs_fileoff_t)-1;
	start_zero_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)isize);
	end_zero_fsb = XFS_B_TO_FSBT(mp, offset - 1);
	ASSERT((xfs_sfiloff_t)last_fsb < (xfs_sfiloff_t)start_zero_fsb);
	if (last_fsb == end_zero_fsb) {
		/*
		 * The size was only incremented on its last block.
		 * We took care of that above, so just return.
		 */
		return 0;
	}

	ASSERT(start_zero_fsb <= end_zero_fsb);
	while (start_zero_fsb <= end_zero_fsb) {
		nimaps = 1;
		zero_count_fsb = end_zero_fsb - start_zero_fsb + 1;
		error = xfs_bmapi_read(ip, start_zero_fsb, zero_count_fsb,
					  &imap, &nimaps, 0);
		if (error) {
			ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_IOLOCK_EXCL));
			return error;
		}
		ASSERT(nimaps > 0);

		if (imap.br_state == XFS_EXT_UNWRITTEN ||
		    imap.br_startblock == HOLESTARTBLOCK) {
			/*
			 * This loop handles initializing pages that were
			 * partially initialized by the code below this
			 * loop. It basically zeroes the part of the page
			 * that sits on a hole and sets the page as P_HOLE
			 * and calls remapf if it is a mapped file.
			 */
			start_zero_fsb = imap.br_startoff + imap.br_blockcount;
			ASSERT(start_zero_fsb <= (end_zero_fsb + 1));
			continue;
		}

		/*
		 * There are blocks we need to zero.
		 * Drop the inode lock while we're doing the I/O.
		 * We'll still have the iolock to protect us.
		 */
		xfs_iunlock(ip, XFS_ILOCK_EXCL);

		zero_off = XFS_FSB_TO_B(mp, start_zero_fsb);
		zero_len = XFS_FSB_TO_B(mp, imap.br_blockcount);

		if ((zero_off + zero_len) > offset)
			zero_len = offset - zero_off;

		error = xfs_iozero(ip, zero_off, zero_len);
		if (error) {
			goto out_lock;
		}

		start_zero_fsb = imap.br_startoff + imap.br_blockcount;
		ASSERT(start_zero_fsb <= (end_zero_fsb + 1));

		xfs_ilock(ip, XFS_ILOCK_EXCL);
	}

	return 0;

out_lock:
	xfs_ilock(ip, XFS_ILOCK_EXCL);
	ASSERT(error >= 0);
	return error;
}
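
/*
 * Worked example (illustrative, 4k blocks): extending a 10000 byte file
 * by writing at offset 20000.  xfs_zero_last_block() zeroes the tail of
 * the current last block, bytes [10000,12288), and the loop above then
 * covers blocks 3 and 4, zeroing bytes [12288,20000) where the blocks
 * are allocated and written, while holes and unwritten extents are
 * skipped.
 */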

/*
 * Common pre-write limit and setup checks.
 *
 * Called with the iolock held either shared or exclusive according to
 * @iolock, and returns with it held.  Might upgrade the iolock to exclusive
 * if called for a direct write beyond i_size.
 */
STATIC ssize_t
xfs_file_aio_write_checks(
	struct file		*file,
	loff_t			*pos,
	size_t			*count,
	int			*iolock)
{
	struct inode		*inode = file->f_mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	int			error = 0;

restart:
	error = generic_write_checks(file, pos, count, S_ISBLK(inode->i_mode));
	if (error)
		return error;

	/*
	 * If the offset is beyond the size of the file, we need to zero any
	 * blocks that fall between the existing EOF and the start of this
	 * write.  If zeroing is needed and we are currently holding the
	 * iolock shared, we need to update it to exclusive which implies
	 * having to redo all checks from the start.
	 */
	if (*pos > i_size_read(inode)) {
		if (*iolock == XFS_IOLOCK_SHARED) {
			xfs_rw_iunlock(ip, *iolock);
			*iolock = XFS_IOLOCK_EXCL;
			xfs_rw_ilock(ip, *iolock);
			goto restart;
		}
		xfs_rw_ilock(ip, XFS_ILOCK_EXCL);
		error = -xfs_zero_eof(ip, *pos, i_size_read(inode));
		xfs_rw_iunlock(ip, XFS_ILOCK_EXCL);
		if (error)
			return error;
	}

	/*
	 * Updating the timestamps will grab the ilock again from
	 * xfs_fs_dirty_inode, so we have to call it after dropping the
	 * lock above.  Eventually we should look into a way to avoid
	 * the pointless lock roundtrip.
	 */
	if (likely(!(file->f_mode & FMODE_NOCMTIME)))
		file_update_time(file);

	/*
	 * If we're writing the file then make sure to clear the setuid and
	 * setgid bits if the process is not being run by root.  This keeps
	 * people from modifying setuid and setgid binaries.
	 */
	return file_remove_suid(file);
}

/*
 * xfs_file_dio_aio_write - handle direct IO writes
 *
 * Lock the inode appropriately to prepare for and issue a direct IO write.
 * By separating it from the buffered write path we remove all the tricky to
 * follow locking changes and looping.
 *
 * If there are cached pages or we're extending the file, we need IOLOCK_EXCL
 * until we're sure the bytes at the new EOF have been zeroed and/or the cached
 * pages are flushed out.
 *
 * In most cases the direct IO writes will be done holding IOLOCK_SHARED
 * allowing them to be done in parallel with reads and other direct IO writes.
 * However, if the IO is not aligned to filesystem blocks, the direct IO layer
 * needs to do sub-block zeroing and that requires serialisation against other
 * direct IOs to the same block. In this case we need to serialise the
 * submission of the unaligned IOs so that we don't get racing block zeroing in
 * the dio layer.  To avoid the problem with aio, we also need to wait for
 * outstanding IOs to complete so that unwritten extent conversion is completed
 * before we try to map the overlapping block. This is currently implemented by
 * hitting it with a big hammer (i.e. inode_dio_wait()).
 *
 * Returns with locks held indicated by @iolock and errors indicated by
 * negative return values.
 */
STATIC ssize_t
xfs_file_dio_aio_write(
	struct kiocb		*iocb,
	const struct iovec	*iovp,
	unsigned long		nr_segs,
	loff_t			pos,
	size_t			ocount)
{
	struct file		*file = iocb->ki_filp;
	struct address_space	*mapping = file->f_mapping;
	struct inode		*inode = mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	ssize_t			ret = 0;
	size_t			count = ocount;
	int			unaligned_io = 0;
	int			iolock;
	struct xfs_buftarg	*target = XFS_IS_REALTIME_INODE(ip) ?
					mp->m_rtdev_targp : mp->m_ddev_targp;

	if ((pos & target->bt_smask) || (count & target->bt_smask))
		return -XFS_ERROR(EINVAL);

	if ((pos & mp->m_blockmask) || ((pos + count) & mp->m_blockmask))
		unaligned_io = 1;

	/*
	 * We don't need to take an exclusive lock unless the page cache needs
	 * to be invalidated or unaligned IO is being executed. We don't need to
	 * consider the EOF extension case here because
	 * xfs_file_aio_write_checks() will relock the inode as necessary for
	 * EOF zeroing cases and fill out the new inode size as appropriate.
	 */
	if (unaligned_io || mapping->nrpages)
		iolock = XFS_IOLOCK_EXCL;
	else
		iolock = XFS_IOLOCK_SHARED;
	xfs_rw_ilock(ip, iolock);

	/*
	 * Recheck if there are cached pages that need invalidate after we got
	 * the iolock to protect against other threads adding new pages while
	 * we were waiting for the iolock.
	 */
	if (mapping->nrpages && iolock == XFS_IOLOCK_SHARED) {
		xfs_rw_iunlock(ip, iolock);
		iolock = XFS_IOLOCK_EXCL;
		xfs_rw_ilock(ip, iolock);
	}

	ret = xfs_file_aio_write_checks(file, &pos, &count, &iolock);
	if (ret)
		goto out;

	if (mapping->nrpages) {
		ret = -xfs_flushinval_pages(ip, (pos & PAGE_CACHE_MASK), -1,
							FI_REMAPF_LOCKED);
		if (ret)
			goto out;
	}

	/*
	 * If we are doing unaligned IO, wait for all other IO to drain,
	 * otherwise demote the lock if we had to flush cached pages
	 */
	if (unaligned_io)
		inode_dio_wait(inode);
	else if (iolock == XFS_IOLOCK_EXCL) {
		xfs_rw_ilock_demote(ip, XFS_IOLOCK_EXCL);
		iolock = XFS_IOLOCK_SHARED;
	}

	trace_xfs_file_direct_write(ip, count, iocb->ki_pos, 0);
	ret = generic_file_direct_write(iocb, iovp,
			&nr_segs, pos, &iocb->ki_pos, count, ocount);

out:
	xfs_rw_iunlock(ip, iolock);

	/* No fallback to buffered IO on errors for XFS. */
	ASSERT(ret < 0 || ret == count);
	return ret;
}

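/*
 * Alignment example (illustrative, 512 byte sectors, 4k blocks): a
 * direct write of 512 bytes at offset 512 passes the sector alignment
 * check but is sub-block, so unaligned_io forces IOLOCK_EXCL and an
 * inode_dio_wait() before the IO is issued; a 4k write at a 4k offset
 * proceeds under IOLOCK_SHARED unless the page cache must be flushed.
 */
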
STATIC ssize_t
xfs_file_buffered_aio_write(
	struct kiocb		*iocb,
	const struct iovec	*iovp,
	unsigned long		nr_segs,
	loff_t			pos,
	size_t			ocount)
{
	struct file		*file = iocb->ki_filp;
	struct address_space	*mapping = file->f_mapping;
	struct inode		*inode = mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	ssize_t			ret;
	int			enospc = 0;
	int			iolock = XFS_IOLOCK_EXCL;
	size_t			count = ocount;

	xfs_rw_ilock(ip, iolock);

	ret = xfs_file_aio_write_checks(file, &pos, &count, &iolock);
	if (ret)
		goto out;

	/* We can write back this queue in page reclaim */
	current->backing_dev_info = mapping->backing_dev_info;

write_retry:
	trace_xfs_file_buffered_write(ip, count, iocb->ki_pos, 0);
	ret = generic_file_buffered_write(iocb, iovp, nr_segs,
			pos, &iocb->ki_pos, count, ret);
	/*
	 * If we just got an ENOSPC, flush the inode now that we aren't
	 * holding any page locks and retry *once*.
	 */
	if (ret == -ENOSPC && !enospc) {
		enospc = 1;
		ret = -xfs_flush_pages(ip, 0, -1, 0, FI_NONE);
		if (!ret)
			goto write_retry;
	}

	current->backing_dev_info = NULL;
out:
	xfs_rw_iunlock(ip, iolock);
	return ret;
}

STATIC ssize_t
xfs_file_aio_write(
	struct kiocb		*iocb,
	const struct iovec	*iovp,
	unsigned long		nr_segs,
	loff_t			pos)
{
	struct file		*file = iocb->ki_filp;
	struct address_space	*mapping = file->f_mapping;
	struct inode		*inode = mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	ssize_t			ret;
	size_t			ocount = 0;

	XFS_STATS_INC(xs_write_calls);

	BUG_ON(iocb->ki_pos != pos);

	ret = generic_segment_checks(iovp, &nr_segs, &ocount, VERIFY_READ);
	if (ret)
		return ret;

	if (ocount == 0)
		return 0;

	xfs_wait_for_freeze(ip->i_mount, SB_FREEZE_WRITE);

	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
		return -EIO;

	if (unlikely(file->f_flags & O_DIRECT))
		ret = xfs_file_dio_aio_write(iocb, iovp, nr_segs, pos, ocount);
	else
		ret = xfs_file_buffered_aio_write(iocb, iovp, nr_segs, pos,
						  ocount);

	if (ret > 0) {
		ssize_t err;

		XFS_STATS_ADD(xs_write_bytes, ret);

		/* Handle various SYNC-type writes */
		err = generic_write_sync(file, pos, ret);
		if (err < 0)
			ret = err;
	}

	return ret;
}

STATIC long
xfs_file_fallocate(
	struct file	*file,
	int		mode,
	loff_t		offset,
	loff_t		len)
{
	struct inode	*inode = file->f_path.dentry->d_inode;
	long		error;
	loff_t		new_size = 0;
	xfs_flock64_t	bf;
	xfs_inode_t	*ip = XFS_I(inode);
	int		cmd = XFS_IOC_RESVSP;
	int		attr_flags = XFS_ATTR_NOLOCK;

	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
		return -EOPNOTSUPP;

	bf.l_whence = 0;
	bf.l_start = offset;
	bf.l_len = len;

	xfs_ilock(ip, XFS_IOLOCK_EXCL);

	if (mode & FALLOC_FL_PUNCH_HOLE)
		cmd = XFS_IOC_UNRESVSP;

	/* check the new inode size is valid before allocating */
	if (!(mode & FALLOC_FL_KEEP_SIZE) &&
	    offset + len > i_size_read(inode)) {
		new_size = offset + len;
		error = inode_newsize_ok(inode, new_size);
		if (error)
			goto out_unlock;
	}

	if (file->f_flags & O_DSYNC)
		attr_flags |= XFS_ATTR_SYNC;

	error = -xfs_change_file_space(ip, cmd, &bf, 0, attr_flags);
	if (error)
		goto out_unlock;

	/* Change file size if needed */
	if (new_size) {
		struct iattr iattr;

		iattr.ia_valid = ATTR_SIZE;
		iattr.ia_size = new_size;
		error = -xfs_setattr_size(ip, &iattr, XFS_ATTR_NOLOCK);
	}

out_unlock:
	xfs_iunlock(ip, XFS_IOLOCK_EXCL);
	return error;
}

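/*
 * Illustrative mapping from the userspace API: a call such as
 * fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE, 0, 65536)
 * arrives here with cmd switched to XFS_IOC_UNRESVSP and new_size left
 * at zero, so the range is deallocated without changing the file size.
 */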

STATIC int
xfs_file_open(
	struct inode	*inode,
	struct file	*file)
{
	if (!(file->f_flags & O_LARGEFILE) && i_size_read(inode) > MAX_NON_LFS)
		return -EFBIG;
	if (XFS_FORCED_SHUTDOWN(XFS_M(inode->i_sb)))
		return -EIO;
	return 0;
}

STATIC int
xfs_dir_open(
	struct inode	*inode,
	struct file	*file)
{
	struct xfs_inode *ip = XFS_I(inode);
	int		mode;
	int		error;

	error = xfs_file_open(inode, file);
	if (error)
		return error;

	/*
	 * If there are any blocks, read-ahead block 0 as we're almost
	 * certain to have the next operation be a read there.
	 */
	mode = xfs_ilock_map_shared(ip);
	if (ip->i_d.di_nextents > 0)
		xfs_da_reada_buf(NULL, ip, 0, XFS_DATA_FORK);
	xfs_iunlock(ip, mode);
	return 0;
}

STATIC int
xfs_file_release(
	struct inode	*inode,
	struct file	*filp)
{
	return -xfs_release(XFS_I(inode));
}

STATIC int
xfs_file_readdir(
	struct file	*filp,
	void		*dirent,
	filldir_t	filldir)
{
	struct inode	*inode = filp->f_path.dentry->d_inode;
	xfs_inode_t	*ip = XFS_I(inode);
	int		error;
	size_t		bufsize;

	/*
	 * The Linux API doesn't pass the total size of the buffer
	 * we read into down to the filesystem.  With the filldir concept
	 * it's not needed for correct information, but the XFS dir2 leaf
	 * code wants an estimate of the buffer size to calculate its
	 * readahead window and size the buffers used for mapping to
	 * physical blocks.
	 *
	 * Try to give it an estimate that's good enough, maybe at some
	 * point we can change the ->readdir prototype to include the
	 * buffer size.  For now we use the current glibc buffer size.
	 */
	bufsize = (size_t)min_t(loff_t, 32768, ip->i_d.di_size);

	error = xfs_readdir(ip, dirent, bufsize,
				(xfs_off_t *)&filp->f_pos, filldir);
	if (error)
		return -error;
	return 0;
}

STATIC int
xfs_file_mmap(
	struct file	*filp,
	struct vm_area_struct *vma)
{
	vma->vm_ops = &xfs_file_vm_ops;
	vma->vm_flags |= VM_CAN_NONLINEAR;

	file_accessed(filp);
	return 0;
}

/*
 * mmap()d file has taken write protection fault and is being made
 * writable. We can set the page state up correctly for a writable
 * page, which means we can do correct delalloc accounting (ENOSPC
 * checking!) and unwritten extent mapping.
 */
STATIC int
xfs_vm_page_mkwrite(
	struct vm_area_struct	*vma,
	struct vm_fault		*vmf)
{
	return block_page_mkwrite(vma, vmf, xfs_get_blocks);
}

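/*
 * block_page_mkwrite() calls back into xfs_get_blocks() to map (and, for
 * delayed allocation, reserve) the blocks backing the faulting page, so
 * ENOSPC is reported to the application at fault time rather than being
 * discovered later during writeback.
 */
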
const struct file_operations xfs_file_operations = {
	.llseek		= generic_file_llseek,
	.read		= do_sync_read,
	.write		= do_sync_write,
	.aio_read	= xfs_file_aio_read,
	.aio_write	= xfs_file_aio_write,
	.splice_read	= xfs_file_splice_read,
	.splice_write	= xfs_file_splice_write,
	.unlocked_ioctl	= xfs_file_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= xfs_file_compat_ioctl,
#endif
	.mmap		= xfs_file_mmap,
	.open		= xfs_file_open,
	.release	= xfs_file_release,
	.fsync		= xfs_file_fsync,
	.fallocate	= xfs_file_fallocate,
};

const struct file_operations xfs_dir_file_operations = {
	.open		= xfs_dir_open,
	.read		= generic_read_dir,
	.readdir	= xfs_file_readdir,
	.llseek		= generic_file_llseek,
	.unlocked_ioctl	= xfs_file_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= xfs_file_compat_ioctl,
#endif
	.fsync		= xfs_dir_fsync,
};

static const struct vm_operations_struct xfs_file_vm_ops = {
	.fault		= filemap_fault,
	.page_mkwrite	= xfs_vm_page_mkwrite,
};