// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/ext4/file.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 *  from
 *
 *  linux/fs/minix/file.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  ext4 fs regular file handling primitives
 *
 *  64-bit file support on 64-bit platforms by Jakub Jelinek
 *	(jj@sunsite.ms.mff.cuni.cz)
 */

#include <linux/time.h>
#include <linux/fs.h>
#include <linux/iomap.h>
#include <linux/mount.h>
#include <linux/path.h>
#include <linux/dax.h>
#include <linux/quotaops.h>
#include <linux/pagevec.h>
#include <linux/uio.h>
#include <linux/mman.h>
#include <linux/backing-dev.h>
#include "ext4.h"
#include "ext4_jbd2.h"
#include "xattr.h"
#include "acl.h"
#include "truncate.h"

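/*
 * Direct I/O is not supported for encrypted, verity, data-journaled or
 * inline-data inodes; callers of this helper fall back to buffered I/O
 * in those cases.
 */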
static bool ext4_dio_supported(struct inode *inode)
{
	if (IS_ENABLED(CONFIG_FS_ENCRYPTION) && IS_ENCRYPTED(inode))
		return false;
	if (fsverity_active(inode))
		return false;
	if (ext4_should_journal_data(inode))
		return false;
	if (ext4_has_inline_data(inode))
		return false;
	return true;
}

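/*
 * Direct read path: take i_rwsem shared. With IOCB_NOWAIT we must not
 * block, so we only trylock and return -EAGAIN on contention. If the
 * inode cannot do direct I/O at all, fall back to buffered I/O.
 */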
static ssize_t ext4_dio_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	ssize_t ret;
	struct inode *inode = file_inode(iocb->ki_filp);

	if (iocb->ki_flags & IOCB_NOWAIT) {
		if (!inode_trylock_shared(inode))
			return -EAGAIN;
	} else {
		inode_lock_shared(inode);
	}

	if (!ext4_dio_supported(inode)) {
		inode_unlock_shared(inode);
		/*
		 * Fall back to buffered I/O if the operation being performed on
		 * the inode is not supported by direct I/O. The IOCB_DIRECT
		 * flag needs to be cleared here in order to ensure that the
		 * direct I/O path within generic_file_read_iter() is not
		 * taken.
		 */
		iocb->ki_flags &= ~IOCB_DIRECT;
		return generic_file_read_iter(iocb, to);
	}

	ret = iomap_dio_rw(iocb, to, &ext4_iomap_ops, NULL, 0);
	inode_unlock_shared(inode);

	file_accessed(iocb->ki_filp);
	return ret;
}

#ifdef CONFIG_FS_DAX
static ssize_t ext4_dax_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	ssize_t ret;

	if (iocb->ki_flags & IOCB_NOWAIT) {
		if (!inode_trylock_shared(inode))
			return -EAGAIN;
	} else {
		inode_lock_shared(inode);
	}
	/*
	 * Recheck under inode lock - at this point we are sure it cannot
	 * change anymore
	 */
	if (!IS_DAX(inode)) {
		inode_unlock_shared(inode);
	/* Fall back to buffered IO in case we cannot support DAX */
		return generic_file_read_iter(iocb, to);
	}
	ret = dax_iomap_rw(iocb, to, &ext4_iomap_ops);
	inode_unlock_shared(inode);

	file_accessed(iocb->ki_filp);
	return ret;
}
#endif

static ssize_t ext4_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct inode *inode = file_inode(iocb->ki_filp);

	if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
		return -EIO;

	if (!iov_iter_count(to))
		return 0; /* skip atime */

#ifdef CONFIG_FS_DAX
	if (IS_DAX(inode))
		return ext4_dax_read_iter(iocb, to);
#endif
	if (iocb->ki_flags & IOCB_DIRECT)
		return ext4_dio_read_iter(iocb, to);

	return generic_file_read_iter(iocb, to);
}

/*
 * Called when an inode is released. Note that this is different
 * from ext4_file_open: open gets called at every open, but release
 * gets called only when /all/ the files are closed.
 */
static int ext4_release_file(struct inode *inode, struct file *filp)
{
	if (ext4_test_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE)) {
		ext4_alloc_da_blocks(inode);
		ext4_clear_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE);
	}
	/* if we are the last writer on the inode, drop the block reservation */
	if ((filp->f_mode & FMODE_WRITE) &&
			(atomic_read(&inode->i_writecount) == 1) &&
			!EXT4_I(inode)->i_reserved_data_blocks) {
		down_write(&EXT4_I(inode)->i_data_sem);
		ext4_discard_preallocations(inode, 0);
		up_write(&EXT4_I(inode)->i_data_sem);
	}
	if (is_dx(inode) && filp->private_data)
		ext4_htree_free_dir_info(filp->private_data);

	return 0;
}

/*
 * This tests whether the IO in question is block-aligned or not.
 * Ext4 utilizes unwritten extents when hole-filling during direct IO, and they
 * are converted to written only after the IO is complete.  Until they are
 * mapped, these blocks appear as holes, so dio_zero_block() will assume that
 * it needs to zero out portions of the start and/or end block.  If 2 AIO
 * threads are at work on the same unwritten block, they must be synchronized
 * or one thread will zero the other's data, causing corruption.
 */
static bool
ext4_unaligned_io(struct inode *inode, struct iov_iter *from, loff_t pos)
{
	struct super_block *sb = inode->i_sb;
	unsigned long blockmask = sb->s_blocksize - 1;

	if ((pos | iov_iter_alignment(from)) & blockmask)
		return true;

	return false;
}

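/* Does this IO extend beyond i_size or beyond the on-disk i_disksize? */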
static bool
ext4_extending_io(struct inode *inode, loff_t offset, size_t len)
{
	if (offset + len > i_size_read(inode) ||
	    offset + len > EXT4_I(inode)->i_disksize)
		return true;
	return false;
}

/* Is IO overwriting allocated and initialized blocks? */
static bool ext4_overwrite_io(struct inode *inode, loff_t pos, loff_t len)
{
	struct ext4_map_blocks map;
	unsigned int blkbits = inode->i_blkbits;
	int err, blklen;

	if (pos + len > i_size_read(inode))
		return false;

	map.m_lblk = pos >> blkbits;
	map.m_len = EXT4_MAX_BLOCKS(len, pos, blkbits);
	blklen = map.m_len;

	err = ext4_map_blocks(NULL, inode, &map, 0);
	/*
	 * 'err == blklen' means that all of the blocks have been preallocated,
	 * regardless of whether they have been initialized or not. To exclude
	 * unwritten extents, we need to check m_flags.
	 */
	return err == blklen && (map.m_flags & EXT4_MAP_MAPPED);
}

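/*
 * Checks common to the buffered, DAX and direct write paths: reject writes
 * to immutable inodes, run the generic VFS checks, and cap the write at
 * s_bitmap_maxbytes for block-mapped (non-extent) files.
 */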
static ssize_t ext4_generic_write_checks(struct kiocb *iocb,
					 struct iov_iter *from)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	ssize_t ret;

	if (unlikely(IS_IMMUTABLE(inode)))
		return -EPERM;

	ret = generic_write_checks(iocb, from);
	if (ret <= 0)
		return ret;

	/*
	 * If we have encountered a bitmap-format file, the size limit
	 * is smaller than s_maxbytes, which is for extent-mapped files.
	 */
	if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
		struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);

		if (iocb->ki_pos >= sbi->s_bitmap_maxbytes)
			return -EFBIG;
		iov_iter_truncate(from, sbi->s_bitmap_maxbytes - iocb->ki_pos);
	}

	return iov_iter_count(from);
}

static ssize_t ext4_write_checks(struct kiocb *iocb, struct iov_iter *from)
{
	ssize_t ret, count;

	count = ext4_generic_write_checks(iocb, from);
	if (count <= 0)
		return count;

	ret = file_modified(iocb->ki_filp);
	if (ret)
		return ret;
	return count;
}

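/*
 * Buffered writes are serialized by the exclusive i_rwsem. IOCB_NOWAIT
 * is rejected here because the buffered path may block.
 */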
static ssize_t ext4_buffered_write_iter(struct kiocb *iocb,
					struct iov_iter *from)
{
	ssize_t ret;
	struct inode *inode = file_inode(iocb->ki_filp);

	if (iocb->ki_flags & IOCB_NOWAIT)
		return -EOPNOTSUPP;

	ext4_fc_start_update(inode);
	inode_lock(inode);
	ret = ext4_write_checks(iocb, from);
	if (ret <= 0)
		goto out;

	current->backing_dev_info = inode_to_bdi(inode);
	ret = generic_perform_write(iocb->ki_filp, from, iocb->ki_pos);
	current->backing_dev_info = NULL;

out:
	inode_unlock(inode);
	ext4_fc_stop_update(inode);
	if (likely(ret > 0)) {
		iocb->ki_pos += ret;
		ret = generic_write_sync(iocb, ret);
	}

	return ret;
}

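/*
 * Finish an extending (or failed) direct/DAX write: update the on-disk
 * inode size, drop the inode from the orphan list once the extension is
 * safely recorded, and truncate any blocks allocated beyond what was
 * actually written.
 */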
static ssize_t ext4_handle_inode_extension(struct inode *inode, loff_t offset,
					   ssize_t written, size_t count)
{
	handle_t *handle;
	bool truncate = false;
	u8 blkbits = inode->i_blkbits;
	ext4_lblk_t written_blk, end_blk;
	int ret;

	/*
	 * Note that EXT4_I(inode)->i_disksize can get extended up to
	 * inode->i_size while the I/O was running due to writeback of delalloc
	 * blocks. But, the code in ext4_iomap_alloc() is careful to use
	 * zeroed/unwritten extents if this is possible; thus we won't leave
	 * uninitialized blocks in a file even if we didn't succeed in writing
	 * as much as we intended.
	 */
	WARN_ON_ONCE(i_size_read(inode) < EXT4_I(inode)->i_disksize);
	if (offset + count <= EXT4_I(inode)->i_disksize) {
		/*
		 * We need to ensure that the inode is removed from the orphan
		 * list if it has been added prematurely, due to writeback of
		 * delalloc blocks.
		 */
		if (!list_empty(&EXT4_I(inode)->i_orphan) && inode->i_nlink) {
			handle = ext4_journal_start(inode, EXT4_HT_INODE, 2);

			if (IS_ERR(handle)) {
				ext4_orphan_del(NULL, inode);
				return PTR_ERR(handle);
			}

			ext4_orphan_del(handle, inode);
			ext4_journal_stop(handle);
		}

		return written;
	}

	if (written < 0)
		goto truncate;

	handle = ext4_journal_start(inode, EXT4_HT_INODE, 2);
	if (IS_ERR(handle)) {
		written = PTR_ERR(handle);
		goto truncate;
	}

	if (ext4_update_inode_size(inode, offset + written)) {
		ret = ext4_mark_inode_dirty(handle, inode);
		if (unlikely(ret)) {
			written = ret;
			ext4_journal_stop(handle);
			goto truncate;
		}
	}

	/*
	 * We may need to truncate allocated but not written blocks beyond EOF.
	 */
	written_blk = ALIGN(offset + written, 1 << blkbits);
	end_blk = ALIGN(offset + count, 1 << blkbits);
	if (written_blk < end_blk && ext4_can_truncate(inode))
		truncate = true;

	/*
	 * Remove the inode from the orphan list if it has been extended and
	 * everything went OK.
	 */
	if (!truncate && inode->i_nlink)
		ext4_orphan_del(handle, inode);
	ext4_journal_stop(handle);

	if (truncate) {
truncate:
		ext4_truncate_failed_write(inode);
		/*
		 * If the truncate operation failed early, then the inode may
		 * still be on the orphan list. In that case, we need to try to
		 * remove the inode from the in-memory linked list.
		 */
		if (inode->i_nlink)
			ext4_orphan_del(NULL, inode);
	}

	return written;
}

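/*
 * Direct write completion handler: if the write landed in preallocated
 * (unwritten) extents, convert the written range to initialized extents.
 */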
static int ext4_dio_write_end_io(struct kiocb *iocb, ssize_t size,
				 int error, unsigned int flags)
{
	loff_t offset = iocb->ki_pos;
	struct inode *inode = file_inode(iocb->ki_filp);

	if (error)
		return error;

	if (size && flags & IOMAP_DIO_UNWRITTEN)
		return ext4_convert_unwritten_extents(NULL, inode,
						      offset, size);

	return 0;
}

static const struct iomap_dio_ops ext4_dio_write_ops = {
	.end_io = ext4_dio_write_end_io,
};

/*
 * The intention here is to start with a shared lock acquired, then see if
 * any condition requires an exclusive inode lock. If yes, then we restart
 * the whole operation by releasing the shared lock and acquiring an
 * exclusive lock.
 *
 * - For unaligned IO we never take the shared lock, as it may cause data
 *   corruption when two unaligned IOs try to modify the same block, e.g.
 *   while zeroing.
 *
 * - For extending writes we don't take the shared lock, since extending
 *   requires updating inode i_disksize and/or orphan handling under an
 *   exclusive lock.
 *
 * - Shared locking is mostly kept only for overwrites; otherwise we switch
 *   to the exclusive i_rwsem lock.
 */
static ssize_t ext4_dio_write_checks(struct kiocb *iocb, struct iov_iter *from,
				     bool *ilock_shared, bool *extend)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	loff_t offset;
	size_t count;
	ssize_t ret;

restart:
	ret = ext4_generic_write_checks(iocb, from);
	if (ret <= 0)
		goto out;

	offset = iocb->ki_pos;
	count = ret;
	if (ext4_extending_io(inode, offset, count))
		*extend = true;
	/*
	 * Determine whether the IO operation will overwrite allocated
	 * and initialized blocks.
	 * We need exclusive i_rwsem for changing security info
	 * in file_modified().
	 */
	if (*ilock_shared && (!IS_NOSEC(inode) || *extend ||
	     !ext4_overwrite_io(inode, offset, count))) {
		if (iocb->ki_flags & IOCB_NOWAIT) {
			ret = -EAGAIN;
			goto out;
		}
		inode_unlock_shared(inode);
		*ilock_shared = false;
		inode_lock(inode);
		goto restart;
	}

	ret = file_modified(file);
	if (ret < 0)
		goto out;

	return count;
out:
	if (*ilock_shared)
		inode_unlock_shared(inode);
	else
		inode_unlock(inode);
	return ret;
}

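/*
 * Direct write path. Start with the shared i_rwsem for what looks like a
 * pure overwrite; ext4_dio_write_checks() escalates to the exclusive lock
 * for unaligned or extending IO. Falls back to buffered I/O when direct
 * I/O is unsupported or when the direct write completes only partially.
 */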
static ssize_t ext4_dio_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	ssize_t ret;
	handle_t *handle;
	struct inode *inode = file_inode(iocb->ki_filp);
	loff_t offset = iocb->ki_pos;
	size_t count = iov_iter_count(from);
	const struct iomap_ops *iomap_ops = &ext4_iomap_ops;
	bool extend = false, unaligned_io = false;
	bool ilock_shared = true;

	/*
	 * We initially start with a shared inode lock unless it is
	 * unaligned IO which needs an exclusive lock anyway.
	 */
	if (ext4_unaligned_io(inode, from, offset)) {
		unaligned_io = true;
		ilock_shared = false;
	}
	/*
	 * Quick check here without any i_rwsem lock to see if it is extending
	 * IO. A more reliable check is done in ext4_dio_write_checks() with
	 * proper locking in place.
	 */
	if (offset + count > i_size_read(inode))
		ilock_shared = false;

	if (iocb->ki_flags & IOCB_NOWAIT) {
		if (ilock_shared) {
			if (!inode_trylock_shared(inode))
				return -EAGAIN;
		} else {
			if (!inode_trylock(inode))
				return -EAGAIN;
		}
	} else {
		if (ilock_shared)
			inode_lock_shared(inode);
		else
			inode_lock(inode);
	}

	/* Fall back to buffered I/O if the inode does not support direct I/O. */
	if (!ext4_dio_supported(inode)) {
		if (ilock_shared)
			inode_unlock_shared(inode);
		else
			inode_unlock(inode);
		return ext4_buffered_write_iter(iocb, from);
	}

	ret = ext4_dio_write_checks(iocb, from, &ilock_shared, &extend);
	if (ret <= 0)
		return ret;

	/* if we're going to block and IOCB_NOWAIT is set, return -EAGAIN */
	if ((iocb->ki_flags & IOCB_NOWAIT) && (unaligned_io || extend)) {
		ret = -EAGAIN;
		goto out;
	}

	offset = iocb->ki_pos;
	count = ret;

	/*
	 * Unaligned direct IO must be serialized with respect to each other,
	 * as zeroing of partial blocks by two competing unaligned IOs can
	 * result in data corruption.
	 *
	 * So we make sure we don't allow any unaligned IO in flight.
	 * For IOs where we need not wait (like unaligned non-AIO DIO),
	 * the inode_dio_wait() below may turn out to be a no-op, since we
	 * start with an exclusive lock.
	 */
	if (unaligned_io)
		inode_dio_wait(inode);

	if (extend) {
		handle = ext4_journal_start(inode, EXT4_HT_INODE, 2);
		if (IS_ERR(handle)) {
			ret = PTR_ERR(handle);
			goto out;
		}

		ext4_fc_start_update(inode);
		ret = ext4_orphan_add(handle, inode);
		ext4_fc_stop_update(inode);
		if (ret) {
			ext4_journal_stop(handle);
			goto out;
		}

		ext4_journal_stop(handle);
	}

	if (ilock_shared)
		iomap_ops = &ext4_iomap_overwrite_ops;
	ret = iomap_dio_rw(iocb, from, iomap_ops, &ext4_dio_write_ops,
			   (unaligned_io || extend) ? IOMAP_DIO_FORCE_WAIT : 0);
	if (ret == -ENOTBLK)
		ret = 0;

	if (extend)
		ret = ext4_handle_inode_extension(inode, offset, ret, count);

out:
	if (ilock_shared)
		inode_unlock_shared(inode);
	else
		inode_unlock(inode);

	if (ret >= 0 && iov_iter_count(from)) {
		ssize_t err;
		loff_t endbyte;

		offset = iocb->ki_pos;
		err = ext4_buffered_write_iter(iocb, from);
		if (err < 0)
			return err;

		/*
		 * We need to ensure that the pages within the page cache for
		 * the range covered by this I/O are written to disk and
		 * invalidated. This is an attempt to preserve the expected
		 * direct I/O semantics in case we fall back to buffered I/O
		 * to complete the rest of the I/O request.
		 */
		ret += err;
		endbyte = offset + err - 1;
		err = filemap_write_and_wait_range(iocb->ki_filp->f_mapping,
						   offset, endbyte);
		if (!err)
			invalidate_mapping_pages(iocb->ki_filp->f_mapping,
						 offset >> PAGE_SHIFT,
						 endbyte >> PAGE_SHIFT);
	}

	return ret;
}

#ifdef CONFIG_FS_DAX
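/*
 * DAX writes always take the exclusive i_rwsem. Extending writes put the
 * inode on the orphan list first, so that crash recovery can clean up
 * blocks allocated beyond the recorded size.
 */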
static ssize_t
ext4_dax_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	ssize_t ret;
	size_t count;
	loff_t offset;
	handle_t *handle;
	bool extend = false;
	struct inode *inode = file_inode(iocb->ki_filp);

	if (iocb->ki_flags & IOCB_NOWAIT) {
		if (!inode_trylock(inode))
			return -EAGAIN;
	} else {
		inode_lock(inode);
	}

	ret = ext4_write_checks(iocb, from);
	if (ret <= 0)
		goto out;

	offset = iocb->ki_pos;
	count = iov_iter_count(from);

	if (offset + count > EXT4_I(inode)->i_disksize) {
		handle = ext4_journal_start(inode, EXT4_HT_INODE, 2);
		if (IS_ERR(handle)) {
			ret = PTR_ERR(handle);
			goto out;
		}

		ret = ext4_orphan_add(handle, inode);
		if (ret) {
			ext4_journal_stop(handle);
			goto out;
		}

		extend = true;
		ext4_journal_stop(handle);
	}

	ret = dax_iomap_rw(iocb, from, &ext4_iomap_ops);

	if (extend)
		ret = ext4_handle_inode_extension(inode, offset, ret, count);
out:
	inode_unlock(inode);
	if (ret > 0)
		ret = generic_write_sync(iocb, ret);
	return ret;
}
#endif

static ssize_t
ext4_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct inode *inode = file_inode(iocb->ki_filp);

	if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
		return -EIO;

#ifdef CONFIG_FS_DAX
	if (IS_DAX(inode))
		return ext4_dax_write_iter(iocb, from);
#endif
	if (iocb->ki_flags & IOCB_DIRECT)
		return ext4_dio_write_iter(iocb, from);
	else
		return ext4_buffered_write_iter(iocb, from);
}

#ifdef CONFIG_FS_DAX
static vm_fault_t ext4_dax_huge_fault(struct vm_fault *vmf,
		enum page_entry_size pe_size)
{
	int error = 0;
	vm_fault_t result;
	int retries = 0;
	handle_t *handle = NULL;
	struct inode *inode = file_inode(vmf->vma->vm_file);
	struct super_block *sb = inode->i_sb;

	/*
	 * We have to distinguish real writes from writes which will result in a
	 * COW page; COW writes should *not* poke the journal (the file will not
	 * be changed). Doing so would cause unintended failures when mounted
	 * read-only.
	 *
	 * We check for VM_SHARED rather than vmf->cow_page since the latter is
	 * unset for pe_size != PE_SIZE_PTE (i.e. only in do_cow_fault); for
	 * other sizes, dax_iomap_fault will handle splitting / fallback so that
	 * we eventually come back with a COW page.
	 */
	bool write = (vmf->flags & FAULT_FLAG_WRITE) &&
		(vmf->vma->vm_flags & VM_SHARED);
	pfn_t pfn;

	if (write) {
		sb_start_pagefault(sb);
		file_update_time(vmf->vma->vm_file);
		down_read(&EXT4_I(inode)->i_mmap_sem);
retry:
		handle = ext4_journal_start_sb(sb, EXT4_HT_WRITE_PAGE,
					       EXT4_DATA_TRANS_BLOCKS(sb));
		if (IS_ERR(handle)) {
			up_read(&EXT4_I(inode)->i_mmap_sem);
			sb_end_pagefault(sb);
			return VM_FAULT_SIGBUS;
		}
	} else {
		down_read(&EXT4_I(inode)->i_mmap_sem);
	}
	result = dax_iomap_fault(vmf, pe_size, &pfn, &error, &ext4_iomap_ops);
	if (write) {
		ext4_journal_stop(handle);

		if ((result & VM_FAULT_ERROR) && error == -ENOSPC &&
		    ext4_should_retry_alloc(sb, &retries))
			goto retry;
		/* Handling synchronous page fault? */
		if (result & VM_FAULT_NEEDDSYNC)
			result = dax_finish_sync_fault(vmf, pe_size, pfn);
		up_read(&EXT4_I(inode)->i_mmap_sem);
		sb_end_pagefault(sb);
	} else {
		up_read(&EXT4_I(inode)->i_mmap_sem);
	}

	return result;
}

static vm_fault_t ext4_dax_fault(struct vm_fault *vmf)
{
	return ext4_dax_huge_fault(vmf, PE_SIZE_PTE);
}

static const struct vm_operations_struct ext4_dax_vm_ops = {
	.fault		= ext4_dax_fault,
	.huge_fault	= ext4_dax_huge_fault,
	.page_mkwrite	= ext4_dax_fault,
	.pfn_mkwrite	= ext4_dax_fault,
};
#else
#define ext4_dax_vm_ops	ext4_file_vm_ops
#endif

static const struct vm_operations_struct ext4_file_vm_ops = {
	.fault		= ext4_filemap_fault,
	.map_pages	= filemap_map_pages,
	.page_mkwrite   = ext4_page_mkwrite,
};

static int ext4_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct inode *inode = file->f_mapping->host;
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	struct dax_device *dax_dev = sbi->s_daxdev;

	if (unlikely(ext4_forced_shutdown(sbi)))
		return -EIO;

	/*
	 * We don't support synchronous mappings for non-DAX files, nor for
	 * DAX files if the underlying dax_device is not synchronous.
	 */
	if (!daxdev_mapping_supported(vma, dax_dev))
		return -EOPNOTSUPP;

	file_accessed(file);
	if (IS_DAX(file_inode(file))) {
		vma->vm_ops = &ext4_dax_vm_ops;
		vma->vm_flags |= VM_HUGEPAGE;
	} else {
		vma->vm_ops = &ext4_file_vm_ops;
	}
	return 0;
}

static int ext4_sample_last_mounted(struct super_block *sb,
				    struct vfsmount *mnt)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct path path;
	char buf[64], *cp;
	handle_t *handle;
	int err;

	if (likely(ext4_test_mount_flag(sb, EXT4_MF_MNTDIR_SAMPLED)))
		return 0;

	if (sb_rdonly(sb) || !sb_start_intwrite_trylock(sb))
		return 0;

	ext4_set_mount_flag(sb, EXT4_MF_MNTDIR_SAMPLED);
	/*
	 * Sample where the filesystem has been mounted and
	 * store it in the superblock for sysadmin convenience
	 * when trying to sort through large numbers of block
	 * devices or filesystem images.
	 */
	memset(buf, 0, sizeof(buf));
	path.mnt = mnt;
	path.dentry = mnt->mnt_root;
	cp = d_path(&path, buf, sizeof(buf));
	err = 0;
	if (IS_ERR(cp))
		goto out;

	handle = ext4_journal_start_sb(sb, EXT4_HT_MISC, 1);
	err = PTR_ERR(handle);
	if (IS_ERR(handle))
		goto out;
	BUFFER_TRACE(sbi->s_sbh, "get_write_access");
	err = ext4_journal_get_write_access(handle, sbi->s_sbh);
	if (err)
		goto out_journal;
	lock_buffer(sbi->s_sbh);
	strncpy(sbi->s_es->s_last_mounted, cp,
		sizeof(sbi->s_es->s_last_mounted));
	ext4_superblock_csum_set(sb);
	unlock_buffer(sbi->s_sbh);
	ext4_handle_dirty_metadata(handle, NULL, sbi->s_sbh);
out_journal:
	ext4_journal_stop(handle);
out:
	sb_end_intwrite(sb);
	return err;
}

static int ext4_file_open(struct inode *inode, struct file *filp)
{
	int ret;

	if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
		return -EIO;

	ret = ext4_sample_last_mounted(inode->i_sb, filp->f_path.mnt);
	if (ret)
		return ret;

	ret = fscrypt_file_open(inode, filp);
	if (ret)
		return ret;

	ret = fsverity_file_open(inode, filp);
	if (ret)
		return ret;

	/*
	 * Set up the jbd2_inode if we are opening the inode for
	 * writing and the journal is present
	 */
	if (filp->f_mode & FMODE_WRITE) {
		ret = ext4_inode_attach_jinode(inode);
		if (ret < 0)
			return ret;
	}

	filp->f_mode |= FMODE_NOWAIT | FMODE_BUF_RASYNC;
	return dquot_file_open(inode, filp);
}

/*
 * ext4_llseek() handles both block-mapped and extent-mapped maxbytes values
 * by calling generic_file_llseek_size() with the appropriate maxbytes
 * value for each.
 */
loff_t ext4_llseek(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file->f_mapping->host;
	loff_t maxbytes;

	if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
		maxbytes = EXT4_SB(inode->i_sb)->s_bitmap_maxbytes;
	else
		maxbytes = inode->i_sb->s_maxbytes;

	switch (whence) {
	default:
		return generic_file_llseek_size(file, offset, whence,
						maxbytes, i_size_read(inode));
	case SEEK_HOLE:
		inode_lock_shared(inode);
		offset = iomap_seek_hole(inode, offset,
					 &ext4_iomap_report_ops);
		inode_unlock_shared(inode);
		break;
	case SEEK_DATA:
		inode_lock_shared(inode);
		offset = iomap_seek_data(inode, offset,
					 &ext4_iomap_report_ops);
		inode_unlock_shared(inode);
		break;
	}

	if (offset < 0)
		return offset;
	return vfs_setpos(file, offset, maxbytes);
}

const struct file_operations ext4_file_operations = {
	.llseek		= ext4_llseek,
	.read_iter	= ext4_file_read_iter,
	.write_iter	= ext4_file_write_iter,
	.iopoll		= iomap_dio_iopoll,
	.unlocked_ioctl = ext4_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= ext4_compat_ioctl,
#endif
	.mmap		= ext4_file_mmap,
	.mmap_supported_flags = MAP_SYNC,
	.open		= ext4_file_open,
	.release	= ext4_release_file,
	.fsync		= ext4_sync_file,
	.get_unmapped_area = thp_get_unmapped_area,
	.splice_read	= generic_file_splice_read,
	.splice_write	= iter_file_splice_write,
	.fallocate	= ext4_fallocate,
};

const struct inode_operations ext4_file_inode_operations = {
	.setattr	= ext4_setattr,
	.getattr	= ext4_file_getattr,
	.listxattr	= ext4_listxattr,
	.get_acl	= ext4_get_acl,
	.set_acl	= ext4_set_acl,
	.fiemap		= ext4_fiemap,
	.fileattr_get	= ext4_fileattr_get,
	.fileattr_set	= ext4_fileattr_set,
};