// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/ext4/file.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 *  from
 *
 *  linux/fs/minix/file.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  ext4 fs regular file handling primitives
 *
 *  64-bit file support on 64-bit platforms by Jakub Jelinek
 *	(jj@sunsite.ms.mff.cuni.cz)
 */

#include <linux/time.h>
#include <linux/fs.h>
#include <linux/iomap.h>
#include <linux/mount.h>
#include <linux/path.h>
#include <linux/dax.h>
#include <linux/quotaops.h>
#include <linux/pagevec.h>
#include <linux/uio.h>
#include <linux/mman.h>
#include <linux/backing-dev.h>
#include "ext4.h"
#include "ext4_jbd2.h"
#include "xattr.h"
#include "acl.h"
#include "truncate.h"

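/*
 * Returns true if the inode can use the iomap direct I/O path: encrypted,
 * verity, data-journalled and inline-data inodes all have to go through
 * buffered I/O instead.
 */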
static bool ext4_dio_supported(struct inode *inode)
{
	if (IS_ENABLED(CONFIG_FS_ENCRYPTION) && IS_ENCRYPTED(inode))
		return false;
	if (fsverity_active(inode))
		return false;
	if (ext4_should_journal_data(inode))
		return false;
	if (ext4_has_inline_data(inode))
		return false;
	return true;
}

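/*
 * Direct I/O read. The read runs under the shared inode lock (trylock only
 * for IOCB_NOWAIT); if the inode cannot do direct I/O, IOCB_DIRECT is
 * cleared and we fall back to buffered I/O.
 */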
static ssize_t ext4_dio_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	ssize_t ret;
	struct inode *inode = file_inode(iocb->ki_filp);

	if (iocb->ki_flags & IOCB_NOWAIT) {
		if (!inode_trylock_shared(inode))
			return -EAGAIN;
	} else {
		inode_lock_shared(inode);
	}

	if (!ext4_dio_supported(inode)) {
		inode_unlock_shared(inode);
		/*
		 * Fall back to buffered I/O if the operation being performed on
		 * the inode is not supported by direct I/O. The IOCB_DIRECT
		 * flag needs to be cleared here in order to ensure that the
		 * direct I/O path within generic_file_read_iter() is not
		 * taken.
		 */
		iocb->ki_flags &= ~IOCB_DIRECT;
		return generic_file_read_iter(iocb, to);
	}

	ret = iomap_dio_rw(iocb, to, &ext4_iomap_ops, NULL,
			   is_sync_kiocb(iocb));
	inode_unlock_shared(inode);

	file_accessed(iocb->ki_filp);
	return ret;
}

#ifdef CONFIG_FS_DAX
static ssize_t ext4_dax_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	ssize_t ret;

	if (iocb->ki_flags & IOCB_NOWAIT) {
		if (!inode_trylock_shared(inode))
			return -EAGAIN;
	} else {
		inode_lock_shared(inode);
	}
	/*
	 * Recheck under inode lock - at this point we are sure it cannot
	 * change anymore
	 */
	if (!IS_DAX(inode)) {
		inode_unlock_shared(inode);
		/* Fall back to buffered I/O in case we cannot support DAX */
		return generic_file_read_iter(iocb, to);
	}
	ret = dax_iomap_rw(iocb, to, &ext4_iomap_ops);
	inode_unlock_shared(inode);

	file_accessed(iocb->ki_filp);
	return ret;
}
#endif

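/*
 * Top-level read iterator: dispatch to the DAX, direct I/O or buffered
 * read path depending on the inode and iocb flags.
 */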
static ssize_t ext4_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct inode *inode = file_inode(iocb->ki_filp);

	if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
		return -EIO;

	if (!iov_iter_count(to))
		return 0; /* skip atime */

#ifdef CONFIG_FS_DAX
	if (IS_DAX(inode))
		return ext4_dax_read_iter(iocb, to);
#endif
	if (iocb->ki_flags & IOCB_DIRECT)
		return ext4_dio_read_iter(iocb, to);

	return generic_file_read_iter(iocb, to);
}

/*
 * Called when an inode is released. Note that this is different
 * from ext4_file_open: open gets called at every open, but release
 * gets called only when /all/ the files are closed.
 */
static int ext4_release_file(struct inode *inode, struct file *filp)
{
	if (ext4_test_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE)) {
		ext4_alloc_da_blocks(inode);
		ext4_clear_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE);
	}
	/* if we are the last writer on the inode, drop the block reservation */
	if ((filp->f_mode & FMODE_WRITE) &&
			(atomic_read(&inode->i_writecount) == 1) &&
			!EXT4_I(inode)->i_reserved_data_blocks) {
		down_write(&EXT4_I(inode)->i_data_sem);
		ext4_discard_preallocations(inode, 0);
		up_write(&EXT4_I(inode)->i_data_sem);
	}
	if (is_dx(inode) && filp->private_data)
		ext4_htree_free_dir_info(filp->private_data);

	return 0;
}

/*
 * This tests whether the IO in question is block-aligned or not.
 * Ext4 utilizes unwritten extents when hole-filling during direct IO, and they
 * are converted to written only after the IO is complete.  Until they are
 * mapped, these blocks appear as holes, so dio_zero_block() will assume that
 * it needs to zero out portions of the start and/or end block.  If 2 AIO
 * threads are at work on the same unwritten block, they must be synchronized
 * or one thread will zero the other's data, causing corruption.
 */
static bool
ext4_unaligned_io(struct inode *inode, struct iov_iter *from, loff_t pos)
{
	struct super_block *sb = inode->i_sb;
	unsigned long blockmask = sb->s_blocksize - 1;

	if ((pos | iov_iter_alignment(from)) & blockmask)
		return true;

	return false;
}

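/* Does the I/O extend i_size or the on-disk size kept in i_disksize? */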
static bool
ext4_extending_io(struct inode *inode, loff_t offset, size_t len)
{
	if (offset + len > i_size_read(inode) ||
	    offset + len > EXT4_I(inode)->i_disksize)
		return true;
	return false;
}

/* Is IO overwriting allocated and initialized blocks? */
static bool ext4_overwrite_io(struct inode *inode, loff_t pos, loff_t len)
{
	struct ext4_map_blocks map;
	unsigned int blkbits = inode->i_blkbits;
	int err, blklen;

	if (pos + len > i_size_read(inode))
		return false;

	map.m_lblk = pos >> blkbits;
	map.m_len = EXT4_MAX_BLOCKS(len, pos, blkbits);
	blklen = map.m_len;

	err = ext4_map_blocks(NULL, inode, &map, 0);
	/*
	 * 'err == blklen' means that all of the blocks have been preallocated,
	 * regardless of whether they have been initialized or not. To exclude
	 * unwritten extents, we need to check m_flags.
	 */
	return err == blklen && (map.m_flags & EXT4_MAP_MAPPED);
}

static ssize_t ext4_generic_write_checks(struct kiocb *iocb,
					 struct iov_iter *from)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	ssize_t ret;

	if (unlikely(IS_IMMUTABLE(inode)))
		return -EPERM;

	ret = generic_write_checks(iocb, from);
	if (ret <= 0)
		return ret;

	/*
	 * If we have encountered a bitmap-format file, the size limit
	 * is smaller than s_maxbytes, which is for extent-mapped files.
	 */
	if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
		struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);

		if (iocb->ki_pos >= sbi->s_bitmap_maxbytes)
			return -EFBIG;
		iov_iter_truncate(from, sbi->s_bitmap_maxbytes - iocb->ki_pos);
	}

	return iov_iter_count(from);
}

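/*
 * Like ext4_generic_write_checks(), but additionally calls file_modified()
 * to update the timestamps and strip setuid/setgid bits when needed.
 */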
static ssize_t ext4_write_checks(struct kiocb *iocb, struct iov_iter *from)
{
	ssize_t ret, count;

	count = ext4_generic_write_checks(iocb, from);
	if (count <= 0)
		return count;

	ret = file_modified(iocb->ki_filp);
	if (ret)
		return ret;
	return count;
}

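/*
 * Buffered write path. The write runs under the exclusive inode lock, so
 * IOCB_NOWAIT is not supported here.
 */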
static ssize_t ext4_buffered_write_iter(struct kiocb *iocb,
					struct iov_iter *from)
{
	ssize_t ret;
	struct inode *inode = file_inode(iocb->ki_filp);

	if (iocb->ki_flags & IOCB_NOWAIT)
		return -EOPNOTSUPP;

	ext4_fc_start_update(inode);
	inode_lock(inode);
	ret = ext4_write_checks(iocb, from);
	if (ret <= 0)
		goto out;

	current->backing_dev_info = inode_to_bdi(inode);
	ret = generic_perform_write(iocb->ki_filp, from, iocb->ki_pos);
	current->backing_dev_info = NULL;

out:
	inode_unlock(inode);
	ext4_fc_stop_update(inode);
	if (likely(ret > 0)) {
		iocb->ki_pos += ret;
		ret = generic_write_sync(iocb, ret);
	}

	return ret;
}

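/*
 * Finish off a size-extending direct or DAX write: update the inode size,
 * drop the inode from the orphan list once the extension is safely on
 * disk, and truncate any blocks allocated beyond the data actually
 * written.
 */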
static ssize_t ext4_handle_inode_extension(struct inode *inode, loff_t offset,
					   ssize_t written, size_t count)
{
	handle_t *handle;
	bool truncate = false;
	u8 blkbits = inode->i_blkbits;
	ext4_lblk_t written_blk, end_blk;
	int ret;

	/*
	 * Note that EXT4_I(inode)->i_disksize can get extended up to
	 * inode->i_size while the I/O was running due to writeback of delalloc
	 * blocks. But, the code in ext4_iomap_alloc() is careful to use
	 * zeroed/unwritten extents if this is possible; thus we won't leave
	 * uninitialized blocks in a file even if we didn't succeed in writing
	 * as much as we intended.
	 */
	WARN_ON_ONCE(i_size_read(inode) < EXT4_I(inode)->i_disksize);
	if (offset + count <= EXT4_I(inode)->i_disksize) {
		/*
		 * We need to ensure that the inode is removed from the orphan
		 * list if it has been added prematurely, due to writeback of
		 * delalloc blocks.
		 */
		if (!list_empty(&EXT4_I(inode)->i_orphan) && inode->i_nlink) {
			handle = ext4_journal_start(inode, EXT4_HT_INODE, 2);

			if (IS_ERR(handle)) {
				ext4_orphan_del(NULL, inode);
				return PTR_ERR(handle);
			}

			ext4_orphan_del(handle, inode);
			ext4_journal_stop(handle);
		}

		return written;
	}

	if (written < 0)
		goto truncate;

	handle = ext4_journal_start(inode, EXT4_HT_INODE, 2);
	if (IS_ERR(handle)) {
		written = PTR_ERR(handle);
		goto truncate;
	}

	if (ext4_update_inode_size(inode, offset + written)) {
		ret = ext4_mark_inode_dirty(handle, inode);
		if (unlikely(ret)) {
			written = ret;
			ext4_journal_stop(handle);
			goto truncate;
		}
	}

	/*
	 * We may need to truncate allocated but not written blocks beyond EOF.
	 */
	written_blk = ALIGN(offset + written, 1 << blkbits);
	end_blk = ALIGN(offset + count, 1 << blkbits);
	if (written_blk < end_blk && ext4_can_truncate(inode))
		truncate = true;

	/*
	 * Remove the inode from the orphan list if it has been extended and
	 * everything went OK.
	 */
	if (!truncate && inode->i_nlink)
		ext4_orphan_del(handle, inode);
	ext4_journal_stop(handle);

	if (truncate) {
truncate:
		ext4_truncate_failed_write(inode);
		/*
		 * If the truncate operation failed early, then the inode may
		 * still be on the orphan list. In that case, we need to try to
		 * remove the inode from the in-memory linked list.
		 */
		if (inode->i_nlink)
			ext4_orphan_del(NULL, inode);
	}

	return written;
}

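/*
 * iomap dio end_io callback: convert any unwritten extents covered by a
 * successful write to written.
 */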
static int ext4_dio_write_end_io(struct kiocb *iocb, ssize_t size,
				 int error, unsigned int flags)
{
	loff_t offset = iocb->ki_pos;
	struct inode *inode = file_inode(iocb->ki_filp);

	if (error)
		return error;

	if (size && flags & IOMAP_DIO_UNWRITTEN)
		return ext4_convert_unwritten_extents(NULL, inode,
						      offset, size);

	return 0;
}

static const struct iomap_dio_ops ext4_dio_write_ops = {
	.end_io = ext4_dio_write_end_io,
};

/*
 * The intention here is to start with the shared lock acquired, then see
 * whether any condition requires an exclusive inode lock. If so, we restart
 * the whole operation by releasing the shared lock and acquiring the
 * exclusive lock.
 *
 * - For unaligned IO we never take the shared lock, as it may cause data
 *   corruption when two unaligned IOs try to modify the same block, e.g.
 *   while zeroing.
 *
 * - For extending writes we don't take the shared lock either, since they
 *   require updating i_disksize and/or orphan handling under the exclusive
 *   lock.
 *
 * - Shared locking is thus mostly retained only for overwrites. Otherwise
 *   we switch to the exclusive i_rwsem lock.
 */
static ssize_t ext4_dio_write_checks(struct kiocb *iocb, struct iov_iter *from,
				     bool *ilock_shared, bool *extend)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	loff_t offset;
	size_t count;
	ssize_t ret;

restart:
	ret = ext4_generic_write_checks(iocb, from);
	if (ret <= 0)
		goto out;

	offset = iocb->ki_pos;
	count = ret;
	if (ext4_extending_io(inode, offset, count))
		*extend = true;
	/*
	 * Determine whether the IO operation will overwrite allocated
	 * and initialized blocks.
	 * We need exclusive i_rwsem for changing security info
	 * in file_modified().
	 */
	if (*ilock_shared && (!IS_NOSEC(inode) || *extend ||
	     !ext4_overwrite_io(inode, offset, count))) {
		if (iocb->ki_flags & IOCB_NOWAIT) {
			ret = -EAGAIN;
			goto out;
		}
		inode_unlock_shared(inode);
		*ilock_shared = false;
		inode_lock(inode);
		goto restart;
	}

	ret = file_modified(file);
	if (ret < 0)
		goto out;

	return count;
out:
	if (*ilock_shared)
		inode_unlock_shared(inode);
	else
		inode_unlock(inode);
	return ret;
}

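/*
 * Direct I/O write. Overwrites of allocated, initialized blocks may run
 * under the shared inode lock; unaligned or size-extending writes need the
 * exclusive lock (see ext4_dio_write_checks() above). A short or refused
 * (-ENOTBLK) direct write is completed via buffered I/O.
 */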
static ssize_t ext4_dio_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	ssize_t ret;
	handle_t *handle;
	struct inode *inode = file_inode(iocb->ki_filp);
	loff_t offset = iocb->ki_pos;
	size_t count = iov_iter_count(from);
	const struct iomap_ops *iomap_ops = &ext4_iomap_ops;
	bool extend = false, unaligned_io = false;
	bool ilock_shared = true;

	/*
	 * We initially start with the shared inode lock unless it is
	 * unaligned IO, which needs the exclusive lock anyway.
	 */
	if (ext4_unaligned_io(inode, from, offset)) {
		unaligned_io = true;
		ilock_shared = false;
	}
	/*
	 * Quick check here without any i_rwsem lock to see if it is extending
	 * IO. A more reliable check is done in ext4_dio_write_checks() with
	 * proper locking in place.
	 */
	if (offset + count > i_size_read(inode))
		ilock_shared = false;

	if (iocb->ki_flags & IOCB_NOWAIT) {
		if (ilock_shared) {
			if (!inode_trylock_shared(inode))
				return -EAGAIN;
		} else {
			if (!inode_trylock(inode))
				return -EAGAIN;
		}
	} else {
		if (ilock_shared)
			inode_lock_shared(inode);
		else
			inode_lock(inode);
	}

	/* Fall back to buffered I/O if the inode does not support direct I/O. */
	if (!ext4_dio_supported(inode)) {
		if (ilock_shared)
			inode_unlock_shared(inode);
		else
			inode_unlock(inode);
		return ext4_buffered_write_iter(iocb, from);
	}

	ret = ext4_dio_write_checks(iocb, from, &ilock_shared, &extend);
	if (ret <= 0)
		return ret;

	/* if we're going to block and IOCB_NOWAIT is set, return -EAGAIN */
	if ((iocb->ki_flags & IOCB_NOWAIT) && (unaligned_io || extend)) {
		ret = -EAGAIN;
		goto out;
	}

	offset = iocb->ki_pos;
	count = ret;

	/*
	 * Unaligned direct IOs must be serialized with respect to each
	 * other, as zeroing of partial blocks by two competing unaligned
	 * IOs can result in data corruption.
	 *
	 * So we make sure we don't allow any unaligned IO in flight.
	 * For IOs where we need not wait (like unaligned non-AIO DIO),
	 * the inode_dio_wait() below may become a no-op, since we start
	 * with the exclusive lock.
	 */
	if (unaligned_io)
		inode_dio_wait(inode);

	if (extend) {
		handle = ext4_journal_start(inode, EXT4_HT_INODE, 2);
		if (IS_ERR(handle)) {
			ret = PTR_ERR(handle);
			goto out;
		}

		ext4_fc_start_update(inode);
		ret = ext4_orphan_add(handle, inode);
		ext4_fc_stop_update(inode);
		if (ret) {
			ext4_journal_stop(handle);
			goto out;
		}

		ext4_journal_stop(handle);
	}

	if (ilock_shared)
		iomap_ops = &ext4_iomap_overwrite_ops;
	ret = iomap_dio_rw(iocb, from, iomap_ops, &ext4_dio_write_ops,
			   is_sync_kiocb(iocb) || unaligned_io || extend);
	if (ret == -ENOTBLK)
		ret = 0;

	if (extend)
		ret = ext4_handle_inode_extension(inode, offset, ret, count);

out:
	if (ilock_shared)
		inode_unlock_shared(inode);
	else
		inode_unlock(inode);

	if (ret >= 0 && iov_iter_count(from)) {
		ssize_t err;
		loff_t endbyte;

		offset = iocb->ki_pos;
		err = ext4_buffered_write_iter(iocb, from);
		if (err < 0)
			return err;

		/*
		 * We need to ensure that the pages within the page cache for
		 * the range covered by this I/O are written to disk and
		 * invalidated. This is in an attempt to preserve the expected
		 * direct I/O semantics in the case we fall back to buffered
		 * I/O to complete the I/O request.
		 */
		ret += err;
		endbyte = offset + err - 1;
		err = filemap_write_and_wait_range(iocb->ki_filp->f_mapping,
						   offset, endbyte);
		if (!err)
			invalidate_mapping_pages(iocb->ki_filp->f_mapping,
						 offset >> PAGE_SHIFT,
						 endbyte >> PAGE_SHIFT);
	}

	return ret;
}

#ifdef CONFIG_FS_DAX
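/*
 * DAX write. Runs under the exclusive inode lock; if the write extends
 * beyond i_disksize, the inode is put on the orphan list first so that
 * blocks allocated past the old size get cleaned up after a crash.
 */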
static ssize_t
ext4_dax_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	ssize_t ret;
	size_t count;
	loff_t offset;
	handle_t *handle;
	bool extend = false;
	struct inode *inode = file_inode(iocb->ki_filp);

	if (iocb->ki_flags & IOCB_NOWAIT) {
		if (!inode_trylock(inode))
			return -EAGAIN;
	} else {
		inode_lock(inode);
	}

	ret = ext4_write_checks(iocb, from);
	if (ret <= 0)
		goto out;

	offset = iocb->ki_pos;
	count = iov_iter_count(from);

	if (offset + count > EXT4_I(inode)->i_disksize) {
		handle = ext4_journal_start(inode, EXT4_HT_INODE, 2);
		if (IS_ERR(handle)) {
			ret = PTR_ERR(handle);
			goto out;
		}

		ret = ext4_orphan_add(handle, inode);
		if (ret) {
			ext4_journal_stop(handle);
			goto out;
		}

		extend = true;
		ext4_journal_stop(handle);
	}

	ret = dax_iomap_rw(iocb, from, &ext4_iomap_ops);

	if (extend)
		ret = ext4_handle_inode_extension(inode, offset, ret, count);
out:
	inode_unlock(inode);
	if (ret > 0)
		ret = generic_write_sync(iocb, ret);
	return ret;
}
#endif

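/*
 * Top-level write iterator: dispatch to the DAX, direct I/O or buffered
 * write path depending on the inode and iocb flags.
 */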
static ssize_t
ext4_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct inode *inode = file_inode(iocb->ki_filp);

	if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
		return -EIO;

#ifdef CONFIG_FS_DAX
	if (IS_DAX(inode))
		return ext4_dax_write_iter(iocb, from);
#endif
	if (iocb->ki_flags & IOCB_DIRECT)
		return ext4_dio_write_iter(iocb, from);
	else
		return ext4_buffered_write_iter(iocb, from);
}

#ifdef CONFIG_FS_DAX
static vm_fault_t ext4_dax_huge_fault(struct vm_fault *vmf,
		enum page_entry_size pe_size)
{
	int error = 0;
	vm_fault_t result;
	int retries = 0;
	handle_t *handle = NULL;
	struct inode *inode = file_inode(vmf->vma->vm_file);
	struct super_block *sb = inode->i_sb;

	/*
	 * We have to distinguish real writes from writes which will result in a
	 * COW page; COW writes should *not* poke the journal (the file will not
	 * be changed). Doing so would cause unintended failures when mounted
	 * read-only.
	 *
	 * We check for VM_SHARED rather than vmf->cow_page since the latter is
	 * unset for pe_size != PE_SIZE_PTE (i.e. only in do_cow_fault); for
	 * other sizes, dax_iomap_fault will handle splitting / fallback so that
	 * we eventually come back with a COW page.
	 */
	bool write = (vmf->flags & FAULT_FLAG_WRITE) &&
		(vmf->vma->vm_flags & VM_SHARED);
	pfn_t pfn;

	if (write) {
		sb_start_pagefault(sb);
		file_update_time(vmf->vma->vm_file);
		down_read(&EXT4_I(inode)->i_mmap_sem);
retry:
		handle = ext4_journal_start_sb(sb, EXT4_HT_WRITE_PAGE,
					       EXT4_DATA_TRANS_BLOCKS(sb));
		if (IS_ERR(handle)) {
			up_read(&EXT4_I(inode)->i_mmap_sem);
			sb_end_pagefault(sb);
			return VM_FAULT_SIGBUS;
		}
	} else {
		down_read(&EXT4_I(inode)->i_mmap_sem);
	}
	result = dax_iomap_fault(vmf, pe_size, &pfn, &error, &ext4_iomap_ops);
	if (write) {
		ext4_journal_stop(handle);

		if ((result & VM_FAULT_ERROR) && error == -ENOSPC &&
		    ext4_should_retry_alloc(sb, &retries))
			goto retry;
		/* Handling synchronous page fault? */
		if (result & VM_FAULT_NEEDDSYNC)
			result = dax_finish_sync_fault(vmf, pe_size, pfn);
		up_read(&EXT4_I(inode)->i_mmap_sem);
		sb_end_pagefault(sb);
	} else {
		up_read(&EXT4_I(inode)->i_mmap_sem);
	}

	return result;
}

static vm_fault_t ext4_dax_fault(struct vm_fault *vmf)
{
	return ext4_dax_huge_fault(vmf, PE_SIZE_PTE);
}

static const struct vm_operations_struct ext4_dax_vm_ops = {
	.fault		= ext4_dax_fault,
	.huge_fault	= ext4_dax_huge_fault,
	.page_mkwrite	= ext4_dax_fault,
	.pfn_mkwrite	= ext4_dax_fault,
};
#else
#define ext4_dax_vm_ops	ext4_file_vm_ops
#endif

static const struct vm_operations_struct ext4_file_vm_ops = {
	.fault		= ext4_filemap_fault,
	.map_pages	= filemap_map_pages,
	.page_mkwrite   = ext4_page_mkwrite,
};

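/*
 * Pick the vm_operations for a mapping: DAX inodes use ext4_dax_vm_ops
 * (with VM_HUGEPAGE), everything else uses the regular page-cache fault
 * handlers. MAP_SYNC is refused unless the dax_device is synchronous.
 */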
static int ext4_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct inode *inode = file->f_mapping->host;
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	struct dax_device *dax_dev = sbi->s_daxdev;

	if (unlikely(ext4_forced_shutdown(sbi)))
		return -EIO;

	/*
	 * We don't support synchronous mappings for non-DAX files and
	 * for DAX files if the underlying dax_device is not synchronous.
	 */
	if (!daxdev_mapping_supported(vma, dax_dev))
		return -EOPNOTSUPP;

	file_accessed(file);
	if (IS_DAX(file_inode(file))) {
		vma->vm_ops = &ext4_dax_vm_ops;
		vma->vm_flags |= VM_HUGEPAGE;
	} else {
		vma->vm_ops = &ext4_file_vm_ops;
	}
	return 0;
}

static int ext4_sample_last_mounted(struct super_block *sb,
				    struct vfsmount *mnt)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct path path;
	char buf[64], *cp;
	handle_t *handle;
	int err;

	if (likely(ext4_test_mount_flag(sb, EXT4_MF_MNTDIR_SAMPLED)))
		return 0;

	if (sb_rdonly(sb) || !sb_start_intwrite_trylock(sb))
		return 0;

	ext4_set_mount_flag(sb, EXT4_MF_MNTDIR_SAMPLED);
	/*
	 * Sample where the filesystem has been mounted and
	 * store it in the superblock for sysadmin convenience
	 * when trying to sort through large numbers of block
	 * devices or filesystem images.
	 */
	memset(buf, 0, sizeof(buf));
	path.mnt = mnt;
	path.dentry = mnt->mnt_root;
	cp = d_path(&path, buf, sizeof(buf));
	err = 0;
	if (IS_ERR(cp))
		goto out;

	handle = ext4_journal_start_sb(sb, EXT4_HT_MISC, 1);
	err = PTR_ERR(handle);
	if (IS_ERR(handle))
		goto out;
	BUFFER_TRACE(sbi->s_sbh, "get_write_access");
	err = ext4_journal_get_write_access(handle, sbi->s_sbh);
	if (err)
		goto out_journal;
	lock_buffer(sbi->s_sbh);
	strlcpy(sbi->s_es->s_last_mounted, cp,
		sizeof(sbi->s_es->s_last_mounted));
	ext4_superblock_csum_set(sb);
	unlock_buffer(sbi->s_sbh);
	ext4_handle_dirty_super(handle, sb);
out_journal:
	ext4_journal_stop(handle);
out:
	sb_end_intwrite(sb);
	return err;
}

static int ext4_file_open(struct inode *inode, struct file *filp)
{
	int ret;

	if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
		return -EIO;

	ret = ext4_sample_last_mounted(inode->i_sb, filp->f_path.mnt);
	if (ret)
		return ret;

	ret = fscrypt_file_open(inode, filp);
	if (ret)
		return ret;

	ret = fsverity_file_open(inode, filp);
	if (ret)
		return ret;

	/*
	 * Set up the jbd2_inode if we are opening the inode for
	 * writing and the journal is present
	 */
	if (filp->f_mode & FMODE_WRITE) {
		ret = ext4_inode_attach_jinode(inode);
		if (ret < 0)
			return ret;
	}

	filp->f_mode |= FMODE_NOWAIT | FMODE_BUF_RASYNC;
	return dquot_file_open(inode, filp);
}

/*
 * ext4_llseek() handles both block-mapped and extent-mapped maxbytes values
 * by calling generic_file_llseek_size() with the appropriate maxbytes
 * value for each.
 */
loff_t ext4_llseek(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file->f_mapping->host;
	loff_t maxbytes;

	if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
		maxbytes = EXT4_SB(inode->i_sb)->s_bitmap_maxbytes;
	else
		maxbytes = inode->i_sb->s_maxbytes;

	switch (whence) {
	default:
		return generic_file_llseek_size(file, offset, whence,
						maxbytes, i_size_read(inode));
	case SEEK_HOLE:
		inode_lock_shared(inode);
		offset = iomap_seek_hole(inode, offset,
					 &ext4_iomap_report_ops);
		inode_unlock_shared(inode);
		break;
	case SEEK_DATA:
		inode_lock_shared(inode);
		offset = iomap_seek_data(inode, offset,
					 &ext4_iomap_report_ops);
		inode_unlock_shared(inode);
		break;
	}

	if (offset < 0)
		return offset;
	return vfs_setpos(file, offset, maxbytes);
}

const struct file_operations ext4_file_operations = {
	.llseek		= ext4_llseek,
	.read_iter	= ext4_file_read_iter,
	.write_iter	= ext4_file_write_iter,
	.iopoll		= iomap_dio_iopoll,
	.unlocked_ioctl = ext4_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= ext4_compat_ioctl,
#endif
	.mmap		= ext4_file_mmap,
	.mmap_supported_flags = MAP_SYNC,
	.open		= ext4_file_open,
	.release	= ext4_release_file,
	.fsync		= ext4_sync_file,
	.get_unmapped_area = thp_get_unmapped_area,
	.splice_read	= generic_file_splice_read,
	.splice_write	= iter_file_splice_write,
	.fallocate	= ext4_fallocate,
};

const struct inode_operations ext4_file_inode_operations = {
	.setattr	= ext4_setattr,
	.getattr	= ext4_file_getattr,
	.listxattr	= ext4_listxattr,
	.get_acl	= ext4_get_acl,
	.set_acl	= ext4_set_acl,
	.fiemap		= ext4_fiemap,
};