// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/ext4/file.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 *  from
 *
 *  linux/fs/minix/file.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  ext4 fs regular file handling primitives
 *
 *  64-bit file support on 64-bit platforms by Jakub Jelinek
 *	(jj@sunsite.ms.mff.cuni.cz)
 */

#include <linux/time.h>
#include <linux/fs.h>
#include <linux/iomap.h>
#include <linux/mount.h>
#include <linux/path.h>
#include <linux/dax.h>
#include <linux/quotaops.h>
#include <linux/pagevec.h>
#include <linux/uio.h>
#include <linux/mman.h>
#include <linux/backing-dev.h>
#include "ext4.h"
#include "ext4_jbd2.h"
#include "xattr.h"
#include "acl.h"
#include "truncate.h"

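/*
 * Returns true if the inode can use the iomap direct I/O path. Encrypted,
 * verity, data-journalled and inline-data inodes all need the page cache
 * and therefore fall back to buffered I/O.
 */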
static bool ext4_dio_supported(struct inode *inode)
{
	if (IS_ENABLED(CONFIG_FS_ENCRYPTION) && IS_ENCRYPTED(inode))
		return false;
	if (fsverity_active(inode))
		return false;
	if (ext4_should_journal_data(inode))
		return false;
	if (ext4_has_inline_data(inode))
		return false;
	return true;
}

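/*
 * Direct I/O read path: takes the shared i_rwsem (trylock only under
 * IOCB_NOWAIT) and falls back to buffered I/O when the inode cannot do
 * direct I/O.
 */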
static ssize_t ext4_dio_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	ssize_t ret;
	struct inode *inode = file_inode(iocb->ki_filp);

	if (iocb->ki_flags & IOCB_NOWAIT) {
		if (!inode_trylock_shared(inode))
			return -EAGAIN;
	} else {
		inode_lock_shared(inode);
	}

	if (!ext4_dio_supported(inode)) {
		inode_unlock_shared(inode);
		/*
		 * Fallback to buffered I/O if the operation being performed on
		 * the inode is not supported by direct I/O. The IOCB_DIRECT
		 * flag needs to be cleared here in order to ensure that the
		 * direct I/O path within generic_file_read_iter() is not
		 * taken.
		 */
		iocb->ki_flags &= ~IOCB_DIRECT;
		return generic_file_read_iter(iocb, to);
	}

	ret = iomap_dio_rw(iocb, to, &ext4_iomap_ops, NULL,
			   is_sync_kiocb(iocb));
	inode_unlock_shared(inode);

	file_accessed(iocb->ki_filp);
	return ret;
}

#ifdef CONFIG_FS_DAX
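/*
 * DAX read path: copies data synchronously from the DAX device under the
 * shared i_rwsem, honouring IOCB_NOWAIT via trylock.
 */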
static ssize_t ext4_dax_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	ssize_t ret;

	if (iocb->ki_flags & IOCB_NOWAIT) {
		if (!inode_trylock_shared(inode))
			return -EAGAIN;
	} else {
		inode_lock_shared(inode);
	}
	/*
	 * Recheck under inode lock - at this point we are sure it cannot
	 * change anymore
	 */
	if (!IS_DAX(inode)) {
		inode_unlock_shared(inode);
		/* Fallback to buffered IO in case we cannot support DAX */
		return generic_file_read_iter(iocb, to);
	}
	ret = dax_iomap_rw(iocb, to, &ext4_iomap_ops);
	inode_unlock_shared(inode);

	file_accessed(iocb->ki_filp);
	return ret;
}
#endif

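/*
 * Top-level ->read_iter(): dispatches to the DAX, direct or buffered read
 * path depending on the inode and iocb flags.
 */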
static ssize_t ext4_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct inode *inode = file_inode(iocb->ki_filp);

	if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
		return -EIO;

	if (!iov_iter_count(to))
		return 0; /* skip atime */

#ifdef CONFIG_FS_DAX
	if (IS_DAX(inode))
		return ext4_dax_read_iter(iocb, to);
#endif
	if (iocb->ki_flags & IOCB_DIRECT)
		return ext4_dio_read_iter(iocb, to);

	return generic_file_read_iter(iocb, to);
}

/*
 * Called when an inode is released. Note that this is different
 * from ext4_file_open: open gets called at every open, but release
 * gets called only when /all/ the files are closed.
 */
static int ext4_release_file(struct inode *inode, struct file *filp)
{
	if (ext4_test_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE)) {
		ext4_alloc_da_blocks(inode);
		ext4_clear_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE);
	}
	/* if we are the last writer on the inode, drop the block reservation */
	if ((filp->f_mode & FMODE_WRITE) &&
			(atomic_read(&inode->i_writecount) == 1) &&
			!EXT4_I(inode)->i_reserved_data_blocks) {
		down_write(&EXT4_I(inode)->i_data_sem);
		ext4_discard_preallocations(inode);
		up_write(&EXT4_I(inode)->i_data_sem);
	}
	if (is_dx(inode) && filp->private_data)
		ext4_htree_free_dir_info(filp->private_data);

	return 0;
}

/*
 * This tests whether the IO in question is block-aligned or not.
 * Ext4 utilizes unwritten extents when hole-filling during direct IO, and they
 * are converted to written only after the IO is complete.  Until they are
 * mapped, these blocks appear as holes, so dio_zero_block() will assume that
 * it needs to zero out portions of the start and/or end block.  If 2 AIO
 * threads are at work on the same unwritten block, they must be synchronized
 * or one thread will zero the other's data, causing corruption.
 */
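/*
 * For example, with a 4096-byte block size (blockmask 4095) a 512-byte
 * write at offset 1024 is unaligned, since 1024 & 4095 != 0: partial
 * block zeroing may be needed, so exclusive locking is required.
 */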
static bool
ext4_unaligned_io(struct inode *inode, struct iov_iter *from, loff_t pos)
{
	struct super_block *sb = inode->i_sb;
	unsigned long blockmask = sb->s_blocksize - 1;

	if ((pos | iov_iter_alignment(from)) & blockmask)
		return true;

	return false;
}

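/* Is the I/O extending i_size or the on-disk size (i_disksize)? */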
static bool
ext4_extending_io(struct inode *inode, loff_t offset, size_t len)
{
	if (offset + len > i_size_read(inode) ||
	    offset + len > EXT4_I(inode)->i_disksize)
		return true;
	return false;
}

/* Is IO overwriting allocated and initialized blocks? */
static bool ext4_overwrite_io(struct inode *inode, loff_t pos, loff_t len)
{
	struct ext4_map_blocks map;
	unsigned int blkbits = inode->i_blkbits;
	int err, blklen;

	if (pos + len > i_size_read(inode))
		return false;

	map.m_lblk = pos >> blkbits;
	map.m_len = EXT4_MAX_BLOCKS(len, pos, blkbits);
	blklen = map.m_len;

	err = ext4_map_blocks(NULL, inode, &map, 0);
	/*
	 * 'err == blklen' means that all of the blocks have been preallocated,
	 * regardless of whether they have been initialized or not. To exclude
	 * unwritten extents, we need to check m_flags.
	 */
	return err == blklen && (map.m_flags & EXT4_MAP_MAPPED);
}

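/*
 * Common write checks: reject writes to immutable inodes, run the generic
 * VFS checks, and clamp the iov_iter to s_bitmap_maxbytes for inodes that
 * still use indirect (bitmap) block mapping.
 */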
static ssize_t ext4_generic_write_checks(struct kiocb *iocb,
					 struct iov_iter *from)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	ssize_t ret;

	if (unlikely(IS_IMMUTABLE(inode)))
		return -EPERM;

	ret = generic_write_checks(iocb, from);
	if (ret <= 0)
		return ret;

	/*
	 * If we have encountered a bitmap-format file, the size limit
	 * is smaller than s_maxbytes, which is for extent-mapped files.
	 */
	if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
		struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);

		if (iocb->ki_pos >= sbi->s_bitmap_maxbytes)
			return -EFBIG;
		iov_iter_truncate(from, sbi->s_bitmap_maxbytes - iocb->ki_pos);
	}

	return iov_iter_count(from);
}

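/*
 * As above, plus file_modified(): strip SUID/SGID/security privileges and
 * update the timestamps of the file being written to.
 */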
static ssize_t ext4_write_checks(struct kiocb *iocb, struct iov_iter *from)
{
	ssize_t ret, count;

	count = ext4_generic_write_checks(iocb, from);
	if (count <= 0)
		return count;

	ret = file_modified(iocb->ki_filp);
	if (ret)
		return ret;
	return count;
}

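/*
 * Buffered write path: performs the copy into the page cache via
 * generic_perform_write() under the exclusive i_rwsem and syncs
 * afterwards if required. IOCB_NOWAIT is not supported here.
 */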
static ssize_t ext4_buffered_write_iter(struct kiocb *iocb,
					struct iov_iter *from)
{
	ssize_t ret;
	struct inode *inode = file_inode(iocb->ki_filp);

	if (iocb->ki_flags & IOCB_NOWAIT)
		return -EOPNOTSUPP;

	inode_lock(inode);
	ret = ext4_write_checks(iocb, from);
	if (ret <= 0)
		goto out;

	current->backing_dev_info = inode_to_bdi(inode);
	ret = generic_perform_write(iocb->ki_filp, from, iocb->ki_pos);
	current->backing_dev_info = NULL;

out:
	inode_unlock(inode);
	if (likely(ret > 0)) {
		iocb->ki_pos += ret;
		ret = generic_write_sync(iocb, ret);
	}

	return ret;
}

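/*
 * Finish a write that may have extended the inode: update i_size and
 * i_disksize under a journal handle, drop the inode from the orphan list,
 * and truncate any blocks instantiated beyond the bytes actually written.
 */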
static ssize_t ext4_handle_inode_extension(struct inode *inode, loff_t offset,
					   ssize_t written, size_t count)
{
	handle_t *handle;
	bool truncate = false;
	u8 blkbits = inode->i_blkbits;
	ext4_lblk_t written_blk, end_blk;
	int ret;

	/*
	 * Note that EXT4_I(inode)->i_disksize can get extended up to
	 * inode->i_size while the I/O was running due to writeback of delalloc
	 * blocks. But, the code in ext4_iomap_alloc() is careful to use
	 * zeroed/unwritten extents if this is possible; thus we won't leave
	 * uninitialized blocks in a file even if we didn't succeed in writing
	 * as much as we intended.
	 */
	WARN_ON_ONCE(i_size_read(inode) < EXT4_I(inode)->i_disksize);
	if (offset + count <= EXT4_I(inode)->i_disksize) {
		/*
		 * We need to ensure that the inode is removed from the orphan
		 * list if it has been added prematurely, due to writeback of
		 * delalloc blocks.
		 */
		if (!list_empty(&EXT4_I(inode)->i_orphan) && inode->i_nlink) {
			handle = ext4_journal_start(inode, EXT4_HT_INODE, 2);

			if (IS_ERR(handle)) {
				ext4_orphan_del(NULL, inode);
				return PTR_ERR(handle);
			}

			ext4_orphan_del(handle, inode);
			ext4_journal_stop(handle);
		}

		return written;
	}

	if (written < 0)
		goto truncate;

	handle = ext4_journal_start(inode, EXT4_HT_INODE, 2);
	if (IS_ERR(handle)) {
		written = PTR_ERR(handle);
		goto truncate;
	}

	if (ext4_update_inode_size(inode, offset + written)) {
		ret = ext4_mark_inode_dirty(handle, inode);
		if (unlikely(ret)) {
			written = ret;
			ext4_journal_stop(handle);
			goto truncate;
		}
	}

	/*
	 * We may need to truncate allocated but not written blocks beyond EOF.
	 */
	written_blk = ALIGN(offset + written, 1 << blkbits);
	end_blk = ALIGN(offset + count, 1 << blkbits);
	if (written_blk < end_blk && ext4_can_truncate(inode))
		truncate = true;

	/*
	 * Remove the inode from the orphan list if it has been extended and
	 * everything went OK.
	 */
	if (!truncate && inode->i_nlink)
		ext4_orphan_del(handle, inode);
	ext4_journal_stop(handle);

	if (truncate) {
truncate:
		ext4_truncate_failed_write(inode);
		/*
		 * If the truncate operation failed early, then the inode may
		 * still be on the orphan list. In that case, we need to try to
		 * remove the inode from the in-memory linked list.
		 */
		if (inode->i_nlink)
			ext4_orphan_del(NULL, inode);
	}

	return written;
}

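/*
 * Direct I/O completion handler: convert unwritten extents covering the
 * range just written so the new data becomes visible.
 */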
static int ext4_dio_write_end_io(struct kiocb *iocb, ssize_t size,
				 int error, unsigned int flags)
{
	loff_t offset = iocb->ki_pos;
	struct inode *inode = file_inode(iocb->ki_filp);

	if (error)
		return error;

	if (size && flags & IOMAP_DIO_UNWRITTEN)
		return ext4_convert_unwritten_extents(NULL, inode,
						      offset, size);

	return 0;
}

static const struct iomap_dio_ops ext4_dio_write_ops = {
	.end_io = ext4_dio_write_end_io,
};

/*
 * The intention here is to start with shared lock acquired then see if any
 * condition requires an exclusive inode lock. If yes, then we restart the
 * whole operation by releasing the shared lock and acquiring exclusive lock.
 *
 * - For unaligned_io we never take shared lock as it may cause data corruption
 *   when two unaligned IO tries to modify the same block e.g. while zeroing.
 *
 * - For extending writes case we don't take the shared lock, since it requires
 *   updating inode i_disksize and/or orphan handling with exclusive lock.
 *
 * - Shared locking is retained mostly for overwrites; otherwise we
 *   switch to the exclusive i_rwsem lock.
 */
static ssize_t ext4_dio_write_checks(struct kiocb *iocb, struct iov_iter *from,
				     bool *ilock_shared, bool *extend)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	loff_t offset;
	size_t count;
	ssize_t ret;

restart:
	ret = ext4_generic_write_checks(iocb, from);
	if (ret <= 0)
		goto out;

	offset = iocb->ki_pos;
	count = ret;
	if (ext4_extending_io(inode, offset, count))
		*extend = true;
	/*
	 * Determine whether the IO operation will overwrite allocated
	 * and initialized blocks.
	 * We need the exclusive i_rwsem for changing security info
	 * in file_modified().
	 */
	if (*ilock_shared && (!IS_NOSEC(inode) || *extend ||
	     !ext4_overwrite_io(inode, offset, count))) {
		if (iocb->ki_flags & IOCB_NOWAIT) {
			ret = -EAGAIN;
			goto out;
		}
		inode_unlock_shared(inode);
		*ilock_shared = false;
		inode_lock(inode);
		goto restart;
	}

	ret = file_modified(file);
	if (ret < 0)
		goto out;

	return count;
out:
	if (*ilock_shared)
		inode_unlock_shared(inode);
	else
		inode_unlock(inode);
	return ret;
}

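/*
 * Direct I/O write path: picks shared vs. exclusive i_rwsem as described
 * above ext4_dio_write_checks(), serializes unaligned I/O, protects
 * extending writes with the orphan list, and completes any short
 * remainder via buffered I/O.
 */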
static ssize_t ext4_dio_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	ssize_t ret;
	handle_t *handle;
	struct inode *inode = file_inode(iocb->ki_filp);
	loff_t offset = iocb->ki_pos;
	size_t count = iov_iter_count(from);
	const struct iomap_ops *iomap_ops = &ext4_iomap_ops;
	bool extend = false, unaligned_io = false;
	bool ilock_shared = true;

	/*
	 * We initially start with shared inode lock unless it is
	 * unaligned IO which needs exclusive lock anyways.
	 */
	if (ext4_unaligned_io(inode, from, offset)) {
		unaligned_io = true;
		ilock_shared = false;
	}
	/*
	 * Quick check here without any i_rwsem lock to see if it is extending
	 * IO. A more reliable check is done in ext4_dio_write_checks() with
	 * proper locking in place.
	 */
	if (offset + count > i_size_read(inode))
		ilock_shared = false;

	if (iocb->ki_flags & IOCB_NOWAIT) {
		if (ilock_shared) {
			if (!inode_trylock_shared(inode))
				return -EAGAIN;
		} else {
			if (!inode_trylock(inode))
				return -EAGAIN;
		}
	} else {
		if (ilock_shared)
			inode_lock_shared(inode);
		else
			inode_lock(inode);
	}

	/* Fallback to buffered I/O if the inode does not support direct I/O. */
	if (!ext4_dio_supported(inode)) {
		if (ilock_shared)
			inode_unlock_shared(inode);
		else
			inode_unlock(inode);
		return ext4_buffered_write_iter(iocb, from);
	}

	ret = ext4_dio_write_checks(iocb, from, &ilock_shared, &extend);
	if (ret <= 0)
		return ret;

	/* if we're going to block and IOCB_NOWAIT is set, return -EAGAIN */
	if ((iocb->ki_flags & IOCB_NOWAIT) && (unaligned_io || extend)) {
		ret = -EAGAIN;
		goto out;
	}

	offset = iocb->ki_pos;
	count = ret;

	/*
	 * Unaligned direct IO must be serialized among each other as zeroing
	 * of partial blocks of two competing unaligned IOs can result in data
	 * corruption.
	 *
	 * So we make sure we don't allow any unaligned IO in flight.
	 * For I/O where we need not wait (like unaligned non-AIO DIO), the
	 * inode_dio_wait() below may anyway become a no-op, since we start
	 * with an exclusive lock.
	 */
	if (unaligned_io)
		inode_dio_wait(inode);

	if (extend) {
		handle = ext4_journal_start(inode, EXT4_HT_INODE, 2);
		if (IS_ERR(handle)) {
			ret = PTR_ERR(handle);
			goto out;
		}

		ret = ext4_orphan_add(handle, inode);
		if (ret) {
			ext4_journal_stop(handle);
			goto out;
		}

		ext4_journal_stop(handle);
	}

	if (ilock_shared)
		iomap_ops = &ext4_iomap_overwrite_ops;
	ret = iomap_dio_rw(iocb, from, iomap_ops, &ext4_dio_write_ops,
			   is_sync_kiocb(iocb) || unaligned_io || extend);

	if (extend)
		ret = ext4_handle_inode_extension(inode, offset, ret, count);

out:
	if (ilock_shared)
		inode_unlock_shared(inode);
	else
		inode_unlock(inode);

	if (ret >= 0 && iov_iter_count(from)) {
		ssize_t err;
		loff_t endbyte;

		offset = iocb->ki_pos;
		err = ext4_buffered_write_iter(iocb, from);
		if (err < 0)
			return err;

		/*
		 * We need to ensure that the pages within the page cache for
		 * the range covered by this I/O are written to disk and
		 * invalidated. This is an attempt to preserve the expected
		 * direct I/O semantics in the case we fall back to buffered
		 * I/O to complete the remainder of the request.
		 */
		ret += err;
		endbyte = offset + err - 1;
		err = filemap_write_and_wait_range(iocb->ki_filp->f_mapping,
						   offset, endbyte);
		if (!err)
			invalidate_mapping_pages(iocb->ki_filp->f_mapping,
						 offset >> PAGE_SHIFT,
						 endbyte >> PAGE_SHIFT);
	}

	return ret;
}

#ifdef CONFIG_FS_DAX
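/*
 * DAX write path: always runs under the exclusive i_rwsem; an extending
 * write is protected by the orphan list and finished off in
 * ext4_handle_inode_extension().
 */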
static ssize_t
ext4_dax_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	ssize_t ret;
	size_t count;
	loff_t offset;
	handle_t *handle;
	bool extend = false;
	struct inode *inode = file_inode(iocb->ki_filp);

	if (iocb->ki_flags & IOCB_NOWAIT) {
		if (!inode_trylock(inode))
			return -EAGAIN;
	} else {
		inode_lock(inode);
	}

	ret = ext4_write_checks(iocb, from);
	if (ret <= 0)
		goto out;

	offset = iocb->ki_pos;
	count = iov_iter_count(from);

	if (offset + count > EXT4_I(inode)->i_disksize) {
		handle = ext4_journal_start(inode, EXT4_HT_INODE, 2);
		if (IS_ERR(handle)) {
			ret = PTR_ERR(handle);
			goto out;
		}

		ret = ext4_orphan_add(handle, inode);
		if (ret) {
			ext4_journal_stop(handle);
			goto out;
		}

		extend = true;
		ext4_journal_stop(handle);
	}

	ret = dax_iomap_rw(iocb, from, &ext4_iomap_ops);

	if (extend)
		ret = ext4_handle_inode_extension(inode, offset, ret, count);
out:
	inode_unlock(inode);
	if (ret > 0)
		ret = generic_write_sync(iocb, ret);
	return ret;
}
#endif

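/*
 * Top-level ->write_iter(): dispatches to the DAX, direct or buffered
 * write path depending on the inode and iocb flags.
 */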
static ssize_t
ext4_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct inode *inode = file_inode(iocb->ki_filp);

	if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
		return -EIO;

#ifdef CONFIG_FS_DAX
	if (IS_DAX(inode))
		return ext4_dax_write_iter(iocb, from);
#endif
	if (iocb->ki_flags & IOCB_DIRECT)
		return ext4_dio_write_iter(iocb, from);

	return ext4_buffered_write_iter(iocb, from);
}

#ifdef CONFIG_FS_DAX
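/*
 * DAX fault handler for all fault sizes. Faults that can modify the file
 * run under a journal handle and sb_start_pagefault(); read and COW
 * faults only need i_mmap_sem held for read.
 */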
static vm_fault_t ext4_dax_huge_fault(struct vm_fault *vmf,
		enum page_entry_size pe_size)
{
	int error = 0;
	vm_fault_t result;
	int retries = 0;
	handle_t *handle = NULL;
	struct inode *inode = file_inode(vmf->vma->vm_file);
	struct super_block *sb = inode->i_sb;

	/*
	 * We have to distinguish real writes from writes which will result in a
	 * COW page; COW writes should *not* poke the journal (the file will not
	 * be changed). Doing so would cause unintended failures when mounted
	 * read-only.
	 *
	 * We check for VM_SHARED rather than vmf->cow_page since the latter is
	 * unset for pe_size != PE_SIZE_PTE (i.e. only in do_cow_fault); for
	 * other sizes, dax_iomap_fault will handle splitting / fallback so that
	 * we eventually come back with a COW page.
	 */
	bool write = (vmf->flags & FAULT_FLAG_WRITE) &&
		(vmf->vma->vm_flags & VM_SHARED);
	pfn_t pfn;

	if (write) {
		sb_start_pagefault(sb);
		file_update_time(vmf->vma->vm_file);
		down_read(&EXT4_I(inode)->i_mmap_sem);
retry:
		handle = ext4_journal_start_sb(sb, EXT4_HT_WRITE_PAGE,
					       EXT4_DATA_TRANS_BLOCKS(sb));
		if (IS_ERR(handle)) {
			up_read(&EXT4_I(inode)->i_mmap_sem);
			sb_end_pagefault(sb);
			return VM_FAULT_SIGBUS;
		}
	} else {
		down_read(&EXT4_I(inode)->i_mmap_sem);
	}
	result = dax_iomap_fault(vmf, pe_size, &pfn, &error, &ext4_iomap_ops);
	if (write) {
		ext4_journal_stop(handle);

		if ((result & VM_FAULT_ERROR) && error == -ENOSPC &&
		    ext4_should_retry_alloc(sb, &retries))
			goto retry;
		/* Handling synchronous page fault? */
		if (result & VM_FAULT_NEEDDSYNC)
			result = dax_finish_sync_fault(vmf, pe_size, pfn);
		up_read(&EXT4_I(inode)->i_mmap_sem);
		sb_end_pagefault(sb);
	} else {
		up_read(&EXT4_I(inode)->i_mmap_sem);
	}

	return result;
}

static vm_fault_t ext4_dax_fault(struct vm_fault *vmf)
{
	return ext4_dax_huge_fault(vmf, PE_SIZE_PTE);
}

static const struct vm_operations_struct ext4_dax_vm_ops = {
	.fault		= ext4_dax_fault,
	.huge_fault	= ext4_dax_huge_fault,
	.page_mkwrite	= ext4_dax_fault,
	.pfn_mkwrite	= ext4_dax_fault,
};
#else
#define ext4_dax_vm_ops	ext4_file_vm_ops
#endif

static const struct vm_operations_struct ext4_file_vm_ops = {
	.fault		= ext4_filemap_fault,
	.map_pages	= filemap_map_pages,
	.page_mkwrite   = ext4_page_mkwrite,
};

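/*
 * ->mmap(): refuses MAP_SYNC mappings that the underlying device cannot
 * support and installs the DAX or the regular page-cache vm_ops.
 */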
static int ext4_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct inode *inode = file->f_mapping->host;
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	struct dax_device *dax_dev = sbi->s_daxdev;

	if (unlikely(ext4_forced_shutdown(sbi)))
		return -EIO;

	/*
	 * We don't support synchronous mappings for non-DAX files and
	 * for DAX files if the underlying dax_device is not synchronous.
	 */
	if (!daxdev_mapping_supported(vma, dax_dev))
		return -EOPNOTSUPP;

	file_accessed(file);
	if (IS_DAX(file_inode(file))) {
		vma->vm_ops = &ext4_dax_vm_ops;
		vma->vm_flags |= VM_HUGEPAGE;
	} else {
		vma->vm_ops = &ext4_file_vm_ops;
	}
	return 0;
}

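/*
 * On the first open after a read-write mount, record the mount point in
 * the superblock's s_last_mounted field (see the comment below).
 */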
static int ext4_sample_last_mounted(struct super_block *sb,
				    struct vfsmount *mnt)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct path path;
	char buf[64], *cp;
	handle_t *handle;
	int err;

	if (likely(sbi->s_mount_flags & EXT4_MF_MNTDIR_SAMPLED))
		return 0;

	if (sb_rdonly(sb) || !sb_start_intwrite_trylock(sb))
		return 0;

	sbi->s_mount_flags |= EXT4_MF_MNTDIR_SAMPLED;
	/*
	 * Sample where the filesystem has been mounted and
	 * store it in the superblock for sysadmin convenience
	 * when trying to sort through large numbers of block
	 * devices or filesystem images.
	 */
	memset(buf, 0, sizeof(buf));
	path.mnt = mnt;
	path.dentry = mnt->mnt_root;
	cp = d_path(&path, buf, sizeof(buf));
	err = 0;
	if (IS_ERR(cp))
		goto out;

	handle = ext4_journal_start_sb(sb, EXT4_HT_MISC, 1);
	err = PTR_ERR(handle);
	if (IS_ERR(handle))
		goto out;
	BUFFER_TRACE(sbi->s_sbh, "get_write_access");
	err = ext4_journal_get_write_access(handle, sbi->s_sbh);
	if (err)
		goto out_journal;
	strlcpy(sbi->s_es->s_last_mounted, cp,
		sizeof(sbi->s_es->s_last_mounted));
	ext4_handle_dirty_super(handle, sb);
out_journal:
	ext4_journal_stop(handle);
out:
	sb_end_intwrite(sb);
	return err;
}

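/*
 * ->open(): samples the mount point, runs the fscrypt and fsverity open
 * checks, attaches the jbd2 inode for writable opens and advertises
 * FMODE_NOWAIT support.
 */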
static int ext4_file_open(struct inode *inode, struct file *filp)
{
	int ret;

	if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
		return -EIO;

	ret = ext4_sample_last_mounted(inode->i_sb, filp->f_path.mnt);
	if (ret)
		return ret;

	ret = fscrypt_file_open(inode, filp);
	if (ret)
		return ret;

	ret = fsverity_file_open(inode, filp);
	if (ret)
		return ret;

	/*
	 * Set up the jbd2_inode if we are opening the inode for
	 * writing and the journal is present
	 */
	if (filp->f_mode & FMODE_WRITE) {
		ret = ext4_inode_attach_jinode(inode);
		if (ret < 0)
			return ret;
	}

	filp->f_mode |= FMODE_NOWAIT;
	return dquot_file_open(inode, filp);
}

/*
 * ext4_llseek() handles both block-mapped and extent-mapped maxbytes values
 * by calling generic_file_llseek_size() with the appropriate maxbytes
 * value for each.
 */
loff_t ext4_llseek(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file->f_mapping->host;
	loff_t maxbytes;

	if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
		maxbytes = EXT4_SB(inode->i_sb)->s_bitmap_maxbytes;
	else
		maxbytes = inode->i_sb->s_maxbytes;

	switch (whence) {
	default:
		return generic_file_llseek_size(file, offset, whence,
						maxbytes, i_size_read(inode));
	case SEEK_HOLE:
		inode_lock_shared(inode);
		offset = iomap_seek_hole(inode, offset,
					 &ext4_iomap_report_ops);
		inode_unlock_shared(inode);
		break;
	case SEEK_DATA:
		inode_lock_shared(inode);
		offset = iomap_seek_data(inode, offset,
					 &ext4_iomap_report_ops);
		inode_unlock_shared(inode);
		break;
	}

	if (offset < 0)
		return offset;
	return vfs_setpos(file, offset, maxbytes);
}

const struct file_operations ext4_file_operations = {
	.llseek		= ext4_llseek,
	.read_iter	= ext4_file_read_iter,
	.write_iter	= ext4_file_write_iter,
	.iopoll		= iomap_dio_iopoll,
	.unlocked_ioctl = ext4_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= ext4_compat_ioctl,
#endif
	.mmap		= ext4_file_mmap,
	.mmap_supported_flags = MAP_SYNC,
	.open		= ext4_file_open,
	.release	= ext4_release_file,
	.fsync		= ext4_sync_file,
	.get_unmapped_area = thp_get_unmapped_area,
	.splice_read	= generic_file_splice_read,
	.splice_write	= iter_file_splice_write,
	.fallocate	= ext4_fallocate,
};

const struct inode_operations ext4_file_inode_operations = {
	.setattr	= ext4_setattr,
	.getattr	= ext4_file_getattr,
	.listxattr	= ext4_listxattr,
	.get_acl	= ext4_get_acl,
	.set_acl	= ext4_set_acl,
	.fiemap		= ext4_fiemap,
};