// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/ext4/file.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 *  from
 *
 *  linux/fs/minix/file.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  ext4 fs regular file handling primitives
 *
 *  64-bit file support on 64-bit platforms by Jakub Jelinek
 *	(jj@sunsite.ms.mff.cuni.cz)
 */

#include <linux/time.h>
#include <linux/fs.h>
#include <linux/iomap.h>
#include <linux/mount.h>
#include <linux/path.h>
#include <linux/dax.h>
#include <linux/quotaops.h>
#include <linux/pagevec.h>
#include <linux/uio.h>
#include <linux/mman.h>
#include "ext4.h"
#include "ext4_jbd2.h"
#include "xattr.h"
#include "acl.h"

#ifdef CONFIG_FS_DAX
static ssize_t ext4_dax_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	ssize_t ret;

	if (!inode_trylock_shared(inode)) {
		if (iocb->ki_flags & IOCB_NOWAIT)
			return -EAGAIN;
		inode_lock_shared(inode);
	}
	/*
	 * Recheck under inode lock - at this point we are sure it cannot
	 * change anymore
	 */
	if (!IS_DAX(inode)) {
		inode_unlock_shared(inode);
		/* Fallback to buffered IO in case we cannot support DAX */
		return generic_file_read_iter(iocb, to);
	}
	ret = dax_iomap_rw(iocb, to, &ext4_iomap_ops);
	inode_unlock_shared(inode);

	file_accessed(iocb->ki_filp);
	return ret;
}
#endif
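/*
 * Note: the trylock-then-lock pattern in ext4_dax_read_iter() (repeated in
 * the write paths below) is what implements IOCB_NOWAIT / RWF_NOWAIT:
 * when the caller asked not to block and the inode lock is contended,
 * fail fast with -EAGAIN instead of sleeping on the lock.
 */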

static ssize_t ext4_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	if (unlikely(ext4_forced_shutdown(EXT4_SB(file_inode(iocb->ki_filp)->i_sb))))
		return -EIO;

	if (!iov_iter_count(to))
		return 0; /* skip atime */

#ifdef CONFIG_FS_DAX
	if (IS_DAX(file_inode(iocb->ki_filp)))
		return ext4_dax_read_iter(iocb, to);
#endif
	return generic_file_read_iter(iocb, to);
}

/*
 * Called when an inode is released. Note that this is different
 * from ext4_file_open: open gets called at every open, but release
 * gets called only when /all/ the files are closed.
 */
static int ext4_release_file(struct inode *inode, struct file *filp)
{
	if (ext4_test_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE)) {
		ext4_alloc_da_blocks(inode);
		ext4_clear_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE);
	}
	/* if we are the last writer on the inode, drop the block reservation */
	if ((filp->f_mode & FMODE_WRITE) &&
	    (atomic_read(&inode->i_writecount) == 1) &&
	    !EXT4_I(inode)->i_reserved_data_blocks) {
		down_write(&EXT4_I(inode)->i_data_sem);
		ext4_discard_preallocations(inode);
		up_write(&EXT4_I(inode)->i_data_sem);
	}
	if (is_dx(inode) && filp->private_data)
		ext4_htree_free_dir_info(filp->private_data);

	return 0;
}

static void ext4_unwritten_wait(struct inode *inode)
{
	wait_queue_head_t *wq = ext4_ioend_wq(inode);

	wait_event(*wq, (atomic_read(&EXT4_I(inode)->i_unwritten) == 0));
}

/*
 * This tests whether the IO in question is block-aligned or not.
 * Ext4 utilizes unwritten extents when hole-filling during direct IO, and they
 * are converted to written only after the IO is complete.  Until they are
 * mapped, these blocks appear as holes, so dio_zero_block() will assume that
 * it needs to zero out portions of the start and/or end block.  If 2 AIO
 * threads are at work on the same unwritten block, they must be synchronized
 * or one thread will zero the other's data, causing corruption.
 */
static int
ext4_unaligned_aio(struct inode *inode, struct iov_iter *from, loff_t pos)
{
	struct super_block *sb = inode->i_sb;
	int blockmask = sb->s_blocksize - 1;

	if (pos >= i_size_read(inode))
		return 0;

	if ((pos | iov_iter_alignment(from)) & blockmask)
		return 1;

	return 0;
}
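/*
 * Illustrative example (assumes 4 KiB blocks, i.e. blockmask == 4095, and
 * a target range below i_size, since writes at or past EOF return 0 above):
 * an AIO write of 2048 bytes at pos 1024 leaves
 * (pos | iov_iter_alignment(from)) & blockmask nonzero, so it is flagged
 * as unaligned and serialized; an 8192-byte write at pos 4096 from a
 * block-aligned buffer is not. Note that iov_iter_alignment() folds in
 * both the user buffer addresses and the total length.
 */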

/* Is IO overwriting allocated and initialized blocks? */
static bool ext4_overwrite_io(struct inode *inode, loff_t pos, loff_t len)
{
	struct ext4_map_blocks map;
	unsigned int blkbits = inode->i_blkbits;
	int err, blklen;

	if (pos + len > i_size_read(inode))
		return false;

	map.m_lblk = pos >> blkbits;
	map.m_len = EXT4_MAX_BLOCKS(len, pos, blkbits);
	blklen = map.m_len;

	err = ext4_map_blocks(NULL, inode, &map, 0);
	/*
	 * 'err == blklen' means that all of the blocks have been preallocated,
	 * regardless of whether they have been initialized or not. To exclude
	 * unwritten extents, we need to check m_flags.
	 */
	return err == blklen && (map.m_flags & EXT4_MAP_MAPPED);
}
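/*
 * Illustrative numbers (assuming 4 KiB blocks, so blkbits == 12): for
 * pos == 100 and len == 8000 the range spans bytes 100..8099, so
 * EXT4_MAX_BLOCKS() yields m_len == 2 (logical blocks 0 and 1), and the
 * write counts as an overwrite only if both blocks come back mapped as
 * written extents.
 */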

static ssize_t ext4_write_checks(struct kiocb *iocb, struct iov_iter *from)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	ssize_t ret;

	ret = generic_write_checks(iocb, from);
	if (ret <= 0)
		return ret;
	/*
	 * If we have encountered a bitmap-format file, the size limit
	 * is smaller than s_maxbytes, which is for extent-mapped files.
	 */
	if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
		struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);

		if (iocb->ki_pos >= sbi->s_bitmap_maxbytes)
			return -EFBIG;
		iov_iter_truncate(from, sbi->s_bitmap_maxbytes - iocb->ki_pos);
	}
	return iov_iter_count(from);
}
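/*
 * Note that a write straddling s_bitmap_maxbytes is shortened rather than
 * rejected: only writes that *begin* at or beyond the limit fail with
 * -EFBIG; everything else proceeds as a short write up to the limit.
 */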

#ifdef CONFIG_FS_DAX
static ssize_t
ext4_dax_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	ssize_t ret;

	if (!inode_trylock(inode)) {
		if (iocb->ki_flags & IOCB_NOWAIT)
			return -EAGAIN;
		inode_lock(inode);
	}
	ret = ext4_write_checks(iocb, from);
	if (ret <= 0)
		goto out;
	ret = file_remove_privs(iocb->ki_filp);
	if (ret)
		goto out;
	ret = file_update_time(iocb->ki_filp);
	if (ret)
		goto out;

	ret = dax_iomap_rw(iocb, from, &ext4_iomap_ops);
out:
	inode_unlock(inode);
	if (ret > 0)
		ret = generic_write_sync(iocb, ret);
	return ret;
}
#endif

static ssize_t
ext4_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	int o_direct = iocb->ki_flags & IOCB_DIRECT;
	int unaligned_aio = 0;
	int overwrite = 0;
	ssize_t ret;

	if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
		return -EIO;

#ifdef CONFIG_FS_DAX
	if (IS_DAX(inode))
		return ext4_dax_write_iter(iocb, from);
#endif
	if (!o_direct && (iocb->ki_flags & IOCB_NOWAIT))
		return -EOPNOTSUPP;

	if (!inode_trylock(inode)) {
		if (iocb->ki_flags & IOCB_NOWAIT)
			return -EAGAIN;
		inode_lock(inode);
	}

	ret = ext4_write_checks(iocb, from);
	if (ret <= 0)
		goto out;

	/*
	 * Unaligned direct AIO must be serialized among each other as zeroing
	 * of partial blocks of two competing unaligned AIOs can result in data
	 * corruption.
	 */
	if (o_direct && ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS) &&
	    !is_sync_kiocb(iocb) &&
	    ext4_unaligned_aio(inode, from, iocb->ki_pos)) {
		unaligned_aio = 1;
		ext4_unwritten_wait(inode);
	}

	iocb->private = &overwrite;
	/* Check whether we do a DIO overwrite or not */
	if (o_direct && !unaligned_aio) {
		if (ext4_overwrite_io(inode, iocb->ki_pos, iov_iter_count(from))) {
			if (ext4_should_dioread_nolock(inode))
				overwrite = 1;
		} else if (iocb->ki_flags & IOCB_NOWAIT) {
			ret = -EAGAIN;
			goto out;
		}
	}

	ret = __generic_file_write_iter(iocb, from);
	inode_unlock(inode);

	if (ret > 0)
		ret = generic_write_sync(iocb, ret);

	return ret;

out:
	inode_unlock(inode);
	return ret;
}
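/*
 * The on-stack 'overwrite' flag published through iocb->private above is
 * consumed later by ext4's direct-IO path, which relaxes locking and skips
 * block allocation when the write only overwrites blocks that are already
 * allocated and written.
 */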

#ifdef CONFIG_FS_DAX
static vm_fault_t ext4_dax_huge_fault(struct vm_fault *vmf,
		enum page_entry_size pe_size)
{
	int error = 0;
	vm_fault_t result;
	int retries = 0;
	handle_t *handle = NULL;
	struct inode *inode = file_inode(vmf->vma->vm_file);
	struct super_block *sb = inode->i_sb;

	/*
	 * We have to distinguish real writes from writes which will result in a
	 * COW page; COW writes should *not* poke the journal (the file will not
	 * be changed). Doing so would cause unintended failures when mounted
	 * read-only.
	 *
	 * We check for VM_SHARED rather than vmf->cow_page since the latter is
	 * unset for pe_size != PE_SIZE_PTE (i.e. only in do_cow_fault); for
	 * other sizes, dax_iomap_fault will handle splitting / fallback so that
	 * we eventually come back with a COW page.
	 */
	bool write = (vmf->flags & FAULT_FLAG_WRITE) &&
		(vmf->vma->vm_flags & VM_SHARED);
	pfn_t pfn;

	if (write) {
		sb_start_pagefault(sb);
		file_update_time(vmf->vma->vm_file);
		down_read(&EXT4_I(inode)->i_mmap_sem);
retry:
		handle = ext4_journal_start_sb(sb, EXT4_HT_WRITE_PAGE,
					       EXT4_DATA_TRANS_BLOCKS(sb));
		if (IS_ERR(handle)) {
			up_read(&EXT4_I(inode)->i_mmap_sem);
			sb_end_pagefault(sb);
			return VM_FAULT_SIGBUS;
		}
	} else {
		down_read(&EXT4_I(inode)->i_mmap_sem);
	}
	result = dax_iomap_fault(vmf, pe_size, &pfn, &error, &ext4_iomap_ops);
	if (write) {
		ext4_journal_stop(handle);

		if ((result & VM_FAULT_ERROR) && error == -ENOSPC &&
		    ext4_should_retry_alloc(sb, &retries))
			goto retry;
		/* Handling synchronous page fault? */
		if (result & VM_FAULT_NEEDDSYNC)
			result = dax_finish_sync_fault(vmf, pe_size, pfn);
		up_read(&EXT4_I(inode)->i_mmap_sem);
		sb_end_pagefault(sb);
	} else {
		up_read(&EXT4_I(inode)->i_mmap_sem);
	}

	return result;
}
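/*
 * The journal handle is started before dax_iomap_fault() because a write
 * fault may need to allocate blocks; EXT4_DATA_TRANS_BLOCKS(sb) reserves
 * roughly enough credits for a single data-block allocation plus the
 * associated metadata (extent tree, bitmap, group descriptor) updates.
 */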

static vm_fault_t ext4_dax_fault(struct vm_fault *vmf)
{
	return ext4_dax_huge_fault(vmf, PE_SIZE_PTE);
}

static const struct vm_operations_struct ext4_dax_vm_ops = {
	.fault		= ext4_dax_fault,
	.huge_fault	= ext4_dax_huge_fault,
	.page_mkwrite	= ext4_dax_fault,
	.pfn_mkwrite	= ext4_dax_fault,
};
#else
#define ext4_dax_vm_ops	ext4_file_vm_ops
#endif

static const struct vm_operations_struct ext4_file_vm_ops = {
	.fault		= ext4_filemap_fault,
	.map_pages	= filemap_map_pages,
	.page_mkwrite	= ext4_page_mkwrite,
};

static int ext4_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct inode *inode = file->f_mapping->host;

	if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
		return -EIO;

	/*
	 * We don't support synchronous mappings for non-DAX files. At least
	 * until someone comes up with a sensible use case.
	 */
	if (!IS_DAX(file_inode(file)) && (vma->vm_flags & VM_SYNC))
		return -EOPNOTSUPP;

	file_accessed(file);
	if (IS_DAX(file_inode(file))) {
		vma->vm_ops = &ext4_dax_vm_ops;
		vma->vm_flags |= VM_MIXEDMAP | VM_HUGEPAGE;
	} else {
		vma->vm_ops = &ext4_file_vm_ops;
	}
	return 0;
}
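/*
 * VM_SYNC corresponds to mmap's MAP_SYNC flag: with DAX, a write fault must
 * make the backing allocation durable (see the VM_FAULT_NEEDDSYNC handling
 * above) so that userspace can persist data with CPU cache flushes alone.
 * Page-cache-backed files cannot honor that contract, hence the
 * -EOPNOTSUPP.
 */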

static int ext4_file_open(struct inode *inode, struct file *filp)
{
	struct super_block *sb = inode->i_sb;
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	struct vfsmount *mnt = filp->f_path.mnt;
	struct path path;
	char buf[64], *cp;
	int ret;

	if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
		return -EIO;

	if (unlikely(!(sbi->s_mount_flags & EXT4_MF_MNTDIR_SAMPLED) &&
		     !sb_rdonly(sb))) {
		sbi->s_mount_flags |= EXT4_MF_MNTDIR_SAMPLED;
		/*
		 * Sample where the filesystem has been mounted and
		 * store it in the superblock for sysadmin convenience
		 * when trying to sort through large numbers of block
		 * devices or filesystem images.
		 */
		memset(buf, 0, sizeof(buf));
		path.mnt = mnt;
		path.dentry = mnt->mnt_root;
		cp = d_path(&path, buf, sizeof(buf));
		if (!IS_ERR(cp)) {
			handle_t *handle;
			int err;

			handle = ext4_journal_start_sb(sb, EXT4_HT_MISC, 1);
			if (IS_ERR(handle))
				return PTR_ERR(handle);
			BUFFER_TRACE(sbi->s_sbh, "get_write_access");
			err = ext4_journal_get_write_access(handle, sbi->s_sbh);
			if (err) {
				ext4_journal_stop(handle);
				return err;
			}
			strlcpy(sbi->s_es->s_last_mounted, cp,
				sizeof(sbi->s_es->s_last_mounted));
			ext4_handle_dirty_super(handle, sb);
			ext4_journal_stop(handle);
		}
	}

	ret = fscrypt_file_open(inode, filp);
	if (ret)
		return ret;

	/*
	 * Set up the jbd2_inode if we are opening the inode for
	 * writing and the journal is present
	 */
	if (filp->f_mode & FMODE_WRITE) {
		ret = ext4_inode_attach_jinode(inode);
		if (ret < 0)
			return ret;
	}

	filp->f_mode |= FMODE_NOWAIT;
	return dquot_file_open(inode, filp);
}
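/*
 * Setting FMODE_NOWAIT above advertises that this file supports RWF_NOWAIT
 * reads and writes (surfaced to the iter paths as IOCB_NOWAIT), which bail
 * out with -EAGAIN instead of sleeping on the inode lock.
 */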

/*
 * ext4_llseek() handles both block-mapped and extent-mapped maxbytes values
 * by calling generic_file_llseek_size() with the appropriate maxbytes
 * value for each.
 */
loff_t ext4_llseek(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file->f_mapping->host;
	loff_t maxbytes;

	if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
		maxbytes = EXT4_SB(inode->i_sb)->s_bitmap_maxbytes;
	else
		maxbytes = inode->i_sb->s_maxbytes;

	switch (whence) {
	default:
		return generic_file_llseek_size(file, offset, whence,
						maxbytes, i_size_read(inode));
	case SEEK_HOLE:
		inode_lock_shared(inode);
		offset = iomap_seek_hole(inode, offset, &ext4_iomap_ops);
		inode_unlock_shared(inode);
		break;
	case SEEK_DATA:
		inode_lock_shared(inode);
		offset = iomap_seek_data(inode, offset, &ext4_iomap_ops);
		inode_unlock_shared(inode);
		break;
	}

	if (offset < 0)
		return offset;
	return vfs_setpos(file, offset, maxbytes);
}

const struct file_operations ext4_file_operations = {
	.llseek		= ext4_llseek,
	.read_iter	= ext4_file_read_iter,
	.write_iter	= ext4_file_write_iter,
	.unlocked_ioctl = ext4_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= ext4_compat_ioctl,
#endif
	.mmap		= ext4_file_mmap,
	.mmap_supported_flags = MAP_SYNC,
	.open		= ext4_file_open,
	.release	= ext4_release_file,
	.fsync		= ext4_sync_file,
	.get_unmapped_area = thp_get_unmapped_area,
	.splice_read	= generic_file_splice_read,
	.splice_write	= iter_file_splice_write,
	.fallocate	= ext4_fallocate,
};

const struct inode_operations ext4_file_inode_operations = {
	.setattr	= ext4_setattr,
	.getattr	= ext4_file_getattr,
	.listxattr	= ext4_listxattr,
	.get_acl	= ext4_get_acl,
	.set_acl	= ext4_set_acl,
	.fiemap		= ext4_fiemap,
};