/*
 *  linux/fs/ext4/file.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 *  from
 *
 *  linux/fs/minix/file.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  ext4 fs regular file handling primitives
 *
 *  64-bit file support on 64-bit platforms by Jakub Jelinek
 *	(jj@sunsite.ms.mff.cuni.cz)
 */

#include <linux/time.h>
#include <linux/fs.h>
#include <linux/mount.h>
#include <linux/path.h>
#include <linux/dax.h>
#include <linux/quotaops.h>
#include <linux/pagevec.h>
#include <linux/uio.h>
#include "ext4.h"
#include "ext4_jbd2.h"
#include "xattr.h"
#include "acl.h"

#ifdef CONFIG_FS_DAX
static ssize_t ext4_dax_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	ssize_t ret;

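	/*
	 * Take the shared inode lock; IOCB_NOWAIT callers must not sleep
	 * here, so give up with -EAGAIN if the lock is contended.
	 */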
	if (!inode_trylock_shared(inode)) {
		if (iocb->ki_flags & IOCB_NOWAIT)
			return -EAGAIN;
		inode_lock_shared(inode);
	}
	/*
	 * Recheck under inode lock - at this point we are sure it cannot
	 * change anymore
	 */
	if (!IS_DAX(inode)) {
		inode_unlock_shared(inode);
		/* Fallback to buffered IO in case we cannot support DAX */
		return generic_file_read_iter(iocb, to);
	}
	ret = dax_iomap_rw(iocb, to, &ext4_iomap_ops);
	inode_unlock_shared(inode);

	file_accessed(iocb->ki_filp);
	return ret;
}
#endif

static ssize_t ext4_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	if (unlikely(ext4_forced_shutdown(EXT4_SB(file_inode(iocb->ki_filp)->i_sb))))
		return -EIO;

	if (!iov_iter_count(to))
		return 0; /* skip atime */

#ifdef CONFIG_FS_DAX
	if (IS_DAX(file_inode(iocb->ki_filp)))
		return ext4_dax_read_iter(iocb, to);
#endif
	return generic_file_read_iter(iocb, to);
}

/*
 * Called when an inode is released. Note that this is different
 * from ext4_file_open: open gets called at every open, but release
 * gets called only when /all/ the files are closed.
 */
static int ext4_release_file(struct inode *inode, struct file *filp)
{
	if (ext4_test_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE)) {
		ext4_alloc_da_blocks(inode);
		ext4_clear_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE);
	}
	/* if we are the last writer on the inode, drop the block reservation */
	if ((filp->f_mode & FMODE_WRITE) &&
			(atomic_read(&inode->i_writecount) == 1) &&
			!EXT4_I(inode)->i_reserved_data_blocks)
	{
		down_write(&EXT4_I(inode)->i_data_sem);
		ext4_discard_preallocations(inode);
		up_write(&EXT4_I(inode)->i_data_sem);
	}
	if (is_dx(inode) && filp->private_data)
		ext4_htree_free_dir_info(filp->private_data);

	return 0;
}

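/*
 * Wait until all pending IO to unwritten extents on this inode has
 * completed, i.e. EXT4_I(inode)->i_unwritten has dropped to zero.
 */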
static void ext4_unwritten_wait(struct inode *inode)
{
	wait_queue_head_t *wq = ext4_ioend_wq(inode);

	wait_event(*wq, (atomic_read(&EXT4_I(inode)->i_unwritten) == 0));
}

/*
 * This tests whether the IO in question is block-aligned or not.
 * Ext4 utilizes unwritten extents when hole-filling during direct IO, and they
 * are converted to written only after the IO is complete.  Until they are
 * mapped, these blocks appear as holes, so dio_zero_block() will assume that
 * it needs to zero out portions of the start and/or end block.  If 2 AIO
 * threads are at work on the same unwritten block, they must be synchronized
 * or one thread will zero the other's data, causing corruption.
 */
static int
ext4_unaligned_aio(struct inode *inode, struct iov_iter *from, loff_t pos)
{
	struct super_block *sb = inode->i_sb;
	int blockmask = sb->s_blocksize - 1;

	if (pos >= i_size_read(inode))
		return 0;

	if ((pos | iov_iter_alignment(from)) & blockmask)
		return 1;

	return 0;
}

/* Is IO overwriting allocated and initialized blocks? */
static bool ext4_overwrite_io(struct inode *inode, loff_t pos, loff_t len)
{
	struct ext4_map_blocks map;
	unsigned int blkbits = inode->i_blkbits;
	int err, blklen;

	if (pos + len > i_size_read(inode))
		return false;

	map.m_lblk = pos >> blkbits;
	map.m_len = EXT4_MAX_BLOCKS(len, pos, blkbits);
	blklen = map.m_len;

	err = ext4_map_blocks(NULL, inode, &map, 0);
	/*
	 * 'err == blklen' means that all of the blocks have been preallocated,
	 * regardless of whether they have been initialized or not. To exclude
	 * unwritten extents, we need to check m_flags.
	 */
	return err == blklen && (map.m_flags & EXT4_MAP_MAPPED);
}

static ssize_t ext4_write_checks(struct kiocb *iocb, struct iov_iter *from)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	ssize_t ret;

	ret = generic_write_checks(iocb, from);
	if (ret <= 0)
		return ret;
	/*
	 * If we have encountered a bitmap-format file, the size limit
	 * is smaller than s_maxbytes, which is for extent-mapped files.
	 */
	if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
		struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);

		if (iocb->ki_pos >= sbi->s_bitmap_maxbytes)
			return -EFBIG;
		iov_iter_truncate(from, sbi->s_bitmap_maxbytes - iocb->ki_pos);
	}
	return iov_iter_count(from);
}

#ifdef CONFIG_FS_DAX
static ssize_t
ext4_dax_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	ssize_t ret;

	if (!inode_trylock(inode)) {
		if (iocb->ki_flags & IOCB_NOWAIT)
			return -EAGAIN;
		inode_lock(inode);
	}
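	/*
	 * As in the generic write path: validate the write, strip
	 * setuid/setgid privileges and update the file timestamps before
	 * any data is written.
	 */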
	ret = ext4_write_checks(iocb, from);
	if (ret <= 0)
		goto out;
	ret = file_remove_privs(iocb->ki_filp);
	if (ret)
		goto out;
	ret = file_update_time(iocb->ki_filp);
	if (ret)
		goto out;

	ret = dax_iomap_rw(iocb, from, &ext4_iomap_ops);
out:
	inode_unlock(inode);
	if (ret > 0)
		ret = generic_write_sync(iocb, ret);
	return ret;
}
#endif

static ssize_t
ext4_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	int o_direct = iocb->ki_flags & IOCB_DIRECT;
	int unaligned_aio = 0;
	int overwrite = 0;
	ssize_t ret;

	if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
		return -EIO;

#ifdef CONFIG_FS_DAX
	if (IS_DAX(inode))
		return ext4_dax_write_iter(iocb, from);
#endif
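	/* Nonblocking (IOCB_NOWAIT) writes are only supported for O_DIRECT */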
	if (!o_direct && (iocb->ki_flags & IOCB_NOWAIT))
		return -EOPNOTSUPP;

	if (!inode_trylock(inode)) {
		if (iocb->ki_flags & IOCB_NOWAIT)
			return -EAGAIN;
		inode_lock(inode);
	}

	ret = ext4_write_checks(iocb, from);
	if (ret <= 0)
		goto out;

	/*
	 * Unaligned direct AIOs must be serialized with respect to each
	 * other, as zeroing of partial blocks by two competing unaligned
	 * AIOs can result in data corruption.
	 */
	if (o_direct && ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS) &&
	    !is_sync_kiocb(iocb) &&
	    ext4_unaligned_aio(inode, from, iocb->ki_pos)) {
		unaligned_aio = 1;
		ext4_unwritten_wait(inode);
	}

	iocb->private = &overwrite;
	/* Check whether we do a DIO overwrite or not */
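	/*
	 * An overwrite of already-allocated, initialized blocks needs no
	 * block allocation or unwritten-extent conversion, so with
	 * dioread_nolock the direct IO can run under less restrictive
	 * locking.
	 */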
	if (o_direct && !unaligned_aio) {
		if (ext4_overwrite_io(inode, iocb->ki_pos, iov_iter_count(from))) {
			if (ext4_should_dioread_nolock(inode))
				overwrite = 1;
		} else if (iocb->ki_flags & IOCB_NOWAIT) {
			ret = -EAGAIN;
			goto out;
		}
	}

	ret = __generic_file_write_iter(iocb, from);
	inode_unlock(inode);

	if (ret > 0)
		ret = generic_write_sync(iocb, ret);

	return ret;

out:
	inode_unlock(inode);
	return ret;
}

#ifdef CONFIG_FS_DAX
static int ext4_dax_huge_fault(struct vm_fault *vmf,
		enum page_entry_size pe_size)
{
	int result;
	handle_t *handle = NULL;
	struct inode *inode = file_inode(vmf->vma->vm_file);
	struct super_block *sb = inode->i_sb;

	/*
	 * We have to distinguish real writes from writes which will result in a
	 * COW page; COW writes should *not* poke the journal (the file will not
	 * be changed). Doing so would cause unintended failures when mounted
	 * read-only.
	 *
	 * We check for VM_SHARED rather than vmf->cow_page since the latter is
	 * unset for pe_size != PE_SIZE_PTE (i.e. only in do_cow_fault); for
	 * other sizes, dax_iomap_fault will handle splitting / fallback so that
	 * we eventually come back with a COW page.
	 */
	bool write = (vmf->flags & FAULT_FLAG_WRITE) &&
		(vmf->vma->vm_flags & VM_SHARED);

	if (write) {
		sb_start_pagefault(sb);
		file_update_time(vmf->vma->vm_file);
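		/*
		 * A write fault takes i_mmap_sem and opens a journal handle
		 * with enough credits for a data write before calling into
		 * the DAX fault handler.
		 */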
		down_read(&EXT4_I(inode)->i_mmap_sem);
		handle = ext4_journal_start_sb(sb, EXT4_HT_WRITE_PAGE,
					       EXT4_DATA_TRANS_BLOCKS(sb));
	} else {
		down_read(&EXT4_I(inode)->i_mmap_sem);
	}
	if (!IS_ERR(handle))
		result = dax_iomap_fault(vmf, pe_size, NULL, &ext4_iomap_ops);
	else
		result = VM_FAULT_SIGBUS;
	if (write) {
		if (!IS_ERR(handle))
			ext4_journal_stop(handle);
		up_read(&EXT4_I(inode)->i_mmap_sem);
		sb_end_pagefault(sb);
	} else {
		up_read(&EXT4_I(inode)->i_mmap_sem);
	}

	return result;
}

static int ext4_dax_fault(struct vm_fault *vmf)
{
	return ext4_dax_huge_fault(vmf, PE_SIZE_PTE);
}

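/*
 * All DAX faults are funnelled through ext4_dax_huge_fault();
 * page_mkwrite and pfn_mkwrite reuse the PTE-sized fault handler.
 */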
static const struct vm_operations_struct ext4_dax_vm_ops = {
	.fault		= ext4_dax_fault,
	.huge_fault	= ext4_dax_huge_fault,
	.page_mkwrite	= ext4_dax_fault,
	.pfn_mkwrite	= ext4_dax_fault,
};
#else
#define ext4_dax_vm_ops	ext4_file_vm_ops
#endif

static const struct vm_operations_struct ext4_file_vm_ops = {
	.fault		= ext4_filemap_fault,
	.map_pages	= filemap_map_pages,
	.page_mkwrite   = ext4_page_mkwrite,
};

static int ext4_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct inode *inode = file->f_mapping->host;

	if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
		return -EIO;

	file_accessed(file);
	if (IS_DAX(file_inode(file))) {
		vma->vm_ops = &ext4_dax_vm_ops;
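		/* DAX maps pfns directly and can service PMD-sized faults */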
		vma->vm_flags |= VM_MIXEDMAP | VM_HUGEPAGE;
	} else {
		vma->vm_ops = &ext4_file_vm_ops;
	}
	return 0;
}

static int ext4_file_open(struct inode * inode, struct file * filp)
{
	struct super_block *sb = inode->i_sb;
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	struct vfsmount *mnt = filp->f_path.mnt;
	struct dentry *dir;
	struct path path;
	char buf[64], *cp;
	int ret;

	if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
		return -EIO;

	if (unlikely(!(sbi->s_mount_flags & EXT4_MF_MNTDIR_SAMPLED) &&
		     !sb_rdonly(sb))) {
		sbi->s_mount_flags |= EXT4_MF_MNTDIR_SAMPLED;
		/*
		 * Sample where the filesystem has been mounted and
		 * store it in the superblock for sysadmin convenience
		 * when trying to sort through large numbers of block
		 * devices or filesystem images.
		 */
		memset(buf, 0, sizeof(buf));
		path.mnt = mnt;
		path.dentry = mnt->mnt_root;
		cp = d_path(&path, buf, sizeof(buf));
		if (!IS_ERR(cp)) {
			handle_t *handle;
			int err;

			handle = ext4_journal_start_sb(sb, EXT4_HT_MISC, 1);
			if (IS_ERR(handle))
				return PTR_ERR(handle);
			BUFFER_TRACE(sbi->s_sbh, "get_write_access");
			err = ext4_journal_get_write_access(handle, sbi->s_sbh);
			if (err) {
				ext4_journal_stop(handle);
				return err;
			}
			strlcpy(sbi->s_es->s_last_mounted, cp,
				sizeof(sbi->s_es->s_last_mounted));
			ext4_handle_dirty_super(handle, sb);
			ext4_journal_stop(handle);
		}
	}
	if (ext4_encrypted_inode(inode)) {
		ret = fscrypt_get_encryption_info(inode);
		if (ret)
			return -EACCES;
		if (!fscrypt_has_encryption_key(inode))
			return -ENOKEY;
	}

	dir = dget_parent(file_dentry(filp));
	if (ext4_encrypted_inode(d_inode(dir)) &&
			!fscrypt_has_permitted_context(d_inode(dir), inode)) {
		ext4_warning(inode->i_sb,
			     "Inconsistent encryption contexts: %lu/%lu",
			     (unsigned long) d_inode(dir)->i_ino,
			     (unsigned long) inode->i_ino);
		dput(dir);
		return -EPERM;
	}
	dput(dir);
	/*
	 * Set up the jbd2_inode if we are opening the inode for
	 * writing and the journal is present
	 */
	if (filp->f_mode & FMODE_WRITE) {
		ret = ext4_inode_attach_jinode(inode);
		if (ret < 0)
			return ret;
	}

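	/* Advertise support for nonblocking (RWF_NOWAIT) operations */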
	filp->f_mode |= FMODE_NOWAIT;
	return dquot_file_open(inode, filp);
}

/*
 * Here we use ext4_map_blocks() to get a block mapping for an extent-based
 * file rather than ext4_ext_walk_space() because we can introduce
 * SEEK_DATA/SEEK_HOLE for block-mapped and extent-mapped files in the same
 * function.  When the extent status tree has been fully implemented, it will
 * track all extent status for a file and we can directly use it to
 * retrieve the offset for SEEK_DATA/SEEK_HOLE.
 */

/*
 * When we retrieve the offset for SEEK_DATA/SEEK_HOLE, we would need to
 * look up the page cache to check whether or not there is some data between
 * [startoff, endoff] because, if this range contains an unwritten extent,
 * we determine this extent as data or a hole according to whether the
 * page cache has data or not.
 */
static int ext4_find_unwritten_pgoff(struct inode *inode,
				     int whence,
				     ext4_lblk_t end_blk,
				     loff_t *offset)
{
	struct pagevec pvec;
	unsigned int blkbits;
	pgoff_t index;
	pgoff_t end;
	loff_t endoff;
	loff_t startoff;
	loff_t lastoff;
	int found = 0;

	blkbits = inode->i_sb->s_blocksize_bits;
	startoff = *offset;
	lastoff = startoff;
	endoff = (loff_t)end_blk << blkbits;

	index = startoff >> PAGE_SHIFT;
	end = (endoff - 1) >> PAGE_SHIFT;

	pagevec_init(&pvec, 0);
	do {
		int i;
		unsigned long nr_pages;

		nr_pages = pagevec_lookup_range(&pvec, inode->i_mapping,
					&index, end);
		if (nr_pages == 0)
			break;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];
			struct buffer_head *bh, *head;

			/*
			 * If the current offset is smaller than the page
			 * offset, there is a hole at this offset.
			 */
			if (whence == SEEK_HOLE && lastoff < endoff &&
			    lastoff < page_offset(pvec.pages[i])) {
				found = 1;
				*offset = lastoff;
				goto out;
			}

			lock_page(page);

			if (unlikely(page->mapping != inode->i_mapping)) {
				unlock_page(page);
				continue;
			}

			if (!page_has_buffers(page)) {
				unlock_page(page);
				continue;
			}

			if (page_has_buffers(page)) {
				lastoff = page_offset(page);
				bh = head = page_buffers(page);
				do {
					if (lastoff + bh->b_size <= startoff)
						goto next;
					if (buffer_uptodate(bh) ||
					    buffer_unwritten(bh)) {
						if (whence == SEEK_DATA)
							found = 1;
					} else {
						if (whence == SEEK_HOLE)
							found = 1;
					}
					if (found) {
						*offset = max_t(loff_t,
							startoff, lastoff);
						unlock_page(page);
						goto out;
					}
next:
					lastoff += bh->b_size;
					bh = bh->b_this_page;
				} while (bh != head);
			}

			lastoff = page_offset(page) + PAGE_SIZE;
			unlock_page(page);
		}

		pagevec_release(&pvec);
	} while (index <= end);

	/* There are no pages up to endoff - that would be a hole in there. */
	if (whence == SEEK_HOLE && lastoff < endoff) {
		found = 1;
		*offset = lastoff;
	}
out:
	pagevec_release(&pvec);
	return found;
}

/*
 * ext4_seek_data() retrieves the offset for SEEK_DATA.
 */
static loff_t ext4_seek_data(struct file *file, loff_t offset, loff_t maxsize)
{
	struct inode *inode = file->f_mapping->host;
	struct extent_status es;
	ext4_lblk_t start, last, end;
	loff_t dataoff, isize;
	int blkbits;
	int ret;

	inode_lock(inode);

	isize = i_size_read(inode);
	if (offset < 0 || offset >= isize) {
		inode_unlock(inode);
		return -ENXIO;
	}

	blkbits = inode->i_sb->s_blocksize_bits;
	start = offset >> blkbits;
	last = start;
	end = isize >> blkbits;
	dataoff = offset;

	do {
		ret = ext4_get_next_extent(inode, last, end - last + 1, &es);
		if (ret <= 0) {
			/* No extent found -> no data */
			if (ret == 0)
				ret = -ENXIO;
			inode_unlock(inode);
			return ret;
		}

		last = es.es_lblk;
		if (last != start)
			dataoff = (loff_t)last << blkbits;
		if (!ext4_es_is_unwritten(&es))
			break;

		/*
		 * If there is an unwritten extent at this offset, it is
		 * treated as data or as a hole according to whether the
		 * page cache has data for it.
		 */
		if (ext4_find_unwritten_pgoff(inode, SEEK_DATA,
					      es.es_lblk + es.es_len, &dataoff))
			break;
		last += es.es_len;
		dataoff = (loff_t)last << blkbits;
		cond_resched();
	} while (last <= end);

	inode_unlock(inode);

	if (dataoff > isize)
		return -ENXIO;

	return vfs_setpos(file, dataoff, maxsize);
}

/*
 * ext4_seek_hole() retrieves the offset for SEEK_HOLE.
 */
static loff_t ext4_seek_hole(struct file *file, loff_t offset, loff_t maxsize)
{
	struct inode *inode = file->f_mapping->host;
	struct extent_status es;
	ext4_lblk_t start, last, end;
	loff_t holeoff, isize;
	int blkbits;
	int ret;

	inode_lock(inode);

	isize = i_size_read(inode);
	if (offset < 0 || offset >= isize) {
		inode_unlock(inode);
		return -ENXIO;
	}

	blkbits = inode->i_sb->s_blocksize_bits;
	start = offset >> blkbits;
	last = start;
	end = isize >> blkbits;
	holeoff = offset;

	do {
		ret = ext4_get_next_extent(inode, last, end - last + 1, &es);
		if (ret < 0) {
			inode_unlock(inode);
			return ret;
		}
		/* Found a hole? */
		if (ret == 0 || es.es_lblk > last) {
			if (last != start)
				holeoff = (loff_t)last << blkbits;
			break;
		}
		/*
		 * If there is an unwritten extent at this offset, it is
		 * treated as data or as a hole according to whether the
		 * page cache has data for it.
		 */
		if (ext4_es_is_unwritten(&es) &&
		    ext4_find_unwritten_pgoff(inode, SEEK_HOLE,
					      last + es.es_len, &holeoff))
			break;

		last += es.es_len;
		holeoff = (loff_t)last << blkbits;
		cond_resched();
	} while (last <= end);

	inode_unlock(inode);

	if (holeoff > isize)
		holeoff = isize;

	return vfs_setpos(file, holeoff, maxsize);
}

/*
 * ext4_llseek() handles both block-mapped and extent-mapped maxbytes values
 * by calling generic_file_llseek_size() with the appropriate maxbytes
 * value for each.
 */
loff_t ext4_llseek(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file->f_mapping->host;
	loff_t maxbytes;

	if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
		maxbytes = EXT4_SB(inode->i_sb)->s_bitmap_maxbytes;
	else
		maxbytes = inode->i_sb->s_maxbytes;

	switch (whence) {
	case SEEK_SET:
	case SEEK_CUR:
	case SEEK_END:
		return generic_file_llseek_size(file, offset, whence,
						maxbytes, i_size_read(inode));
	case SEEK_DATA:
		return ext4_seek_data(file, offset, maxbytes);
	case SEEK_HOLE:
		return ext4_seek_hole(file, offset, maxbytes);
	}

	return -EINVAL;
}

const struct file_operations ext4_file_operations = {
	.llseek		= ext4_llseek,
	.read_iter	= ext4_file_read_iter,
	.write_iter	= ext4_file_write_iter,
	.unlocked_ioctl = ext4_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= ext4_compat_ioctl,
#endif
	.mmap		= ext4_file_mmap,
	.open		= ext4_file_open,
	.release	= ext4_release_file,
	.fsync		= ext4_sync_file,
	.get_unmapped_area = thp_get_unmapped_area,
	.splice_read	= generic_file_splice_read,
	.splice_write	= iter_file_splice_write,
	.fallocate	= ext4_fallocate,
};

const struct inode_operations ext4_file_inode_operations = {
	.setattr	= ext4_setattr,
	.getattr	= ext4_file_getattr,
	.listxattr	= ext4_listxattr,
	.get_acl	= ext4_get_acl,
	.set_acl	= ext4_set_acl,
	.fiemap		= ext4_fiemap,
};