/*
 *  linux/fs/ext4/file.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 *  from
 *
 *  linux/fs/minix/file.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  ext4 fs regular file handling primitives
 *
 *  64-bit file support on 64-bit platforms by Jakub Jelinek
 *	(jj@sunsite.ms.mff.cuni.cz)
 */

#include <linux/time.h>
#include <linux/fs.h>
#include <linux/mount.h>
#include <linux/path.h>
#include <linux/dax.h>
#include <linux/quotaops.h>
#include <linux/pagevec.h>
#include <linux/uio.h>
#include "ext4.h"
#include "ext4_jbd2.h"
#include "xattr.h"
#include "acl.h"

#ifdef CONFIG_FS_DAX
static ssize_t ext4_dax_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	ssize_t ret;

	inode_lock_shared(inode);
	/*
	 * Recheck under inode lock - at this point we are sure it cannot
	 * change anymore
	 */
	if (!IS_DAX(inode)) {
		inode_unlock_shared(inode);
		/* Fallback to buffered IO in case we cannot support DAX */
		return generic_file_read_iter(iocb, to);
	}
	ret = dax_iomap_rw(iocb, to, &ext4_iomap_ops);
	inode_unlock_shared(inode);

	file_accessed(iocb->ki_filp);
	return ret;
}
#endif

static ssize_t ext4_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	if (unlikely(ext4_forced_shutdown(EXT4_SB(file_inode(iocb->ki_filp)->i_sb))))
		return -EIO;

	if (!iov_iter_count(to))
		return 0; /* skip atime */

#ifdef CONFIG_FS_DAX
	if (IS_DAX(file_inode(iocb->ki_filp)))
		return ext4_dax_read_iter(iocb, to);
#endif
	return generic_file_read_iter(iocb, to);
}

/*
 * Called when an inode is released. Note that this is different
 * from ext4_file_open: open gets called at every open, but release
 * gets called only when /all/ the files are closed.
 */
static int ext4_release_file(struct inode *inode, struct file *filp)
{
	if (ext4_test_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE)) {
		ext4_alloc_da_blocks(inode);
		ext4_clear_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE);
	}
	/* if we are the last writer on the inode, drop the block reservation */
	if ((filp->f_mode & FMODE_WRITE) &&
	    (atomic_read(&inode->i_writecount) == 1) &&
	    !EXT4_I(inode)->i_reserved_data_blocks)
	{
		down_write(&EXT4_I(inode)->i_data_sem);
		ext4_discard_preallocations(inode);
		up_write(&EXT4_I(inode)->i_data_sem);
	}
	if (is_dx(inode) && filp->private_data)
		ext4_htree_free_dir_info(filp->private_data);

	return 0;
}

static void ext4_unwritten_wait(struct inode *inode)
{
	wait_queue_head_t *wq = ext4_ioend_wq(inode);

	wait_event(*wq, (atomic_read(&EXT4_I(inode)->i_unwritten) == 0));
}

/*
 * This tests whether the IO in question is block-aligned or not.
 * Ext4 utilizes unwritten extents when hole-filling during direct IO, and they
 * are converted to written only after the IO is complete.  Until they are
 * mapped, these blocks appear as holes, so dio_zero_block() will assume that
 * it needs to zero out portions of the start and/or end block.  If 2 AIO
 * threads are at work on the same unwritten block, they must be synchronized
 * or one thread will zero the other's data, causing corruption.
 */
static int
ext4_unaligned_aio(struct inode *inode, struct iov_iter *from, loff_t pos)
{
	struct super_block *sb = inode->i_sb;
	int blockmask = sb->s_blocksize - 1;

	if (pos >= i_size_read(inode))
		return 0;

	if ((pos | iov_iter_alignment(from)) & blockmask)
		return 1;

	return 0;
}

/* Is IO overwriting allocated and initialized blocks? */
static bool ext4_overwrite_io(struct inode *inode, loff_t pos, loff_t len)
{
	struct ext4_map_blocks map;
	unsigned int blkbits = inode->i_blkbits;
	int err, blklen;

	if (pos + len > i_size_read(inode))
		return false;

	map.m_lblk = pos >> blkbits;
	map.m_len = EXT4_MAX_BLOCKS(len, pos, blkbits);
	blklen = map.m_len;

	err = ext4_map_blocks(NULL, inode, &map, 0);
	/*
	 * 'err == blklen' means that all of the blocks have been preallocated,
	 * regardless of whether they have been initialized or not. To exclude
	 * unwritten extents, we need to check m_flags.
	 */
	return err == blklen && (map.m_flags & EXT4_MAP_MAPPED);
}
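
/*
 * Worked example (an illustrative note, assuming a 4 KiB block size, i.e.
 * blkbits = 12): for pos = 8192 and len = 12288, map.m_lblk = 2 and
 * map.m_len = EXT4_MAX_BLOCKS(12288, 8192, 12) = 3.  If ext4_map_blocks()
 * returns 3 with EXT4_MAP_MAPPED set, all three blocks are already
 * allocated and written, so the direct IO qualifies as a pure overwrite.
 */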

static ssize_t ext4_write_checks(struct kiocb *iocb, struct iov_iter *from)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	ssize_t ret;

	ret = generic_write_checks(iocb, from);
	if (ret <= 0)
		return ret;
	/*
	 * If we have encountered a bitmap-format file, the size limit
	 * is smaller than s_maxbytes, which is for extent-mapped files.
	 */
	if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
		struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);

		if (iocb->ki_pos >= sbi->s_bitmap_maxbytes)
			return -EFBIG;
		iov_iter_truncate(from, sbi->s_bitmap_maxbytes - iocb->ki_pos);
	}
	return iov_iter_count(from);
}
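
/*
 * Example (an illustrative note): for a block-mapped (non-extent) file,
 * s_bitmap_maxbytes is bounded by the indirect-block tree (on the order
 * of terabytes with 4 KiB blocks), well below s_maxbytes for extent
 * files.  A write that starts below the limit but would cross it is not
 * rejected outright; iov_iter_truncate() above shortens it so it stops
 * exactly at s_bitmap_maxbytes, and only a write starting at or past the
 * limit fails with -EFBIG.
 */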

#ifdef CONFIG_FS_DAX
static ssize_t
ext4_dax_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	ssize_t ret;

	inode_lock(inode);
	ret = ext4_write_checks(iocb, from);
	if (ret <= 0)
		goto out;
	ret = file_remove_privs(iocb->ki_filp);
	if (ret)
		goto out;
	ret = file_update_time(iocb->ki_filp);
	if (ret)
		goto out;

	ret = dax_iomap_rw(iocb, from, &ext4_iomap_ops);
out:
	inode_unlock(inode);
	if (ret > 0)
		ret = generic_write_sync(iocb, ret);
	return ret;
}
#endif

static ssize_t
ext4_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	int o_direct = iocb->ki_flags & IOCB_DIRECT;
	int unaligned_aio = 0;
	int overwrite = 0;
	ssize_t ret;

	if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
		return -EIO;

#ifdef CONFIG_FS_DAX
	if (IS_DAX(inode))
		return ext4_dax_write_iter(iocb, from);
#endif

	inode_lock(inode);
	ret = ext4_write_checks(iocb, from);
	if (ret <= 0)
		goto out;

	/*
	 * Unaligned direct AIO must be serialized among each other as zeroing
	 * of partial blocks of two competing unaligned AIOs can result in data
	 * corruption.
	 */
	if (o_direct && ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS) &&
	    !is_sync_kiocb(iocb) &&
	    ext4_unaligned_aio(inode, from, iocb->ki_pos)) {
		unaligned_aio = 1;
		ext4_unwritten_wait(inode);
	}

	iocb->private = &overwrite;
	/* Check whether we do a DIO overwrite or not */
	if (o_direct && ext4_should_dioread_nolock(inode) && !unaligned_aio &&
	    ext4_overwrite_io(inode, iocb->ki_pos, iov_iter_count(from)))
		overwrite = 1;

	ret = __generic_file_write_iter(iocb, from);
	inode_unlock(inode);

	if (ret > 0)
		ret = generic_write_sync(iocb, ret);

	return ret;

out:
	inode_unlock(inode);
	return ret;
}

#ifdef CONFIG_FS_DAX
static int ext4_dax_huge_fault(struct vm_fault *vmf,
		enum page_entry_size pe_size)
{
	int result;
	handle_t *handle = NULL;
	struct inode *inode = file_inode(vmf->vma->vm_file);
	struct super_block *sb = inode->i_sb;
	bool write = vmf->flags & FAULT_FLAG_WRITE;

	if (write) {
		sb_start_pagefault(sb);
		file_update_time(vmf->vma->vm_file);
		down_read(&EXT4_I(inode)->i_mmap_sem);
		handle = ext4_journal_start_sb(sb, EXT4_HT_WRITE_PAGE,
					       EXT4_DATA_TRANS_BLOCKS(sb));
	} else {
		down_read(&EXT4_I(inode)->i_mmap_sem);
	}
	if (!IS_ERR(handle))
		result = dax_iomap_fault(vmf, pe_size, &ext4_iomap_ops);
	else
		result = VM_FAULT_SIGBUS;
	if (write) {
		if (!IS_ERR(handle))
			ext4_journal_stop(handle);
		up_read(&EXT4_I(inode)->i_mmap_sem);
		sb_end_pagefault(sb);
	} else {
		up_read(&EXT4_I(inode)->i_mmap_sem);
	}

	return result;
}

static int ext4_dax_fault(struct vm_fault *vmf)
{
	return ext4_dax_huge_fault(vmf, PE_SIZE_PTE);
}

/*
 * Handle write faults for VM_MIXEDMAP mappings. As in the ext4_dax_fault()
 * handler, we check for races against truncate. Note that since we cycle
 * through i_mmap_sem, we are sure that any hole punching that began before
 * we were called is finished by now, so if it included part of the file we
 * are working on, our pte will get unmapped and the check for pte_same() in
 * wp_pfn_shared() fails. Thus the fault gets retried and things work out as
 * desired.
 */
static int ext4_dax_pfn_mkwrite(struct vm_fault *vmf)
{
	struct inode *inode = file_inode(vmf->vma->vm_file);
	struct super_block *sb = inode->i_sb;
	loff_t size;
	int ret;

	sb_start_pagefault(sb);
	file_update_time(vmf->vma->vm_file);
	down_read(&EXT4_I(inode)->i_mmap_sem);
	size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (vmf->pgoff >= size)
		ret = VM_FAULT_SIGBUS;
	else
		ret = dax_pfn_mkwrite(vmf);
	up_read(&EXT4_I(inode)->i_mmap_sem);
	sb_end_pagefault(sb);

	return ret;
}

static const struct vm_operations_struct ext4_dax_vm_ops = {
	.fault		= ext4_dax_fault,
	.huge_fault	= ext4_dax_huge_fault,
	.page_mkwrite	= ext4_dax_fault,
	.pfn_mkwrite	= ext4_dax_pfn_mkwrite,
};
#else
#define ext4_dax_vm_ops	ext4_file_vm_ops
#endif

static const struct vm_operations_struct ext4_file_vm_ops = {
	.fault		= ext4_filemap_fault,
	.map_pages	= filemap_map_pages,
	.page_mkwrite   = ext4_page_mkwrite,
};

static int ext4_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct inode *inode = file->f_mapping->host;

	if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
		return -EIO;

	if (ext4_encrypted_inode(inode)) {
		int err = fscrypt_get_encryption_info(inode);
		if (err)
			return 0;
		if (!fscrypt_has_encryption_key(inode))
			return -ENOKEY;
	}
	file_accessed(file);
	if (IS_DAX(file_inode(file))) {
		vma->vm_ops = &ext4_dax_vm_ops;
		vma->vm_flags |= VM_MIXEDMAP | VM_HUGEPAGE;
	} else {
		vma->vm_ops = &ext4_file_vm_ops;
	}
	return 0;
}

static int ext4_file_open(struct inode * inode, struct file * filp)
{
	struct super_block *sb = inode->i_sb;
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	struct vfsmount *mnt = filp->f_path.mnt;
	struct dentry *dir;
	struct path path;
	char buf[64], *cp;
	int ret;

	if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
		return -EIO;

	if (unlikely(!(sbi->s_mount_flags & EXT4_MF_MNTDIR_SAMPLED) &&
		     !(sb->s_flags & MS_RDONLY))) {
		sbi->s_mount_flags |= EXT4_MF_MNTDIR_SAMPLED;
		/*
		 * Sample where the filesystem has been mounted and
		 * store it in the superblock for sysadmin convenience
		 * when trying to sort through large numbers of block
		 * devices or filesystem images.
		 */
		memset(buf, 0, sizeof(buf));
		path.mnt = mnt;
		path.dentry = mnt->mnt_root;
		cp = d_path(&path, buf, sizeof(buf));
		if (!IS_ERR(cp)) {
			handle_t *handle;
			int err;

			handle = ext4_journal_start_sb(sb, EXT4_HT_MISC, 1);
			if (IS_ERR(handle))
				return PTR_ERR(handle);
			BUFFER_TRACE(sbi->s_sbh, "get_write_access");
			err = ext4_journal_get_write_access(handle, sbi->s_sbh);
			if (err) {
				ext4_journal_stop(handle);
				return err;
			}
			strlcpy(sbi->s_es->s_last_mounted, cp,
				sizeof(sbi->s_es->s_last_mounted));
			ext4_handle_dirty_super(handle, sb);
			ext4_journal_stop(handle);
		}
	}
	if (ext4_encrypted_inode(inode)) {
		ret = fscrypt_get_encryption_info(inode);
		if (ret)
			return -EACCES;
		if (!fscrypt_has_encryption_key(inode))
			return -ENOKEY;
	}

	dir = dget_parent(file_dentry(filp));
	if (ext4_encrypted_inode(d_inode(dir)) &&
			!fscrypt_has_permitted_context(d_inode(dir), inode)) {
		ext4_warning(inode->i_sb,
			     "Inconsistent encryption contexts: %lu/%lu",
			     (unsigned long) d_inode(dir)->i_ino,
			     (unsigned long) inode->i_ino);
		dput(dir);
		return -EPERM;
	}
	dput(dir);
	/*
	 * Set up the jbd2_inode if we are opening the inode for
	 * writing and the journal is present
	 */
	if (filp->f_mode & FMODE_WRITE) {
		ret = ext4_inode_attach_jinode(inode);
		if (ret < 0)
			return ret;
	}
	return dquot_file_open(inode, filp);
}

/*
 * Here we use ext4_map_blocks() to get a block mapping for an extent-based
 * file rather than ext4_ext_walk_space() because we can handle
 * SEEK_DATA/SEEK_HOLE for block-mapped and extent-mapped files in the same
 * function.  When the extent status tree has been fully implemented, it will
 * track all extent status for a file and we can directly use it to
 * retrieve the offset for SEEK_DATA/SEEK_HOLE.
 */

/*
 * When we retrieve the offset for SEEK_DATA/SEEK_HOLE, we need to look up
 * the page cache to check whether there is any data in the range
 * [startoff, endoff]: if this range contains an unwritten extent, we treat
 * the extent as data or as a hole depending on whether the page cache has
 * data for it.
 */
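
/*
 * Example (an illustrative note): suppose blocks 0-3 of a file are covered
 * by a single unwritten extent and only block 1 has a page in the page
 * cache.  SEEK_DATA at offset 0 reports the offset of block 1 (the cached
 * data), while SEEK_HOLE at offset 0 reports offset 0, since an unwritten
 * block with no cached data reads back as zeroes.
 */
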
static int ext4_find_unwritten_pgoff(struct inode *inode,
				     int whence,
				     ext4_lblk_t end_blk,
				     loff_t *offset)
{
	struct pagevec pvec;
	unsigned int blkbits;
	pgoff_t index;
	pgoff_t end;
	loff_t endoff;
	loff_t startoff;
	loff_t lastoff;
	int found = 0;

	blkbits = inode->i_sb->s_blocksize_bits;
	startoff = *offset;
	lastoff = startoff;
	endoff = (loff_t)end_blk << blkbits;

	index = startoff >> PAGE_SHIFT;
	end = (endoff - 1) >> PAGE_SHIFT;

	pagevec_init(&pvec, 0);
	do {
		int i, num;
		unsigned long nr_pages;

		num = min_t(pgoff_t, end - index, PAGEVEC_SIZE - 1) + 1;
		nr_pages = pagevec_lookup(&pvec, inode->i_mapping, index,
					  (pgoff_t)num);
		if (nr_pages == 0)
			break;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];
			struct buffer_head *bh, *head;

			/*
			 * If current offset is smaller than the page offset,
			 * there is a hole at this offset.
			 */
			if (whence == SEEK_HOLE && lastoff < endoff &&
			    lastoff < page_offset(pvec.pages[i])) {
				found = 1;
				*offset = lastoff;
				goto out;
			}

			if (page->index > end)
				goto out;

			lock_page(page);

			if (unlikely(page->mapping != inode->i_mapping)) {
				unlock_page(page);
				continue;
			}

			if (!page_has_buffers(page)) {
				unlock_page(page);
				continue;
			}

			if (page_has_buffers(page)) {
				lastoff = page_offset(page);
				bh = head = page_buffers(page);
				do {
					if (buffer_uptodate(bh) ||
					    buffer_unwritten(bh)) {
						if (whence == SEEK_DATA)
							found = 1;
					} else {
						if (whence == SEEK_HOLE)
							found = 1;
					}
					if (found) {
						*offset = max_t(loff_t,
							startoff, lastoff);
						unlock_page(page);
						goto out;
					}
					lastoff += bh->b_size;
					bh = bh->b_this_page;
				} while (bh != head);
			}

			lastoff = page_offset(page) + PAGE_SIZE;
			unlock_page(page);
		}

		/* Fewer pages than requested means no more pages; we are done. */
		if (nr_pages < num)
			break;

		index = pvec.pages[i - 1]->index + 1;
		pagevec_release(&pvec);
	} while (index <= end);

	if (whence == SEEK_HOLE && lastoff < endoff) {
		found = 1;
		*offset = lastoff;
	}
out:
	pagevec_release(&pvec);
	return found;
}

/*
 * ext4_seek_data() retrieves the offset for SEEK_DATA.
 */
static loff_t ext4_seek_data(struct file *file, loff_t offset, loff_t maxsize)
{
	struct inode *inode = file->f_mapping->host;
	struct extent_status es;
	ext4_lblk_t start, last, end;
	loff_t dataoff, isize;
	int blkbits;
	int ret;

	inode_lock(inode);

	isize = i_size_read(inode);
	if (offset >= isize) {
		inode_unlock(inode);
		return -ENXIO;
	}

	blkbits = inode->i_sb->s_blocksize_bits;
	start = offset >> blkbits;
	last = start;
	end = isize >> blkbits;
	dataoff = offset;

	do {
		ret = ext4_get_next_extent(inode, last, end - last + 1, &es);
		if (ret <= 0) {
			/* No extent found -> no data */
			if (ret == 0)
				ret = -ENXIO;
			inode_unlock(inode);
			return ret;
		}

		last = es.es_lblk;
		if (last != start)
			dataoff = (loff_t)last << blkbits;
		if (!ext4_es_is_unwritten(&es))
			break;

		/*
		 * If there is an unwritten extent at this offset, treat it
		 * as data or as a hole depending on whether the page cache
		 * has data for it.
		 */
		if (ext4_find_unwritten_pgoff(inode, SEEK_DATA,
					      es.es_lblk + es.es_len, &dataoff))
			break;
		last += es.es_len;
		dataoff = (loff_t)last << blkbits;
		cond_resched();
	} while (last <= end);

	inode_unlock(inode);

	if (dataoff > isize)
		return -ENXIO;

	return vfs_setpos(file, dataoff, maxsize);
}

/*
 * ext4_seek_hole() retrieves the offset for SEEK_HOLE.
 */
static loff_t ext4_seek_hole(struct file *file, loff_t offset, loff_t maxsize)
{
	struct inode *inode = file->f_mapping->host;
	struct extent_status es;
	ext4_lblk_t start, last, end;
	loff_t holeoff, isize;
	int blkbits;
	int ret;

	inode_lock(inode);

	isize = i_size_read(inode);
	if (offset >= isize) {
		inode_unlock(inode);
		return -ENXIO;
	}

	blkbits = inode->i_sb->s_blocksize_bits;
	start = offset >> blkbits;
	last = start;
	end = isize >> blkbits;
	holeoff = offset;

	do {
		ret = ext4_get_next_extent(inode, last, end - last + 1, &es);
		if (ret < 0) {
			inode_unlock(inode);
			return ret;
		}
		/* Found a hole? */
		if (ret == 0 || es.es_lblk > last) {
			if (last != start)
				holeoff = (loff_t)last << blkbits;
			break;
		}
		/*
		 * If there is an unwritten extent at this offset, treat it
		 * as data or as a hole depending on whether the page cache
		 * has data for it.
		 */
		if (ext4_es_is_unwritten(&es) &&
		    ext4_find_unwritten_pgoff(inode, SEEK_HOLE,
					      last + es.es_len, &holeoff))
			break;

		last += es.es_len;
		holeoff = (loff_t)last << blkbits;
		cond_resched();
	} while (last <= end);

	inode_unlock(inode);

	if (holeoff > isize)
		holeoff = isize;

	return vfs_setpos(file, holeoff, maxsize);
}

/*
 * ext4_llseek() handles both block-mapped and extent-mapped maxbytes values
 * by calling generic_file_llseek_size() with the appropriate maxbytes
 * value for each.
 */
loff_t ext4_llseek(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file->f_mapping->host;
	loff_t maxbytes;

	if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
		maxbytes = EXT4_SB(inode->i_sb)->s_bitmap_maxbytes;
	else
		maxbytes = inode->i_sb->s_maxbytes;

	switch (whence) {
	case SEEK_SET:
	case SEEK_CUR:
	case SEEK_END:
		return generic_file_llseek_size(file, offset, whence,
						maxbytes, i_size_read(inode));
	case SEEK_DATA:
		return ext4_seek_data(file, offset, maxbytes);
	case SEEK_HOLE:
		return ext4_seek_hole(file, offset, maxbytes);
	}

	return -EINVAL;
}
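
/*
 * Usage sketch (an illustrative note): the SEEK_DATA/SEEK_HOLE paths above
 * are reached from userspace through plain lseek(2), e.g.:
 *
 *	off_t data = lseek(fd, 0, SEEK_DATA);     // first data offset
 *	off_t hole = lseek(fd, data, SEEK_HOLE);  // end of that data run
 *
 * Both calls fail with errno == ENXIO when the requested offset is at or
 * beyond the end of the file, matching the -ENXIO returns above.
 */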

const struct file_operations ext4_file_operations = {
	.llseek		= ext4_llseek,
	.read_iter	= ext4_file_read_iter,
	.write_iter	= ext4_file_write_iter,
	.unlocked_ioctl = ext4_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= ext4_compat_ioctl,
#endif
	.mmap		= ext4_file_mmap,
	.open		= ext4_file_open,
	.release	= ext4_release_file,
	.fsync		= ext4_sync_file,
	.get_unmapped_area = thp_get_unmapped_area,
	.splice_read	= generic_file_splice_read,
	.splice_write	= iter_file_splice_write,
	.fallocate	= ext4_fallocate,
};

const struct inode_operations ext4_file_inode_operations = {
	.setattr	= ext4_setattr,
	.getattr	= ext4_file_getattr,
	.listxattr	= ext4_listxattr,
	.get_acl	= ext4_get_acl,
	.set_acl	= ext4_set_acl,
	.fiemap		= ext4_fiemap,
};