/*
 *  linux/fs/ext4/file.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 *  from
 *
 *  linux/fs/minix/file.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  ext4 fs regular file handling primitives
 *
 *  64-bit file support on 64-bit platforms by Jakub Jelinek
 *	(jj@sunsite.ms.mff.cuni.cz)
 */

#include <linux/time.h>
#include <linux/fs.h>
#include <linux/mount.h>
#include <linux/path.h>
#include <linux/dax.h>
#include <linux/quotaops.h>
#include <linux/pagevec.h>
#include <linux/uio.h>
#include "ext4.h"
#include "ext4_jbd2.h"
#include "xattr.h"
#include "acl.h"

#ifdef CONFIG_FS_DAX
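/*
 * DAX read path: recheck IS_DAX() under the shared inode lock and fall
 * back to buffered reads if DAX is no longer in effect for this inode.
 */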
static ssize_t ext4_dax_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	ssize_t ret;

	inode_lock_shared(inode);
	/*
	 * Recheck under inode lock - at this point we are sure it cannot
	 * change anymore
	 */
	if (!IS_DAX(inode)) {
		inode_unlock_shared(inode);
		/* Fallback to buffered IO in case we cannot support DAX */
		return generic_file_read_iter(iocb, to);
	}
	ret = dax_iomap_rw(iocb, to, &ext4_iomap_ops);
	inode_unlock_shared(inode);

	file_accessed(iocb->ki_filp);
	return ret;
}
#endif

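/*
 * Read entry point: route DAX inodes to ext4_dax_read_iter(), everything
 * else through generic_file_read_iter().
 */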
static ssize_t ext4_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	if (!iov_iter_count(to))
		return 0; /* skip atime */

#ifdef CONFIG_FS_DAX
	if (IS_DAX(file_inode(iocb->ki_filp)))
		return ext4_dax_read_iter(iocb, to);
#endif
	return generic_file_read_iter(iocb, to);
}

/*
 * Called when an inode is released. Note that this is different
 * from ext4_file_open: open gets called at every open, but release
 * gets called only when /all/ the files are closed.
 */
static int ext4_release_file(struct inode *inode, struct file *filp)
{
	if (ext4_test_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE)) {
		ext4_alloc_da_blocks(inode);
		ext4_clear_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE);
	}
	/* if we are the last writer on the inode, drop the block reservation */
	if ((filp->f_mode & FMODE_WRITE) &&
			(atomic_read(&inode->i_writecount) == 1) &&
		        !EXT4_I(inode)->i_reserved_data_blocks)
	{
		down_write(&EXT4_I(inode)->i_data_sem);
		ext4_discard_preallocations(inode);
		up_write(&EXT4_I(inode)->i_data_sem);
	}
	if (is_dx(inode) && filp->private_data)
		ext4_htree_free_dir_info(filp->private_data);

	return 0;
}

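/* Wait for the inode's pending unwritten extent IO (i_unwritten) to drain. */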
static void ext4_unwritten_wait(struct inode *inode)
{
	wait_queue_head_t *wq = ext4_ioend_wq(inode);

	wait_event(*wq, (atomic_read(&EXT4_I(inode)->i_unwritten) == 0));
}

/*
 * This tests whether the IO in question is block-aligned or not.
 * Ext4 utilizes unwritten extents when hole-filling during direct IO, and they
 * are converted to written only after the IO is complete.  Until they are
 * mapped, these blocks appear as holes, so dio_zero_block() will assume that
 * it needs to zero out portions of the start and/or end block.  If 2 AIO
 * threads are at work on the same unwritten block, they must be synchronized
 * or one thread will zero the other's data, causing corruption.
 */
static int
ext4_unaligned_aio(struct inode *inode, struct iov_iter *from, loff_t pos)
{
	struct super_block *sb = inode->i_sb;
	int blockmask = sb->s_blocksize - 1;

	if (pos >= i_size_read(inode))
		return 0;

	if ((pos | iov_iter_alignment(from)) & blockmask)
		return 1;

	return 0;
}

/* Is IO overwriting allocated and initialized blocks? */
static bool ext4_overwrite_io(struct inode *inode, loff_t pos, loff_t len)
{
	struct ext4_map_blocks map;
	unsigned int blkbits = inode->i_blkbits;
	int err, blklen;

	if (pos + len > i_size_read(inode))
		return false;

	map.m_lblk = pos >> blkbits;
	map.m_len = EXT4_MAX_BLOCKS(len, pos, blkbits);
	blklen = map.m_len;

	err = ext4_map_blocks(NULL, inode, &map, 0);
	/*
	 * 'err == blklen' means that all of the blocks have been preallocated,
	 * regardless of whether they have been initialized or not. To exclude
	 * unwritten extents, we need to check m_flags.
	 */
	return err == blklen && (map.m_flags & EXT4_MAP_MAPPED);
}

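/*
 * Run the generic write checks and, for bitmap-mapped (non-extent) files,
 * clamp the write to s_bitmap_maxbytes.
 */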
static ssize_t ext4_write_checks(struct kiocb *iocb, struct iov_iter *from)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	ssize_t ret;

	ret = generic_write_checks(iocb, from);
	if (ret <= 0)
		return ret;
	/*
	 * If we have encountered a bitmap-format file, the size limit
	 * is smaller than s_maxbytes, which is for extent-mapped files.
	 */
	if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
		struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);

		if (iocb->ki_pos >= sbi->s_bitmap_maxbytes)
			return -EFBIG;
		iov_iter_truncate(from, sbi->s_bitmap_maxbytes - iocb->ki_pos);
	}
	return iov_iter_count(from);
}

#ifdef CONFIG_FS_DAX
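/*
 * DAX write path: runs under the exclusive inode lock, downgraded to a
 * shared lock when the write only overwrites already mapped and
 * initialized blocks.
 */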
static ssize_t
ext4_dax_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	ssize_t ret;
	bool overwrite = false;

	inode_lock(inode);
	ret = ext4_write_checks(iocb, from);
	if (ret <= 0)
		goto out;
	ret = file_remove_privs(iocb->ki_filp);
	if (ret)
		goto out;
	ret = file_update_time(iocb->ki_filp);
	if (ret)
		goto out;

	if (ext4_overwrite_io(inode, iocb->ki_pos, iov_iter_count(from))) {
		overwrite = true;
		downgrade_write(&inode->i_rwsem);
	}
	ret = dax_iomap_rw(iocb, from, &ext4_iomap_ops);
out:
	if (!overwrite)
		inode_unlock(inode);
	else
		inode_unlock_shared(inode);
	if (ret > 0)
		ret = generic_write_sync(iocb, ret);
	return ret;
}
#endif

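/*
 * Main write entry point: hands DAX inodes to ext4_dax_write_iter(),
 * serializes unaligned direct AIO, and flags DIO overwrites of mapped
 * blocks via iocb->private before calling __generic_file_write_iter().
 */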
static ssize_t
ext4_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	int o_direct = iocb->ki_flags & IOCB_DIRECT;
	int unaligned_aio = 0;
	int overwrite = 0;
	ssize_t ret;

#ifdef CONFIG_FS_DAX
	if (IS_DAX(inode))
		return ext4_dax_write_iter(iocb, from);
#endif

	inode_lock(inode);
	ret = ext4_write_checks(iocb, from);
	if (ret <= 0)
		goto out;

	/*
	 * Unaligned direct AIO requests must be serialized against each
	 * other, as zeroing of partial blocks by two competing unaligned
	 * AIOs can result in data corruption.
	 */
	if (o_direct && ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS) &&
	    !is_sync_kiocb(iocb) &&
	    ext4_unaligned_aio(inode, from, iocb->ki_pos)) {
		unaligned_aio = 1;
		ext4_unwritten_wait(inode);
	}

	iocb->private = &overwrite;
	/* Check whether we do a DIO overwrite or not */
	if (o_direct && ext4_should_dioread_nolock(inode) && !unaligned_aio &&
	    ext4_overwrite_io(inode, iocb->ki_pos, iov_iter_count(from)))
		overwrite = 1;

	ret = __generic_file_write_iter(iocb, from);
	inode_unlock(inode);

	if (ret > 0)
		ret = generic_write_sync(iocb, ret);

	return ret;

out:
	inode_unlock(inode);
	return ret;
}

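/*
 * DAX page fault handlers. Faults run with i_mmap_sem held for reading to
 * synchronize against truncate and hole punching.
 */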
#ifdef CONFIG_FS_DAX
static int ext4_dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	int result;
	struct inode *inode = file_inode(vma->vm_file);
	struct super_block *sb = inode->i_sb;
	bool write = vmf->flags & FAULT_FLAG_WRITE;

	if (write) {
		sb_start_pagefault(sb);
		file_update_time(vma->vm_file);
	}
	down_read(&EXT4_I(inode)->i_mmap_sem);
	result = dax_iomap_fault(vma, vmf, &ext4_iomap_ops);
	up_read(&EXT4_I(inode)->i_mmap_sem);
	if (write)
		sb_end_pagefault(sb);

	return result;
}

static int ext4_dax_pmd_fault(struct vm_area_struct *vma, unsigned long addr,
						pmd_t *pmd, unsigned int flags)
{
	int result;
	struct inode *inode = file_inode(vma->vm_file);
	struct super_block *sb = inode->i_sb;
	bool write = flags & FAULT_FLAG_WRITE;

	if (write) {
		sb_start_pagefault(sb);
		file_update_time(vma->vm_file);
	}
	down_read(&EXT4_I(inode)->i_mmap_sem);
	result = dax_iomap_pmd_fault(vma, addr, pmd, flags,
				     &ext4_iomap_ops);
	up_read(&EXT4_I(inode)->i_mmap_sem);
	if (write)
		sb_end_pagefault(sb);

	return result;
}

/*
 * Handle write faults for VM_MIXEDMAP mappings. As in the ext4_dax_fault()
 * handler, we check for races against truncate. Note that since we cycle
 * through i_mmap_sem, we are sure that any hole punching that began before
 * we were called has finished by now; if it included part of the file we
 * are working on, our pte will get unmapped and the pte_same() check in
 * wp_pfn_shared() fails. The fault then gets retried and things work out
 * as desired.
 */
static int ext4_dax_pfn_mkwrite(struct vm_area_struct *vma,
				struct vm_fault *vmf)
{
	struct inode *inode = file_inode(vma->vm_file);
	struct super_block *sb = inode->i_sb;
	loff_t size;
	int ret;

	sb_start_pagefault(sb);
	file_update_time(vma->vm_file);
	down_read(&EXT4_I(inode)->i_mmap_sem);
	size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (vmf->pgoff >= size)
		ret = VM_FAULT_SIGBUS;
	else
		ret = dax_pfn_mkwrite(vma, vmf);
	up_read(&EXT4_I(inode)->i_mmap_sem);
	sb_end_pagefault(sb);

	return ret;
}

static const struct vm_operations_struct ext4_dax_vm_ops = {
	.fault		= ext4_dax_fault,
	.pmd_fault	= ext4_dax_pmd_fault,
	.page_mkwrite	= ext4_dax_fault,
	.pfn_mkwrite	= ext4_dax_pfn_mkwrite,
};
#else
#define ext4_dax_vm_ops	ext4_file_vm_ops
#endif

static const struct vm_operations_struct ext4_file_vm_ops = {
	.fault		= ext4_filemap_fault,
	.map_pages	= filemap_map_pages,
	.page_mkwrite   = ext4_page_mkwrite,
};

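/*
 * mmap entry point: require the encryption key for encrypted inodes and
 * select the DAX or regular vm_ops for the mapping.
 */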
static int ext4_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct inode *inode = file->f_mapping->host;

	if (ext4_encrypted_inode(inode)) {
		int err = fscrypt_get_encryption_info(inode);
		if (err)
			return 0;
		if (!fscrypt_has_encryption_key(inode))
			return -ENOKEY;
	}
	file_accessed(file);
	if (IS_DAX(file_inode(file))) {
		vma->vm_ops = &ext4_dax_vm_ops;
		vma->vm_flags |= VM_MIXEDMAP | VM_HUGEPAGE;
	} else {
		vma->vm_ops = &ext4_file_vm_ops;
	}
	return 0;
}

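/*
 * Open entry point: records the first mount point in the superblock for
 * the sysadmin's benefit, verifies encryption keys and contexts, and
 * attaches the jbd2 inode for writers before calling dquot_file_open().
 */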
static int ext4_file_open(struct inode * inode, struct file * filp)
{
	struct super_block *sb = inode->i_sb;
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	struct vfsmount *mnt = filp->f_path.mnt;
	struct dentry *dir;
	struct path path;
	char buf[64], *cp;
	int ret;

	if (unlikely(!(sbi->s_mount_flags & EXT4_MF_MNTDIR_SAMPLED) &&
		     !(sb->s_flags & MS_RDONLY))) {
		sbi->s_mount_flags |= EXT4_MF_MNTDIR_SAMPLED;
		/*
		 * Sample where the filesystem has been mounted and
		 * store it in the superblock for sysadmin convenience
		 * when trying to sort through large numbers of block
		 * devices or filesystem images.
		 */
		memset(buf, 0, sizeof(buf));
		path.mnt = mnt;
		path.dentry = mnt->mnt_root;
		cp = d_path(&path, buf, sizeof(buf));
		if (!IS_ERR(cp)) {
			handle_t *handle;
			int err;

			handle = ext4_journal_start_sb(sb, EXT4_HT_MISC, 1);
			if (IS_ERR(handle))
				return PTR_ERR(handle);
			BUFFER_TRACE(sbi->s_sbh, "get_write_access");
			err = ext4_journal_get_write_access(handle, sbi->s_sbh);
			if (err) {
				ext4_journal_stop(handle);
				return err;
			}
			strlcpy(sbi->s_es->s_last_mounted, cp,
				sizeof(sbi->s_es->s_last_mounted));
			ext4_handle_dirty_super(handle, sb);
			ext4_journal_stop(handle);
		}
	}
	if (ext4_encrypted_inode(inode)) {
		ret = fscrypt_get_encryption_info(inode);
		if (ret)
			return -EACCES;
		if (!fscrypt_has_encryption_key(inode))
			return -ENOKEY;
	}

	dir = dget_parent(file_dentry(filp));
	if (ext4_encrypted_inode(d_inode(dir)) &&
			!fscrypt_has_permitted_context(d_inode(dir), inode)) {
		ext4_warning(inode->i_sb,
			     "Inconsistent encryption contexts: %lu/%lu",
			     (unsigned long) d_inode(dir)->i_ino,
			     (unsigned long) inode->i_ino);
		dput(dir);
		return -EPERM;
	}
	dput(dir);
	/*
	 * Set up the jbd2_inode if we are opening the inode for
	 * writing and the journal is present
	 */
	if (filp->f_mode & FMODE_WRITE) {
		ret = ext4_inode_attach_jinode(inode);
		if (ret < 0)
			return ret;
	}
	return dquot_file_open(inode, filp);
}

/*
 * Here we use ext4_map_blocks() to get a block mapping for an extent-based
 * file rather than ext4_ext_walk_space() because we can introduce
 * SEEK_DATA/SEEK_HOLE for block-mapped and extent-mapped files at the same
 * time.  Once the extent status tree has been fully implemented, it will
 * track all extent status for a file and we can use it directly to
 * retrieve the offset for SEEK_DATA/SEEK_HOLE.
 */

/*
 * When we retrieve the offset for SEEK_DATA/SEEK_HOLE, we need to look up
 * the page cache to check whether or not there is any data in the range
 * [startoff, endoff]: if this range contains an unwritten extent, we treat
 * that extent as data or as a hole depending on whether the page cache
 * has data for it.
 */
static int ext4_find_unwritten_pgoff(struct inode *inode,
				     int whence,
				     ext4_lblk_t end_blk,
				     loff_t *offset)
{
	struct pagevec pvec;
	unsigned int blkbits;
	pgoff_t index;
	pgoff_t end;
	loff_t endoff;
	loff_t startoff;
	loff_t lastoff;
	int found = 0;

	blkbits = inode->i_sb->s_blocksize_bits;
	startoff = *offset;
	lastoff = startoff;
	endoff = (loff_t)end_blk << blkbits;

	index = startoff >> PAGE_SHIFT;
	end = endoff >> PAGE_SHIFT;

	pagevec_init(&pvec, 0);
	do {
		int i, num;
		unsigned long nr_pages;

		num = min_t(pgoff_t, end - index, PAGEVEC_SIZE);
		nr_pages = pagevec_lookup(&pvec, inode->i_mapping, index,
					  (pgoff_t)num);
		if (nr_pages == 0) {
			if (whence == SEEK_DATA)
				break;

			BUG_ON(whence != SEEK_HOLE);
			/*
			 * If this is the first pass through the loop and the
			 * offset is not beyond the end offset, there is a
			 * hole at this offset.
			 */
			if (lastoff == startoff || lastoff < endoff)
				found = 1;
			break;
		}

		/*
		 * If this is the first pass through the loop and the offset
		 * is smaller than the first page offset, there is a hole at
		 * this offset.
		 */
		if (lastoff == startoff && whence == SEEK_HOLE &&
		    lastoff < page_offset(pvec.pages[0])) {
			found = 1;
			break;
		}

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];
			struct buffer_head *bh, *head;

			/*
			 * If the current offset is not beyond the end of the
			 * given range, it is a hole.
			 */
			if (lastoff < endoff && whence == SEEK_HOLE &&
			    page->index > end) {
				found = 1;
				*offset = lastoff;
				goto out;
			}

			lock_page(page);

			if (unlikely(page->mapping != inode->i_mapping)) {
				unlock_page(page);
				continue;
			}

			if (!page_has_buffers(page)) {
				unlock_page(page);
				continue;
			}

			if (page_has_buffers(page)) {
				lastoff = page_offset(page);
				bh = head = page_buffers(page);
				do {
					if (buffer_uptodate(bh) ||
					    buffer_unwritten(bh)) {
						if (whence == SEEK_DATA)
							found = 1;
					} else {
						if (whence == SEEK_HOLE)
							found = 1;
					}
					if (found) {
						*offset = max_t(loff_t,
							startoff, lastoff);
						unlock_page(page);
						goto out;
					}
					lastoff += bh->b_size;
					bh = bh->b_this_page;
				} while (bh != head);
			}

			lastoff = page_offset(page) + PAGE_SIZE;
			unlock_page(page);
		}

		/*
		 * Fewer pages were returned than requested, so there is a
		 * hole in the remainder of the range.
		 */
		if (nr_pages < num && whence == SEEK_HOLE) {
			found = 1;
			*offset = lastoff;
			break;
		}

		index = pvec.pages[i - 1]->index + 1;
		pagevec_release(&pvec);
	} while (index <= end);

out:
	pagevec_release(&pvec);
	return found;
}

/*
 * ext4_seek_data() retrieves the offset for SEEK_DATA.
 */
static loff_t ext4_seek_data(struct file *file, loff_t offset, loff_t maxsize)
{
	struct inode *inode = file->f_mapping->host;
	struct extent_status es;
	ext4_lblk_t start, last, end;
	loff_t dataoff, isize;
	int blkbits;
	int ret;

	inode_lock(inode);

	isize = i_size_read(inode);
	if (offset >= isize) {
		inode_unlock(inode);
		return -ENXIO;
	}

	blkbits = inode->i_sb->s_blocksize_bits;
	start = offset >> blkbits;
	last = start;
	end = isize >> blkbits;
	dataoff = offset;

	do {
		ret = ext4_get_next_extent(inode, last, end - last + 1, &es);
		if (ret <= 0) {
			/* No extent found -> no data */
			if (ret == 0)
				ret = -ENXIO;
			inode_unlock(inode);
			return ret;
		}

		last = es.es_lblk;
		if (last != start)
			dataoff = (loff_t)last << blkbits;
		if (!ext4_es_is_unwritten(&es))
			break;

		/*
		 * If there is an unwritten extent at this offset, it is
		 * treated as data or as a hole depending on whether the
		 * page cache has data for it.
		 */
		if (ext4_find_unwritten_pgoff(inode, SEEK_DATA,
					      es.es_lblk + es.es_len, &dataoff))
			break;
		last += es.es_len;
		dataoff = (loff_t)last << blkbits;
		cond_resched();
	} while (last <= end);

	inode_unlock(inode);

	if (dataoff > isize)
		return -ENXIO;

	return vfs_setpos(file, dataoff, maxsize);
}

/*
 * ext4_seek_hole() retrieves the offset for SEEK_HOLE.
 */
static loff_t ext4_seek_hole(struct file *file, loff_t offset, loff_t maxsize)
{
	struct inode *inode = file->f_mapping->host;
	struct extent_status es;
	ext4_lblk_t start, last, end;
	loff_t holeoff, isize;
	int blkbits;
	int ret;

	inode_lock(inode);

	isize = i_size_read(inode);
	if (offset >= isize) {
		inode_unlock(inode);
		return -ENXIO;
	}

	blkbits = inode->i_sb->s_blocksize_bits;
	start = offset >> blkbits;
	last = start;
	end = isize >> blkbits;
	holeoff = offset;

	do {
		ret = ext4_get_next_extent(inode, last, end - last + 1, &es);
		if (ret < 0) {
			inode_unlock(inode);
			return ret;
		}
		/* Found a hole? */
		if (ret == 0 || es.es_lblk > last) {
			if (last != start)
				holeoff = (loff_t)last << blkbits;
			break;
		}
		/*
		 * If there is an unwritten extent at this offset, it is
		 * treated as data or as a hole depending on whether the
		 * page cache has data for it.
		 */
		if (ext4_es_is_unwritten(&es) &&
		    ext4_find_unwritten_pgoff(inode, SEEK_HOLE,
					      last + es.es_len, &holeoff))
			break;

		last += es.es_len;
		holeoff = (loff_t)last << blkbits;
		cond_resched();
	} while (last <= end);

	inode_unlock(inode);

	if (holeoff > isize)
		holeoff = isize;

	return vfs_setpos(file, holeoff, maxsize);
}

/*
 * ext4_llseek() handles both block-mapped and extent-mapped maxbytes values
 * by calling generic_file_llseek_size() with the appropriate maxbytes
 * value for each.
 */
loff_t ext4_llseek(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file->f_mapping->host;
	loff_t maxbytes;

	if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
		maxbytes = EXT4_SB(inode->i_sb)->s_bitmap_maxbytes;
	else
		maxbytes = inode->i_sb->s_maxbytes;

	switch (whence) {
	case SEEK_SET:
	case SEEK_CUR:
	case SEEK_END:
		return generic_file_llseek_size(file, offset, whence,
						maxbytes, i_size_read(inode));
	case SEEK_DATA:
		return ext4_seek_data(file, offset, maxbytes);
	case SEEK_HOLE:
		return ext4_seek_hole(file, offset, maxbytes);
	}

	return -EINVAL;
}

const struct file_operations ext4_file_operations = {
	.llseek		= ext4_llseek,
	.read_iter	= ext4_file_read_iter,
	.write_iter	= ext4_file_write_iter,
	.unlocked_ioctl = ext4_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= ext4_compat_ioctl,
#endif
	.mmap		= ext4_file_mmap,
	.open		= ext4_file_open,
	.release	= ext4_release_file,
	.fsync		= ext4_sync_file,
	.get_unmapped_area = thp_get_unmapped_area,
	.splice_read	= generic_file_splice_read,
	.splice_write	= iter_file_splice_write,
	.fallocate	= ext4_fallocate,
};

const struct inode_operations ext4_file_inode_operations = {
	.setattr	= ext4_setattr,
	.getattr	= ext4_getattr,
	.listxattr	= ext4_listxattr,
	.get_acl	= ext4_get_acl,
	.set_acl	= ext4_set_acl,
	.fiemap		= ext4_fiemap,
};