file.c 64.1 KB
Newer Older
J
Jaegeuk Kim 已提交
1
/*
J
Jaegeuk Kim 已提交
2 3 4 5 6 7 8 9 10 11 12 13 14 15
 * fs/f2fs/file.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/stat.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
16
#include <linux/blkdev.h>
J
Jaegeuk Kim 已提交
17 18
#include <linux/falloc.h>
#include <linux/types.h>
19
#include <linux/compat.h>
J
Jaegeuk Kim 已提交
20 21
#include <linux/uaccess.h>
#include <linux/mount.h>
22
#include <linux/pagevec.h>
23
#include <linux/uio.h>
24
#include <linux/uuid.h>
25
#include <linux/file.h>
J
Jaegeuk Kim 已提交
26 27 28 29 30 31

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "xattr.h"
#include "acl.h"
32
#include "gc.h"
J
Jaegeuk Kim 已提交
33
#include "trace.h"
34
#include <trace/events/f2fs.h>
J
Jaegeuk Kim 已提交
35

36 37 38 39 40 41 42 43 44 45 46 47
static int f2fs_filemap_fault(struct vm_fault *vmf)
{
	struct inode *inode = file_inode(vmf->vma->vm_file);
	int err;

	down_read(&F2FS_I(inode)->i_mmap_sem);
	err = filemap_fault(vmf);
	up_read(&F2FS_I(inode)->i_mmap_sem);

	return err;
}

48
static int f2fs_vm_page_mkwrite(struct vm_fault *vmf)
J
Jaegeuk Kim 已提交
49 50
{
	struct page *page = vmf->page;
51
	struct inode *inode = file_inode(vmf->vma->vm_file);
52
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
J
Jaegeuk Kim 已提交
53
	struct dnode_of_data dn;
54
	int err;
J
Jaegeuk Kim 已提交
55 56

	sb_start_pagefault(inode->i_sb);
57 58

	f2fs_bug_on(sbi, f2fs_has_inline_data(inode));
59

J
Jaegeuk Kim 已提交
60
	/* block allocation */
61
	f2fs_lock_op(sbi);
62
	set_new_dnode(&dn, inode, NULL, NULL, 0);
63
	err = f2fs_reserve_block(&dn, page->index);
64 65
	if (err) {
		f2fs_unlock_op(sbi);
66
		goto out;
67 68 69
	}
	f2fs_put_dnode(&dn);
	f2fs_unlock_op(sbi);
J
Jaegeuk Kim 已提交
70

J
Jaegeuk Kim 已提交
71
	f2fs_balance_fs(sbi, dn.node_changed);
72

73
	file_update_time(vmf->vma->vm_file);
74
	down_read(&F2FS_I(inode)->i_mmap_sem);
J
Jaegeuk Kim 已提交
75
	lock_page(page);
76
	if (unlikely(page->mapping != inode->i_mapping ||
77
			page_offset(page) > i_size_read(inode) ||
78
			!PageUptodate(page))) {
J
Jaegeuk Kim 已提交
79 80
		unlock_page(page);
		err = -EFAULT;
81
		goto out_sem;
J
Jaegeuk Kim 已提交
82 83 84 85 86 87
	}

	/*
	 * check to see if the page is mapped already (no holes)
	 */
	if (PageMappedToDisk(page))
88
		goto mapped;
J
Jaegeuk Kim 已提交
89 90

	/* page is wholly or partially inside EOF */
91
	if (((loff_t)(page->index + 1) << PAGE_SHIFT) >
C
Chao Yu 已提交
92
						i_size_read(inode)) {
J
Jaegeuk Kim 已提交
93
		unsigned offset;
94 95
		offset = i_size_read(inode) & ~PAGE_MASK;
		zero_user_segment(page, offset, PAGE_SIZE);
J
Jaegeuk Kim 已提交
96 97
	}
	set_page_dirty(page);
98 99
	if (!PageUptodate(page))
		SetPageUptodate(page);
J
Jaegeuk Kim 已提交
100

C
Chao Yu 已提交
101 102
	f2fs_update_iostat(sbi, APP_MAPPED_IO, F2FS_BLKSIZE);

103
	trace_f2fs_vm_page_mkwrite(page, DATA);
104 105
mapped:
	/* fill the page */
106
	f2fs_wait_on_page_writeback(page, DATA, false);
107 108 109 110 111

	/* wait for GCed encrypted page writeback */
	if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode))
		f2fs_wait_on_encrypted_page_writeback(sbi, dn.data_blkaddr);

112 113
out_sem:
	up_read(&F2FS_I(inode)->i_mmap_sem);
J
Jaegeuk Kim 已提交
114 115
out:
	sb_end_pagefault(inode->i_sb);
116
	f2fs_update_time(sbi, REQ_TIME);
J
Jaegeuk Kim 已提交
117 118 119 120
	return block_page_mkwrite_return(err);
}

static const struct vm_operations_struct f2fs_file_vm_ops = {
121
	.fault		= f2fs_filemap_fault,
122
	.map_pages	= filemap_map_pages,
123
	.page_mkwrite	= f2fs_vm_page_mkwrite,
J
Jaegeuk Kim 已提交
124 125
};

126 127 128 129 130 131 132 133 134 135
static int get_parent_ino(struct inode *inode, nid_t *pino)
{
	struct dentry *dentry;

	inode = igrab(inode);
	dentry = d_find_any_alias(inode);
	iput(inode);
	if (!dentry)
		return 0;

136 137
	*pino = parent_ino(dentry);
	dput(dentry);
138 139 140
	return 1;
}

141 142
static inline bool need_do_checkpoint(struct inode *inode)
{
143
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
144 145 146 147
	bool need_cp = false;

	if (!S_ISREG(inode->i_mode) || inode->i_nlink != 1)
		need_cp = true;
148
	else if (is_sbi_flag_set(sbi, SBI_NEED_CP))
149
		need_cp = true;
150 151 152 153 154 155
	else if (file_wrong_pino(inode))
		need_cp = true;
	else if (!space_for_roll_forward(sbi))
		need_cp = true;
	else if (!is_checkpointed_node(sbi, F2FS_I(inode)->i_pino))
		need_cp = true;
156 157
	else if (test_opt(sbi, FASTBOOT))
		need_cp = true;
158 159
	else if (sbi->active_logs == 2)
		need_cp = true;
160 161 162 163

	return need_cp;
}

164 165 166 167 168 169 170 171 172 173 174
static bool need_inode_page_update(struct f2fs_sb_info *sbi, nid_t ino)
{
	struct page *i = find_get_page(NODE_MAPPING(sbi), ino);
	bool ret = false;
	/* But we need to avoid that there are some inode updates */
	if ((i && PageDirty(i)) || need_inode_block_update(sbi, ino))
		ret = true;
	f2fs_put_page(i, 0);
	return ret;
}

175 176 177 178 179 180 181 182
static void try_to_fix_pino(struct inode *inode)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);
	nid_t pino;

	down_write(&fi->i_sem);
	if (file_wrong_pino(inode) && inode->i_nlink == 1 &&
			get_parent_ino(inode, &pino)) {
183
		f2fs_i_pino_write(inode, pino);
184 185
		file_got_pino(inode);
	}
186
	up_write(&fi->i_sem);
187 188
}

189 190
static int f2fs_do_sync_file(struct file *file, loff_t start, loff_t end,
						int datasync, bool atomic)
J
Jaegeuk Kim 已提交
191 192
{
	struct inode *inode = file->f_mapping->host;
193
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
J
Jaegeuk Kim 已提交
194
	nid_t ino = inode->i_ino;
J
Jaegeuk Kim 已提交
195 196 197
	int ret = 0;
	bool need_cp = false;
	struct writeback_control wbc = {
198
		.sync_mode = WB_SYNC_ALL,
J
Jaegeuk Kim 已提交
199 200 201 202
		.nr_to_write = LONG_MAX,
		.for_reclaim = 0,
	};

203
	if (unlikely(f2fs_readonly(inode->i_sb)))
204 205
		return 0;

206
	trace_f2fs_sync_file_enter(inode);
207 208

	/* if fdatasync is triggered, let's do in-place-update */
J
Jaegeuk Kim 已提交
209
	if (datasync || get_dirty_pages(inode) <= SM_I(sbi)->min_fsync_blocks)
210
		set_inode_flag(inode, FI_NEED_IPU);
J
Jaegeuk Kim 已提交
211
	ret = filemap_write_and_wait_range(inode->i_mapping, start, end);
212
	clear_inode_flag(inode, FI_NEED_IPU);
213

214 215
	if (ret) {
		trace_f2fs_sync_file_exit(inode, need_cp, datasync, ret);
J
Jaegeuk Kim 已提交
216
		return ret;
217
	}
J
Jaegeuk Kim 已提交
218

219
	/* if the inode is dirty, let's recover all the time */
C
Chao Yu 已提交
220
	if (!f2fs_skip_inode_update(inode, datasync)) {
221
		f2fs_write_inode(inode, NULL);
222 223 224
		goto go_write;
	}

225 226 227
	/*
	 * if there is no written data, don't waste time to write recovery info.
	 */
228
	if (!is_inode_flag_set(inode, FI_APPEND_WRITE) &&
J
Jaegeuk Kim 已提交
229
			!exist_written_data(sbi, ino, APPEND_INO)) {
230

231 232
		/* it may call write_inode just prior to fsync */
		if (need_inode_page_update(sbi, ino))
233 234
			goto go_write;

235
		if (is_inode_flag_set(inode, FI_UPDATE_WRITE) ||
J
Jaegeuk Kim 已提交
236
				exist_written_data(sbi, ino, UPDATE_INO))
237 238 239
			goto flush_out;
		goto out;
	}
240
go_write:
241 242 243 244
	/*
	 * Both of fdatasync() and fsync() are able to be recovered from
	 * sudden-power-off.
	 */
245
	down_read(&F2FS_I(inode)->i_sem);
246
	need_cp = need_do_checkpoint(inode);
247
	up_read(&F2FS_I(inode)->i_sem);
248

J
Jaegeuk Kim 已提交
249 250 251
	if (need_cp) {
		/* all the dirty node pages should be flushed for POR */
		ret = f2fs_sync_fs(inode->i_sb, 1);
252

253 254 255 256 257
		/*
		 * We've secured consistency through sync_fs. Following pino
		 * will be used only for fsynced inodes after checkpoint.
		 */
		try_to_fix_pino(inode);
258 259
		clear_inode_flag(inode, FI_APPEND_WRITE);
		clear_inode_flag(inode, FI_UPDATE_WRITE);
260 261
		goto out;
	}
262
sync_nodes:
263
	ret = fsync_node_pages(sbi, inode, &wbc, atomic);
264 265
	if (ret)
		goto out;
266

267
	/* if cp_error was enabled, we should avoid infinite loop */
C
Chao Yu 已提交
268 269
	if (unlikely(f2fs_cp_error(sbi))) {
		ret = -EIO;
270
		goto out;
C
Chao Yu 已提交
271
	}
272

273
	if (need_inode_block_update(sbi, ino)) {
274
		f2fs_mark_inode_dirty_sync(inode, true);
275 276
		f2fs_write_inode(inode, NULL);
		goto sync_nodes;
J
Jaegeuk Kim 已提交
277
	}
278

279 280 281 282 283 284 285 286 287 288 289 290 291
	/*
	 * If it's atomic_write, it's just fine to keep write ordering. So
	 * here we don't need to wait for node write completion, since we use
	 * node chain which serializes node blocks. If one of node writes are
	 * reordered, we can see simply broken chain, resulting in stopping
	 * roll-forward recovery. It means we'll recover all or none node blocks
	 * given fsync mark.
	 */
	if (!atomic) {
		ret = wait_on_node_pages_writeback(sbi, ino);
		if (ret)
			goto out;
	}
292 293

	/* once recovery info is written, don't need to tack this */
294
	remove_ino_entry(sbi, ino, APPEND_INO);
295
	clear_inode_flag(inode, FI_APPEND_WRITE);
296
flush_out:
297
	remove_ino_entry(sbi, ino, UPDATE_INO);
298
	clear_inode_flag(inode, FI_UPDATE_WRITE);
299 300
	if (!atomic)
		ret = f2fs_issue_flush(sbi);
301
	f2fs_update_time(sbi, REQ_TIME);
J
Jaegeuk Kim 已提交
302
out:
303
	trace_f2fs_sync_file_exit(inode, need_cp, datasync, ret);
304
	f2fs_trace_ios(NULL, 1);
J
Jaegeuk Kim 已提交
305 306 307
	return ret;
}

308 309 310 311 312
int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
{
	return f2fs_do_sync_file(file, start, end, datasync, false);
}

313 314 315 316 317 318 319 320 321 322 323
static pgoff_t __get_first_dirty_index(struct address_space *mapping,
						pgoff_t pgofs, int whence)
{
	struct pagevec pvec;
	int nr_pages;

	if (whence != SEEK_DATA)
		return 0;

	/* find first dirty page index */
	pagevec_init(&pvec, 0);
J
Jaegeuk Kim 已提交
324 325
	nr_pages = pagevec_lookup_tag(&pvec, mapping, &pgofs,
					PAGECACHE_TAG_DIRTY, 1);
326
	pgofs = nr_pages ? pvec.pages[0]->index : ULONG_MAX;
327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347
	pagevec_release(&pvec);
	return pgofs;
}

static bool __found_offset(block_t blkaddr, pgoff_t dirty, pgoff_t pgofs,
							int whence)
{
	switch (whence) {
	case SEEK_DATA:
		if ((blkaddr == NEW_ADDR && dirty == pgofs) ||
			(blkaddr != NEW_ADDR && blkaddr != NULL_ADDR))
			return true;
		break;
	case SEEK_HOLE:
		if (blkaddr == NULL_ADDR)
			return true;
		break;
	}
	return false;
}

348 349 350 351 352
static loff_t f2fs_seek_block(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file->f_mapping->host;
	loff_t maxbytes = inode->i_sb->s_maxbytes;
	struct dnode_of_data dn;
353 354 355
	pgoff_t pgofs, end_offset, dirty;
	loff_t data_ofs = offset;
	loff_t isize;
356 357
	int err = 0;

A
Al Viro 已提交
358
	inode_lock(inode);
359 360 361 362 363 364

	isize = i_size_read(inode);
	if (offset >= isize)
		goto fail;

	/* handle inline data case */
C
Chao Yu 已提交
365
	if (f2fs_has_inline_data(inode) || f2fs_has_inline_dentry(inode)) {
366 367 368 369 370
		if (whence == SEEK_HOLE)
			data_ofs = isize;
		goto found;
	}

371
	pgofs = (pgoff_t)(offset >> PAGE_SHIFT);
372

373 374
	dirty = __get_first_dirty_index(inode->i_mapping, pgofs, whence);

375
	for (; data_ofs < isize; data_ofs = (loff_t)pgofs << PAGE_SHIFT) {
376
		set_new_dnode(&dn, inode, NULL, NULL, 0);
377
		err = get_dnode_of_data(&dn, pgofs, LOOKUP_NODE);
378 379 380
		if (err && err != -ENOENT) {
			goto fail;
		} else if (err == -ENOENT) {
A
arter97 已提交
381
			/* direct node does not exists */
382
			if (whence == SEEK_DATA) {
383
				pgofs = get_next_page_offset(&dn, pgofs);
384 385 386 387 388 389
				continue;
			} else {
				goto found;
			}
		}

390
		end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
391 392 393 394

		/* find data/hole in dnode block */
		for (; dn.ofs_in_node < end_offset;
				dn.ofs_in_node++, pgofs++,
395
				data_ofs = (loff_t)pgofs << PAGE_SHIFT) {
396
			block_t blkaddr;
397 398
			blkaddr = datablock_addr(dn.inode,
					dn.node_page, dn.ofs_in_node);
399

400
			if (__found_offset(blkaddr, dirty, pgofs, whence)) {
401 402 403 404 405 406 407 408 409 410
				f2fs_put_dnode(&dn);
				goto found;
			}
		}
		f2fs_put_dnode(&dn);
	}

	if (whence == SEEK_DATA)
		goto fail;
found:
411 412
	if (whence == SEEK_HOLE && data_ofs > isize)
		data_ofs = isize;
A
Al Viro 已提交
413
	inode_unlock(inode);
414 415
	return vfs_setpos(file, data_ofs, maxbytes);
fail:
A
Al Viro 已提交
416
	inode_unlock(inode);
417 418 419 420 421 422 423 424 425 426 427 428 429 430 431 432
	return -ENXIO;
}

static loff_t f2fs_llseek(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file->f_mapping->host;
	loff_t maxbytes = inode->i_sb->s_maxbytes;

	switch (whence) {
	case SEEK_SET:
	case SEEK_CUR:
	case SEEK_END:
		return generic_file_llseek_size(file, offset, whence,
						maxbytes, i_size_read(inode));
	case SEEK_DATA:
	case SEEK_HOLE:
433 434
		if (offset < 0)
			return -ENXIO;
435 436 437 438 439 440
		return f2fs_seek_block(file, offset, whence);
	}

	return -EINVAL;
}

J
Jaegeuk Kim 已提交
441 442
static int f2fs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
443
	struct inode *inode = file_inode(file);
444
	int err;
445 446

	/* we don't need to use inline_data strictly */
447 448 449
	err = f2fs_convert_inline_inode(inode);
	if (err)
		return err;
450

J
Jaegeuk Kim 已提交
451 452 453 454 455
	file_accessed(file);
	vma->vm_ops = &f2fs_file_vm_ops;
	return 0;
}

456 457
static int f2fs_file_open(struct inode *inode, struct file *filp)
{
458
	struct dentry *dir;
459

C
Chao Yu 已提交
460 461
	if (f2fs_encrypted_inode(inode)) {
		int ret = fscrypt_get_encryption_info(inode);
462
		if (ret)
463
			return -EACCES;
464
		if (!fscrypt_has_encryption_key(inode))
465
			return -ENOKEY;
466
	}
467 468 469 470
	dir = dget_parent(file_dentry(filp));
	if (f2fs_encrypted_inode(d_inode(dir)) &&
			!fscrypt_has_permitted_context(d_inode(dir), inode)) {
		dput(dir);
471
		return -EPERM;
472 473
	}
	dput(dir);
C
Chao Yu 已提交
474
	return dquot_file_open(inode, filp);
475 476
}

477
int truncate_data_blocks_range(struct dnode_of_data *dn, int count)
J
Jaegeuk Kim 已提交
478
{
479
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
J
Jaegeuk Kim 已提交
480
	struct f2fs_node *raw_node;
C
Chao Yu 已提交
481
	int nr_free = 0, ofs = dn->ofs_in_node, len = count;
J
Jaegeuk Kim 已提交
482
	__le32 *addr;
483 484 485 486
	int base = 0;

	if (IS_INODE(dn->node_page) && f2fs_has_extra_attr(dn->inode))
		base = get_extra_isize(dn->inode);
J
Jaegeuk Kim 已提交
487

488
	raw_node = F2FS_NODE(dn->node_page);
489
	addr = blkaddr_in_node(raw_node) + base + ofs;
J
Jaegeuk Kim 已提交
490

C
Chris Fries 已提交
491
	for (; count > 0; count--, addr++, dn->ofs_in_node++) {
J
Jaegeuk Kim 已提交
492 493 494 495
		block_t blkaddr = le32_to_cpu(*addr);
		if (blkaddr == NULL_ADDR)
			continue;

J
Jaegeuk Kim 已提交
496
		dn->data_blkaddr = NULL_ADDR;
497
		set_data_blkaddr(dn);
J
Jaegeuk Kim 已提交
498
		invalidate_blocks(sbi, blkaddr);
499
		if (dn->ofs_in_node == 0 && IS_INODE(dn->node_page))
500
			clear_inode_flag(dn->inode, FI_FIRST_BLOCK_WRITTEN);
J
Jaegeuk Kim 已提交
501 502
		nr_free++;
	}
C
Chao Yu 已提交
503

J
Jaegeuk Kim 已提交
504
	if (nr_free) {
C
Chao Yu 已提交
505 506 507 508 509 510
		pgoff_t fofs;
		/*
		 * once we invalidate valid blkaddr in range [ofs, ofs + count],
		 * we will invalidate all blkaddr in the whole range.
		 */
		fofs = start_bidx_of_node(ofs_of_node(dn->node_page),
511
							dn->inode) + ofs;
C
Chao Yu 已提交
512
		f2fs_update_extent_cache_range(dn, fofs, 0, len);
513
		dec_valid_block_count(sbi, dn->inode, nr_free);
J
Jaegeuk Kim 已提交
514 515
	}
	dn->ofs_in_node = ofs;
516

517
	f2fs_update_time(sbi, REQ_TIME);
518 519
	trace_f2fs_truncate_data_blocks_range(dn->inode, dn->nid,
					 dn->ofs_in_node, nr_free);
J
Jaegeuk Kim 已提交
520 521 522 523 524 525 526 527
	return nr_free;
}

void truncate_data_blocks(struct dnode_of_data *dn)
{
	truncate_data_blocks_range(dn, ADDRS_PER_BLOCK);
}

528
static int truncate_partial_data_page(struct inode *inode, u64 from,
529
								bool cache_only)
J
Jaegeuk Kim 已提交
530
{
531 532
	unsigned offset = from & (PAGE_SIZE - 1);
	pgoff_t index = from >> PAGE_SHIFT;
533
	struct address_space *mapping = inode->i_mapping;
J
Jaegeuk Kim 已提交
534 535
	struct page *page;

536
	if (!offset && !cache_only)
537
		return 0;
J
Jaegeuk Kim 已提交
538

539
	if (cache_only) {
540
		page = find_lock_page(mapping, index);
541 542 543
		if (page && PageUptodate(page))
			goto truncate_out;
		f2fs_put_page(page, 1);
544
		return 0;
545
	}
J
Jaegeuk Kim 已提交
546

547
	page = get_lock_data_page(inode, index, true);
548
	if (IS_ERR(page))
549
		return PTR_ERR(page) == -ENOENT ? 0 : PTR_ERR(page);
550
truncate_out:
551
	f2fs_wait_on_page_writeback(page, DATA, true);
552
	zero_user(page, offset, PAGE_SIZE - offset);
553 554 555 556

	/* An encrypted inode should have a key and truncate the last page. */
	f2fs_bug_on(F2FS_I_SB(inode), cache_only && f2fs_encrypted_inode(inode));
	if (!cache_only)
557
		set_page_dirty(page);
J
Jaegeuk Kim 已提交
558
	f2fs_put_page(page, 1);
559
	return 0;
J
Jaegeuk Kim 已提交
560 561
}

562
int truncate_blocks(struct inode *inode, u64 from, bool lock)
J
Jaegeuk Kim 已提交
563
{
564
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
J
Jaegeuk Kim 已提交
565 566 567
	unsigned int blocksize = inode->i_sb->s_blocksize;
	struct dnode_of_data dn;
	pgoff_t free_from;
H
Huajun Li 已提交
568
	int count = 0, err = 0;
569
	struct page *ipage;
570
	bool truncate_page = false;
J
Jaegeuk Kim 已提交
571

572 573
	trace_f2fs_truncate_blocks_enter(inode, from);

574
	free_from = (pgoff_t)F2FS_BYTES_TO_BLK(from + blocksize - 1);
J
Jaegeuk Kim 已提交
575

576 577 578
	if (free_from >= sbi->max_file_blocks)
		goto free_partial;

579 580
	if (lock)
		f2fs_lock_op(sbi);
H
Huajun Li 已提交
581

582 583 584 585 586 587 588
	ipage = get_node_page(sbi, inode->i_ino);
	if (IS_ERR(ipage)) {
		err = PTR_ERR(ipage);
		goto out;
	}

	if (f2fs_has_inline_data(inode)) {
589
		truncate_inline_inode(inode, ipage, from);
590
		f2fs_put_page(ipage, 1);
591
		truncate_page = true;
592 593 594 595
		goto out;
	}

	set_new_dnode(&dn, inode, ipage, NULL, 0);
596
	err = get_dnode_of_data(&dn, free_from, LOOKUP_NODE_RA);
J
Jaegeuk Kim 已提交
597 598 599
	if (err) {
		if (err == -ENOENT)
			goto free_next;
600
		goto out;
601 602
	}

603
	count = ADDRS_PER_PAGE(dn.node_page, inode);
J
Jaegeuk Kim 已提交
604 605

	count -= dn.ofs_in_node;
606
	f2fs_bug_on(sbi, count < 0);
607

J
Jaegeuk Kim 已提交
608 609 610 611 612 613 614 615
	if (dn.ofs_in_node || IS_INODE(dn.node_page)) {
		truncate_data_blocks_range(&dn, count);
		free_from += count;
	}

	f2fs_put_dnode(&dn);
free_next:
	err = truncate_inode_blocks(inode, free_from);
616 617 618
out:
	if (lock)
		f2fs_unlock_op(sbi);
619
free_partial:
620 621
	/* lastly zero out the first data page */
	if (!err)
622
		err = truncate_partial_data_page(inode, from, truncate_page);
J
Jaegeuk Kim 已提交
623

624
	trace_f2fs_truncate_blocks_exit(inode, err);
J
Jaegeuk Kim 已提交
625 626 627
	return err;
}

628
int f2fs_truncate(struct inode *inode)
J
Jaegeuk Kim 已提交
629
{
630 631
	int err;

J
Jaegeuk Kim 已提交
632 633
	if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
				S_ISLNK(inode->i_mode)))
634
		return 0;
J
Jaegeuk Kim 已提交
635

636 637
	trace_f2fs_truncate(inode);

638 639 640 641 642 643
#ifdef CONFIG_F2FS_FAULT_INJECTION
	if (time_to_inject(F2FS_I_SB(inode), FAULT_TRUNCATE)) {
		f2fs_show_injection_info(FAULT_TRUNCATE);
		return -EIO;
	}
#endif
644
	/* we should check inline_data size */
645
	if (!f2fs_may_inline_data(inode)) {
646 647 648
		err = f2fs_convert_inline_inode(inode);
		if (err)
			return err;
649 650
	}

651
	err = truncate_blocks(inode, i_size_read(inode), true);
652 653 654
	if (err)
		return err;

655
	inode->i_mtime = inode->i_ctime = current_time(inode);
656
	f2fs_mark_inode_dirty_sync(inode, false);
657
	return 0;
J
Jaegeuk Kim 已提交
658 659
}

660
int f2fs_getattr(const struct path *path, struct kstat *stat,
C
Chao Yu 已提交
661
		 u32 request_mask, unsigned int query_flags)
J
Jaegeuk Kim 已提交
662
{
663
	struct inode *inode = d_inode(path->dentry);
C
Chao Yu 已提交
664 665 666
	struct f2fs_inode_info *fi = F2FS_I(inode);
	unsigned int flags;

667
	flags = fi->i_flags & (FS_FL_USER_VISIBLE | FS_PROJINHERIT_FL);
C
Chao Yu 已提交
668 669 670 671 672 673 674 675 676 677 678 679 680 681 682 683 684
	if (flags & FS_APPEND_FL)
		stat->attributes |= STATX_ATTR_APPEND;
	if (flags & FS_COMPR_FL)
		stat->attributes |= STATX_ATTR_COMPRESSED;
	if (f2fs_encrypted_inode(inode))
		stat->attributes |= STATX_ATTR_ENCRYPTED;
	if (flags & FS_IMMUTABLE_FL)
		stat->attributes |= STATX_ATTR_IMMUTABLE;
	if (flags & FS_NODUMP_FL)
		stat->attributes |= STATX_ATTR_NODUMP;

	stat->attributes_mask |= (STATX_ATTR_APPEND |
				  STATX_ATTR_COMPRESSED |
				  STATX_ATTR_ENCRYPTED |
				  STATX_ATTR_IMMUTABLE |
				  STATX_ATTR_NODUMP);

J
Jaegeuk Kim 已提交
685 686 687 688 689 690 691 692 693 694 695 696 697 698 699 700 701 702 703 704 705 706 707 708 709 710 711
	generic_fillattr(inode, stat);
	return 0;
}

#ifdef CONFIG_F2FS_FS_POSIX_ACL
static void __setattr_copy(struct inode *inode, const struct iattr *attr)
{
	unsigned int ia_valid = attr->ia_valid;

	if (ia_valid & ATTR_UID)
		inode->i_uid = attr->ia_uid;
	if (ia_valid & ATTR_GID)
		inode->i_gid = attr->ia_gid;
	if (ia_valid & ATTR_ATIME)
		inode->i_atime = timespec_trunc(attr->ia_atime,
						inode->i_sb->s_time_gran);
	if (ia_valid & ATTR_MTIME)
		inode->i_mtime = timespec_trunc(attr->ia_mtime,
						inode->i_sb->s_time_gran);
	if (ia_valid & ATTR_CTIME)
		inode->i_ctime = timespec_trunc(attr->ia_ctime,
						inode->i_sb->s_time_gran);
	if (ia_valid & ATTR_MODE) {
		umode_t mode = attr->ia_mode;

		if (!in_group_p(inode->i_gid) && !capable(CAP_FSETID))
			mode &= ~S_ISGID;
712
		set_acl_inode(inode, mode);
J
Jaegeuk Kim 已提交
713 714 715 716 717 718 719 720
	}
}
#else
#define __setattr_copy setattr_copy
#endif

int f2fs_setattr(struct dentry *dentry, struct iattr *attr)
{
721
	struct inode *inode = d_inode(dentry);
J
Jaegeuk Kim 已提交
722
	int err;
723
	bool size_changed = false;
J
Jaegeuk Kim 已提交
724

725
	err = setattr_prepare(dentry, attr);
J
Jaegeuk Kim 已提交
726 727 728
	if (err)
		return err;

C
Chao Yu 已提交
729 730 731 732 733 734 735 736 737 738 739 740 741 742
	if (is_quota_modification(inode, attr)) {
		err = dquot_initialize(inode);
		if (err)
			return err;
	}
	if ((attr->ia_valid & ATTR_UID &&
		!uid_eq(attr->ia_uid, inode->i_uid)) ||
		(attr->ia_valid & ATTR_GID &&
		!gid_eq(attr->ia_gid, inode->i_gid))) {
		err = dquot_transfer(inode, attr);
		if (err)
			return err;
	}

743
	if (attr->ia_valid & ATTR_SIZE) {
744 745 746 747 748 749 750
		if (f2fs_encrypted_inode(inode)) {
			err = fscrypt_get_encryption_info(inode);
			if (err)
				return err;
			if (!fscrypt_has_encryption_key(inode))
				return -ENOKEY;
		}
751

752
		if (attr->ia_size <= i_size_read(inode)) {
753
			down_write(&F2FS_I(inode)->i_mmap_sem);
754
			truncate_setsize(inode, attr->ia_size);
755
			err = f2fs_truncate(inode);
756
			up_write(&F2FS_I(inode)->i_mmap_sem);
757 758
			if (err)
				return err;
759 760
		} else {
			/*
761 762
			 * do not trim all blocks after i_size if target size is
			 * larger than i_size.
763
			 */
764
			down_write(&F2FS_I(inode)->i_mmap_sem);
765
			truncate_setsize(inode, attr->ia_size);
766
			up_write(&F2FS_I(inode)->i_mmap_sem);
767 768

			/* should convert inline inode here */
769
			if (!f2fs_may_inline_data(inode)) {
770 771 772 773
				err = f2fs_convert_inline_inode(inode);
				if (err)
					return err;
			}
774
			inode->i_mtime = inode->i_ctime = current_time(inode);
775
		}
776 777

		size_changed = true;
J
Jaegeuk Kim 已提交
778 779 780 781 782
	}

	__setattr_copy(inode, attr);

	if (attr->ia_valid & ATTR_MODE) {
783
		err = posix_acl_chmod(inode, get_inode_mode(inode));
784 785 786
		if (err || is_inode_flag_set(inode, FI_ACL_MODE)) {
			inode->i_mode = F2FS_I(inode)->i_acl_mode;
			clear_inode_flag(inode, FI_ACL_MODE);
J
Jaegeuk Kim 已提交
787 788 789
		}
	}

790 791
	/* file size may changed here */
	f2fs_mark_inode_dirty_sync(inode, size_changed);
792 793 794 795

	/* inode change will produce dirty node pages flushed by checkpoint */
	f2fs_balance_fs(F2FS_I_SB(inode), true);

J
Jaegeuk Kim 已提交
796 797 798 799 800 801 802
	return err;
}

const struct inode_operations f2fs_file_inode_operations = {
	.getattr	= f2fs_getattr,
	.setattr	= f2fs_setattr,
	.get_acl	= f2fs_get_acl,
803
	.set_acl	= f2fs_set_acl,
J
Jaegeuk Kim 已提交
804 805 806
#ifdef CONFIG_F2FS_FS_XATTR
	.listxattr	= f2fs_listxattr,
#endif
J
Jaegeuk Kim 已提交
807
	.fiemap		= f2fs_fiemap,
J
Jaegeuk Kim 已提交
808 809
};

C
Chao Yu 已提交
810
static int fill_zero(struct inode *inode, pgoff_t index,
J
Jaegeuk Kim 已提交
811 812
					loff_t start, loff_t len)
{
813
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
J
Jaegeuk Kim 已提交
814 815 816
	struct page *page;

	if (!len)
C
Chao Yu 已提交
817
		return 0;
J
Jaegeuk Kim 已提交
818

J
Jaegeuk Kim 已提交
819
	f2fs_balance_fs(sbi, true);
820

821
	f2fs_lock_op(sbi);
822
	page = get_new_data_page(inode, NULL, index, false);
823
	f2fs_unlock_op(sbi);
J
Jaegeuk Kim 已提交
824

C
Chao Yu 已提交
825 826 827
	if (IS_ERR(page))
		return PTR_ERR(page);

828
	f2fs_wait_on_page_writeback(page, DATA, true);
C
Chao Yu 已提交
829 830 831 832
	zero_user(page, start, len);
	set_page_dirty(page);
	f2fs_put_page(page, 1);
	return 0;
J
Jaegeuk Kim 已提交
833 834 835 836 837 838
}

int truncate_hole(struct inode *inode, pgoff_t pg_start, pgoff_t pg_end)
{
	int err;

839
	while (pg_start < pg_end) {
J
Jaegeuk Kim 已提交
840
		struct dnode_of_data dn;
841
		pgoff_t end_offset, count;
842

J
Jaegeuk Kim 已提交
843
		set_new_dnode(&dn, inode, NULL, NULL, 0);
844
		err = get_dnode_of_data(&dn, pg_start, LOOKUP_NODE);
J
Jaegeuk Kim 已提交
845
		if (err) {
846 847
			if (err == -ENOENT) {
				pg_start++;
J
Jaegeuk Kim 已提交
848
				continue;
849
			}
J
Jaegeuk Kim 已提交
850 851 852
			return err;
		}

853
		end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
854 855 856 857 858
		count = min(end_offset - dn.ofs_in_node, pg_end - pg_start);

		f2fs_bug_on(F2FS_I_SB(inode), count == 0 || count > end_offset);

		truncate_data_blocks_range(&dn, count);
J
Jaegeuk Kim 已提交
859
		f2fs_put_dnode(&dn);
860 861

		pg_start += count;
J
Jaegeuk Kim 已提交
862 863 864 865
	}
	return 0;
}

C
Chao Yu 已提交
866
static int punch_hole(struct inode *inode, loff_t offset, loff_t len)
J
Jaegeuk Kim 已提交
867 868 869
{
	pgoff_t pg_start, pg_end;
	loff_t off_start, off_end;
870
	int ret;
J
Jaegeuk Kim 已提交
871

872 873 874
	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		return ret;
H
Huajun Li 已提交
875

876 877
	pg_start = ((unsigned long long) offset) >> PAGE_SHIFT;
	pg_end = ((unsigned long long) offset + len) >> PAGE_SHIFT;
J
Jaegeuk Kim 已提交
878

879 880
	off_start = offset & (PAGE_SIZE - 1);
	off_end = (offset + len) & (PAGE_SIZE - 1);
J
Jaegeuk Kim 已提交
881 882

	if (pg_start == pg_end) {
C
Chao Yu 已提交
883
		ret = fill_zero(inode, pg_start, off_start,
J
Jaegeuk Kim 已提交
884
						off_end - off_start);
C
Chao Yu 已提交
885 886
		if (ret)
			return ret;
J
Jaegeuk Kim 已提交
887
	} else {
C
Chao Yu 已提交
888 889
		if (off_start) {
			ret = fill_zero(inode, pg_start++, off_start,
890
						PAGE_SIZE - off_start);
C
Chao Yu 已提交
891 892 893 894 895 896 897 898
			if (ret)
				return ret;
		}
		if (off_end) {
			ret = fill_zero(inode, pg_end, 0, off_end);
			if (ret)
				return ret;
		}
J
Jaegeuk Kim 已提交
899 900 901 902

		if (pg_start < pg_end) {
			struct address_space *mapping = inode->i_mapping;
			loff_t blk_start, blk_end;
903
			struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
904

J
Jaegeuk Kim 已提交
905
			f2fs_balance_fs(sbi, true);
J
Jaegeuk Kim 已提交
906

907 908
			blk_start = (loff_t)pg_start << PAGE_SHIFT;
			blk_end = (loff_t)pg_end << PAGE_SHIFT;
909
			down_write(&F2FS_I(inode)->i_mmap_sem);
J
Jaegeuk Kim 已提交
910 911
			truncate_inode_pages_range(mapping, blk_start,
					blk_end - 1);
912

913
			f2fs_lock_op(sbi);
J
Jaegeuk Kim 已提交
914
			ret = truncate_hole(inode, pg_start, pg_end);
915
			f2fs_unlock_op(sbi);
916
			up_write(&F2FS_I(inode)->i_mmap_sem);
J
Jaegeuk Kim 已提交
917 918 919 920 921 922
		}
	}

	return ret;
}

923 924
static int __read_out_blkaddrs(struct inode *inode, block_t *blkaddr,
				int *do_replace, pgoff_t off, pgoff_t len)
C
Chao Yu 已提交
925 926 927
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn;
928
	int ret, done, i;
929

930
next_dnode:
931
	set_new_dnode(&dn, inode, NULL, NULL, 0);
932
	ret = get_dnode_of_data(&dn, off, LOOKUP_NODE_RA);
933 934 935
	if (ret && ret != -ENOENT) {
		return ret;
	} else if (ret == -ENOENT) {
936 937 938 939 940 941 942 943 944 945 946
		if (dn.max_level == 0)
			return -ENOENT;
		done = min((pgoff_t)ADDRS_PER_BLOCK - dn.ofs_in_node, len);
		blkaddr += done;
		do_replace += done;
		goto next;
	}

	done = min((pgoff_t)ADDRS_PER_PAGE(dn.node_page, inode) -
							dn.ofs_in_node, len);
	for (i = 0; i < done; i++, blkaddr++, do_replace++, dn.ofs_in_node++) {
947 948
		*blkaddr = datablock_addr(dn.inode,
					dn.node_page, dn.ofs_in_node);
949 950 951 952 953 954 955
		if (!is_checkpointed_data(sbi, *blkaddr)) {

			if (test_opt(sbi, LFS)) {
				f2fs_put_dnode(&dn);
				return -ENOTSUPP;
			}

956
			/* do not invalidate this block address */
957
			f2fs_update_data_blkaddr(&dn, NULL_ADDR);
958
			*do_replace = 1;
C
Chao Yu 已提交
959
		}
960
	}
961 962 963 964 965 966 967 968
	f2fs_put_dnode(&dn);
next:
	len -= done;
	off += done;
	if (len)
		goto next_dnode;
	return 0;
}
C
Chao Yu 已提交
969

970 971 972 973 974 975
static int __roll_back_blkaddrs(struct inode *inode, block_t *blkaddr,
				int *do_replace, pgoff_t off, int len)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn;
	int ret, i;
C
Chao Yu 已提交
976

977 978 979
	for (i = 0; i < len; i++, do_replace++, blkaddr++) {
		if (*do_replace == 0)
			continue;
C
Chao Yu 已提交
980

981 982 983 984 985 986 987
		set_new_dnode(&dn, inode, NULL, NULL, 0);
		ret = get_dnode_of_data(&dn, off + i, LOOKUP_NODE_RA);
		if (ret) {
			dec_valid_block_count(sbi, inode, 1);
			invalidate_blocks(sbi, *blkaddr);
		} else {
			f2fs_update_data_blkaddr(&dn, *blkaddr);
988
		}
989 990 991 992 993 994 995 996 997 998 999 1000
		f2fs_put_dnode(&dn);
	}
	return 0;
}

static int __clone_blkaddrs(struct inode *src_inode, struct inode *dst_inode,
			block_t *blkaddr, int *do_replace,
			pgoff_t src, pgoff_t dst, pgoff_t len, bool full)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(src_inode);
	pgoff_t i = 0;
	int ret;
1001

1002 1003 1004 1005
	while (i < len) {
		if (blkaddr[i] == NULL_ADDR && !full) {
			i++;
			continue;
1006
		}
C
Chao Yu 已提交
1007

1008 1009 1010 1011 1012
		if (do_replace[i] || blkaddr[i] == NULL_ADDR) {
			struct dnode_of_data dn;
			struct node_info ni;
			size_t new_size;
			pgoff_t ilen;
C
Chao Yu 已提交
1013

1014 1015 1016 1017
			set_new_dnode(&dn, dst_inode, NULL, NULL, 0);
			ret = get_dnode_of_data(&dn, dst + i, ALLOC_NODE);
			if (ret)
				return ret;
C
Chao Yu 已提交
1018

1019 1020 1021 1022 1023
			get_node_info(sbi, dn.nid, &ni);
			ilen = min((pgoff_t)
				ADDRS_PER_PAGE(dn.node_page, dst_inode) -
						dn.ofs_in_node, len - i);
			do {
1024 1025
				dn.data_blkaddr = datablock_addr(dn.inode,
						dn.node_page, dn.ofs_in_node);
1026 1027 1028 1029
				truncate_data_blocks_range(&dn, 1);

				if (do_replace[i]) {
					f2fs_i_blocks_write(src_inode,
C
Chao Yu 已提交
1030
							1, false, false);
1031
					f2fs_i_blocks_write(dst_inode,
C
Chao Yu 已提交
1032
							1, true, false);
1033 1034 1035 1036 1037 1038 1039 1040 1041 1042
					f2fs_replace_block(sbi, &dn, dn.data_blkaddr,
					blkaddr[i], ni.version, true, false);

					do_replace[i] = 0;
				}
				dn.ofs_in_node++;
				i++;
				new_size = (dst + i) << PAGE_SHIFT;
				if (dst_inode->i_size < new_size)
					f2fs_i_size_write(dst_inode, new_size);
1043
			} while (--ilen && (do_replace[i] || blkaddr[i] == NULL_ADDR));
1044

1045 1046 1047 1048 1049 1050 1051 1052 1053 1054 1055 1056 1057 1058 1059 1060
			f2fs_put_dnode(&dn);
		} else {
			struct page *psrc, *pdst;

			psrc = get_lock_data_page(src_inode, src + i, true);
			if (IS_ERR(psrc))
				return PTR_ERR(psrc);
			pdst = get_new_data_page(dst_inode, NULL, dst + i,
								true);
			if (IS_ERR(pdst)) {
				f2fs_put_page(psrc, 1);
				return PTR_ERR(pdst);
			}
			f2fs_copy_page(psrc, pdst);
			set_page_dirty(pdst);
			f2fs_put_page(pdst, 1);
1061
			f2fs_put_page(psrc, 1);
C
Chao Yu 已提交
1062

1063 1064 1065 1066 1067
			ret = truncate_hole(src_inode, src + i, src + i + 1);
			if (ret)
				return ret;
			i++;
		}
1068 1069
	}
	return 0;
1070
}
C
Chao Yu 已提交
1071

1072 1073
static int __exchange_data_block(struct inode *src_inode,
			struct inode *dst_inode, pgoff_t src, pgoff_t dst,
1074
			pgoff_t len, bool full)
1075 1076 1077
{
	block_t *src_blkaddr;
	int *do_replace;
1078
	pgoff_t olen;
1079 1080
	int ret;

1081 1082
	while (len) {
		olen = min((pgoff_t)4 * ADDRS_PER_BLOCK, len);
1083

M
Michal Hocko 已提交
1084
		src_blkaddr = kvzalloc(sizeof(block_t) * olen, GFP_KERNEL);
1085 1086
		if (!src_blkaddr)
			return -ENOMEM;
1087

M
Michal Hocko 已提交
1088
		do_replace = kvzalloc(sizeof(int) * olen, GFP_KERNEL);
1089 1090 1091 1092
		if (!do_replace) {
			kvfree(src_blkaddr);
			return -ENOMEM;
		}
1093

1094 1095 1096 1097
		ret = __read_out_blkaddrs(src_inode, src_blkaddr,
					do_replace, src, olen);
		if (ret)
			goto roll_back;
1098

1099 1100 1101 1102 1103 1104 1105 1106 1107 1108 1109 1110
		ret = __clone_blkaddrs(src_inode, dst_inode, src_blkaddr,
					do_replace, src, dst, olen, full);
		if (ret)
			goto roll_back;

		src += olen;
		dst += olen;
		len -= olen;

		kvfree(src_blkaddr);
		kvfree(do_replace);
	}
1111 1112 1113 1114 1115 1116
	return 0;

roll_back:
	__roll_back_blkaddrs(src_inode, src_blkaddr, do_replace, src, len);
	kvfree(src_blkaddr);
	kvfree(do_replace);
1117 1118
	return ret;
}
C
Chao Yu 已提交
1119

1120 1121 1122 1123
static int f2fs_do_collapse(struct inode *inode, pgoff_t start, pgoff_t end)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	pgoff_t nrpages = (i_size_read(inode) + PAGE_SIZE - 1) / PAGE_SIZE;
1124
	int ret;
1125

1126 1127
	f2fs_balance_fs(sbi, true);
	f2fs_lock_op(sbi);
1128 1129 1130

	f2fs_drop_extent_tree(inode);

1131 1132
	ret = __exchange_data_block(inode, inode, end, start, nrpages - end, true);
	f2fs_unlock_op(sbi);
C
Chao Yu 已提交
1133 1134 1135 1136 1137 1138 1139 1140 1141 1142 1143 1144 1145 1146 1147 1148
	return ret;
}

static int f2fs_collapse_range(struct inode *inode, loff_t offset, loff_t len)
{
	pgoff_t pg_start, pg_end;
	loff_t new_size;
	int ret;

	if (offset + len >= i_size_read(inode))
		return -EINVAL;

	/* collapse range should be aligned to block size of f2fs. */
	if (offset & (F2FS_BLKSIZE - 1) || len & (F2FS_BLKSIZE - 1))
		return -EINVAL;

1149 1150 1151
	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		return ret;
1152

1153 1154
	pg_start = offset >> PAGE_SHIFT;
	pg_end = (offset + len) >> PAGE_SHIFT;
C
Chao Yu 已提交
1155

1156
	down_write(&F2FS_I(inode)->i_mmap_sem);
C
Chao Yu 已提交
1157 1158 1159
	/* write out all dirty pages from offset */
	ret = filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
	if (ret)
1160
		goto out;
C
Chao Yu 已提交
1161 1162 1163 1164 1165

	truncate_pagecache(inode, offset);

	ret = f2fs_do_collapse(inode, pg_start, pg_end);
	if (ret)
1166
		goto out;
C
Chao Yu 已提交
1167

1168 1169 1170 1171
	/* write out all moved pages, if possible */
	filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
	truncate_pagecache(inode, offset);

C
Chao Yu 已提交
1172
	new_size = i_size_read(inode) - len;
1173
	truncate_pagecache(inode, new_size);
C
Chao Yu 已提交
1174 1175 1176

	ret = truncate_blocks(inode, new_size, true);
	if (!ret)
1177
		f2fs_i_size_write(inode, new_size);
C
Chao Yu 已提交
1178

1179 1180
out:
	up_write(&F2FS_I(inode)->i_mmap_sem);
C
Chao Yu 已提交
1181 1182 1183
	return ret;
}

1184 1185 1186 1187 1188 1189 1190 1191 1192 1193
static int f2fs_do_zero_range(struct dnode_of_data *dn, pgoff_t start,
								pgoff_t end)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	pgoff_t index = start;
	unsigned int ofs_in_node = dn->ofs_in_node;
	blkcnt_t count = 0;
	int ret;

	for (; index < end; index++, dn->ofs_in_node++) {
1194 1195
		if (datablock_addr(dn->inode, dn->node_page,
					dn->ofs_in_node) == NULL_ADDR)
1196 1197 1198 1199 1200 1201 1202 1203 1204 1205
			count++;
	}

	dn->ofs_in_node = ofs_in_node;
	ret = reserve_new_blocks(dn, count);
	if (ret)
		return ret;

	dn->ofs_in_node = ofs_in_node;
	for (index = start; index < end; index++, dn->ofs_in_node++) {
1206 1207
		dn->data_blkaddr = datablock_addr(dn->inode,
					dn->node_page, dn->ofs_in_node);
1208 1209 1210 1211 1212 1213 1214 1215 1216 1217 1218 1219 1220 1221 1222 1223 1224 1225 1226 1227
		/*
		 * reserve_new_blocks will not guarantee entire block
		 * allocation.
		 */
		if (dn->data_blkaddr == NULL_ADDR) {
			ret = -ENOSPC;
			break;
		}
		if (dn->data_blkaddr != NEW_ADDR) {
			invalidate_blocks(sbi, dn->data_blkaddr);
			dn->data_blkaddr = NEW_ADDR;
			set_data_blkaddr(dn);
		}
	}

	f2fs_update_extent_cache_range(dn, start, 0, index - start);

	return ret;
}

C
Chao Yu 已提交
1228 1229 1230 1231 1232 1233 1234 1235 1236 1237 1238 1239 1240 1241
static int f2fs_zero_range(struct inode *inode, loff_t offset, loff_t len,
								int mode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct address_space *mapping = inode->i_mapping;
	pgoff_t index, pg_start, pg_end;
	loff_t new_size = i_size_read(inode);
	loff_t off_start, off_end;
	int ret = 0;

	ret = inode_newsize_ok(inode, (len + offset));
	if (ret)
		return ret;

1242 1243 1244
	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		return ret;
C
Chao Yu 已提交
1245

1246
	down_write(&F2FS_I(inode)->i_mmap_sem);
C
Chao Yu 已提交
1247 1248
	ret = filemap_write_and_wait_range(mapping, offset, offset + len - 1);
	if (ret)
1249
		goto out_sem;
C
Chao Yu 已提交
1250 1251 1252

	truncate_pagecache_range(inode, offset, offset + len - 1);

1253 1254
	pg_start = ((unsigned long long) offset) >> PAGE_SHIFT;
	pg_end = ((unsigned long long) offset + len) >> PAGE_SHIFT;
C
Chao Yu 已提交
1255

1256 1257
	off_start = offset & (PAGE_SIZE - 1);
	off_end = (offset + len) & (PAGE_SIZE - 1);
C
Chao Yu 已提交
1258 1259

	if (pg_start == pg_end) {
C
Chao Yu 已提交
1260 1261 1262
		ret = fill_zero(inode, pg_start, off_start,
						off_end - off_start);
		if (ret)
1263
			goto out_sem;
C
Chao Yu 已提交
1264

C
Chao Yu 已提交
1265 1266 1267
		new_size = max_t(loff_t, new_size, offset + len);
	} else {
		if (off_start) {
C
Chao Yu 已提交
1268
			ret = fill_zero(inode, pg_start++, off_start,
1269
						PAGE_SIZE - off_start);
C
Chao Yu 已提交
1270
			if (ret)
1271
				goto out_sem;
C
Chao Yu 已提交
1272

C
Chao Yu 已提交
1273
			new_size = max_t(loff_t, new_size,
1274
					(loff_t)pg_start << PAGE_SHIFT);
C
Chao Yu 已提交
1275 1276
		}

1277
		for (index = pg_start; index < pg_end;) {
C
Chao Yu 已提交
1278
			struct dnode_of_data dn;
1279 1280
			unsigned int end_offset;
			pgoff_t end;
C
Chao Yu 已提交
1281 1282 1283

			f2fs_lock_op(sbi);

1284 1285
			set_new_dnode(&dn, inode, NULL, NULL, 0);
			ret = get_dnode_of_data(&dn, index, ALLOC_NODE);
C
Chao Yu 已提交
1286 1287 1288 1289 1290
			if (ret) {
				f2fs_unlock_op(sbi);
				goto out;
			}

1291 1292 1293 1294
			end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
			end = min(pg_end, end_offset - dn.ofs_in_node + index);

			ret = f2fs_do_zero_range(&dn, index, end);
C
Chao Yu 已提交
1295 1296
			f2fs_put_dnode(&dn);
			f2fs_unlock_op(sbi);
1297 1298 1299

			f2fs_balance_fs(sbi, dn.node_changed);

1300 1301
			if (ret)
				goto out;
C
Chao Yu 已提交
1302

1303
			index = end;
C
Chao Yu 已提交
1304
			new_size = max_t(loff_t, new_size,
1305
					(loff_t)index << PAGE_SHIFT);
C
Chao Yu 已提交
1306 1307 1308
		}

		if (off_end) {
C
Chao Yu 已提交
1309 1310 1311 1312
			ret = fill_zero(inode, pg_end, 0, off_end);
			if (ret)
				goto out;

C
Chao Yu 已提交
1313 1314 1315 1316 1317
			new_size = max_t(loff_t, new_size, offset + len);
		}
	}

out:
1318
	if (!(mode & FALLOC_FL_KEEP_SIZE) && i_size_read(inode) < new_size)
1319
		f2fs_i_size_write(inode, new_size);
1320 1321
out_sem:
	up_write(&F2FS_I(inode)->i_mmap_sem);
C
Chao Yu 已提交
1322 1323 1324 1325

	return ret;
}

C
Chao Yu 已提交
1326 1327 1328
static int f2fs_insert_range(struct inode *inode, loff_t offset, loff_t len)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1329
	pgoff_t nr, pg_start, pg_end, delta, idx;
C
Chao Yu 已提交
1330
	loff_t new_size;
1331
	int ret = 0;
C
Chao Yu 已提交
1332 1333

	new_size = i_size_read(inode) + len;
1334 1335 1336
	ret = inode_newsize_ok(inode, new_size);
	if (ret)
		return ret;
C
Chao Yu 已提交
1337 1338 1339 1340 1341 1342 1343 1344

	if (offset >= i_size_read(inode))
		return -EINVAL;

	/* insert range should be aligned to block size of f2fs. */
	if (offset & (F2FS_BLKSIZE - 1) || len & (F2FS_BLKSIZE - 1))
		return -EINVAL;

1345 1346 1347
	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		return ret;
1348

J
Jaegeuk Kim 已提交
1349
	f2fs_balance_fs(sbi, true);
1350

1351
	down_write(&F2FS_I(inode)->i_mmap_sem);
C
Chao Yu 已提交
1352 1353
	ret = truncate_blocks(inode, i_size_read(inode), true);
	if (ret)
1354
		goto out;
C
Chao Yu 已提交
1355 1356 1357 1358

	/* write out all dirty pages from offset */
	ret = filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
	if (ret)
1359
		goto out;
C
Chao Yu 已提交
1360 1361 1362

	truncate_pagecache(inode, offset);

1363 1364
	pg_start = offset >> PAGE_SHIFT;
	pg_end = (offset + len) >> PAGE_SHIFT;
C
Chao Yu 已提交
1365
	delta = pg_end - pg_start;
1366 1367 1368 1369 1370 1371 1372
	idx = (i_size_read(inode) + PAGE_SIZE - 1) / PAGE_SIZE;

	while (!ret && idx > pg_start) {
		nr = idx - pg_start;
		if (nr > delta)
			nr = delta;
		idx -= nr;
C
Chao Yu 已提交
1373 1374

		f2fs_lock_op(sbi);
1375 1376
		f2fs_drop_extent_tree(inode);

1377 1378
		ret = __exchange_data_block(inode, inode, idx,
					idx + delta, nr, false);
C
Chao Yu 已提交
1379 1380 1381
		f2fs_unlock_op(sbi);
	}

1382 1383 1384 1385 1386
	/* write out all moved pages, if possible */
	filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
	truncate_pagecache(inode, offset);

	if (!ret)
1387
		f2fs_i_size_write(inode, new_size);
1388 1389
out:
	up_write(&F2FS_I(inode)->i_mmap_sem);
C
Chao Yu 已提交
1390 1391 1392
	return ret;
}

J
Jaegeuk Kim 已提交
1393 1394 1395
static int expand_inode_data(struct inode *inode, loff_t offset,
					loff_t len, int mode)
{
1396
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1397 1398
	struct f2fs_map_blocks map = { .m_next_pgofs = NULL };
	pgoff_t pg_end;
J
Jaegeuk Kim 已提交
1399
	loff_t new_size = i_size_read(inode);
1400
	loff_t off_end;
1401
	int err;
J
Jaegeuk Kim 已提交
1402

1403 1404 1405
	err = inode_newsize_ok(inode, (len + offset));
	if (err)
		return err;
J
Jaegeuk Kim 已提交
1406

1407 1408 1409
	err = f2fs_convert_inline_inode(inode);
	if (err)
		return err;
1410

J
Jaegeuk Kim 已提交
1411
	f2fs_balance_fs(sbi, true);
1412

1413
	pg_end = ((unsigned long long)offset + len) >> PAGE_SHIFT;
1414
	off_end = (offset + len) & (PAGE_SIZE - 1);
J
Jaegeuk Kim 已提交
1415

1416 1417 1418 1419
	map.m_lblk = ((unsigned long long)offset) >> PAGE_SHIFT;
	map.m_len = pg_end - map.m_lblk;
	if (off_end)
		map.m_len++;
1420

1421 1422
	err = f2fs_map_blocks(inode, &map, 1, F2FS_GET_BLOCK_PRE_AIO);
	if (err) {
1423
		pgoff_t last_off;
J
Jaegeuk Kim 已提交
1424

1425
		if (!map.m_len)
1426
			return err;
1427

1428 1429 1430 1431 1432 1433 1434
		last_off = map.m_lblk + map.m_len - 1;

		/* update new size to the failed position */
		new_size = (last_off == pg_end) ? offset + len:
					(loff_t)(last_off + 1) << PAGE_SHIFT;
	} else {
		new_size = ((loff_t)pg_end << PAGE_SHIFT) + off_end;
J
Jaegeuk Kim 已提交
1435 1436
	}

1437
	if (!(mode & FALLOC_FL_KEEP_SIZE) && i_size_read(inode) < new_size)
1438
		f2fs_i_size_write(inode, new_size);
J
Jaegeuk Kim 已提交
1439

1440
	return err;
J
Jaegeuk Kim 已提交
1441 1442 1443 1444 1445
}

static long f2fs_fallocate(struct file *file, int mode,
				loff_t offset, loff_t len)
{
A
Al Viro 已提交
1446
	struct inode *inode = file_inode(file);
1447
	long ret = 0;
J
Jaegeuk Kim 已提交
1448

1449 1450 1451 1452
	/* f2fs only support ->fallocate for regular file */
	if (!S_ISREG(inode->i_mode))
		return -EINVAL;

C
Chao Yu 已提交
1453 1454
	if (f2fs_encrypted_inode(inode) &&
		(mode & (FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_INSERT_RANGE)))
1455 1456
		return -EOPNOTSUPP;

C
Chao Yu 已提交
1457
	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |
C
Chao Yu 已提交
1458 1459
			FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_ZERO_RANGE |
			FALLOC_FL_INSERT_RANGE))
J
Jaegeuk Kim 已提交
1460 1461
		return -EOPNOTSUPP;

A
Al Viro 已提交
1462
	inode_lock(inode);
1463

1464 1465 1466 1467
	if (mode & FALLOC_FL_PUNCH_HOLE) {
		if (offset >= inode->i_size)
			goto out;

C
Chao Yu 已提交
1468
		ret = punch_hole(inode, offset, len);
C
Chao Yu 已提交
1469 1470
	} else if (mode & FALLOC_FL_COLLAPSE_RANGE) {
		ret = f2fs_collapse_range(inode, offset, len);
C
Chao Yu 已提交
1471 1472
	} else if (mode & FALLOC_FL_ZERO_RANGE) {
		ret = f2fs_zero_range(inode, offset, len, mode);
C
Chao Yu 已提交
1473 1474
	} else if (mode & FALLOC_FL_INSERT_RANGE) {
		ret = f2fs_insert_range(inode, offset, len);
C
Chao Yu 已提交
1475
	} else {
J
Jaegeuk Kim 已提交
1476
		ret = expand_inode_data(inode, offset, len, mode);
C
Chao Yu 已提交
1477
	}
J
Jaegeuk Kim 已提交
1478

1479
	if (!ret) {
1480
		inode->i_mtime = inode->i_ctime = current_time(inode);
1481
		f2fs_mark_inode_dirty_sync(inode, false);
1482 1483
		if (mode & FALLOC_FL_KEEP_SIZE)
			file_set_keep_isize(inode);
1484
		f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
1485
	}
1486

1487
out:
A
Al Viro 已提交
1488
	inode_unlock(inode);
1489

1490
	trace_f2fs_fallocate(inode, mode, offset, len, ret);
J
Jaegeuk Kim 已提交
1491 1492 1493
	return ret;
}

1494 1495
static int f2fs_release_file(struct inode *inode, struct file *filp)
{
1496 1497 1498 1499 1500 1501 1502 1503
	/*
	 * f2fs_relase_file is called at every close calls. So we should
	 * not drop any inmemory pages by close called by other process.
	 */
	if (!(filp->f_mode & FMODE_WRITE) ||
			atomic_read(&inode->i_writecount) != 1)
		return 0;

1504 1505
	/* some remained atomic pages should discarded */
	if (f2fs_is_atomic_file(inode))
1506
		drop_inmem_pages(inode);
1507
	if (f2fs_is_volatile_file(inode)) {
1508
		clear_inode_flag(inode, FI_VOLATILE_FILE);
1509
		stat_dec_volatile_write(inode);
1510
		set_inode_flag(inode, FI_DROP_CACHE);
1511
		filemap_fdatawrite(inode->i_mapping);
1512
		clear_inode_flag(inode, FI_DROP_CACHE);
1513 1514 1515 1516
	}
	return 0;
}

1517 1518 1519 1520 1521 1522 1523 1524 1525 1526 1527 1528 1529 1530 1531 1532
static int f2fs_file_flush(struct file *file, fl_owner_t id)
{
	struct inode *inode = file_inode(file);

	/*
	 * If the process doing a transaction is crashed, we should do
	 * roll-back. Otherwise, other reader/write can see corrupted database
	 * until all the writers close its file. Since this should be done
	 * before dropping file lock, it needs to do in ->flush.
	 */
	if (f2fs_is_atomic_file(inode) &&
			F2FS_I(inode)->inmem_task == current)
		drop_inmem_pages(inode);
	return 0;
}

1533
static int f2fs_ioc_getflags(struct file *filp, unsigned long arg)
J
Jaegeuk Kim 已提交
1534
{
A
Al Viro 已提交
1535
	struct inode *inode = file_inode(filp);
J
Jaegeuk Kim 已提交
1536
	struct f2fs_inode_info *fi = F2FS_I(inode);
1537 1538
	unsigned int flags = fi->i_flags &
			(FS_FL_USER_VISIBLE | FS_PROJINHERIT_FL);
1539 1540
	return put_user(flags, (int __user *)arg);
}
J
Jaegeuk Kim 已提交
1541

1542 1543 1544 1545 1546 1547 1548 1549 1550 1551 1552 1553 1554 1555 1556 1557 1558 1559 1560 1561 1562 1563 1564 1565 1566 1567 1568 1569 1570 1571 1572 1573
static int __f2fs_ioc_setflags(struct inode *inode, unsigned int flags)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);
	unsigned int oldflags;

	/* Is it quota file? Do not allow user to mess with it */
	if (IS_NOQUOTA(inode))
		return -EPERM;

	flags = f2fs_mask_flags(inode->i_mode, flags);

	oldflags = fi->i_flags;

	if ((flags ^ oldflags) & (FS_APPEND_FL | FS_IMMUTABLE_FL))
		if (!capable(CAP_LINUX_IMMUTABLE))
			return -EPERM;

	flags = flags & (FS_FL_USER_MODIFIABLE | FS_PROJINHERIT_FL);
	flags |= oldflags & ~(FS_FL_USER_MODIFIABLE | FS_PROJINHERIT_FL);
	fi->i_flags = flags;

	if (fi->i_flags & FS_PROJINHERIT_FL)
		set_inode_flag(inode, FI_PROJ_INHERIT);
	else
		clear_inode_flag(inode, FI_PROJ_INHERIT);

	inode->i_ctime = current_time(inode);
	f2fs_set_inode_flags(inode);
	f2fs_mark_inode_dirty_sync(inode, false);
	return 0;
}

1574 1575 1576
static int f2fs_ioc_setflags(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
1577
	unsigned int flags;
1578
	int ret;
J
Jaegeuk Kim 已提交
1579

1580 1581 1582 1583 1584 1585
	if (!inode_owner_or_capable(inode))
		return -EACCES;

	if (get_user(flags, (int __user *)arg))
		return -EFAULT;

1586 1587 1588
	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;
J
Jaegeuk Kim 已提交
1589

A
Al Viro 已提交
1590
	inode_lock(inode);
J
Jaegeuk Kim 已提交
1591

1592
	ret = __f2fs_ioc_setflags(inode, flags);
J
Jaegeuk Kim 已提交
1593

1594
	inode_unlock(inode);
1595 1596 1597
	mnt_drop_write_file(filp);
	return ret;
}
1598

C
Chao Yu 已提交
1599 1600 1601 1602 1603 1604 1605
static int f2fs_ioc_getversion(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);

	return put_user(inode->i_generation, (int __user *)arg);
}

J
Jaegeuk Kim 已提交
1606 1607 1608
static int f2fs_ioc_start_atomic_write(struct file *filp)
{
	struct inode *inode = file_inode(filp);
1609
	int ret;
J
Jaegeuk Kim 已提交
1610 1611 1612 1613

	if (!inode_owner_or_capable(inode))
		return -EACCES;

1614 1615 1616
	if (!S_ISREG(inode->i_mode))
		return -EINVAL;

1617 1618 1619 1620
	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

1621 1622
	inode_lock(inode);

1623
	if (f2fs_is_atomic_file(inode))
1624
		goto out;
J
Jaegeuk Kim 已提交
1625

1626 1627
	ret = f2fs_convert_inline_inode(inode);
	if (ret)
1628
		goto out;
J
Jaegeuk Kim 已提交
1629

1630
	set_inode_flag(inode, FI_ATOMIC_FILE);
1631
	set_inode_flag(inode, FI_HOT_DATA);
1632 1633
	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);

1634
	if (!get_dirty_pages(inode))
1635
		goto inc_stat;
1636 1637

	f2fs_msg(F2FS_I_SB(inode)->sb, KERN_WARNING,
1638
		"Unexpected flush for atomic writes: ino=%lu, npages=%u",
1639 1640
					inode->i_ino, get_dirty_pages(inode));
	ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX);
1641
	if (ret) {
1642
		clear_inode_flag(inode, FI_ATOMIC_FILE);
C
Chao Yu 已提交
1643
		clear_inode_flag(inode, FI_HOT_DATA);
1644 1645 1646 1647
		goto out;
	}

inc_stat:
1648
	F2FS_I(inode)->inmem_task = current;
1649 1650
	stat_inc_atomic_write(inode);
	stat_update_max_atomic_write(inode);
1651
out:
1652
	inode_unlock(inode);
1653
	mnt_drop_write_file(filp);
1654
	return ret;
J
Jaegeuk Kim 已提交
1655 1656 1657 1658 1659 1660 1661 1662 1663 1664 1665 1666 1667 1668
}

static int f2fs_ioc_commit_atomic_write(struct file *filp)
{
	struct inode *inode = file_inode(filp);
	int ret;

	if (!inode_owner_or_capable(inode))
		return -EACCES;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

1669 1670
	inode_lock(inode);

1671 1672 1673
	if (f2fs_is_volatile_file(inode))
		goto err_out;

1674
	if (f2fs_is_atomic_file(inode)) {
1675
		ret = commit_inmem_pages(inode);
C
Chao Yu 已提交
1676
		if (ret)
1677
			goto err_out;
C
Chao Yu 已提交
1678

1679
		ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 0, true);
C
Chao Yu 已提交
1680 1681
		if (!ret) {
			clear_inode_flag(inode, FI_ATOMIC_FILE);
C
Chao Yu 已提交
1682
			clear_inode_flag(inode, FI_HOT_DATA);
C
Chao Yu 已提交
1683
			stat_dec_atomic_write(inode);
1684
		}
1685
	} else {
1686
		ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 1, false);
1687
	}
1688
err_out:
1689
	inode_unlock(inode);
J
Jaegeuk Kim 已提交
1690 1691 1692 1693
	mnt_drop_write_file(filp);
	return ret;
}

1694 1695 1696
static int f2fs_ioc_start_volatile_write(struct file *filp)
{
	struct inode *inode = file_inode(filp);
1697
	int ret;
1698 1699 1700 1701

	if (!inode_owner_or_capable(inode))
		return -EACCES;

1702 1703 1704
	if (!S_ISREG(inode->i_mode))
		return -EINVAL;

1705 1706 1707 1708
	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

1709 1710
	inode_lock(inode);

1711
	if (f2fs_is_volatile_file(inode))
1712
		goto out;
1713

1714 1715
	ret = f2fs_convert_inline_inode(inode);
	if (ret)
1716
		goto out;
1717

1718 1719 1720
	stat_inc_volatile_write(inode);
	stat_update_max_volatile_write(inode);

1721
	set_inode_flag(inode, FI_VOLATILE_FILE);
1722
	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
1723
out:
1724
	inode_unlock(inode);
1725 1726
	mnt_drop_write_file(filp);
	return ret;
1727 1728
}

1729 1730 1731
static int f2fs_ioc_release_volatile_write(struct file *filp)
{
	struct inode *inode = file_inode(filp);
1732
	int ret;
1733 1734 1735 1736

	if (!inode_owner_or_capable(inode))
		return -EACCES;

1737 1738 1739 1740
	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

1741 1742
	inode_lock(inode);

1743
	if (!f2fs_is_volatile_file(inode))
1744
		goto out;
1745

1746 1747 1748 1749
	if (!f2fs_is_first_block_written(inode)) {
		ret = truncate_partial_data_page(inode, 0, true);
		goto out;
	}
1750

1751 1752
	ret = punch_hole(inode, 0, F2FS_BLKSIZE);
out:
1753
	inode_unlock(inode);
1754 1755
	mnt_drop_write_file(filp);
	return ret;
1756 1757 1758 1759 1760 1761 1762 1763 1764 1765 1766 1767 1768 1769
}

static int f2fs_ioc_abort_volatile_write(struct file *filp)
{
	struct inode *inode = file_inode(filp);
	int ret;

	if (!inode_owner_or_capable(inode))
		return -EACCES;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

1770 1771
	inode_lock(inode);

1772
	if (f2fs_is_atomic_file(inode))
1773
		drop_inmem_pages(inode);
1774
	if (f2fs_is_volatile_file(inode)) {
1775
		clear_inode_flag(inode, FI_VOLATILE_FILE);
1776
		stat_dec_volatile_write(inode);
1777
		ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 0, true);
1778
	}
1779

1780 1781
	inode_unlock(inode);

1782
	mnt_drop_write_file(filp);
1783
	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
1784 1785 1786
	return ret;
}

J
Jaegeuk Kim 已提交
1787 1788 1789 1790 1791 1792
static int f2fs_ioc_shutdown(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct super_block *sb = sbi->sb;
	__u32 in;
1793
	int ret;
J
Jaegeuk Kim 已提交
1794 1795 1796 1797 1798 1799 1800

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (get_user(in, (__u32 __user *)arg))
		return -EFAULT;

1801 1802 1803 1804
	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

J
Jaegeuk Kim 已提交
1805 1806 1807 1808
	switch (in) {
	case F2FS_GOING_DOWN_FULLSYNC:
		sb = freeze_bdev(sb->s_bdev);
		if (sb && !IS_ERR(sb)) {
1809
			f2fs_stop_checkpoint(sbi, false);
J
Jaegeuk Kim 已提交
1810 1811 1812 1813 1814 1815
			thaw_bdev(sb->s_bdev, sb);
		}
		break;
	case F2FS_GOING_DOWN_METASYNC:
		/* do checkpoint only */
		f2fs_sync_fs(sb, 1);
1816
		f2fs_stop_checkpoint(sbi, false);
J
Jaegeuk Kim 已提交
1817 1818
		break;
	case F2FS_GOING_DOWN_NOSYNC:
1819
		f2fs_stop_checkpoint(sbi, false);
J
Jaegeuk Kim 已提交
1820
		break;
1821
	case F2FS_GOING_DOWN_METAFLUSH:
C
Chao Yu 已提交
1822
		sync_meta_pages(sbi, META, LONG_MAX, FS_META_IO);
1823
		f2fs_stop_checkpoint(sbi, false);
1824
		break;
J
Jaegeuk Kim 已提交
1825
	default:
1826 1827
		ret = -EINVAL;
		goto out;
J
Jaegeuk Kim 已提交
1828
	}
1829
	f2fs_update_time(sbi, REQ_TIME);
1830 1831 1832
out:
	mnt_drop_write_file(filp);
	return ret;
J
Jaegeuk Kim 已提交
1833 1834
}

static int f2fs_ioc_fitrim(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct super_block *sb = inode->i_sb;
	struct request_queue *q = bdev_get_queue(sb->s_bdev);
	struct fstrim_range range;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (!blk_queue_discard(q))
		return -EOPNOTSUPP;

	if (copy_from_user(&range, (struct fstrim_range __user *)arg,
				sizeof(range)))
		return -EFAULT;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	range.minlen = max((unsigned int)range.minlen,
				q->limits.discard_granularity);
	ret = f2fs_trim_fs(F2FS_SB(sb), &range);
	mnt_drop_write_file(filp);
	if (ret < 0)
		return ret;

	if (copy_to_user((struct fstrim_range __user *)arg, &range,
				sizeof(range)))
		return -EFAULT;
	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
	return 0;
}
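/*
 * Example (editor's sketch, not part of the original source): issuing FITRIM
 * against a mounted f2fs filesystem.  FITRIM and struct fstrim_range come
 * from <linux/fs.h>; the mount point is a placeholder and CAP_SYS_ADMIN is
 * required.  On success the kernel copies the processed range back, so
 * range.len reports how much was trimmed.
 *
 *	struct fstrim_range range = {
 *		.start	= 0,
 *		.len	= ULLONG_MAX,	// whole filesystem
 *		.minlen	= 0,		// rounded up to the discard granularity
 *	};
 *	int fd = open("/mnt/f2fs", O_RDONLY);
 *
 *	if (fd < 0 || ioctl(fd, FITRIM, &range) < 0)
 *		perror("FITRIM");
 *	close(fd);
 */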

static bool uuid_is_nonzero(__u8 u[16])
{
	int i;

	for (i = 0; i < 16; i++)
		if (u[i])
			return true;
	return false;
}

static int f2fs_ioc_set_encryption_policy(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);

	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);

	return fscrypt_ioctl_set_policy(filp, (const void __user *)arg);
}

static int f2fs_ioc_get_encryption_policy(struct file *filp, unsigned long arg)
{
	return fscrypt_ioctl_get_policy(filp, (void __user *)arg);
}

static int f2fs_ioc_get_encryption_pwsalt(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	int err;

	if (!f2fs_sb_has_crypto(inode->i_sb))
		return -EOPNOTSUPP;

	if (uuid_is_nonzero(sbi->raw_super->encrypt_pw_salt))
		goto got_it;

	err = mnt_want_write_file(filp);
	if (err)
		return err;

	/* update superblock with uuid */
	generate_random_uuid(sbi->raw_super->encrypt_pw_salt);

	err = f2fs_commit_super(sbi, false);
	if (err) {
		/* undo new data */
		memset(sbi->raw_super->encrypt_pw_salt, 0, 16);
		mnt_drop_write_file(filp);
		return err;
	}
	mnt_drop_write_file(filp);
got_it:
	if (copy_to_user((__u8 __user *)arg, sbi->raw_super->encrypt_pw_salt,
									16))
		return -EFAULT;
	return 0;
}
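/*
 * Example (editor's sketch, not part of the original source): reading the
 * 16-byte password salt exposed above.  The F2FS_IOC_GET_ENCRYPTION_PWSALT
 * number is assumed to be mirrored from f2fs.h; the path is a placeholder.
 *
 *	__u8 salt[16];
 *	int fd = open("/mnt/f2fs", O_RDONLY);
 *
 *	if (fd < 0 || ioctl(fd, F2FS_IOC_GET_ENCRYPTION_PWSALT, salt) < 0)
 *		perror("F2FS_IOC_GET_ENCRYPTION_PWSALT");
 *	close(fd);
 */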

static int f2fs_ioc_gc(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	__u32 sync;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (get_user(sync, (__u32 __user *)arg))
		return -EFAULT;

	if (f2fs_readonly(sbi->sb))
		return -EROFS;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	if (!sync) {
		if (!mutex_trylock(&sbi->gc_mutex)) {
			ret = -EBUSY;
			goto out;
		}
	} else {
		mutex_lock(&sbi->gc_mutex);
	}

	ret = f2fs_gc(sbi, sync, true, NULL_SEGNO);
out:
	mnt_drop_write_file(filp);
	return ret;
}
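/*
 * Example (editor's sketch, not part of the original source): asking f2fs to
 * run one round of garbage collection via F2FS_IOC_GARBAGE_COLLECT, as
 * handled above.  The ioctl number is assumed to be mirrored from f2fs.h;
 * the path is a placeholder and CAP_SYS_ADMIN is required.
 *
 *	__u32 sync = 1;		// 1: wait for gc_mutex; 0: fail with -EBUSY
 *	int fd = open("/mnt/f2fs/file", O_RDONLY);
 *
 *	if (fd < 0 || ioctl(fd, F2FS_IOC_GARBAGE_COLLECT, &sync) < 0)
 *		perror("F2FS_IOC_GARBAGE_COLLECT");
 *	close(fd);
 */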

static int f2fs_ioc_gc_range(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_gc_range range;
	u64 end;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (copy_from_user(&range, (struct f2fs_gc_range __user *)arg,
							sizeof(range)))
		return -EFAULT;

	if (f2fs_readonly(sbi->sb))
		return -EROFS;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	end = range.start + range.len;
	if (range.start < MAIN_BLKADDR(sbi) || end >= MAX_BLKADDR(sbi)) {
		ret = -EINVAL;
		goto out;
	}
do_more:
	if (!range.sync) {
		if (!mutex_trylock(&sbi->gc_mutex)) {
			ret = -EBUSY;
			goto out;
		}
	} else {
		mutex_lock(&sbi->gc_mutex);
	}

	ret = f2fs_gc(sbi, range.sync, true, GET_SEGNO(sbi, range.start));
	range.start += sbi->blocks_per_seg;
	if (range.start <= end)
		goto do_more;
out:
	mnt_drop_write_file(filp);
	return ret;
}
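/*
 * Example (editor's sketch, not part of the original source): garbage
 * collecting only a block range via F2FS_IOC_GARBAGE_COLLECT_RANGE.  The
 * struct f2fs_gc_range layout (sync, start, len) and the ioctl number are
 * assumptions mirrored from f2fs.h; start/len are block addresses inside the
 * main area, the values below are hypothetical, and the path is a
 * placeholder.
 *
 *	struct f2fs_gc_range range = {
 *		.sync	= 1,
 *		.start	= 0x10000,	// hypothetical main-area block address
 *		.len	= 0x8000,
 *	};
 *	int fd = open("/mnt/f2fs/file", O_RDONLY);
 *
 *	if (fd < 0 || ioctl(fd, F2FS_IOC_GARBAGE_COLLECT_RANGE, &range) < 0)
 *		perror("F2FS_IOC_GARBAGE_COLLECT_RANGE");
 *	close(fd);
 */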

static int f2fs_ioc_write_checkpoint(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (f2fs_readonly(sbi->sb))
		return -EROFS;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	ret = f2fs_sync_fs(sbi->sb, 1);

	mnt_drop_write_file(filp);
	return ret;
}
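/*
 * Example (editor's sketch, not part of the original source): forcing a
 * checkpoint via F2FS_IOC_WRITE_CHECKPOINT.  The handler above ignores the
 * argument, so userspace can simply pass 0; the ioctl number is assumed to
 * be mirrored from f2fs.h and the path is a placeholder.
 *
 *	int fd = open("/mnt/f2fs", O_RDONLY);
 *
 *	if (fd < 0 || ioctl(fd, F2FS_IOC_WRITE_CHECKPOINT, 0) < 0)
 *		perror("F2FS_IOC_WRITE_CHECKPOINT");
 *	close(fd);
 */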

static int f2fs_defragment_range(struct f2fs_sb_info *sbi,
					struct file *filp,
					struct f2fs_defragment *range)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_map_blocks map = { .m_next_pgofs = NULL };
	struct extent_info ei = {0,0,0};
	pgoff_t pg_start, pg_end;
	unsigned int blk_per_seg = sbi->blocks_per_seg;
	unsigned int total = 0, sec_num;
	block_t blk_end = 0;
	bool fragmented = false;
	int err;

	/* if in-place-update policy is enabled, don't waste time here */
	if (need_inplace_update_policy(inode, NULL))
		return -EINVAL;

	pg_start = range->start >> PAGE_SHIFT;
	pg_end = (range->start + range->len) >> PAGE_SHIFT;

	f2fs_balance_fs(sbi, true);

	inode_lock(inode);

	/* writeback all dirty pages in the range */
	err = filemap_write_and_wait_range(inode->i_mapping, range->start,
						range->start + range->len - 1);
	if (err)
		goto out;

	/*
	 * lookup mapping info in extent cache, skip defragmenting if physical
	 * block addresses are continuous.
	 */
	if (f2fs_lookup_extent_cache(inode, pg_start, &ei)) {
		if (ei.fofs + ei.len >= pg_end)
			goto out;
	}

	map.m_lblk = pg_start;

	/*
	 * lookup mapping info in dnode page cache, skip defragmenting if all
	 * physical block addresses are continuous even if there are hole(s)
	 * in logical blocks.
	 */
	while (map.m_lblk < pg_end) {
		map.m_len = pg_end - map.m_lblk;
		err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_DEFAULT);
		if (err)
			goto out;

		if (!(map.m_flags & F2FS_MAP_FLAGS)) {
			map.m_lblk++;
			continue;
		}

		if (blk_end && blk_end != map.m_pblk) {
			fragmented = true;
			break;
		}
		blk_end = map.m_pblk + map.m_len;

		map.m_lblk += map.m_len;
	}

	if (!fragmented)
		goto out;

	map.m_lblk = pg_start;
	map.m_len = pg_end - pg_start;

	sec_num = (map.m_len + BLKS_PER_SEC(sbi) - 1) / BLKS_PER_SEC(sbi);

	/*
	 * make sure there are enough free sections for LFS allocation, this
	 * can avoid defragment running in SSR mode when free sections are
	 * allocated intensively
	 */
	if (has_not_enough_free_secs(sbi, 0, sec_num)) {
		err = -EAGAIN;
		goto out;
	}

	while (map.m_lblk < pg_end) {
		pgoff_t idx;
		int cnt = 0;

do_map:
		map.m_len = pg_end - map.m_lblk;
		err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_DEFAULT);
		if (err)
			goto clear_out;

		if (!(map.m_flags & F2FS_MAP_FLAGS)) {
			map.m_lblk++;
			continue;
		}

		set_inode_flag(inode, FI_DO_DEFRAG);

		idx = map.m_lblk;
		while (idx < map.m_lblk + map.m_len && cnt < blk_per_seg) {
			struct page *page;

			page = get_lock_data_page(inode, idx, true);
			if (IS_ERR(page)) {
				err = PTR_ERR(page);
				goto clear_out;
			}

			set_page_dirty(page);
			f2fs_put_page(page, 1);

			idx++;
			cnt++;
			total++;
		}

		map.m_lblk = idx;

		if (idx < pg_end && cnt < blk_per_seg)
			goto do_map;

		clear_inode_flag(inode, FI_DO_DEFRAG);

		err = filemap_fdatawrite(inode->i_mapping);
		if (err)
			goto out;
	}
clear_out:
	clear_inode_flag(inode, FI_DO_DEFRAG);
out:
	inode_unlock(inode);
	if (!err)
		range->len = (u64)total << PAGE_SHIFT;
	return err;
}

static int f2fs_ioc_defragment(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_defragment range;
	int err;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (!S_ISREG(inode->i_mode) || f2fs_is_atomic_file(inode))
		return -EINVAL;

	if (f2fs_readonly(sbi->sb))
		return -EROFS;

	if (copy_from_user(&range, (struct f2fs_defragment __user *)arg,
							sizeof(range)))
		return -EFAULT;

	/* verify alignment of offset & size */
	if (range.start & (F2FS_BLKSIZE - 1) || range.len & (F2FS_BLKSIZE - 1))
		return -EINVAL;

	if (unlikely((range.start + range.len) >> PAGE_SHIFT >
					sbi->max_file_blocks))
		return -EINVAL;

	err = mnt_want_write_file(filp);
	if (err)
		return err;

	err = f2fs_defragment_range(sbi, filp, &range);
	mnt_drop_write_file(filp);

	f2fs_update_time(sbi, REQ_TIME);
	if (err < 0)
		return err;

	if (copy_to_user((struct f2fs_defragment __user *)arg, &range,
							sizeof(range)))
		return -EFAULT;

	return 0;
}
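/*
 * Example (editor's sketch, not part of the original source): defragmenting
 * the first 64MB of a file via F2FS_IOC_DEFRAGMENT.  The ioctl number and
 * the (start, len) layout of struct f2fs_defragment are assumed to be
 * mirrored from f2fs.h; both fields must be block-aligned, CAP_SYS_ADMIN is
 * required, and on return range.len holds the number of bytes that were
 * redirtied for rewrite.
 *
 *	struct f2fs_defragment range = {
 *		.start	= 0,
 *		.len	= 64 * 1024 * 1024,
 *	};
 *	int fd = open("/mnt/f2fs/file", O_RDWR);
 *
 *	if (fd < 0 || ioctl(fd, F2FS_IOC_DEFRAGMENT, &range) < 0)
 *		perror("F2FS_IOC_DEFRAGMENT");
 *	close(fd);
 */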

static int f2fs_move_file_range(struct file *file_in, loff_t pos_in,
			struct file *file_out, loff_t pos_out, size_t len)
{
	struct inode *src = file_inode(file_in);
	struct inode *dst = file_inode(file_out);
	struct f2fs_sb_info *sbi = F2FS_I_SB(src);
	size_t olen = len, dst_max_i_size = 0;
	size_t dst_osize;
	int ret;

	if (file_in->f_path.mnt != file_out->f_path.mnt ||
				src->i_sb != dst->i_sb)
		return -EXDEV;

	if (unlikely(f2fs_readonly(src->i_sb)))
		return -EROFS;

	if (!S_ISREG(src->i_mode) || !S_ISREG(dst->i_mode))
		return -EINVAL;

	if (f2fs_encrypted_inode(src) || f2fs_encrypted_inode(dst))
		return -EOPNOTSUPP;

	if (src == dst) {
		if (pos_in == pos_out)
			return 0;
		if (pos_out > pos_in && pos_out < pos_in + len)
			return -EINVAL;
	}

	inode_lock(src);
	if (src != dst) {
		if (!inode_trylock(dst)) {
			ret = -EBUSY;
			goto out;
		}
	}

	ret = -EINVAL;
	if (pos_in + len > src->i_size || pos_in + len < pos_in)
		goto out_unlock;
	if (len == 0)
		olen = len = src->i_size - pos_in;
	if (pos_in + len == src->i_size)
		len = ALIGN(src->i_size, F2FS_BLKSIZE) - pos_in;
	if (len == 0) {
		ret = 0;
		goto out_unlock;
	}

	dst_osize = dst->i_size;
	if (pos_out + olen > dst->i_size)
		dst_max_i_size = pos_out + olen;

	/* verify the end result is block aligned */
	if (!IS_ALIGNED(pos_in, F2FS_BLKSIZE) ||
			!IS_ALIGNED(pos_in + len, F2FS_BLKSIZE) ||
			!IS_ALIGNED(pos_out, F2FS_BLKSIZE))
		goto out_unlock;

	ret = f2fs_convert_inline_inode(src);
	if (ret)
		goto out_unlock;

	ret = f2fs_convert_inline_inode(dst);
	if (ret)
		goto out_unlock;

	/* write out all dirty pages from offset */
	ret = filemap_write_and_wait_range(src->i_mapping,
					pos_in, pos_in + len);
	if (ret)
		goto out_unlock;

	ret = filemap_write_and_wait_range(dst->i_mapping,
					pos_out, pos_out + len);
	if (ret)
		goto out_unlock;

	f2fs_balance_fs(sbi, true);
	f2fs_lock_op(sbi);
	ret = __exchange_data_block(src, dst, pos_in >> F2FS_BLKSIZE_BITS,
				pos_out >> F2FS_BLKSIZE_BITS,
				len >> F2FS_BLKSIZE_BITS, false);

	if (!ret) {
		if (dst_max_i_size)
			f2fs_i_size_write(dst, dst_max_i_size);
		else if (dst_osize != dst->i_size)
			f2fs_i_size_write(dst, dst_osize);
	}
	f2fs_unlock_op(sbi);
out_unlock:
	if (src != dst)
		inode_unlock(dst);
out:
	inode_unlock(src);
	return ret;
}

static int f2fs_ioc_move_range(struct file *filp, unsigned long arg)
{
	struct f2fs_move_range range;
	struct fd dst;
	int err;

	if (!(filp->f_mode & FMODE_READ) ||
			!(filp->f_mode & FMODE_WRITE))
		return -EBADF;

	if (copy_from_user(&range, (struct f2fs_move_range __user *)arg,
							sizeof(range)))
		return -EFAULT;

	dst = fdget(range.dst_fd);
	if (!dst.file)
		return -EBADF;

	if (!(dst.file->f_mode & FMODE_WRITE)) {
		err = -EBADF;
		goto err_out;
	}

	err = mnt_want_write_file(filp);
	if (err)
		goto err_out;

	err = f2fs_move_file_range(filp, range.pos_in, dst.file,
					range.pos_out, range.len);

	mnt_drop_write_file(filp);
	if (err)
		goto err_out;

	if (copy_to_user((struct f2fs_move_range __user *)arg,
						&range, sizeof(range)))
		err = -EFAULT;
err_out:
	fdput(dst);
	return err;
}
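/*
 * Example (editor's sketch, not part of the original source): moving 1MB of
 * blocks from one file into another via F2FS_IOC_MOVE_RANGE, as handled
 * above.  The struct f2fs_move_range layout (dst_fd, pos_in, pos_out, len)
 * and the ioctl number are assumptions mirrored from f2fs.h; offsets and
 * length must be block-aligned, and both files must be unencrypted regular
 * files on the same f2fs instance.
 *
 *	int src = open("/mnt/f2fs/src", O_RDWR);
 *	int dst = open("/mnt/f2fs/dst", O_RDWR);
 *	struct f2fs_move_range mr = {
 *		.dst_fd		= dst,
 *		.pos_in		= 0,
 *		.pos_out	= 0,
 *		.len		= 1024 * 1024,
 *	};
 *
 *	if (src < 0 || dst < 0 || ioctl(src, F2FS_IOC_MOVE_RANGE, &mr) < 0)
 *		perror("F2FS_IOC_MOVE_RANGE");
 */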

static int f2fs_ioc_flush_device(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct sit_info *sm = SIT_I(sbi);
	unsigned int start_segno = 0, end_segno = 0;
	unsigned int dev_start_segno = 0, dev_end_segno = 0;
	struct f2fs_flush_device range;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (f2fs_readonly(sbi->sb))
		return -EROFS;

	if (copy_from_user(&range, (struct f2fs_flush_device __user *)arg,
							sizeof(range)))
		return -EFAULT;

	if (sbi->s_ndevs <= 1 || sbi->s_ndevs - 1 <= range.dev_num ||
			sbi->segs_per_sec != 1) {
		f2fs_msg(sbi->sb, KERN_WARNING,
			"Can't flush %u in %d for segs_per_sec %u != 1\n",
				range.dev_num, sbi->s_ndevs,
				sbi->segs_per_sec);
		return -EINVAL;
	}

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	if (range.dev_num != 0)
		dev_start_segno = GET_SEGNO(sbi, FDEV(range.dev_num).start_blk);
	dev_end_segno = GET_SEGNO(sbi, FDEV(range.dev_num).end_blk);

	start_segno = sm->last_victim[FLUSH_DEVICE];
	if (start_segno < dev_start_segno || start_segno >= dev_end_segno)
		start_segno = dev_start_segno;
	end_segno = min(start_segno + range.segments, dev_end_segno);

	while (start_segno < end_segno) {
		if (!mutex_trylock(&sbi->gc_mutex)) {
			ret = -EBUSY;
			goto out;
		}
		sm->last_victim[GC_CB] = end_segno + 1;
		sm->last_victim[GC_GREEDY] = end_segno + 1;
		sm->last_victim[ALLOC_NEXT] = end_segno + 1;
		ret = f2fs_gc(sbi, true, true, start_segno);
		if (ret == -EAGAIN)
			ret = 0;
		else if (ret < 0)
			break;
		start_segno++;
	}
out:
	mnt_drop_write_file(filp);
	return ret;
}
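/*
 * Example (editor's sketch, not part of the original source): migrating data
 * off one device of a multi-device f2fs via F2FS_IOC_FLUSH_DEVICE.  The
 * struct f2fs_flush_device layout (dev_num, segments) and the ioctl number
 * are assumptions mirrored from f2fs.h; the call only succeeds on a
 * multi-device setup with segs_per_sec == 1 and needs CAP_SYS_ADMIN.
 *
 *	struct f2fs_flush_device fd_range = {
 *		.dev_num	= 1,	// second device (hypothetical)
 *		.segments	= 512,	// segments to migrate in this call
 *	};
 *	int fd = open("/mnt/f2fs", O_RDONLY);
 *
 *	if (fd < 0 || ioctl(fd, F2FS_IOC_FLUSH_DEVICE, &fd_range) < 0)
 *		perror("F2FS_IOC_FLUSH_DEVICE");
 *	close(fd);
 */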

static int f2fs_ioc_get_features(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	u32 sb_feature = le32_to_cpu(F2FS_I_SB(inode)->raw_super->feature);

	/* Must validate to set it with SQLite behavior in Android. */
	sb_feature |= F2FS_FEATURE_ATOMIC_WRITE;

	return put_user(sb_feature, (u32 __user *)arg);
}
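/*
 * Example (editor's sketch, not part of the original source): querying the
 * feature bitmap via F2FS_IOC_GET_FEATURES.  The ioctl number and the
 * F2FS_FEATURE_* bit values are assumed to be mirrored from f2fs.h; the path
 * is a placeholder.
 *
 *	__u32 features = 0;
 *	int fd = open("/mnt/f2fs/file", O_RDONLY);
 *
 *	if (fd < 0 || ioctl(fd, F2FS_IOC_GET_FEATURES, &features) < 0)
 *		perror("F2FS_IOC_GET_FEATURES");
 *	else if (features & F2FS_FEATURE_ATOMIC_WRITE)
 *		puts("atomic writes advertised");
 *	close(fd);
 */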

#ifdef CONFIG_QUOTA
static int f2fs_ioc_setproject(struct file *filp, __u32 projid)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct super_block *sb = sbi->sb;
	struct dquot *transfer_to[MAXQUOTAS] = {};
	struct page *ipage;
	kprojid_t kprojid;
	int err;

	if (!f2fs_sb_has_project_quota(sb)) {
		if (projid != F2FS_DEF_PROJID)
			return -EOPNOTSUPP;
		else
			return 0;
	}

	if (!f2fs_has_extra_attr(inode))
		return -EOPNOTSUPP;

	kprojid = make_kprojid(&init_user_ns, (projid_t)projid);

	if (projid_eq(kprojid, F2FS_I(inode)->i_projid))
		return 0;

	err = mnt_want_write_file(filp);
	if (err)
		return err;

	err = -EPERM;
	inode_lock(inode);

	/* Is it quota file? Do not allow user to mess with it */
	if (IS_NOQUOTA(inode))
		goto out_unlock;

	ipage = get_node_page(sbi, inode->i_ino);
	if (IS_ERR(ipage)) {
		err = PTR_ERR(ipage);
		goto out_unlock;
	}

	if (!F2FS_FITS_IN_INODE(F2FS_INODE(ipage), fi->i_extra_isize,
								i_projid)) {
		err = -EOVERFLOW;
		f2fs_put_page(ipage, 1);
		goto out_unlock;
	}
	f2fs_put_page(ipage, 1);

	dquot_initialize(inode);

	transfer_to[PRJQUOTA] = dqget(sb, make_kqid_projid(kprojid));
	if (!IS_ERR(transfer_to[PRJQUOTA])) {
		err = __dquot_transfer(inode, transfer_to);
		dqput(transfer_to[PRJQUOTA]);
		if (err)
			goto out_dirty;
	}

	F2FS_I(inode)->i_projid = kprojid;
	inode->i_ctime = current_time(inode);
out_dirty:
	f2fs_mark_inode_dirty_sync(inode, true);
out_unlock:
	inode_unlock(inode);
	mnt_drop_write_file(filp);
	return err;
}
#else
static int f2fs_ioc_setproject(struct file *filp, __u32 projid)
{
	if (projid != F2FS_DEF_PROJID)
		return -EOPNOTSUPP;
	return 0;
}
#endif

/* Transfer internal flags to xflags */
static inline __u32 f2fs_iflags_to_xflags(unsigned long iflags)
{
	__u32 xflags = 0;

	if (iflags & FS_SYNC_FL)
		xflags |= FS_XFLAG_SYNC;
	if (iflags & FS_IMMUTABLE_FL)
		xflags |= FS_XFLAG_IMMUTABLE;
	if (iflags & FS_APPEND_FL)
		xflags |= FS_XFLAG_APPEND;
	if (iflags & FS_NODUMP_FL)
		xflags |= FS_XFLAG_NODUMP;
	if (iflags & FS_NOATIME_FL)
		xflags |= FS_XFLAG_NOATIME;
	if (iflags & FS_PROJINHERIT_FL)
		xflags |= FS_XFLAG_PROJINHERIT;
	return xflags;
}

#define F2FS_SUPPORTED_FS_XFLAGS (FS_XFLAG_SYNC | FS_XFLAG_IMMUTABLE | \
				  FS_XFLAG_APPEND | FS_XFLAG_NODUMP | \
				  FS_XFLAG_NOATIME | FS_XFLAG_PROJINHERIT)

/* Flags we can manipulate through F2FS_IOC_FSSETXATTR */
#define F2FS_FL_XFLAG_VISIBLE		(FS_SYNC_FL | \
					 FS_IMMUTABLE_FL | \
					 FS_APPEND_FL | \
					 FS_NODUMP_FL | \
					 FS_NOATIME_FL | \
					 FS_PROJINHERIT_FL)

/* Transfer xflags flags to internal */
static inline unsigned long f2fs_xflags_to_iflags(__u32 xflags)
{
	unsigned long iflags = 0;

	if (xflags & FS_XFLAG_SYNC)
		iflags |= FS_SYNC_FL;
	if (xflags & FS_XFLAG_IMMUTABLE)
		iflags |= FS_IMMUTABLE_FL;
	if (xflags & FS_XFLAG_APPEND)
		iflags |= FS_APPEND_FL;
	if (xflags & FS_XFLAG_NODUMP)
		iflags |= FS_NODUMP_FL;
	if (xflags & FS_XFLAG_NOATIME)
		iflags |= FS_NOATIME_FL;
	if (xflags & FS_XFLAG_PROJINHERIT)
		iflags |= FS_PROJINHERIT_FL;

	return iflags;
}

static int f2fs_ioc_fsgetxattr(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct fsxattr fa;

	memset(&fa, 0, sizeof(struct fsxattr));
	fa.fsx_xflags = f2fs_iflags_to_xflags(fi->i_flags &
				(FS_FL_USER_VISIBLE | FS_PROJINHERIT_FL));

	if (f2fs_sb_has_project_quota(inode->i_sb))
		fa.fsx_projid = (__u32)from_kprojid(&init_user_ns,
							fi->i_projid);

	if (copy_to_user((struct fsxattr __user *)arg, &fa, sizeof(fa)))
		return -EFAULT;
	return 0;
}

static int f2fs_ioc_fssetxattr(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct fsxattr fa;
	unsigned int flags;
	int err;

	if (copy_from_user(&fa, (struct fsxattr __user *)arg, sizeof(fa)))
		return -EFAULT;

	/* Make sure caller has proper permission */
	if (!inode_owner_or_capable(inode))
		return -EACCES;

	if (fa.fsx_xflags & ~F2FS_SUPPORTED_FS_XFLAGS)
		return -EOPNOTSUPP;

	flags = f2fs_xflags_to_iflags(fa.fsx_xflags);
	if (f2fs_mask_flags(inode->i_mode, flags) != flags)
		return -EOPNOTSUPP;

	err = mnt_want_write_file(filp);
	if (err)
		return err;

	inode_lock(inode);
	flags = (fi->i_flags & ~F2FS_FL_XFLAG_VISIBLE) |
				(flags & F2FS_FL_XFLAG_VISIBLE);
	err = __f2fs_ioc_setflags(inode, flags);
	inode_unlock(inode);
	mnt_drop_write_file(filp);
	if (err)
		return err;

	err = f2fs_ioc_setproject(filp, fa.fsx_projid);
	if (err)
		return err;

	return 0;
}
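/*
 * Example (editor's sketch, not part of the original source): assigning a
 * project ID through the fsxattr interface handled above.  struct fsxattr
 * and FS_IOC_FSGETXATTR/FS_IOC_FSSETXATTR come from <linux/fs.h>; the
 * F2FS_IOC_FSGETXATTR/FSSETXATTR commands are expected to carry the same
 * values, and the file path and project ID below are placeholders.
 *
 *	struct fsxattr fa;
 *	int fd = open("/mnt/f2fs/file", O_RDONLY);
 *
 *	if (fd < 0 || ioctl(fd, FS_IOC_FSGETXATTR, &fa) < 0)
 *		perror("FS_IOC_FSGETXATTR");
 *	fa.fsx_projid = 42;
 *	if (ioctl(fd, FS_IOC_FSSETXATTR, &fa) < 0)
 *		perror("FS_IOC_FSSETXATTR");
 *	close(fd);
 */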

long f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	switch (cmd) {
	case F2FS_IOC_GETFLAGS:
		return f2fs_ioc_getflags(filp, arg);
	case F2FS_IOC_SETFLAGS:
		return f2fs_ioc_setflags(filp, arg);
	case F2FS_IOC_GETVERSION:
		return f2fs_ioc_getversion(filp, arg);
	case F2FS_IOC_START_ATOMIC_WRITE:
		return f2fs_ioc_start_atomic_write(filp);
	case F2FS_IOC_COMMIT_ATOMIC_WRITE:
		return f2fs_ioc_commit_atomic_write(filp);
	case F2FS_IOC_START_VOLATILE_WRITE:
		return f2fs_ioc_start_volatile_write(filp);
	case F2FS_IOC_RELEASE_VOLATILE_WRITE:
		return f2fs_ioc_release_volatile_write(filp);
	case F2FS_IOC_ABORT_VOLATILE_WRITE:
		return f2fs_ioc_abort_volatile_write(filp);
	case F2FS_IOC_SHUTDOWN:
		return f2fs_ioc_shutdown(filp, arg);
	case FITRIM:
		return f2fs_ioc_fitrim(filp, arg);
	case F2FS_IOC_SET_ENCRYPTION_POLICY:
		return f2fs_ioc_set_encryption_policy(filp, arg);
	case F2FS_IOC_GET_ENCRYPTION_POLICY:
		return f2fs_ioc_get_encryption_policy(filp, arg);
	case F2FS_IOC_GET_ENCRYPTION_PWSALT:
		return f2fs_ioc_get_encryption_pwsalt(filp, arg);
	case F2FS_IOC_GARBAGE_COLLECT:
		return f2fs_ioc_gc(filp, arg);
	case F2FS_IOC_GARBAGE_COLLECT_RANGE:
		return f2fs_ioc_gc_range(filp, arg);
	case F2FS_IOC_WRITE_CHECKPOINT:
		return f2fs_ioc_write_checkpoint(filp, arg);
	case F2FS_IOC_DEFRAGMENT:
		return f2fs_ioc_defragment(filp, arg);
	case F2FS_IOC_MOVE_RANGE:
		return f2fs_ioc_move_range(filp, arg);
	case F2FS_IOC_FLUSH_DEVICE:
		return f2fs_ioc_flush_device(filp, arg);
	case F2FS_IOC_GET_FEATURES:
		return f2fs_ioc_get_features(filp, arg);
	case F2FS_IOC_FSGETXATTR:
		return f2fs_ioc_fsgetxattr(filp, arg);
	case F2FS_IOC_FSSETXATTR:
		return f2fs_ioc_fssetxattr(filp, arg);
	default:
		return -ENOTTY;
	}
}

static ssize_t f2fs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	struct blk_plug plug;
	ssize_t ret;

	inode_lock(inode);
	ret = generic_write_checks(iocb, from);
	if (ret > 0) {
		int err;

		if (iov_iter_fault_in_readable(from, iov_iter_count(from)))
			set_inode_flag(inode, FI_NO_PREALLOC);

		err = f2fs_preallocate_blocks(iocb, from);
		if (err) {
			inode_unlock(inode);
			return err;
		}
		blk_start_plug(&plug);
		ret = __generic_file_write_iter(iocb, from);
		blk_finish_plug(&plug);
		clear_inode_flag(inode, FI_NO_PREALLOC);

		if (ret > 0)
			f2fs_update_iostat(F2FS_I_SB(inode), APP_WRITE_IO, ret);
	}
	inode_unlock(inode);

	if (ret > 0)
		ret = generic_write_sync(iocb, ret);
	return ret;
}

#ifdef CONFIG_COMPAT
long f2fs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	switch (cmd) {
	case F2FS_IOC32_GETFLAGS:
		cmd = F2FS_IOC_GETFLAGS;
		break;
	case F2FS_IOC32_SETFLAGS:
		cmd = F2FS_IOC_SETFLAGS;
		break;
	case F2FS_IOC32_GETVERSION:
		cmd = F2FS_IOC_GETVERSION;
		break;
	case F2FS_IOC_START_ATOMIC_WRITE:
	case F2FS_IOC_COMMIT_ATOMIC_WRITE:
	case F2FS_IOC_START_VOLATILE_WRITE:
	case F2FS_IOC_RELEASE_VOLATILE_WRITE:
	case F2FS_IOC_ABORT_VOLATILE_WRITE:
	case F2FS_IOC_SHUTDOWN:
	case F2FS_IOC_SET_ENCRYPTION_POLICY:
	case F2FS_IOC_GET_ENCRYPTION_PWSALT:
	case F2FS_IOC_GET_ENCRYPTION_POLICY:
	case F2FS_IOC_GARBAGE_COLLECT:
	case F2FS_IOC_GARBAGE_COLLECT_RANGE:
	case F2FS_IOC_WRITE_CHECKPOINT:
	case F2FS_IOC_DEFRAGMENT:
	case F2FS_IOC_MOVE_RANGE:
	case F2FS_IOC_FLUSH_DEVICE:
	case F2FS_IOC_GET_FEATURES:
	case F2FS_IOC_FSGETXATTR:
	case F2FS_IOC_FSSETXATTR:
		break;
	default:
		return -ENOIOCTLCMD;
	}
	return f2fs_ioctl(file, cmd, (unsigned long) compat_ptr(arg));
}
#endif

const struct file_operations f2fs_file_operations = {
	.llseek		= f2fs_llseek,
	.read_iter	= generic_file_read_iter,
	.write_iter	= f2fs_file_write_iter,
	.open		= f2fs_file_open,
	.release	= f2fs_release_file,
	.mmap		= f2fs_file_mmap,
	.flush		= f2fs_file_flush,
	.fsync		= f2fs_sync_file,
	.fallocate	= f2fs_fallocate,
	.unlocked_ioctl	= f2fs_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= f2fs_compat_ioctl,
#endif
	.splice_read	= generic_file_splice_read,
	.splice_write	= iter_file_splice_write,
};