/*
 * fs/f2fs/file.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/stat.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/falloc.h>
#include <linux/types.h>
#include <linux/compat.h>
#include <linux/uaccess.h>
#include <linux/mount.h>
#include <linux/pagevec.h>
#include <linux/uio.h>
#include <linux/uuid.h>
#include <linux/file.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "xattr.h"
#include "acl.h"
#include "gc.h"
#include "trace.h"
#include <trace/events/f2fs.h>

static int f2fs_filemap_fault(struct vm_fault *vmf)
{
	struct inode *inode = file_inode(vmf->vma->vm_file);
	int err;

	down_read(&F2FS_I(inode)->i_mmap_sem);
	err = filemap_fault(vmf);
	up_read(&F2FS_I(inode)->i_mmap_sem);

	return err;
}

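/*
 * Write fault on an mmapped file: reserve the block under f2fs_lock_op(),
 * re-validate the page against the mapping and i_size, zero the part of a
 * partial EOF page beyond i_size, and dirty the page before returning.
 */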
static int f2fs_vm_page_mkwrite(struct vm_fault *vmf)
{
	struct page *page = vmf->page;
	struct inode *inode = file_inode(vmf->vma->vm_file);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn;
	int err;

	sb_start_pagefault(inode->i_sb);

	f2fs_bug_on(sbi, f2fs_has_inline_data(inode));

	/* block allocation */
	f2fs_lock_op(sbi);
	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = f2fs_reserve_block(&dn, page->index);
	if (err) {
		f2fs_unlock_op(sbi);
		goto out;
	}
	f2fs_put_dnode(&dn);
	f2fs_unlock_op(sbi);

	f2fs_balance_fs(sbi, dn.node_changed);

	file_update_time(vmf->vma->vm_file);
	down_read(&F2FS_I(inode)->i_mmap_sem);
	lock_page(page);
	if (unlikely(page->mapping != inode->i_mapping ||
			page_offset(page) > i_size_read(inode) ||
			!PageUptodate(page))) {
		unlock_page(page);
		err = -EFAULT;
		goto out_sem;
	}

	/*
	 * check to see if the page is mapped already (no holes)
	 */
	if (PageMappedToDisk(page))
		goto mapped;

	/* page is wholly or partially inside EOF */
	if (((loff_t)(page->index + 1) << PAGE_SHIFT) >
						i_size_read(inode)) {
		unsigned offset;
		offset = i_size_read(inode) & ~PAGE_MASK;
		zero_user_segment(page, offset, PAGE_SIZE);
	}
	set_page_dirty(page);
	if (!PageUptodate(page))
		SetPageUptodate(page);

	f2fs_update_iostat(sbi, APP_MAPPED_IO, F2FS_BLKSIZE);

	trace_f2fs_vm_page_mkwrite(page, DATA);
mapped:
	/* fill the page */
	f2fs_wait_on_page_writeback(page, DATA, false);

	/* wait for GCed encrypted page writeback */
	if (f2fs_encrypted_file(inode))
		f2fs_wait_on_block_writeback(sbi, dn.data_blkaddr);

out_sem:
	up_read(&F2FS_I(inode)->i_mmap_sem);
out:
	sb_end_pagefault(inode->i_sb);
	f2fs_update_time(sbi, REQ_TIME);
	return block_page_mkwrite_return(err);
}

static const struct vm_operations_struct f2fs_file_vm_ops = {
	.fault		= f2fs_filemap_fault,
	.map_pages	= filemap_map_pages,
	.page_mkwrite	= f2fs_vm_page_mkwrite,
};

static int get_parent_ino(struct inode *inode, nid_t *pino)
{
	struct dentry *dentry;

	inode = igrab(inode);
	dentry = d_find_any_alias(inode);
	iput(inode);
	if (!dentry)
		return 0;

	*pino = parent_ino(dentry);
	dput(dentry);
	return 1;
}

static inline bool need_do_checkpoint(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	bool need_cp = false;

	if (!S_ISREG(inode->i_mode) || inode->i_nlink != 1)
		need_cp = true;
	else if (is_sbi_flag_set(sbi, SBI_NEED_CP))
		need_cp = true;
	else if (file_wrong_pino(inode))
		need_cp = true;
	else if (!space_for_roll_forward(sbi))
		need_cp = true;
	else if (!is_checkpointed_node(sbi, F2FS_I(inode)->i_pino))
		need_cp = true;
	else if (test_opt(sbi, FASTBOOT))
		need_cp = true;
	else if (sbi->active_logs == 2)
		need_cp = true;

	return need_cp;
}

static bool need_inode_page_update(struct f2fs_sb_info *sbi, nid_t ino)
{
	struct page *i = find_get_page(NODE_MAPPING(sbi), ino);
	bool ret = false;
	/* But we need to avoid that there are some inode updates */
	if ((i && PageDirty(i)) || need_inode_block_update(sbi, ino))
		ret = true;
	f2fs_put_page(i, 0);
	return ret;
}

static void try_to_fix_pino(struct inode *inode)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);
	nid_t pino;

	down_write(&fi->i_sem);
	if (file_wrong_pino(inode) && inode->i_nlink == 1 &&
			get_parent_ino(inode, &pino)) {
		f2fs_i_pino_write(inode, pino);
		file_got_pino(inode);
	}
	up_write(&fi->i_sem);
}

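/*
 * Back end for fsync()/fdatasync(): write back dirty data, then either
 * trigger a full checkpoint (when need_do_checkpoint() says roll-forward
 * recovery is impossible) or write the fsync node chain and issue a flush
 * so the inode can be recovered by roll-forward after a sudden power-off.
 */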
static int f2fs_do_sync_file(struct file *file, loff_t start, loff_t end,
						int datasync, bool atomic)
{
	struct inode *inode = file->f_mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	nid_t ino = inode->i_ino;
	int ret = 0;
	bool need_cp = false;
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = LONG_MAX,
		.for_reclaim = 0,
	};

	if (unlikely(f2fs_readonly(inode->i_sb)))
		return 0;

	trace_f2fs_sync_file_enter(inode);

	/* if fdatasync is triggered, let's do in-place-update */
	if (datasync || get_dirty_pages(inode) <= SM_I(sbi)->min_fsync_blocks)
		set_inode_flag(inode, FI_NEED_IPU);
	ret = file_write_and_wait_range(file, start, end);
	clear_inode_flag(inode, FI_NEED_IPU);

	if (ret) {
		trace_f2fs_sync_file_exit(inode, need_cp, datasync, ret);
		return ret;
	}

	/* if the inode is dirty, let's recover all the time */
	if (!f2fs_skip_inode_update(inode, datasync)) {
		f2fs_write_inode(inode, NULL);
		goto go_write;
	}

	/*
	 * if there is no written data, don't waste time writing recovery info.
	 */
	if (!is_inode_flag_set(inode, FI_APPEND_WRITE) &&
			!exist_written_data(sbi, ino, APPEND_INO)) {

		/* it may call write_inode just prior to fsync */
		if (need_inode_page_update(sbi, ino))
			goto go_write;

		if (is_inode_flag_set(inode, FI_UPDATE_WRITE) ||
				exist_written_data(sbi, ino, UPDATE_INO))
			goto flush_out;
		goto out;
	}
go_write:
	/*
	 * Both fdatasync() and fsync() can be recovered from a sudden
	 * power-off.
	 */
	down_read(&F2FS_I(inode)->i_sem);
	need_cp = need_do_checkpoint(inode);
	up_read(&F2FS_I(inode)->i_sem);

	if (need_cp) {
		/* all the dirty node pages should be flushed for POR */
		ret = f2fs_sync_fs(inode->i_sb, 1);

		/*
		 * We've secured consistency through sync_fs. Following pino
		 * will be used only for fsynced inodes after checkpoint.
		 */
		try_to_fix_pino(inode);
		clear_inode_flag(inode, FI_APPEND_WRITE);
		clear_inode_flag(inode, FI_UPDATE_WRITE);
		goto out;
	}
sync_nodes:
	ret = fsync_node_pages(sbi, inode, &wbc, atomic);
	if (ret)
		goto out;

	/* if cp_error was enabled, we should avoid infinite loop */
	if (unlikely(f2fs_cp_error(sbi))) {
		ret = -EIO;
		goto out;
	}

	if (need_inode_block_update(sbi, ino)) {
		f2fs_mark_inode_dirty_sync(inode, true);
		f2fs_write_inode(inode, NULL);
		goto sync_nodes;
	}

	/*
	 * If it's an atomic write, it's fine to keep the write ordering. So
	 * here we don't need to wait for node write completion, since we use
	 * a node chain which serializes node blocks. If one of the node
	 * writes is reordered, we simply see a broken chain, which stops
	 * roll-forward recovery. It means we'll recover all or none of the
	 * node blocks given the fsync mark.
	 */
	if (!atomic) {
		ret = wait_on_node_pages_writeback(sbi, ino);
		if (ret)
			goto out;
	}

	/* once recovery info is written, don't need to track this */
	remove_ino_entry(sbi, ino, APPEND_INO);
	clear_inode_flag(inode, FI_APPEND_WRITE);
flush_out:
	remove_ino_entry(sbi, ino, UPDATE_INO);
	clear_inode_flag(inode, FI_UPDATE_WRITE);
	if (!atomic)
		ret = f2fs_issue_flush(sbi);
	f2fs_update_time(sbi, REQ_TIME);
out:
	trace_f2fs_sync_file_exit(inode, need_cp, datasync, ret);
	f2fs_trace_ios(NULL, 1);
	return ret;
}

int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
{
	return f2fs_do_sync_file(file, start, end, datasync, false);
}

static pgoff_t __get_first_dirty_index(struct address_space *mapping,
						pgoff_t pgofs, int whence)
{
	struct page *page;
	int nr_pages;

	if (whence != SEEK_DATA)
		return 0;

	/* find first dirty page index */
	nr_pages = find_get_pages_tag(mapping, &pgofs, PAGECACHE_TAG_DIRTY,
				      1, &page);
	if (!nr_pages)
		return ULONG_MAX;
	pgofs = page->index;
	put_page(page);
	return pgofs;
}

static bool __found_offset(block_t blkaddr, pgoff_t dirty, pgoff_t pgofs,
							int whence)
{
	switch (whence) {
	case SEEK_DATA:
		if ((blkaddr == NEW_ADDR && dirty == pgofs) ||
			(blkaddr != NEW_ADDR && blkaddr != NULL_ADDR))
			return true;
		break;
	case SEEK_HOLE:
		if (blkaddr == NULL_ADDR)
			return true;
		break;
	}
	return false;
}

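/*
 * Walk the dnode blocks from @offset to find the next data or hole for
 * lseek(SEEK_DATA/SEEK_HOLE); a dirty page that is not yet allocated on
 * disk (NEW_ADDR) is treated as data.
 */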
static loff_t f2fs_seek_block(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file->f_mapping->host;
	loff_t maxbytes = inode->i_sb->s_maxbytes;
	struct dnode_of_data dn;
	pgoff_t pgofs, end_offset, dirty;
	loff_t data_ofs = offset;
	loff_t isize;
	int err = 0;

	inode_lock(inode);

	isize = i_size_read(inode);
	if (offset >= isize)
		goto fail;

	/* handle inline data case */
	if (f2fs_has_inline_data(inode) || f2fs_has_inline_dentry(inode)) {
		if (whence == SEEK_HOLE)
			data_ofs = isize;
		goto found;
	}

	pgofs = (pgoff_t)(offset >> PAGE_SHIFT);

	dirty = __get_first_dirty_index(inode->i_mapping, pgofs, whence);

	for (; data_ofs < isize; data_ofs = (loff_t)pgofs << PAGE_SHIFT) {
		set_new_dnode(&dn, inode, NULL, NULL, 0);
		err = get_dnode_of_data(&dn, pgofs, LOOKUP_NODE);
		if (err && err != -ENOENT) {
			goto fail;
		} else if (err == -ENOENT) {
			/* direct node does not exist */
			if (whence == SEEK_DATA) {
				pgofs = get_next_page_offset(&dn, pgofs);
				continue;
			} else {
				goto found;
			}
		}

		end_offset = ADDRS_PER_PAGE(dn.node_page, inode);

		/* find data/hole in dnode block */
		for (; dn.ofs_in_node < end_offset;
				dn.ofs_in_node++, pgofs++,
				data_ofs = (loff_t)pgofs << PAGE_SHIFT) {
			block_t blkaddr;
			blkaddr = datablock_addr(dn.inode,
					dn.node_page, dn.ofs_in_node);

			if (__found_offset(blkaddr, dirty, pgofs, whence)) {
				f2fs_put_dnode(&dn);
				goto found;
			}
		}
		f2fs_put_dnode(&dn);
	}

	if (whence == SEEK_DATA)
		goto fail;
found:
	if (whence == SEEK_HOLE && data_ofs > isize)
		data_ofs = isize;
	inode_unlock(inode);
	return vfs_setpos(file, data_ofs, maxbytes);
fail:
	inode_unlock(inode);
	return -ENXIO;
}

static loff_t f2fs_llseek(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file->f_mapping->host;
	loff_t maxbytes = inode->i_sb->s_maxbytes;

	switch (whence) {
	case SEEK_SET:
	case SEEK_CUR:
	case SEEK_END:
		return generic_file_llseek_size(file, offset, whence,
						maxbytes, i_size_read(inode));
	case SEEK_DATA:
	case SEEK_HOLE:
		if (offset < 0)
			return -ENXIO;
		return f2fs_seek_block(file, offset, whence);
	}

	return -EINVAL;
}

static int f2fs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct inode *inode = file_inode(file);
	int err;

	/* we don't need to use inline_data strictly */
	err = f2fs_convert_inline_inode(inode);
	if (err)
		return err;

	file_accessed(file);
	vma->vm_ops = &f2fs_file_vm_ops;
	return 0;
}

static int f2fs_file_open(struct inode *inode, struct file *filp)
{
	struct dentry *dir;

	if (f2fs_encrypted_inode(inode)) {
		int ret = fscrypt_get_encryption_info(inode);
		if (ret)
			return -EACCES;
		if (!fscrypt_has_encryption_key(inode))
			return -ENOKEY;
	}
	dir = dget_parent(file_dentry(filp));
	if (f2fs_encrypted_inode(d_inode(dir)) &&
			!fscrypt_has_permitted_context(d_inode(dir), inode)) {
		dput(dir);
		return -EPERM;
	}
	dput(dir);
	return dquot_file_open(inode, filp);
}

478
int truncate_data_blocks_range(struct dnode_of_data *dn, int count)
J
Jaegeuk Kim 已提交
479
{
480
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
J
Jaegeuk Kim 已提交
481
	struct f2fs_node *raw_node;
C
Chao Yu 已提交
482
	int nr_free = 0, ofs = dn->ofs_in_node, len = count;
J
Jaegeuk Kim 已提交
483
	__le32 *addr;
484 485 486 487
	int base = 0;

	if (IS_INODE(dn->node_page) && f2fs_has_extra_attr(dn->inode))
		base = get_extra_isize(dn->inode);
J
Jaegeuk Kim 已提交
488

489
	raw_node = F2FS_NODE(dn->node_page);
490
	addr = blkaddr_in_node(raw_node) + base + ofs;
J
Jaegeuk Kim 已提交
491

C
Chris Fries 已提交
492
	for (; count > 0; count--, addr++, dn->ofs_in_node++) {
J
Jaegeuk Kim 已提交
493 494 495 496
		block_t blkaddr = le32_to_cpu(*addr);
		if (blkaddr == NULL_ADDR)
			continue;

J
Jaegeuk Kim 已提交
497
		dn->data_blkaddr = NULL_ADDR;
498
		set_data_blkaddr(dn);
J
Jaegeuk Kim 已提交
499
		invalidate_blocks(sbi, blkaddr);
500
		if (dn->ofs_in_node == 0 && IS_INODE(dn->node_page))
501
			clear_inode_flag(dn->inode, FI_FIRST_BLOCK_WRITTEN);
J
Jaegeuk Kim 已提交
502 503
		nr_free++;
	}
C
Chao Yu 已提交
504

J
Jaegeuk Kim 已提交
505
	if (nr_free) {
C
Chao Yu 已提交
506 507 508 509 510 511
		pgoff_t fofs;
		/*
		 * once we invalidate valid blkaddr in range [ofs, ofs + count],
		 * we will invalidate all blkaddr in the whole range.
		 */
		fofs = start_bidx_of_node(ofs_of_node(dn->node_page),
512
							dn->inode) + ofs;
C
Chao Yu 已提交
513
		f2fs_update_extent_cache_range(dn, fofs, 0, len);
514
		dec_valid_block_count(sbi, dn->inode, nr_free);
J
Jaegeuk Kim 已提交
515 516
	}
	dn->ofs_in_node = ofs;
517

518
	f2fs_update_time(sbi, REQ_TIME);
519 520
	trace_f2fs_truncate_data_blocks_range(dn->inode, dn->nid,
					 dn->ofs_in_node, nr_free);
J
Jaegeuk Kim 已提交
521 522 523 524 525 526 527 528
	return nr_free;
}

void truncate_data_blocks(struct dnode_of_data *dn)
{
	truncate_data_blocks_range(dn, ADDRS_PER_BLOCK);
}

529
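/*
 * Zero the tail of the page containing @from. With @cache_only, only an
 * already-cached, uptodate page is zeroed (and not dirtied); otherwise the
 * page is read, zeroed from the in-page offset and marked dirty.
 */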
static int truncate_partial_data_page(struct inode *inode, u64 from,
530
								bool cache_only)
J
Jaegeuk Kim 已提交
531
{
532 533
	unsigned offset = from & (PAGE_SIZE - 1);
	pgoff_t index = from >> PAGE_SHIFT;
534
	struct address_space *mapping = inode->i_mapping;
J
Jaegeuk Kim 已提交
535 536
	struct page *page;

537
	if (!offset && !cache_only)
538
		return 0;
J
Jaegeuk Kim 已提交
539

540
	if (cache_only) {
541
		page = find_lock_page(mapping, index);
542 543 544
		if (page && PageUptodate(page))
			goto truncate_out;
		f2fs_put_page(page, 1);
545
		return 0;
546
	}
J
Jaegeuk Kim 已提交
547

548
	page = get_lock_data_page(inode, index, true);
549
	if (IS_ERR(page))
550
		return PTR_ERR(page) == -ENOENT ? 0 : PTR_ERR(page);
551
truncate_out:
552
	f2fs_wait_on_page_writeback(page, DATA, true);
553
	zero_user(page, offset, PAGE_SIZE - offset);
554 555 556 557

	/* An encrypted inode should have a key and truncate the last page. */
	f2fs_bug_on(F2FS_I_SB(inode), cache_only && f2fs_encrypted_inode(inode));
	if (!cache_only)
558
		set_page_dirty(page);
J
Jaegeuk Kim 已提交
559
	f2fs_put_page(page, 1);
560
	return 0;
J
Jaegeuk Kim 已提交
561 562
}

563
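/*
 * Free all blocks beyond @from: drop inline data if present, free the data
 * blocks in the dnode containing @from, release the remaining node blocks,
 * and finally zero out the partial tail page.
 */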
int truncate_blocks(struct inode *inode, u64 from, bool lock)
J
Jaegeuk Kim 已提交
564
{
565
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
J
Jaegeuk Kim 已提交
566 567 568
	unsigned int blocksize = inode->i_sb->s_blocksize;
	struct dnode_of_data dn;
	pgoff_t free_from;
H
Huajun Li 已提交
569
	int count = 0, err = 0;
570
	struct page *ipage;
571
	bool truncate_page = false;
J
Jaegeuk Kim 已提交
572

573 574
	trace_f2fs_truncate_blocks_enter(inode, from);

575
	free_from = (pgoff_t)F2FS_BYTES_TO_BLK(from + blocksize - 1);
J
Jaegeuk Kim 已提交
576

577 578 579
	if (free_from >= sbi->max_file_blocks)
		goto free_partial;

580 581
	if (lock)
		f2fs_lock_op(sbi);
H
Huajun Li 已提交
582

583 584 585 586 587 588 589
	ipage = get_node_page(sbi, inode->i_ino);
	if (IS_ERR(ipage)) {
		err = PTR_ERR(ipage);
		goto out;
	}

	if (f2fs_has_inline_data(inode)) {
590
		truncate_inline_inode(inode, ipage, from);
591
		f2fs_put_page(ipage, 1);
592
		truncate_page = true;
593 594 595 596
		goto out;
	}

	set_new_dnode(&dn, inode, ipage, NULL, 0);
597
	err = get_dnode_of_data(&dn, free_from, LOOKUP_NODE_RA);
J
Jaegeuk Kim 已提交
598 599 600
	if (err) {
		if (err == -ENOENT)
			goto free_next;
601
		goto out;
602 603
	}

604
	count = ADDRS_PER_PAGE(dn.node_page, inode);
J
Jaegeuk Kim 已提交
605 606

	count -= dn.ofs_in_node;
607
	f2fs_bug_on(sbi, count < 0);
608

J
Jaegeuk Kim 已提交
609 610 611 612 613 614 615 616
	if (dn.ofs_in_node || IS_INODE(dn.node_page)) {
		truncate_data_blocks_range(&dn, count);
		free_from += count;
	}

	f2fs_put_dnode(&dn);
free_next:
	err = truncate_inode_blocks(inode, free_from);
617 618 619
out:
	if (lock)
		f2fs_unlock_op(sbi);
620
free_partial:
621 622
	/* lastly zero out the first data page */
	if (!err)
623
		err = truncate_partial_data_page(inode, from, truncate_page);
J
Jaegeuk Kim 已提交
624

625
	trace_f2fs_truncate_blocks_exit(inode, err);
J
Jaegeuk Kim 已提交
626 627 628
	return err;
}

629
int f2fs_truncate(struct inode *inode)
J
Jaegeuk Kim 已提交
630
{
631 632
	int err;

J
Jaegeuk Kim 已提交
633 634
	if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
				S_ISLNK(inode->i_mode)))
635
		return 0;
J
Jaegeuk Kim 已提交
636

637 638
	trace_f2fs_truncate(inode);

639 640 641 642 643 644
#ifdef CONFIG_F2FS_FAULT_INJECTION
	if (time_to_inject(F2FS_I_SB(inode), FAULT_TRUNCATE)) {
		f2fs_show_injection_info(FAULT_TRUNCATE);
		return -EIO;
	}
#endif
645
	/* we should check inline_data size */
646
	if (!f2fs_may_inline_data(inode)) {
647 648 649
		err = f2fs_convert_inline_inode(inode);
		if (err)
			return err;
650 651
	}

652
	err = truncate_blocks(inode, i_size_read(inode), true);
653 654 655
	if (err)
		return err;

656
	inode->i_mtime = inode->i_ctime = current_time(inode);
657
	f2fs_mark_inode_dirty_sync(inode, false);
658
	return 0;
J
Jaegeuk Kim 已提交
659 660
}

661
int f2fs_getattr(const struct path *path, struct kstat *stat,
C
Chao Yu 已提交
662
		 u32 request_mask, unsigned int query_flags)
J
Jaegeuk Kim 已提交
663
{
664
	struct inode *inode = d_inode(path->dentry);
C
Chao Yu 已提交
665 666 667
	struct f2fs_inode_info *fi = F2FS_I(inode);
	unsigned int flags;

668
	flags = fi->i_flags & (FS_FL_USER_VISIBLE | FS_PROJINHERIT_FL);
C
Chao Yu 已提交
669 670 671 672 673 674 675 676 677 678 679 680 681 682 683 684 685
	if (flags & FS_APPEND_FL)
		stat->attributes |= STATX_ATTR_APPEND;
	if (flags & FS_COMPR_FL)
		stat->attributes |= STATX_ATTR_COMPRESSED;
	if (f2fs_encrypted_inode(inode))
		stat->attributes |= STATX_ATTR_ENCRYPTED;
	if (flags & FS_IMMUTABLE_FL)
		stat->attributes |= STATX_ATTR_IMMUTABLE;
	if (flags & FS_NODUMP_FL)
		stat->attributes |= STATX_ATTR_NODUMP;

	stat->attributes_mask |= (STATX_ATTR_APPEND |
				  STATX_ATTR_COMPRESSED |
				  STATX_ATTR_ENCRYPTED |
				  STATX_ATTR_IMMUTABLE |
				  STATX_ATTR_NODUMP);

J
Jaegeuk Kim 已提交
686 687 688 689 690 691 692 693 694 695 696 697 698 699 700 701 702 703 704 705 706 707 708 709 710 711 712
	generic_fillattr(inode, stat);
	return 0;
}

#ifdef CONFIG_F2FS_FS_POSIX_ACL
static void __setattr_copy(struct inode *inode, const struct iattr *attr)
{
	unsigned int ia_valid = attr->ia_valid;

	if (ia_valid & ATTR_UID)
		inode->i_uid = attr->ia_uid;
	if (ia_valid & ATTR_GID)
		inode->i_gid = attr->ia_gid;
	if (ia_valid & ATTR_ATIME)
		inode->i_atime = timespec_trunc(attr->ia_atime,
						inode->i_sb->s_time_gran);
	if (ia_valid & ATTR_MTIME)
		inode->i_mtime = timespec_trunc(attr->ia_mtime,
						inode->i_sb->s_time_gran);
	if (ia_valid & ATTR_CTIME)
		inode->i_ctime = timespec_trunc(attr->ia_ctime,
						inode->i_sb->s_time_gran);
	if (ia_valid & ATTR_MODE) {
		umode_t mode = attr->ia_mode;

		if (!in_group_p(inode->i_gid) && !capable(CAP_FSETID))
			mode &= ~S_ISGID;
713
		set_acl_inode(inode, mode);
J
Jaegeuk Kim 已提交
714 715 716 717 718 719 720 721
	}
}
#else
#define __setattr_copy setattr_copy
#endif

int f2fs_setattr(struct dentry *dentry, struct iattr *attr)
{
722
	struct inode *inode = d_inode(dentry);
J
Jaegeuk Kim 已提交
723
	int err;
724
	bool size_changed = false;
J
Jaegeuk Kim 已提交
725

726
	err = setattr_prepare(dentry, attr);
J
Jaegeuk Kim 已提交
727 728 729
	if (err)
		return err;

C
Chao Yu 已提交
730 731 732 733 734 735 736 737 738 739 740 741 742 743
	if (is_quota_modification(inode, attr)) {
		err = dquot_initialize(inode);
		if (err)
			return err;
	}
	if ((attr->ia_valid & ATTR_UID &&
		!uid_eq(attr->ia_uid, inode->i_uid)) ||
		(attr->ia_valid & ATTR_GID &&
		!gid_eq(attr->ia_gid, inode->i_gid))) {
		err = dquot_transfer(inode, attr);
		if (err)
			return err;
	}

744
	if (attr->ia_valid & ATTR_SIZE) {
745 746 747 748 749 750 751
		if (f2fs_encrypted_inode(inode)) {
			err = fscrypt_get_encryption_info(inode);
			if (err)
				return err;
			if (!fscrypt_has_encryption_key(inode))
				return -ENOKEY;
		}
752

753
		if (attr->ia_size <= i_size_read(inode)) {
754
			down_write(&F2FS_I(inode)->i_mmap_sem);
755
			truncate_setsize(inode, attr->ia_size);
756
			err = f2fs_truncate(inode);
757
			up_write(&F2FS_I(inode)->i_mmap_sem);
758 759
			if (err)
				return err;
760 761
		} else {
			/*
762 763
			 * do not trim all blocks after i_size if target size is
			 * larger than i_size.
764
			 */
765
			down_write(&F2FS_I(inode)->i_mmap_sem);
766
			truncate_setsize(inode, attr->ia_size);
767
			up_write(&F2FS_I(inode)->i_mmap_sem);
768 769

			/* should convert inline inode here */
770
			if (!f2fs_may_inline_data(inode)) {
771 772 773 774
				err = f2fs_convert_inline_inode(inode);
				if (err)
					return err;
			}
775
			inode->i_mtime = inode->i_ctime = current_time(inode);
776
		}
777 778

		size_changed = true;
J
Jaegeuk Kim 已提交
779 780 781 782 783
	}

	__setattr_copy(inode, attr);

	if (attr->ia_valid & ATTR_MODE) {
784
		err = posix_acl_chmod(inode, get_inode_mode(inode));
785 786 787
		if (err || is_inode_flag_set(inode, FI_ACL_MODE)) {
			inode->i_mode = F2FS_I(inode)->i_acl_mode;
			clear_inode_flag(inode, FI_ACL_MODE);
J
Jaegeuk Kim 已提交
788 789 790
		}
	}

	/* file size may have changed here */
	f2fs_mark_inode_dirty_sync(inode, size_changed);
793 794 795 796

	/* inode change will produce dirty node pages flushed by checkpoint */
	f2fs_balance_fs(F2FS_I_SB(inode), true);

J
Jaegeuk Kim 已提交
797 798 799 800 801 802 803
	return err;
}

const struct inode_operations f2fs_file_inode_operations = {
	.getattr	= f2fs_getattr,
	.setattr	= f2fs_setattr,
	.get_acl	= f2fs_get_acl,
804
	.set_acl	= f2fs_set_acl,
J
Jaegeuk Kim 已提交
805 806 807
#ifdef CONFIG_F2FS_FS_XATTR
	.listxattr	= f2fs_listxattr,
#endif
J
Jaegeuk Kim 已提交
808
	.fiemap		= f2fs_fiemap,
J
Jaegeuk Kim 已提交
809 810
};

C
Chao Yu 已提交
811
static int fill_zero(struct inode *inode, pgoff_t index,
J
Jaegeuk Kim 已提交
812 813
					loff_t start, loff_t len)
{
814
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
J
Jaegeuk Kim 已提交
815 816 817
	struct page *page;

	if (!len)
C
Chao Yu 已提交
818
		return 0;
J
Jaegeuk Kim 已提交
819

J
Jaegeuk Kim 已提交
820
	f2fs_balance_fs(sbi, true);
821

822
	f2fs_lock_op(sbi);
823
	page = get_new_data_page(inode, NULL, index, false);
824
	f2fs_unlock_op(sbi);
J
Jaegeuk Kim 已提交
825

C
Chao Yu 已提交
826 827 828
	if (IS_ERR(page))
		return PTR_ERR(page);

829
	f2fs_wait_on_page_writeback(page, DATA, true);
C
Chao Yu 已提交
830 831 832 833
	zero_user(page, start, len);
	set_page_dirty(page);
	f2fs_put_page(page, 1);
	return 0;
J
Jaegeuk Kim 已提交
834 835 836 837 838 839
}

int truncate_hole(struct inode *inode, pgoff_t pg_start, pgoff_t pg_end)
{
	int err;

840
	while (pg_start < pg_end) {
J
Jaegeuk Kim 已提交
841
		struct dnode_of_data dn;
842
		pgoff_t end_offset, count;
843

J
Jaegeuk Kim 已提交
844
		set_new_dnode(&dn, inode, NULL, NULL, 0);
845
		err = get_dnode_of_data(&dn, pg_start, LOOKUP_NODE);
J
Jaegeuk Kim 已提交
846
		if (err) {
847 848
			if (err == -ENOENT) {
				pg_start++;
J
Jaegeuk Kim 已提交
849
				continue;
850
			}
J
Jaegeuk Kim 已提交
851 852 853
			return err;
		}

854
		end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
855 856 857 858 859
		count = min(end_offset - dn.ofs_in_node, pg_end - pg_start);

		f2fs_bug_on(F2FS_I_SB(inode), count == 0 || count > end_offset);

		truncate_data_blocks_range(&dn, count);
J
Jaegeuk Kim 已提交
860
		f2fs_put_dnode(&dn);
861 862

		pg_start += count;
J
Jaegeuk Kim 已提交
863 864 865 866
	}
	return 0;
}

C
Chao Yu 已提交
867
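/*
 * FALLOC_FL_PUNCH_HOLE: zero the partial pages at both ends of the range
 * and truncate the whole pages in between under f2fs_lock_op().
 */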
static int punch_hole(struct inode *inode, loff_t offset, loff_t len)
J
Jaegeuk Kim 已提交
868 869 870
{
	pgoff_t pg_start, pg_end;
	loff_t off_start, off_end;
871
	int ret;
J
Jaegeuk Kim 已提交
872

873 874 875
	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		return ret;
H
Huajun Li 已提交
876

877 878
	pg_start = ((unsigned long long) offset) >> PAGE_SHIFT;
	pg_end = ((unsigned long long) offset + len) >> PAGE_SHIFT;
J
Jaegeuk Kim 已提交
879

880 881
	off_start = offset & (PAGE_SIZE - 1);
	off_end = (offset + len) & (PAGE_SIZE - 1);
J
Jaegeuk Kim 已提交
882 883

	if (pg_start == pg_end) {
C
Chao Yu 已提交
884
		ret = fill_zero(inode, pg_start, off_start,
J
Jaegeuk Kim 已提交
885
						off_end - off_start);
C
Chao Yu 已提交
886 887
		if (ret)
			return ret;
J
Jaegeuk Kim 已提交
888
	} else {
C
Chao Yu 已提交
889 890
		if (off_start) {
			ret = fill_zero(inode, pg_start++, off_start,
891
						PAGE_SIZE - off_start);
C
Chao Yu 已提交
892 893 894 895 896 897 898 899
			if (ret)
				return ret;
		}
		if (off_end) {
			ret = fill_zero(inode, pg_end, 0, off_end);
			if (ret)
				return ret;
		}
J
Jaegeuk Kim 已提交
900 901 902 903

		if (pg_start < pg_end) {
			struct address_space *mapping = inode->i_mapping;
			loff_t blk_start, blk_end;
904
			struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
905

J
Jaegeuk Kim 已提交
906
			f2fs_balance_fs(sbi, true);
J
Jaegeuk Kim 已提交
907

908 909
			blk_start = (loff_t)pg_start << PAGE_SHIFT;
			blk_end = (loff_t)pg_end << PAGE_SHIFT;
910
			down_write(&F2FS_I(inode)->i_mmap_sem);
J
Jaegeuk Kim 已提交
911 912
			truncate_inode_pages_range(mapping, blk_start,
					blk_end - 1);
913

914
			f2fs_lock_op(sbi);
J
Jaegeuk Kim 已提交
915
			ret = truncate_hole(inode, pg_start, pg_end);
916
			f2fs_unlock_op(sbi);
917
			up_write(&F2FS_I(inode)->i_mmap_sem);
J
Jaegeuk Kim 已提交
918 919 920 921 922 923
		}
	}

	return ret;
}

924 925
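/*
 * Record the block addresses backing [off, off + len) in @blkaddr; blocks
 * that are not checkpointed have their on-disk address cleared and are
 * flagged in @do_replace so they can be moved instead of copied (not
 * supported in LFS mode).
 */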
static int __read_out_blkaddrs(struct inode *inode, block_t *blkaddr,
				int *do_replace, pgoff_t off, pgoff_t len)
C
Chao Yu 已提交
926 927 928
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn;
929
	int ret, done, i;
930

931
next_dnode:
932
	set_new_dnode(&dn, inode, NULL, NULL, 0);
933
	ret = get_dnode_of_data(&dn, off, LOOKUP_NODE_RA);
934 935 936
	if (ret && ret != -ENOENT) {
		return ret;
	} else if (ret == -ENOENT) {
937 938 939 940 941 942 943 944 945 946 947
		if (dn.max_level == 0)
			return -ENOENT;
		done = min((pgoff_t)ADDRS_PER_BLOCK - dn.ofs_in_node, len);
		blkaddr += done;
		do_replace += done;
		goto next;
	}

	done = min((pgoff_t)ADDRS_PER_PAGE(dn.node_page, inode) -
							dn.ofs_in_node, len);
	for (i = 0; i < done; i++, blkaddr++, do_replace++, dn.ofs_in_node++) {
948 949
		*blkaddr = datablock_addr(dn.inode,
					dn.node_page, dn.ofs_in_node);
950 951 952 953 954 955 956
		if (!is_checkpointed_data(sbi, *blkaddr)) {

			if (test_opt(sbi, LFS)) {
				f2fs_put_dnode(&dn);
				return -ENOTSUPP;
			}

957
			/* do not invalidate this block address */
958
			f2fs_update_data_blkaddr(&dn, NULL_ADDR);
959
			*do_replace = 1;
C
Chao Yu 已提交
960
		}
961
	}
962 963 964 965 966 967 968 969
	f2fs_put_dnode(&dn);
next:
	len -= done;
	off += done;
	if (len)
		goto next_dnode;
	return 0;
}
C
Chao Yu 已提交
970

971 972 973 974 975 976
static int __roll_back_blkaddrs(struct inode *inode, block_t *blkaddr,
				int *do_replace, pgoff_t off, int len)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn;
	int ret, i;
C
Chao Yu 已提交
977

978 979 980
	for (i = 0; i < len; i++, do_replace++, blkaddr++) {
		if (*do_replace == 0)
			continue;
C
Chao Yu 已提交
981

982 983 984 985 986 987 988
		set_new_dnode(&dn, inode, NULL, NULL, 0);
		ret = get_dnode_of_data(&dn, off + i, LOOKUP_NODE_RA);
		if (ret) {
			dec_valid_block_count(sbi, inode, 1);
			invalidate_blocks(sbi, *blkaddr);
		} else {
			f2fs_update_data_blkaddr(&dn, *blkaddr);
989
		}
990 991 992 993 994 995 996 997 998 999 1000 1001
		f2fs_put_dnode(&dn);
	}
	return 0;
}

static int __clone_blkaddrs(struct inode *src_inode, struct inode *dst_inode,
			block_t *blkaddr, int *do_replace,
			pgoff_t src, pgoff_t dst, pgoff_t len, bool full)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(src_inode);
	pgoff_t i = 0;
	int ret;
1002

1003 1004 1005 1006
	while (i < len) {
		if (blkaddr[i] == NULL_ADDR && !full) {
			i++;
			continue;
1007
		}
C
Chao Yu 已提交
1008

1009 1010 1011 1012 1013
		if (do_replace[i] || blkaddr[i] == NULL_ADDR) {
			struct dnode_of_data dn;
			struct node_info ni;
			size_t new_size;
			pgoff_t ilen;
C
Chao Yu 已提交
1014

1015 1016 1017 1018
			set_new_dnode(&dn, dst_inode, NULL, NULL, 0);
			ret = get_dnode_of_data(&dn, dst + i, ALLOC_NODE);
			if (ret)
				return ret;
C
Chao Yu 已提交
1019

1020 1021 1022 1023 1024
			get_node_info(sbi, dn.nid, &ni);
			ilen = min((pgoff_t)
				ADDRS_PER_PAGE(dn.node_page, dst_inode) -
						dn.ofs_in_node, len - i);
			do {
1025 1026
				dn.data_blkaddr = datablock_addr(dn.inode,
						dn.node_page, dn.ofs_in_node);
1027 1028 1029 1030
				truncate_data_blocks_range(&dn, 1);

				if (do_replace[i]) {
					f2fs_i_blocks_write(src_inode,
C
Chao Yu 已提交
1031
							1, false, false);
1032
					f2fs_i_blocks_write(dst_inode,
C
Chao Yu 已提交
1033
							1, true, false);
1034 1035 1036 1037 1038 1039 1040 1041 1042 1043
					f2fs_replace_block(sbi, &dn, dn.data_blkaddr,
					blkaddr[i], ni.version, true, false);

					do_replace[i] = 0;
				}
				dn.ofs_in_node++;
				i++;
				new_size = (dst + i) << PAGE_SHIFT;
				if (dst_inode->i_size < new_size)
					f2fs_i_size_write(dst_inode, new_size);
1044
			} while (--ilen && (do_replace[i] || blkaddr[i] == NULL_ADDR));
1045

1046 1047 1048 1049 1050 1051 1052 1053 1054 1055 1056 1057 1058 1059 1060 1061
			f2fs_put_dnode(&dn);
		} else {
			struct page *psrc, *pdst;

			psrc = get_lock_data_page(src_inode, src + i, true);
			if (IS_ERR(psrc))
				return PTR_ERR(psrc);
			pdst = get_new_data_page(dst_inode, NULL, dst + i,
								true);
			if (IS_ERR(pdst)) {
				f2fs_put_page(psrc, 1);
				return PTR_ERR(pdst);
			}
			f2fs_copy_page(psrc, pdst);
			set_page_dirty(pdst);
			f2fs_put_page(pdst, 1);
1062
			f2fs_put_page(psrc, 1);
C
Chao Yu 已提交
1063

1064 1065 1066 1067 1068
			ret = truncate_hole(src_inode, src + i, src + i + 1);
			if (ret)
				return ret;
			i++;
		}
1069 1070
	}
	return 0;
1071
}
C
Chao Yu 已提交
1072

1073 1074
static int __exchange_data_block(struct inode *src_inode,
			struct inode *dst_inode, pgoff_t src, pgoff_t dst,
1075
			pgoff_t len, bool full)
1076 1077 1078
{
	block_t *src_blkaddr;
	int *do_replace;
1079
	pgoff_t olen;
1080 1081
	int ret;

1082 1083
	while (len) {
		olen = min((pgoff_t)4 * ADDRS_PER_BLOCK, len);
1084

M
Michal Hocko 已提交
1085
		src_blkaddr = kvzalloc(sizeof(block_t) * olen, GFP_KERNEL);
1086 1087
		if (!src_blkaddr)
			return -ENOMEM;
1088

M
Michal Hocko 已提交
1089
		do_replace = kvzalloc(sizeof(int) * olen, GFP_KERNEL);
1090 1091 1092 1093
		if (!do_replace) {
			kvfree(src_blkaddr);
			return -ENOMEM;
		}
1094

1095 1096 1097 1098
		ret = __read_out_blkaddrs(src_inode, src_blkaddr,
					do_replace, src, olen);
		if (ret)
			goto roll_back;
1099

1100 1101 1102 1103 1104 1105 1106 1107 1108 1109 1110 1111
		ret = __clone_blkaddrs(src_inode, dst_inode, src_blkaddr,
					do_replace, src, dst, olen, full);
		if (ret)
			goto roll_back;

		src += olen;
		dst += olen;
		len -= olen;

		kvfree(src_blkaddr);
		kvfree(do_replace);
	}
1112 1113 1114 1115 1116 1117
	return 0;

roll_back:
	__roll_back_blkaddrs(src_inode, src_blkaddr, do_replace, src, len);
	kvfree(src_blkaddr);
	kvfree(do_replace);
1118 1119
	return ret;
}
C
Chao Yu 已提交
1120

1121 1122 1123 1124
static int f2fs_do_collapse(struct inode *inode, pgoff_t start, pgoff_t end)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	pgoff_t nrpages = (i_size_read(inode) + PAGE_SIZE - 1) / PAGE_SIZE;
1125
	int ret;
1126

1127 1128
	f2fs_balance_fs(sbi, true);
	f2fs_lock_op(sbi);
1129 1130 1131

	f2fs_drop_extent_tree(inode);

1132 1133
	ret = __exchange_data_block(inode, inode, end, start, nrpages - end, true);
	f2fs_unlock_op(sbi);
C
Chao Yu 已提交
1134 1135 1136 1137 1138 1139 1140 1141 1142 1143 1144 1145 1146 1147 1148 1149
	return ret;
}

static int f2fs_collapse_range(struct inode *inode, loff_t offset, loff_t len)
{
	pgoff_t pg_start, pg_end;
	loff_t new_size;
	int ret;

	if (offset + len >= i_size_read(inode))
		return -EINVAL;

	/* collapse range should be aligned to block size of f2fs. */
	if (offset & (F2FS_BLKSIZE - 1) || len & (F2FS_BLKSIZE - 1))
		return -EINVAL;

1150 1151 1152
	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		return ret;
1153

1154 1155
	pg_start = offset >> PAGE_SHIFT;
	pg_end = (offset + len) >> PAGE_SHIFT;
C
Chao Yu 已提交
1156

1157
	down_write(&F2FS_I(inode)->i_mmap_sem);
C
Chao Yu 已提交
1158 1159 1160
	/* write out all dirty pages from offset */
	ret = filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
	if (ret)
1161
		goto out;
C
Chao Yu 已提交
1162 1163 1164 1165 1166

	truncate_pagecache(inode, offset);

	ret = f2fs_do_collapse(inode, pg_start, pg_end);
	if (ret)
1167
		goto out;
C
Chao Yu 已提交
1168

1169 1170 1171 1172
	/* write out all moved pages, if possible */
	filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
	truncate_pagecache(inode, offset);

C
Chao Yu 已提交
1173
	new_size = i_size_read(inode) - len;
1174
	truncate_pagecache(inode, new_size);
C
Chao Yu 已提交
1175 1176 1177

	ret = truncate_blocks(inode, new_size, true);
	if (!ret)
1178
		f2fs_i_size_write(inode, new_size);
C
Chao Yu 已提交
1179

1180 1181
out:
	up_write(&F2FS_I(inode)->i_mmap_sem);
C
Chao Yu 已提交
1182 1183 1184
	return ret;
}

1185 1186 1187 1188 1189 1190 1191 1192 1193 1194
static int f2fs_do_zero_range(struct dnode_of_data *dn, pgoff_t start,
								pgoff_t end)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	pgoff_t index = start;
	unsigned int ofs_in_node = dn->ofs_in_node;
	blkcnt_t count = 0;
	int ret;

	for (; index < end; index++, dn->ofs_in_node++) {
1195 1196
		if (datablock_addr(dn->inode, dn->node_page,
					dn->ofs_in_node) == NULL_ADDR)
1197 1198 1199 1200 1201 1202 1203 1204 1205 1206
			count++;
	}

	dn->ofs_in_node = ofs_in_node;
	ret = reserve_new_blocks(dn, count);
	if (ret)
		return ret;

	dn->ofs_in_node = ofs_in_node;
	for (index = start; index < end; index++, dn->ofs_in_node++) {
1207 1208
		dn->data_blkaddr = datablock_addr(dn->inode,
					dn->node_page, dn->ofs_in_node);
1209 1210 1211 1212 1213 1214 1215 1216 1217 1218 1219 1220 1221 1222 1223 1224 1225 1226 1227 1228
		/*
		 * reserve_new_blocks will not guarantee entire block
		 * allocation.
		 */
		if (dn->data_blkaddr == NULL_ADDR) {
			ret = -ENOSPC;
			break;
		}
		if (dn->data_blkaddr != NEW_ADDR) {
			invalidate_blocks(sbi, dn->data_blkaddr);
			dn->data_blkaddr = NEW_ADDR;
			set_data_blkaddr(dn);
		}
	}

	f2fs_update_extent_cache_range(dn, start, 0, index - start);

	return ret;
}

C
Chao Yu 已提交
1229 1230 1231 1232 1233 1234 1235 1236 1237 1238 1239 1240 1241 1242
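/*
 * FALLOC_FL_ZERO_RANGE: zero the partial head/tail pages in place and reset
 * the fully covered blocks to NEW_ADDR so they read back as zeroes; i_size
 * is extended unless FALLOC_FL_KEEP_SIZE is set.
 */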
static int f2fs_zero_range(struct inode *inode, loff_t offset, loff_t len,
								int mode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct address_space *mapping = inode->i_mapping;
	pgoff_t index, pg_start, pg_end;
	loff_t new_size = i_size_read(inode);
	loff_t off_start, off_end;
	int ret = 0;

	ret = inode_newsize_ok(inode, (len + offset));
	if (ret)
		return ret;

1243 1244 1245
	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		return ret;
C
Chao Yu 已提交
1246

1247
	down_write(&F2FS_I(inode)->i_mmap_sem);
C
Chao Yu 已提交
1248 1249
	ret = filemap_write_and_wait_range(mapping, offset, offset + len - 1);
	if (ret)
1250
		goto out_sem;
C
Chao Yu 已提交
1251 1252 1253

	truncate_pagecache_range(inode, offset, offset + len - 1);

1254 1255
	pg_start = ((unsigned long long) offset) >> PAGE_SHIFT;
	pg_end = ((unsigned long long) offset + len) >> PAGE_SHIFT;
C
Chao Yu 已提交
1256

1257 1258
	off_start = offset & (PAGE_SIZE - 1);
	off_end = (offset + len) & (PAGE_SIZE - 1);
C
Chao Yu 已提交
1259 1260

	if (pg_start == pg_end) {
C
Chao Yu 已提交
1261 1262 1263
		ret = fill_zero(inode, pg_start, off_start,
						off_end - off_start);
		if (ret)
1264
			goto out_sem;
C
Chao Yu 已提交
1265

C
Chao Yu 已提交
1266 1267 1268
		new_size = max_t(loff_t, new_size, offset + len);
	} else {
		if (off_start) {
C
Chao Yu 已提交
1269
			ret = fill_zero(inode, pg_start++, off_start,
1270
						PAGE_SIZE - off_start);
C
Chao Yu 已提交
1271
			if (ret)
1272
				goto out_sem;
C
Chao Yu 已提交
1273

C
Chao Yu 已提交
1274
			new_size = max_t(loff_t, new_size,
1275
					(loff_t)pg_start << PAGE_SHIFT);
C
Chao Yu 已提交
1276 1277
		}

1278
		for (index = pg_start; index < pg_end;) {
C
Chao Yu 已提交
1279
			struct dnode_of_data dn;
1280 1281
			unsigned int end_offset;
			pgoff_t end;
C
Chao Yu 已提交
1282 1283 1284

			f2fs_lock_op(sbi);

1285 1286
			set_new_dnode(&dn, inode, NULL, NULL, 0);
			ret = get_dnode_of_data(&dn, index, ALLOC_NODE);
C
Chao Yu 已提交
1287 1288 1289 1290 1291
			if (ret) {
				f2fs_unlock_op(sbi);
				goto out;
			}

1292 1293 1294 1295
			end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
			end = min(pg_end, end_offset - dn.ofs_in_node + index);

			ret = f2fs_do_zero_range(&dn, index, end);
C
Chao Yu 已提交
1296 1297
			f2fs_put_dnode(&dn);
			f2fs_unlock_op(sbi);
1298 1299 1300

			f2fs_balance_fs(sbi, dn.node_changed);

1301 1302
			if (ret)
				goto out;
C
Chao Yu 已提交
1303

1304
			index = end;
C
Chao Yu 已提交
1305
			new_size = max_t(loff_t, new_size,
1306
					(loff_t)index << PAGE_SHIFT);
C
Chao Yu 已提交
1307 1308 1309
		}

		if (off_end) {
C
Chao Yu 已提交
1310 1311 1312 1313
			ret = fill_zero(inode, pg_end, 0, off_end);
			if (ret)
				goto out;

C
Chao Yu 已提交
1314 1315 1316 1317 1318
			new_size = max_t(loff_t, new_size, offset + len);
		}
	}

out:
1319
	if (!(mode & FALLOC_FL_KEEP_SIZE) && i_size_read(inode) < new_size)
1320
		f2fs_i_size_write(inode, new_size);
1321 1322
out_sem:
	up_write(&F2FS_I(inode)->i_mmap_sem);
C
Chao Yu 已提交
1323 1324 1325 1326

	return ret;
}

C
Chao Yu 已提交
1327 1328 1329
static int f2fs_insert_range(struct inode *inode, loff_t offset, loff_t len)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1330
	pgoff_t nr, pg_start, pg_end, delta, idx;
C
Chao Yu 已提交
1331
	loff_t new_size;
1332
	int ret = 0;
C
Chao Yu 已提交
1333 1334

	new_size = i_size_read(inode) + len;
1335 1336 1337
	ret = inode_newsize_ok(inode, new_size);
	if (ret)
		return ret;
C
Chao Yu 已提交
1338 1339 1340 1341 1342 1343 1344 1345

	if (offset >= i_size_read(inode))
		return -EINVAL;

	/* insert range should be aligned to block size of f2fs. */
	if (offset & (F2FS_BLKSIZE - 1) || len & (F2FS_BLKSIZE - 1))
		return -EINVAL;

1346 1347 1348
	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		return ret;
1349

J
Jaegeuk Kim 已提交
1350
	f2fs_balance_fs(sbi, true);
1351

1352
	down_write(&F2FS_I(inode)->i_mmap_sem);
C
Chao Yu 已提交
1353 1354
	ret = truncate_blocks(inode, i_size_read(inode), true);
	if (ret)
1355
		goto out;
C
Chao Yu 已提交
1356 1357 1358 1359

	/* write out all dirty pages from offset */
	ret = filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
	if (ret)
1360
		goto out;
C
Chao Yu 已提交
1361 1362 1363

	truncate_pagecache(inode, offset);

1364 1365
	pg_start = offset >> PAGE_SHIFT;
	pg_end = (offset + len) >> PAGE_SHIFT;
C
Chao Yu 已提交
1366
	delta = pg_end - pg_start;
1367 1368 1369 1370 1371 1372 1373
	idx = (i_size_read(inode) + PAGE_SIZE - 1) / PAGE_SIZE;

	while (!ret && idx > pg_start) {
		nr = idx - pg_start;
		if (nr > delta)
			nr = delta;
		idx -= nr;
C
Chao Yu 已提交
1374 1375

		f2fs_lock_op(sbi);
1376 1377
		f2fs_drop_extent_tree(inode);

1378 1379
		ret = __exchange_data_block(inode, inode, idx,
					idx + delta, nr, false);
C
Chao Yu 已提交
1380 1381 1382
		f2fs_unlock_op(sbi);
	}

1383 1384 1385 1386 1387
	/* write out all moved pages, if possible */
	filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
	truncate_pagecache(inode, offset);

	if (!ret)
1388
		f2fs_i_size_write(inode, new_size);
1389 1390
out:
	up_write(&F2FS_I(inode)->i_mmap_sem);
C
Chao Yu 已提交
1391 1392 1393
	return ret;
}

J
Jaegeuk Kim 已提交
1394 1395 1396
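/*
 * Preallocate blocks for fallocate() by mapping [offset, offset + len) with
 * F2FS_GET_BLOCK_PRE_AIO; on partial failure, i_size is advanced only up to
 * the last block that was successfully mapped.
 */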
static int expand_inode_data(struct inode *inode, loff_t offset,
					loff_t len, int mode)
{
1397
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1398 1399
	struct f2fs_map_blocks map = { .m_next_pgofs = NULL };
	pgoff_t pg_end;
J
Jaegeuk Kim 已提交
1400
	loff_t new_size = i_size_read(inode);
1401
	loff_t off_end;
1402
	int err;
J
Jaegeuk Kim 已提交
1403

1404 1405 1406
	err = inode_newsize_ok(inode, (len + offset));
	if (err)
		return err;
J
Jaegeuk Kim 已提交
1407

1408 1409 1410
	err = f2fs_convert_inline_inode(inode);
	if (err)
		return err;
1411

J
Jaegeuk Kim 已提交
1412
	f2fs_balance_fs(sbi, true);
1413

1414
	pg_end = ((unsigned long long)offset + len) >> PAGE_SHIFT;
1415
	off_end = (offset + len) & (PAGE_SIZE - 1);
J
Jaegeuk Kim 已提交
1416

1417 1418 1419 1420
	map.m_lblk = ((unsigned long long)offset) >> PAGE_SHIFT;
	map.m_len = pg_end - map.m_lblk;
	if (off_end)
		map.m_len++;
1421

1422 1423
	err = f2fs_map_blocks(inode, &map, 1, F2FS_GET_BLOCK_PRE_AIO);
	if (err) {
1424
		pgoff_t last_off;
J
Jaegeuk Kim 已提交
1425

1426
		if (!map.m_len)
1427
			return err;
1428

1429 1430 1431 1432 1433 1434 1435
		last_off = map.m_lblk + map.m_len - 1;

		/* update new size to the failed position */
		new_size = (last_off == pg_end) ? offset + len:
					(loff_t)(last_off + 1) << PAGE_SHIFT;
	} else {
		new_size = ((loff_t)pg_end << PAGE_SHIFT) + off_end;
J
Jaegeuk Kim 已提交
1436 1437
	}

1438
	if (!(mode & FALLOC_FL_KEEP_SIZE) && i_size_read(inode) < new_size)
1439
		f2fs_i_size_write(inode, new_size);
J
Jaegeuk Kim 已提交
1440

1441
	return err;
J
Jaegeuk Kim 已提交
1442 1443 1444 1445 1446
}

static long f2fs_fallocate(struct file *file, int mode,
				loff_t offset, loff_t len)
{
A
Al Viro 已提交
1447
	struct inode *inode = file_inode(file);
1448
	long ret = 0;
J
Jaegeuk Kim 已提交
1449

1450 1451 1452 1453
	/* f2fs only support ->fallocate for regular file */
	if (!S_ISREG(inode->i_mode))
		return -EINVAL;

C
Chao Yu 已提交
1454 1455
	if (f2fs_encrypted_inode(inode) &&
		(mode & (FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_INSERT_RANGE)))
1456 1457
		return -EOPNOTSUPP;

C
Chao Yu 已提交
1458
	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |
C
Chao Yu 已提交
1459 1460
			FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_ZERO_RANGE |
			FALLOC_FL_INSERT_RANGE))
J
Jaegeuk Kim 已提交
1461 1462
		return -EOPNOTSUPP;

A
Al Viro 已提交
1463
	inode_lock(inode);
1464

1465 1466 1467 1468
	if (mode & FALLOC_FL_PUNCH_HOLE) {
		if (offset >= inode->i_size)
			goto out;

C
Chao Yu 已提交
1469
		ret = punch_hole(inode, offset, len);
C
Chao Yu 已提交
1470 1471
	} else if (mode & FALLOC_FL_COLLAPSE_RANGE) {
		ret = f2fs_collapse_range(inode, offset, len);
C
Chao Yu 已提交
1472 1473
	} else if (mode & FALLOC_FL_ZERO_RANGE) {
		ret = f2fs_zero_range(inode, offset, len, mode);
C
Chao Yu 已提交
1474 1475
	} else if (mode & FALLOC_FL_INSERT_RANGE) {
		ret = f2fs_insert_range(inode, offset, len);
C
Chao Yu 已提交
1476
	} else {
J
Jaegeuk Kim 已提交
1477
		ret = expand_inode_data(inode, offset, len, mode);
C
Chao Yu 已提交
1478
	}
J
Jaegeuk Kim 已提交
1479

1480
	if (!ret) {
1481
		inode->i_mtime = inode->i_ctime = current_time(inode);
1482
		f2fs_mark_inode_dirty_sync(inode, false);
1483 1484
		if (mode & FALLOC_FL_KEEP_SIZE)
			file_set_keep_isize(inode);
1485
		f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
1486
	}
1487

1488
out:
A
Al Viro 已提交
1489
	inode_unlock(inode);
1490

1491
	trace_f2fs_fallocate(inode, mode, offset, len, ret);
J
Jaegeuk Kim 已提交
1492 1493 1494
	return ret;
}

1495 1496
static int f2fs_release_file(struct inode *inode, struct file *filp)
{
1497 1498 1499 1500 1501 1502 1503 1504
	/*
	 * f2fs_relase_file is called at every close calls. So we should
	 * not drop any inmemory pages by close called by other process.
	 */
	if (!(filp->f_mode & FMODE_WRITE) ||
			atomic_read(&inode->i_writecount) != 1)
		return 0;

1505 1506
	/* some remained atomic pages should discarded */
	if (f2fs_is_atomic_file(inode))
1507
		drop_inmem_pages(inode);
1508
	if (f2fs_is_volatile_file(inode)) {
1509
		clear_inode_flag(inode, FI_VOLATILE_FILE);
1510
		stat_dec_volatile_write(inode);
1511
		set_inode_flag(inode, FI_DROP_CACHE);
1512
		filemap_fdatawrite(inode->i_mapping);
1513
		clear_inode_flag(inode, FI_DROP_CACHE);
1514 1515 1516 1517
	}
	return 0;
}

1518
static int f2fs_file_flush(struct file *file, fl_owner_t id)
J
Jaegeuk Kim 已提交
1519
{
1520 1521 1522 1523 1524 1525 1526 1527 1528 1529 1530 1531
	struct inode *inode = file_inode(file);

	/*
	 * If the process doing a transaction is crashed, we should do
	 * roll-back. Otherwise, other reader/write can see corrupted database
	 * until all the writers close its file. Since this should be done
	 * before dropping file lock, it needs to do in ->flush.
	 */
	if (f2fs_is_atomic_file(inode) &&
			F2FS_I(inode)->inmem_task == current)
		drop_inmem_pages(inode);
	return 0;
J
Jaegeuk Kim 已提交
1532 1533
}

1534
static int f2fs_ioc_getflags(struct file *filp, unsigned long arg)
J
Jaegeuk Kim 已提交
1535
{
A
Al Viro 已提交
1536
	struct inode *inode = file_inode(filp);
J
Jaegeuk Kim 已提交
1537
	struct f2fs_inode_info *fi = F2FS_I(inode);
1538 1539
	unsigned int flags = fi->i_flags &
			(FS_FL_USER_VISIBLE | FS_PROJINHERIT_FL);
1540 1541
	return put_user(flags, (int __user *)arg);
}
J
Jaegeuk Kim 已提交
1542

1543 1544 1545 1546 1547 1548 1549 1550 1551 1552 1553 1554 1555 1556 1557 1558 1559 1560 1561 1562 1563 1564 1565 1566 1567 1568 1569 1570 1571 1572 1573 1574
static int __f2fs_ioc_setflags(struct inode *inode, unsigned int flags)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);
	unsigned int oldflags;

	/* Is it quota file? Do not allow user to mess with it */
	if (IS_NOQUOTA(inode))
		return -EPERM;

	flags = f2fs_mask_flags(inode->i_mode, flags);

	oldflags = fi->i_flags;

	if ((flags ^ oldflags) & (FS_APPEND_FL | FS_IMMUTABLE_FL))
		if (!capable(CAP_LINUX_IMMUTABLE))
			return -EPERM;

	flags = flags & (FS_FL_USER_MODIFIABLE | FS_PROJINHERIT_FL);
	flags |= oldflags & ~(FS_FL_USER_MODIFIABLE | FS_PROJINHERIT_FL);
	fi->i_flags = flags;

	if (fi->i_flags & FS_PROJINHERIT_FL)
		set_inode_flag(inode, FI_PROJ_INHERIT);
	else
		clear_inode_flag(inode, FI_PROJ_INHERIT);

	inode->i_ctime = current_time(inode);
	f2fs_set_inode_flags(inode);
	f2fs_mark_inode_dirty_sync(inode, false);
	return 0;
}

1575 1576 1577
static int f2fs_ioc_setflags(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
1578
	unsigned int flags;
1579
	int ret;
J
Jaegeuk Kim 已提交
1580

1581 1582 1583 1584 1585 1586
	if (!inode_owner_or_capable(inode))
		return -EACCES;

	if (get_user(flags, (int __user *)arg))
		return -EFAULT;

1587 1588 1589
	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;
J
Jaegeuk Kim 已提交
1590

A
Al Viro 已提交
1591
	inode_lock(inode);
J
Jaegeuk Kim 已提交
1592

1593
	ret = __f2fs_ioc_setflags(inode, flags);
J
Jaegeuk Kim 已提交
1594

1595
	inode_unlock(inode);
1596 1597 1598
	mnt_drop_write_file(filp);
	return ret;
}
1599

C
Chao Yu 已提交
1600 1601 1602 1603 1604 1605 1606
static int f2fs_ioc_getversion(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);

	return put_user(inode->i_generation, (int __user *)arg);
}

J
Jaegeuk Kim 已提交
1607 1608 1609
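/*
 * F2FS_IOC_START_ATOMIC_WRITE: convert any inline data, flush stale dirty
 * pages, and mark the inode FI_ATOMIC_FILE so subsequent writes are staged
 * in memory until they are committed or dropped.
 */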
static int f2fs_ioc_start_atomic_write(struct file *filp)
{
	struct inode *inode = file_inode(filp);
1610
	int ret;
J
Jaegeuk Kim 已提交
1611 1612 1613 1614

	if (!inode_owner_or_capable(inode))
		return -EACCES;

1615 1616 1617
	if (!S_ISREG(inode->i_mode))
		return -EINVAL;

1618 1619 1620 1621
	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

1622 1623
	inode_lock(inode);

1624
	if (f2fs_is_atomic_file(inode))
1625
		goto out;
J
Jaegeuk Kim 已提交
1626

1627 1628
	ret = f2fs_convert_inline_inode(inode);
	if (ret)
1629
		goto out;
J
Jaegeuk Kim 已提交
1630

1631
	set_inode_flag(inode, FI_ATOMIC_FILE);
1632
	set_inode_flag(inode, FI_HOT_DATA);
1633 1634
	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);

1635
	if (!get_dirty_pages(inode))
1636
		goto inc_stat;
1637 1638

	f2fs_msg(F2FS_I_SB(inode)->sb, KERN_WARNING,
1639
		"Unexpected flush for atomic writes: ino=%lu, npages=%u",
1640 1641
					inode->i_ino, get_dirty_pages(inode));
	ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX);
1642
	if (ret) {
1643
		clear_inode_flag(inode, FI_ATOMIC_FILE);
C
Chao Yu 已提交
1644
		clear_inode_flag(inode, FI_HOT_DATA);
1645 1646 1647 1648
		goto out;
	}

inc_stat:
1649
	F2FS_I(inode)->inmem_task = current;
1650 1651
	stat_inc_atomic_write(inode);
	stat_update_max_atomic_write(inode);
1652
out:
1653
	inode_unlock(inode);
1654
	mnt_drop_write_file(filp);
1655
	return ret;
J
Jaegeuk Kim 已提交
1656 1657 1658 1659 1660 1661 1662 1663 1664 1665 1666 1667 1668 1669
}

static int f2fs_ioc_commit_atomic_write(struct file *filp)
{
	struct inode *inode = file_inode(filp);
	int ret;

	if (!inode_owner_or_capable(inode))
		return -EACCES;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

1670 1671
	inode_lock(inode);

1672 1673 1674
	if (f2fs_is_volatile_file(inode))
		goto err_out;

1675
	if (f2fs_is_atomic_file(inode)) {
1676
		ret = commit_inmem_pages(inode);
C
Chao Yu 已提交
1677
		if (ret)
1678
			goto err_out;
C
Chao Yu 已提交
1679

1680
		ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 0, true);
C
Chao Yu 已提交
1681 1682
		if (!ret) {
			clear_inode_flag(inode, FI_ATOMIC_FILE);
C
Chao Yu 已提交
1683
			clear_inode_flag(inode, FI_HOT_DATA);
C
Chao Yu 已提交
1684
			stat_dec_atomic_write(inode);
1685
		}
1686
	} else {
1687
		ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 1, false);
1688
	}
1689
err_out:
1690
	inode_unlock(inode);
J
Jaegeuk Kim 已提交
1691 1692 1693 1694
	mnt_drop_write_file(filp);
	return ret;
}

1695 1696 1697
static int f2fs_ioc_start_volatile_write(struct file *filp)
{
	struct inode *inode = file_inode(filp);
1698
	int ret;
1699 1700 1701 1702

	if (!inode_owner_or_capable(inode))
		return -EACCES;

1703 1704 1705
	if (!S_ISREG(inode->i_mode))
		return -EINVAL;

1706 1707 1708 1709
	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

1710 1711
	inode_lock(inode);

1712
	if (f2fs_is_volatile_file(inode))
1713
		goto out;
1714

1715 1716
	ret = f2fs_convert_inline_inode(inode);
	if (ret)
1717
		goto out;
1718

1719 1720 1721
	stat_inc_volatile_write(inode);
	stat_update_max_volatile_write(inode);

1722
	set_inode_flag(inode, FI_VOLATILE_FILE);
1723
	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
1724
out:
1725
	inode_unlock(inode);
1726 1727
	mnt_drop_write_file(filp);
	return ret;
1728 1729
}

1730 1731 1732
static int f2fs_ioc_release_volatile_write(struct file *filp)
{
	struct inode *inode = file_inode(filp);
1733
	int ret;
1734 1735 1736 1737

	if (!inode_owner_or_capable(inode))
		return -EACCES;

1738 1739 1740 1741
	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

1742 1743
	inode_lock(inode);

1744
	if (!f2fs_is_volatile_file(inode))
1745
		goto out;
1746

1747 1748 1749 1750
	if (!f2fs_is_first_block_written(inode)) {
		ret = truncate_partial_data_page(inode, 0, true);
		goto out;
	}
1751

1752 1753
	ret = punch_hole(inode, 0, F2FS_BLKSIZE);
out:
1754
	inode_unlock(inode);
1755 1756
	mnt_drop_write_file(filp);
	return ret;
1757 1758 1759 1760 1761 1762 1763 1764 1765 1766 1767 1768 1769 1770
}

static int f2fs_ioc_abort_volatile_write(struct file *filp)
{
	struct inode *inode = file_inode(filp);
	int ret;

	if (!inode_owner_or_capable(inode))
		return -EACCES;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

1771 1772
	inode_lock(inode);

1773
	if (f2fs_is_atomic_file(inode))
1774
		drop_inmem_pages(inode);
1775
	if (f2fs_is_volatile_file(inode)) {
1776
		clear_inode_flag(inode, FI_VOLATILE_FILE);
1777
		stat_dec_volatile_write(inode);
1778
		ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 0, true);
1779
	}
1780

1781 1782
	inode_unlock(inode);

1783
	mnt_drop_write_file(filp);
1784
	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
1785 1786 1787
	return ret;
}

J
Jaegeuk Kim 已提交
1788 1789 1790 1791 1792 1793
static int f2fs_ioc_shutdown(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct super_block *sb = sbi->sb;
	__u32 in;
1794
	int ret;
J
Jaegeuk Kim 已提交
1795 1796 1797 1798 1799 1800 1801

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (get_user(in, (__u32 __user *)arg))
		return -EFAULT;

1802 1803 1804 1805
	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

J
Jaegeuk Kim 已提交
1806 1807 1808 1809
	switch (in) {
	case F2FS_GOING_DOWN_FULLSYNC:
		sb = freeze_bdev(sb->s_bdev);
		if (sb && !IS_ERR(sb)) {
1810
			f2fs_stop_checkpoint(sbi, false);
J
Jaegeuk Kim 已提交
1811 1812 1813 1814 1815 1816
			thaw_bdev(sb->s_bdev, sb);
		}
		break;
	case F2FS_GOING_DOWN_METASYNC:
		/* do checkpoint only */
		f2fs_sync_fs(sb, 1);
1817
		f2fs_stop_checkpoint(sbi, false);
J
Jaegeuk Kim 已提交
1818 1819
		break;
	case F2FS_GOING_DOWN_NOSYNC:
1820
		f2fs_stop_checkpoint(sbi, false);
J
Jaegeuk Kim 已提交
1821
		break;
1822
	case F2FS_GOING_DOWN_METAFLUSH:
C
Chao Yu 已提交
1823
		sync_meta_pages(sbi, META, LONG_MAX, FS_META_IO);
1824
		f2fs_stop_checkpoint(sbi, false);
1825
		break;
J
Jaegeuk Kim 已提交
1826
	default:
1827 1828
		ret = -EINVAL;
		goto out;
J
Jaegeuk Kim 已提交
1829
	}
1830
	f2fs_update_time(sbi, REQ_TIME);
1831 1832 1833
out:
	mnt_drop_write_file(filp);
	return ret;
J
Jaegeuk Kim 已提交
1834 1835
}

1836 1837 1838 1839 1840 1841 1842
static int f2fs_ioc_fitrim(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct super_block *sb = inode->i_sb;
	struct request_queue *q = bdev_get_queue(sb->s_bdev);
	struct fstrim_range range;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (!blk_queue_discard(q))
		return -EOPNOTSUPP;

	if (copy_from_user(&range, (struct fstrim_range __user *)arg,
				sizeof(range)))
		return -EFAULT;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	range.minlen = max((unsigned int)range.minlen,
				q->limits.discard_granularity);
	ret = f2fs_trim_fs(F2FS_SB(sb), &range);
	mnt_drop_write_file(filp);
	if (ret < 0)
		return ret;

	if (copy_to_user((struct fstrim_range __user *)arg, &range,
				sizeof(range)))
		return -EFAULT;
	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
	return 0;
}
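
/*
 * User-space usage sketch: FITRIM and struct fstrim_range come from
 * <linux/fs.h>, so no f2fs-private definitions are needed.  On return the
 * kernel has updated range.len with the number of bytes it discarded.
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/fs.h>
 *
 *	static int f2fs_trim_all(const char *mntpoint)
 *	{
 *		struct fstrim_range range = { .start = 0, .len = (__u64)-1 };
 *		int fd = open(mntpoint, O_RDONLY);
 *		int err;
 *
 *		if (fd < 0)
 *			return -1;
 *		err = ioctl(fd, FITRIM, &range);
 *		close(fd);
 *		return err;
 *	}
 */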

static bool uuid_is_nonzero(__u8 u[16])
{
	int i;

	for (i = 0; i < 16; i++)
		if (u[i])
			return true;
	return false;
}

static int f2fs_ioc_set_encryption_policy(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);

	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);

	return fscrypt_ioctl_set_policy(filp, (const void __user *)arg);
}

static int f2fs_ioc_get_encryption_policy(struct file *filp, unsigned long arg)
{
	return fscrypt_ioctl_get_policy(filp, (void __user *)arg);
}

static int f2fs_ioc_get_encryption_pwsalt(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	int err;

	if (!f2fs_sb_has_crypto(inode->i_sb))
		return -EOPNOTSUPP;

	if (uuid_is_nonzero(sbi->raw_super->encrypt_pw_salt))
		goto got_it;

	err = mnt_want_write_file(filp);
	if (err)
		return err;

	/* update superblock with uuid */
	generate_random_uuid(sbi->raw_super->encrypt_pw_salt);

	err = f2fs_commit_super(sbi, false);
	if (err) {
		/* undo new data */
		memset(sbi->raw_super->encrypt_pw_salt, 0, 16);
		mnt_drop_write_file(filp);
		return err;
	}
	mnt_drop_write_file(filp);
got_it:
	if (copy_to_user((__u8 __user *)arg, sbi->raw_super->encrypt_pw_salt,
									16))
		return -EFAULT;
	return 0;
}

static int f2fs_ioc_gc(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	__u32 sync;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (get_user(sync, (__u32 __user *)arg))
		return -EFAULT;

	if (f2fs_readonly(sbi->sb))
		return -EROFS;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	if (!sync) {
		if (!mutex_trylock(&sbi->gc_mutex)) {
			ret = -EBUSY;
			goto out;
		}
	} else {
		mutex_lock(&sbi->gc_mutex);
	}

	ret = f2fs_gc(sbi, sync, true, NULL_SEGNO);
out:
	mnt_drop_write_file(filp);
	return ret;
}
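
/*
 * User-space usage sketch: F2FS_IOC_GARBAGE_COLLECT takes a pointer to a
 * __u32; a non-zero value requests a synchronous GC pass (the handler above
 * blocks on gc_mutex), zero makes it best-effort and fails with -EBUSY if
 * the mutex is contended.  The request code is assumed to be copied from
 * the f2fs headers.
 *
 *	__u32 sync = 1;
 *	int err = ioctl(fd, F2FS_IOC_GARBAGE_COLLECT, &sync);
 */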

static int f2fs_ioc_gc_range(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_gc_range range;
	u64 end;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (copy_from_user(&range, (struct f2fs_gc_range __user *)arg,
							sizeof(range)))
		return -EFAULT;

	if (f2fs_readonly(sbi->sb))
		return -EROFS;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	end = range.start + range.len;
	if (range.start < MAIN_BLKADDR(sbi) || end >= MAX_BLKADDR(sbi)) {
		ret = -EINVAL;
		goto out;
	}
do_more:
	if (!range.sync) {
		if (!mutex_trylock(&sbi->gc_mutex)) {
			ret = -EBUSY;
			goto out;
		}
	} else {
		mutex_lock(&sbi->gc_mutex);
	}

	ret = f2fs_gc(sbi, range.sync, true, GET_SEGNO(sbi, range.start));
	range.start += sbi->blocks_per_seg;
	if (range.start <= end)
		goto do_more;
out:
	mnt_drop_write_file(filp);
	return ret;
}

static int f2fs_ioc_write_checkpoint(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (f2fs_readonly(sbi->sb))
		return -EROFS;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	ret = f2fs_sync_fs(sbi->sb, 1);

	mnt_drop_write_file(filp);
	return ret;
}

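/*
 * f2fs_defragment_range() walks the requested byte range and, when the
 * mapped blocks are not already physically contiguous, redirties them (at
 * most one segment's worth per pass, under FI_DO_DEFRAG) so that writeback
 * reallocates them contiguously.
 */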
static int f2fs_defragment_range(struct f2fs_sb_info *sbi,
					struct file *filp,
					struct f2fs_defragment *range)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_map_blocks map = { .m_next_pgofs = NULL };
	struct extent_info ei = {0,0,0};
	pgoff_t pg_start, pg_end;
	unsigned int blk_per_seg = sbi->blocks_per_seg;
	unsigned int total = 0, sec_num;
	block_t blk_end = 0;
	bool fragmented = false;
	int err;

	/* if in-place-update policy is enabled, don't waste time here */
	if (need_inplace_update_policy(inode, NULL))
		return -EINVAL;

	pg_start = range->start >> PAGE_SHIFT;
	pg_end = (range->start + range->len) >> PAGE_SHIFT;

	f2fs_balance_fs(sbi, true);

	inode_lock(inode);

	/* writeback all dirty pages in the range */
	err = filemap_write_and_wait_range(inode->i_mapping, range->start,
						range->start + range->len - 1);
	if (err)
		goto out;

	/*
	 * lookup mapping info in extent cache, skip defragmenting if physical
	 * block addresses are continuous.
	 */
	if (f2fs_lookup_extent_cache(inode, pg_start, &ei)) {
		if (ei.fofs + ei.len >= pg_end)
			goto out;
	}

	map.m_lblk = pg_start;

	/*
	 * lookup mapping info in dnode page cache, skip defragmenting if all
	 * physical block addresses are continuous even if there are hole(s)
	 * in logical blocks.
	 */
	while (map.m_lblk < pg_end) {
		map.m_len = pg_end - map.m_lblk;
		err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_DEFAULT);
		if (err)
			goto out;

		if (!(map.m_flags & F2FS_MAP_FLAGS)) {
			map.m_lblk++;
			continue;
		}

		if (blk_end && blk_end != map.m_pblk) {
			fragmented = true;
			break;
		}
		blk_end = map.m_pblk + map.m_len;

		map.m_lblk += map.m_len;
	}

	if (!fragmented)
		goto out;

	map.m_lblk = pg_start;
	map.m_len = pg_end - pg_start;

	sec_num = (map.m_len + BLKS_PER_SEC(sbi) - 1) / BLKS_PER_SEC(sbi);

	/*
	 * make sure there are enough free sections for LFS allocation, this
	 * can avoid defragment running in SSR mode when free sections are
	 * allocated intensively
	 */
	if (has_not_enough_free_secs(sbi, 0, sec_num)) {
		err = -EAGAIN;
		goto out;
	}

	while (map.m_lblk < pg_end) {
		pgoff_t idx;
		int cnt = 0;

do_map:
		map.m_len = pg_end - map.m_lblk;
		err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_DEFAULT);
		if (err)
			goto clear_out;

		if (!(map.m_flags & F2FS_MAP_FLAGS)) {
			map.m_lblk++;
			continue;
		}

		set_inode_flag(inode, FI_DO_DEFRAG);

		idx = map.m_lblk;
		while (idx < map.m_lblk + map.m_len && cnt < blk_per_seg) {
			struct page *page;

			page = get_lock_data_page(inode, idx, true);
			if (IS_ERR(page)) {
				err = PTR_ERR(page);
				goto clear_out;
			}

			set_page_dirty(page);
			f2fs_put_page(page, 1);

			idx++;
			cnt++;
			total++;
		}

		map.m_lblk = idx;

		if (idx < pg_end && cnt < blk_per_seg)
			goto do_map;

		clear_inode_flag(inode, FI_DO_DEFRAG);

		err = filemap_fdatawrite(inode->i_mapping);
		if (err)
			goto out;
	}
clear_out:
	clear_inode_flag(inode, FI_DO_DEFRAG);
out:
	inode_unlock(inode);
	if (!err)
		range->len = (u64)total << PAGE_SHIFT;
	return err;
}

static int f2fs_ioc_defragment(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_defragment range;
	int err;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (!S_ISREG(inode->i_mode) || f2fs_is_atomic_file(inode))
		return -EINVAL;

	if (f2fs_readonly(sbi->sb))
		return -EROFS;

	if (copy_from_user(&range, (struct f2fs_defragment __user *)arg,
							sizeof(range)))
		return -EFAULT;

	/* verify alignment of offset & size */
	if (range.start & (F2FS_BLKSIZE - 1) || range.len & (F2FS_BLKSIZE - 1))
		return -EINVAL;

	if (unlikely((range.start + range.len) >> PAGE_SHIFT >
					sbi->max_file_blocks))
		return -EINVAL;

	err = mnt_want_write_file(filp);
	if (err)
		return err;

	err = f2fs_defragment_range(sbi, filp, &range);
	mnt_drop_write_file(filp);

	f2fs_update_time(sbi, REQ_TIME);
	if (err < 0)
		return err;

	if (copy_to_user((struct f2fs_defragment __user *)arg, &range,
							sizeof(range)))
		return -EFAULT;

	return 0;
}
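
/*
 * User-space usage sketch: struct f2fs_defragment { __u64 start; __u64 len; }
 * and the request code are assumed to be copied from the f2fs headers.  The
 * byte range must be block aligned, and on success the kernel rewrites
 * range.len with the number of bytes it redirtied for relocation.
 *
 *	struct f2fs_defragment range = { .start = 0, .len = length };	// length: caller-supplied, block aligned
 *	int err = ioctl(fd, F2FS_IOC_DEFRAGMENT, &range);
 */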

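/*
 * f2fs_move_file_range() backs F2FS_IOC_MOVE_RANGE: both inodes are
 * converted out of inline form, their dirty pages are written back, and
 * __exchange_data_block() then moves the block pointers from the source
 * range into the destination range under f2fs_lock_op().
 */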
static int f2fs_move_file_range(struct file *file_in, loff_t pos_in,
			struct file *file_out, loff_t pos_out, size_t len)
{
	struct inode *src = file_inode(file_in);
	struct inode *dst = file_inode(file_out);
	struct f2fs_sb_info *sbi = F2FS_I_SB(src);
	size_t olen = len, dst_max_i_size = 0;
	size_t dst_osize;
	int ret;

	if (file_in->f_path.mnt != file_out->f_path.mnt ||
				src->i_sb != dst->i_sb)
		return -EXDEV;

	if (unlikely(f2fs_readonly(src->i_sb)))
		return -EROFS;

	if (!S_ISREG(src->i_mode) || !S_ISREG(dst->i_mode))
		return -EINVAL;

	if (f2fs_encrypted_inode(src) || f2fs_encrypted_inode(dst))
		return -EOPNOTSUPP;

	if (src == dst) {
		if (pos_in == pos_out)
			return 0;
		if (pos_out > pos_in && pos_out < pos_in + len)
			return -EINVAL;
	}

	inode_lock(src);
	if (src != dst) {
		if (!inode_trylock(dst)) {
			ret = -EBUSY;
			goto out;
		}
	}

	ret = -EINVAL;
	if (pos_in + len > src->i_size || pos_in + len < pos_in)
		goto out_unlock;
	if (len == 0)
		olen = len = src->i_size - pos_in;
	if (pos_in + len == src->i_size)
		len = ALIGN(src->i_size, F2FS_BLKSIZE) - pos_in;
	if (len == 0) {
		ret = 0;
		goto out_unlock;
	}

	dst_osize = dst->i_size;
	if (pos_out + olen > dst->i_size)
		dst_max_i_size = pos_out + olen;

	/* verify the end result is block aligned */
	if (!IS_ALIGNED(pos_in, F2FS_BLKSIZE) ||
			!IS_ALIGNED(pos_in + len, F2FS_BLKSIZE) ||
			!IS_ALIGNED(pos_out, F2FS_BLKSIZE))
		goto out_unlock;

	ret = f2fs_convert_inline_inode(src);
	if (ret)
		goto out_unlock;

	ret = f2fs_convert_inline_inode(dst);
	if (ret)
		goto out_unlock;

	/* write out all dirty pages from offset */
	ret = filemap_write_and_wait_range(src->i_mapping,
					pos_in, pos_in + len);
	if (ret)
		goto out_unlock;

	ret = filemap_write_and_wait_range(dst->i_mapping,
					pos_out, pos_out + len);
	if (ret)
		goto out_unlock;

	f2fs_balance_fs(sbi, true);
	f2fs_lock_op(sbi);
	ret = __exchange_data_block(src, dst, pos_in >> F2FS_BLKSIZE_BITS,
				pos_out >> F2FS_BLKSIZE_BITS,
				len >> F2FS_BLKSIZE_BITS, false);

	if (!ret) {
		if (dst_max_i_size)
			f2fs_i_size_write(dst, dst_max_i_size);
		else if (dst_osize != dst->i_size)
			f2fs_i_size_write(dst, dst_osize);
	}
	f2fs_unlock_op(sbi);
out_unlock:
	if (src != dst)
		inode_unlock(dst);
out:
	inode_unlock(src);
	return ret;
}

static int f2fs_ioc_move_range(struct file *filp, unsigned long arg)
{
	struct f2fs_move_range range;
	struct fd dst;
	int err;

	if (!(filp->f_mode & FMODE_READ) ||
			!(filp->f_mode & FMODE_WRITE))
		return -EBADF;

	if (copy_from_user(&range, (struct f2fs_move_range __user *)arg,
							sizeof(range)))
		return -EFAULT;

	dst = fdget(range.dst_fd);
	if (!dst.file)
		return -EBADF;

	if (!(dst.file->f_mode & FMODE_WRITE)) {
		err = -EBADF;
		goto err_out;
	}

	err = mnt_want_write_file(filp);
	if (err)
		goto err_out;

	err = f2fs_move_file_range(filp, range.pos_in, dst.file,
					range.pos_out, range.len);

	mnt_drop_write_file(filp);
	if (err)
		goto err_out;

	if (copy_to_user((struct f2fs_move_range __user *)arg,
						&range, sizeof(range)))
		err = -EFAULT;
err_out:
	fdput(dst);
	return err;
}
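
/*
 * User-space usage sketch: F2FS_IOC_MOVE_RANGE is issued on the source fd
 * and names the destination through dst_fd.  The struct layout (dst_fd,
 * pos_in, pos_out, len) is assumed to match the f2fs headers; offsets and
 * length must satisfy the alignment checks in f2fs_move_file_range().
 *
 *	struct f2fs_move_range mr = {
 *		.dst_fd = dst_fd, .pos_in = 0, .pos_out = 0, .len = len,
 *	};
 *	int err = ioctl(src_fd, F2FS_IOC_MOVE_RANGE, &mr);
 */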

static int f2fs_ioc_flush_device(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct sit_info *sm = SIT_I(sbi);
	unsigned int start_segno = 0, end_segno = 0;
	unsigned int dev_start_segno = 0, dev_end_segno = 0;
	struct f2fs_flush_device range;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (f2fs_readonly(sbi->sb))
		return -EROFS;

	if (copy_from_user(&range, (struct f2fs_flush_device __user *)arg,
							sizeof(range)))
		return -EFAULT;

	if (sbi->s_ndevs <= 1 || sbi->s_ndevs - 1 <= range.dev_num ||
			sbi->segs_per_sec != 1) {
		f2fs_msg(sbi->sb, KERN_WARNING,
			"Can't flush %u in %d for segs_per_sec %u != 1\n",
				range.dev_num, sbi->s_ndevs,
				sbi->segs_per_sec);
		return -EINVAL;
	}

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	if (range.dev_num != 0)
		dev_start_segno = GET_SEGNO(sbi, FDEV(range.dev_num).start_blk);
	dev_end_segno = GET_SEGNO(sbi, FDEV(range.dev_num).end_blk);

	start_segno = sm->last_victim[FLUSH_DEVICE];
	if (start_segno < dev_start_segno || start_segno >= dev_end_segno)
		start_segno = dev_start_segno;
	end_segno = min(start_segno + range.segments, dev_end_segno);

	while (start_segno < end_segno) {
		if (!mutex_trylock(&sbi->gc_mutex)) {
			ret = -EBUSY;
			goto out;
		}
		sm->last_victim[GC_CB] = end_segno + 1;
		sm->last_victim[GC_GREEDY] = end_segno + 1;
		sm->last_victim[ALLOC_NEXT] = end_segno + 1;
		ret = f2fs_gc(sbi, true, true, start_segno);
		if (ret == -EAGAIN)
			ret = 0;
		else if (ret < 0)
			break;
		start_segno++;
	}
out:
	mnt_drop_write_file(filp);
	return ret;
}

static int f2fs_ioc_get_features(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	u32 sb_feature = le32_to_cpu(F2FS_I_SB(inode)->raw_super->feature);

	/* Must validate to set it with SQLite behavior in Android. */
	sb_feature |= F2FS_FEATURE_ATOMIC_WRITE;

	return put_user(sb_feature, (u32 __user *)arg);
}
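
/*
 * User-space usage sketch: F2FS_IOC_GET_FEATURES fills a __u32 bitmap; the
 * request code and the F2FS_FEATURE_* bits are assumed to be copied from
 * the f2fs headers.
 *
 *	__u32 features;
 *	int err = ioctl(fd, F2FS_IOC_GET_FEATURES, &features);
 */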

#ifdef CONFIG_QUOTA
static int f2fs_ioc_setproject(struct file *filp, __u32 projid)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct super_block *sb = sbi->sb;
	struct dquot *transfer_to[MAXQUOTAS] = {};
	struct page *ipage;
	kprojid_t kprojid;
	int err;

	if (!f2fs_sb_has_project_quota(sb)) {
		if (projid != F2FS_DEF_PROJID)
			return -EOPNOTSUPP;
		else
			return 0;
	}

	if (!f2fs_has_extra_attr(inode))
		return -EOPNOTSUPP;

	kprojid = make_kprojid(&init_user_ns, (projid_t)projid);

	if (projid_eq(kprojid, F2FS_I(inode)->i_projid))
		return 0;

	err = mnt_want_write_file(filp);
	if (err)
		return err;

	err = -EPERM;
	inode_lock(inode);

	/* Is it quota file? Do not allow user to mess with it */
	if (IS_NOQUOTA(inode))
		goto out_unlock;

	ipage = get_node_page(sbi, inode->i_ino);
	if (IS_ERR(ipage)) {
		err = PTR_ERR(ipage);
		goto out_unlock;
	}

	if (!F2FS_FITS_IN_INODE(F2FS_INODE(ipage), fi->i_extra_isize,
								i_projid)) {
		err = -EOVERFLOW;
		f2fs_put_page(ipage, 1);
		goto out_unlock;
	}
	f2fs_put_page(ipage, 1);

	dquot_initialize(inode);

	transfer_to[PRJQUOTA] = dqget(sb, make_kqid_projid(kprojid));
	if (!IS_ERR(transfer_to[PRJQUOTA])) {
		err = __dquot_transfer(inode, transfer_to);
		dqput(transfer_to[PRJQUOTA]);
		if (err)
			goto out_dirty;
	}

	F2FS_I(inode)->i_projid = kprojid;
	inode->i_ctime = current_time(inode);
out_dirty:
	f2fs_mark_inode_dirty_sync(inode, true);
out_unlock:
	inode_unlock(inode);
	mnt_drop_write_file(filp);
	return err;
}
#else
static int f2fs_ioc_setproject(struct file *filp, __u32 projid)
{
	if (projid != F2FS_DEF_PROJID)
		return -EOPNOTSUPP;
	return 0;
}
#endif

/* Transfer internal flags to xflags */
static inline __u32 f2fs_iflags_to_xflags(unsigned long iflags)
{
	__u32 xflags = 0;

	if (iflags & FS_SYNC_FL)
		xflags |= FS_XFLAG_SYNC;
	if (iflags & FS_IMMUTABLE_FL)
		xflags |= FS_XFLAG_IMMUTABLE;
	if (iflags & FS_APPEND_FL)
		xflags |= FS_XFLAG_APPEND;
	if (iflags & FS_NODUMP_FL)
		xflags |= FS_XFLAG_NODUMP;
	if (iflags & FS_NOATIME_FL)
		xflags |= FS_XFLAG_NOATIME;
	if (iflags & FS_PROJINHERIT_FL)
		xflags |= FS_XFLAG_PROJINHERIT;
	return xflags;
}

#define F2FS_SUPPORTED_FS_XFLAGS (FS_XFLAG_SYNC | FS_XFLAG_IMMUTABLE | \
				  FS_XFLAG_APPEND | FS_XFLAG_NODUMP | \
				  FS_XFLAG_NOATIME | FS_XFLAG_PROJINHERIT)

/* Flags we can manipulate through FS_IOC_FSSETXATTR */
#define F2FS_FL_XFLAG_VISIBLE		(FS_SYNC_FL | \
					 FS_IMMUTABLE_FL | \
					 FS_APPEND_FL | \
					 FS_NODUMP_FL | \
					 FS_NOATIME_FL | \
					 FS_PROJINHERIT_FL)

/* Transfer xflags flags to internal */
static inline unsigned long f2fs_xflags_to_iflags(__u32 xflags)
{
	unsigned long iflags = 0;

	if (xflags & FS_XFLAG_SYNC)
		iflags |= FS_SYNC_FL;
	if (xflags & FS_XFLAG_IMMUTABLE)
		iflags |= FS_IMMUTABLE_FL;
	if (xflags & FS_XFLAG_APPEND)
		iflags |= FS_APPEND_FL;
	if (xflags & FS_XFLAG_NODUMP)
		iflags |= FS_NODUMP_FL;
	if (xflags & FS_XFLAG_NOATIME)
		iflags |= FS_NOATIME_FL;
	if (xflags & FS_XFLAG_PROJINHERIT)
		iflags |= FS_PROJINHERIT_FL;

	return iflags;
}

static int f2fs_ioc_fsgetxattr(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct fsxattr fa;

	memset(&fa, 0, sizeof(struct fsxattr));
	fa.fsx_xflags = f2fs_iflags_to_xflags(fi->i_flags &
				(FS_FL_USER_VISIBLE | FS_PROJINHERIT_FL));

	if (f2fs_sb_has_project_quota(inode->i_sb))
		fa.fsx_projid = (__u32)from_kprojid(&init_user_ns,
							fi->i_projid);

	if (copy_to_user((struct fsxattr __user *)arg, &fa, sizeof(fa)))
		return -EFAULT;
	return 0;
}

static int f2fs_ioc_fssetxattr(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct fsxattr fa;
	unsigned int flags;
	int err;

	if (copy_from_user(&fa, (struct fsxattr __user *)arg, sizeof(fa)))
		return -EFAULT;

	/* Make sure caller has proper permission */
	if (!inode_owner_or_capable(inode))
		return -EACCES;

	if (fa.fsx_xflags & ~F2FS_SUPPORTED_FS_XFLAGS)
		return -EOPNOTSUPP;

	flags = f2fs_xflags_to_iflags(fa.fsx_xflags);
	if (f2fs_mask_flags(inode->i_mode, flags) != flags)
		return -EOPNOTSUPP;

	err = mnt_want_write_file(filp);
	if (err)
		return err;

	inode_lock(inode);
	flags = (fi->i_flags & ~F2FS_FL_XFLAG_VISIBLE) |
				(flags & F2FS_FL_XFLAG_VISIBLE);
	err = __f2fs_ioc_setflags(inode, flags);
	inode_unlock(inode);
	mnt_drop_write_file(filp);
	if (err)
		return err;

	err = f2fs_ioc_setproject(filp, fa.fsx_projid);
	if (err)
		return err;

	return 0;
}
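
/*
 * User-space usage sketch: FS_IOC_FSGETXATTR / FS_IOC_FSSETXATTR and
 * struct fsxattr come from <linux/fs.h>.  A read-modify-write keeps the
 * xflags intact while changing only the project ID:
 *
 *	struct fsxattr fa;
 *
 *	if (ioctl(fd, FS_IOC_FSGETXATTR, &fa) < 0)
 *		return -1;
 *	fa.fsx_projid = new_projid;	// new_projid: caller-supplied value
 *	return ioctl(fd, FS_IOC_FSSETXATTR, &fa);
 */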

long f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	switch (cmd) {
	case F2FS_IOC_GETFLAGS:
		return f2fs_ioc_getflags(filp, arg);
	case F2FS_IOC_SETFLAGS:
		return f2fs_ioc_setflags(filp, arg);
	case F2FS_IOC_GETVERSION:
		return f2fs_ioc_getversion(filp, arg);
	case F2FS_IOC_START_ATOMIC_WRITE:
		return f2fs_ioc_start_atomic_write(filp);
	case F2FS_IOC_COMMIT_ATOMIC_WRITE:
		return f2fs_ioc_commit_atomic_write(filp);
	case F2FS_IOC_START_VOLATILE_WRITE:
		return f2fs_ioc_start_volatile_write(filp);
	case F2FS_IOC_RELEASE_VOLATILE_WRITE:
		return f2fs_ioc_release_volatile_write(filp);
	case F2FS_IOC_ABORT_VOLATILE_WRITE:
		return f2fs_ioc_abort_volatile_write(filp);
	case F2FS_IOC_SHUTDOWN:
		return f2fs_ioc_shutdown(filp, arg);
	case FITRIM:
		return f2fs_ioc_fitrim(filp, arg);
	case F2FS_IOC_SET_ENCRYPTION_POLICY:
		return f2fs_ioc_set_encryption_policy(filp, arg);
	case F2FS_IOC_GET_ENCRYPTION_POLICY:
		return f2fs_ioc_get_encryption_policy(filp, arg);
	case F2FS_IOC_GET_ENCRYPTION_PWSALT:
		return f2fs_ioc_get_encryption_pwsalt(filp, arg);
	case F2FS_IOC_GARBAGE_COLLECT:
		return f2fs_ioc_gc(filp, arg);
	case F2FS_IOC_GARBAGE_COLLECT_RANGE:
		return f2fs_ioc_gc_range(filp, arg);
	case F2FS_IOC_WRITE_CHECKPOINT:
		return f2fs_ioc_write_checkpoint(filp, arg);
	case F2FS_IOC_DEFRAGMENT:
		return f2fs_ioc_defragment(filp, arg);
	case F2FS_IOC_MOVE_RANGE:
		return f2fs_ioc_move_range(filp, arg);
	case F2FS_IOC_FLUSH_DEVICE:
		return f2fs_ioc_flush_device(filp, arg);
	case F2FS_IOC_GET_FEATURES:
		return f2fs_ioc_get_features(filp, arg);
	case F2FS_IOC_FSGETXATTR:
		return f2fs_ioc_fsgetxattr(filp, arg);
	case F2FS_IOC_FSSETXATTR:
		return f2fs_ioc_fssetxattr(filp, arg);
	default:
		return -ENOTTY;
	}
}

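/*
 * Write entry point: generic_write_checks() validates the request, blocks
 * are preallocated via f2fs_preallocate_blocks() (FI_NO_PREALLOC is set
 * first when the user buffer might fault), and the generic write path then
 * runs under a block plug before generic_write_sync() handles any O_SYNC
 * semantics.
 */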
static ssize_t f2fs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	struct blk_plug plug;
	ssize_t ret;

	inode_lock(inode);
	ret = generic_write_checks(iocb, from);
	if (ret > 0) {
		int err;

		if (iov_iter_fault_in_readable(from, iov_iter_count(from)))
			set_inode_flag(inode, FI_NO_PREALLOC);

		err = f2fs_preallocate_blocks(iocb, from);
		if (err) {
			clear_inode_flag(inode, FI_NO_PREALLOC);
			inode_unlock(inode);
			return err;
		}
		blk_start_plug(&plug);
		ret = __generic_file_write_iter(iocb, from);
		blk_finish_plug(&plug);
		clear_inode_flag(inode, FI_NO_PREALLOC);

		if (ret > 0)
			f2fs_update_iostat(F2FS_I_SB(inode), APP_WRITE_IO, ret);
	}
	inode_unlock(inode);

	if (ret > 0)
		ret = generic_write_sync(iocb, ret);
	return ret;
}

#ifdef CONFIG_COMPAT
long f2fs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	switch (cmd) {
	case F2FS_IOC32_GETFLAGS:
		cmd = F2FS_IOC_GETFLAGS;
		break;
	case F2FS_IOC32_SETFLAGS:
		cmd = F2FS_IOC_SETFLAGS;
		break;
	case F2FS_IOC32_GETVERSION:
		cmd = F2FS_IOC_GETVERSION;
		break;
	case F2FS_IOC_START_ATOMIC_WRITE:
	case F2FS_IOC_COMMIT_ATOMIC_WRITE:
	case F2FS_IOC_START_VOLATILE_WRITE:
	case F2FS_IOC_RELEASE_VOLATILE_WRITE:
	case F2FS_IOC_ABORT_VOLATILE_WRITE:
	case F2FS_IOC_SHUTDOWN:
	case F2FS_IOC_SET_ENCRYPTION_POLICY:
	case F2FS_IOC_GET_ENCRYPTION_PWSALT:
	case F2FS_IOC_GET_ENCRYPTION_POLICY:
	case F2FS_IOC_GARBAGE_COLLECT:
	case F2FS_IOC_GARBAGE_COLLECT_RANGE:
	case F2FS_IOC_WRITE_CHECKPOINT:
	case F2FS_IOC_DEFRAGMENT:
	case F2FS_IOC_MOVE_RANGE:
	case F2FS_IOC_FLUSH_DEVICE:
	case F2FS_IOC_GET_FEATURES:
	case F2FS_IOC_FSGETXATTR:
	case F2FS_IOC_FSSETXATTR:
		break;
	default:
		return -ENOIOCTLCMD;
	}
	return f2fs_ioctl(file, cmd, (unsigned long) compat_ptr(arg));
}
#endif

const struct file_operations f2fs_file_operations = {
	.llseek		= f2fs_llseek,
	.read_iter	= generic_file_read_iter,
	.write_iter	= f2fs_file_write_iter,
	.open		= f2fs_file_open,
	.release	= f2fs_release_file,
	.mmap		= f2fs_file_mmap,
	.flush		= f2fs_file_flush,
	.fsync		= f2fs_sync_file,
	.fallocate	= f2fs_fallocate,
	.unlocked_ioctl	= f2fs_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= f2fs_compat_ioctl,
#endif
	.splice_read	= generic_file_splice_read,
	.splice_write	= iter_file_splice_write,
};