/*
 * fs/f2fs/file.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/stat.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/falloc.h>
#include <linux/types.h>
#include <linux/compat.h>
#include <linux/uaccess.h>
#include <linux/mount.h>
#include <linux/pagevec.h>
#include <linux/uio.h>
#include <linux/uuid.h>
#include <linux/file.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "xattr.h"
#include "acl.h"
#include "gc.h"
#include "trace.h"
#include <trace/events/f2fs.h>

static vm_fault_t f2fs_filemap_fault(struct vm_fault *vmf)
{
	struct inode *inode = file_inode(vmf->vma->vm_file);
	vm_fault_t ret;

	down_read(&F2FS_I(inode)->i_mmap_sem);
	ret = filemap_fault(vmf);
	up_read(&F2FS_I(inode)->i_mmap_sem);

	return ret;
}

static vm_fault_t f2fs_vm_page_mkwrite(struct vm_fault *vmf)
{
	struct page *page = vmf->page;
	struct inode *inode = file_inode(vmf->vma->vm_file);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn;
	int err;

	if (unlikely(f2fs_cp_error(sbi))) {
		err = -EIO;
		goto err;
	}

	sb_start_pagefault(inode->i_sb);

	f2fs_bug_on(sbi, f2fs_has_inline_data(inode));

	/* block allocation */
	f2fs_lock_op(sbi);
	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = f2fs_reserve_block(&dn, page->index);
	if (err) {
		f2fs_unlock_op(sbi);
		goto out;
	}
	f2fs_put_dnode(&dn);
	f2fs_unlock_op(sbi);

	f2fs_balance_fs(sbi, dn.node_changed);

	file_update_time(vmf->vma->vm_file);
	down_read(&F2FS_I(inode)->i_mmap_sem);
	lock_page(page);
	if (unlikely(page->mapping != inode->i_mapping ||
			page_offset(page) > i_size_read(inode) ||
			!PageUptodate(page))) {
		unlock_page(page);
		err = -EFAULT;
		goto out_sem;
	}

	/*
	 * check to see if the page is mapped already (no holes)
	 */
	if (PageMappedToDisk(page))
		goto mapped;

	/* page is wholly or partially inside EOF */
	if (((loff_t)(page->index + 1) << PAGE_SHIFT) >
						i_size_read(inode)) {
		unsigned offset;
		offset = i_size_read(inode) & ~PAGE_MASK;
		zero_user_segment(page, offset, PAGE_SIZE);
	}
	set_page_dirty(page);
	if (!PageUptodate(page))
		SetPageUptodate(page);

	f2fs_update_iostat(sbi, APP_MAPPED_IO, F2FS_BLKSIZE);

	trace_f2fs_vm_page_mkwrite(page, DATA);
mapped:
	/* fill the page */
	f2fs_wait_on_page_writeback(page, DATA, false);

	/* wait for GCed page writeback via META_MAPPING */
	if (f2fs_post_read_required(inode))
		f2fs_wait_on_block_writeback(sbi, dn.data_blkaddr);

out_sem:
	up_read(&F2FS_I(inode)->i_mmap_sem);
out:
	sb_end_pagefault(inode->i_sb);
	f2fs_update_time(sbi, REQ_TIME);
err:
	return block_page_mkwrite_return(err);
}

static const struct vm_operations_struct f2fs_file_vm_ops = {
	.fault		= f2fs_filemap_fault,
	.map_pages	= filemap_map_pages,
	.page_mkwrite	= f2fs_vm_page_mkwrite,
};

static int get_parent_ino(struct inode *inode, nid_t *pino)
{
	struct dentry *dentry;

	inode = igrab(inode);
	dentry = d_find_any_alias(inode);
	iput(inode);
	if (!dentry)
		return 0;

	*pino = parent_ino(dentry);
	dput(dentry);
	return 1;
}

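/*
 * Decide whether this fsync can rely on roll-forward recovery alone or
 * must trigger a full checkpoint; returns the reason as a cp_reason_type.
 */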
static inline enum cp_reason_type need_do_checkpoint(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	enum cp_reason_type cp_reason = CP_NO_NEEDED;

	if (!S_ISREG(inode->i_mode))
		cp_reason = CP_NON_REGULAR;
	else if (inode->i_nlink != 1)
		cp_reason = CP_HARDLINK;
	else if (is_sbi_flag_set(sbi, SBI_NEED_CP))
		cp_reason = CP_SB_NEED_CP;
	else if (file_wrong_pino(inode))
		cp_reason = CP_WRONG_PINO;
	else if (!space_for_roll_forward(sbi))
		cp_reason = CP_NO_SPC_ROLL;
	else if (!is_checkpointed_node(sbi, F2FS_I(inode)->i_pino))
		cp_reason = CP_NODE_NEED_CP;
	else if (test_opt(sbi, FASTBOOT))
		cp_reason = CP_FASTBOOT_MODE;
	else if (F2FS_OPTION(sbi).active_logs == 2)
		cp_reason = CP_SPEC_LOG_NUM;
	else if (F2FS_OPTION(sbi).fsync_mode == FSYNC_MODE_STRICT &&
		need_dentry_mark(sbi, inode->i_ino) &&
		exist_written_data(sbi, F2FS_I(inode)->i_pino, TRANS_DIR_INO))
		cp_reason = CP_RECOVER_DIR;

	return cp_reason;
}

static bool need_inode_page_update(struct f2fs_sb_info *sbi, nid_t ino)
{
	struct page *i = find_get_page(NODE_MAPPING(sbi), ino);
	bool ret = false;
	/* but we must not skip fsync while inode updates are still pending */
	if ((i && PageDirty(i)) || need_inode_block_update(sbi, ino))
		ret = true;
	f2fs_put_page(i, 0);
	return ret;
}

static void try_to_fix_pino(struct inode *inode)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);
	nid_t pino;

	down_write(&fi->i_sem);
	if (file_wrong_pino(inode) && inode->i_nlink == 1 &&
			get_parent_ino(inode, &pino)) {
		f2fs_i_pino_write(inode, pino);
		file_got_pino(inode);
	}
	up_write(&fi->i_sem);
}

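/*
 * Write back dirty data and node pages of this inode and, depending on
 * need_do_checkpoint(), either issue a checkpoint or record roll-forward
 * recovery info, optionally followed by a flush command to the device.
 */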
static int f2fs_do_sync_file(struct file *file, loff_t start, loff_t end,
						int datasync, bool atomic)
{
	struct inode *inode = file->f_mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	nid_t ino = inode->i_ino;
	int ret = 0;
	enum cp_reason_type cp_reason = 0;
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = LONG_MAX,
		.for_reclaim = 0,
	};

	if (unlikely(f2fs_readonly(inode->i_sb)))
		return 0;

	trace_f2fs_sync_file_enter(inode);

	/* if fdatasync is triggered, let's do in-place-update */
	if (datasync || get_dirty_pages(inode) <= SM_I(sbi)->min_fsync_blocks)
		set_inode_flag(inode, FI_NEED_IPU);
	ret = file_write_and_wait_range(file, start, end);
	clear_inode_flag(inode, FI_NEED_IPU);

	if (ret) {
		trace_f2fs_sync_file_exit(inode, cp_reason, datasync, ret);
		return ret;
	}

	/* if the inode is dirty, let's recover all the time */
	if (!f2fs_skip_inode_update(inode, datasync)) {
		f2fs_write_inode(inode, NULL);
		goto go_write;
	}

	/*
	 * if there is no written data, don't waste time writing recovery info.
	 */
	if (!is_inode_flag_set(inode, FI_APPEND_WRITE) &&
			!exist_written_data(sbi, ino, APPEND_INO)) {

		/* it may call write_inode just prior to fsync */
		if (need_inode_page_update(sbi, ino))
			goto go_write;

		if (is_inode_flag_set(inode, FI_UPDATE_WRITE) ||
				exist_written_data(sbi, ino, UPDATE_INO))
			goto flush_out;
		goto out;
	}
go_write:
	/*
	 * Both fdatasync() and fsync() can be recovered from a
	 * sudden power-off.
	 */
	down_read(&F2FS_I(inode)->i_sem);
	cp_reason = need_do_checkpoint(inode);
	up_read(&F2FS_I(inode)->i_sem);

	if (cp_reason) {
		/* all the dirty node pages should be flushed for POR */
		ret = f2fs_sync_fs(inode->i_sb, 1);

		/*
		 * We've secured consistency through sync_fs. Following pino
		 * will be used only for fsynced inodes after checkpoint.
		 */
		try_to_fix_pino(inode);
		clear_inode_flag(inode, FI_APPEND_WRITE);
		clear_inode_flag(inode, FI_UPDATE_WRITE);
		goto out;
	}
sync_nodes:
	ret = fsync_node_pages(sbi, inode, &wbc, atomic);
	if (ret)
		goto out;

	/* if cp_error was enabled, we should avoid an infinite loop */
	if (unlikely(f2fs_cp_error(sbi))) {
		ret = -EIO;
		goto out;
	}

	if (need_inode_block_update(sbi, ino)) {
		f2fs_mark_inode_dirty_sync(inode, true);
		f2fs_write_inode(inode, NULL);
		goto sync_nodes;
	}

	/*
	 * If it's atomic_write, it's just fine to keep write ordering. So
	 * here we don't need to wait for node write completion, since we use
	 * node chain which serializes node blocks. If one of the node writes is
	 * reordered, we can simply see a broken chain, resulting in stopping
	 * roll-forward recovery. It means we'll recover all or none node blocks
	 * given fsync mark.
	 */
	if (!atomic) {
		ret = wait_on_node_pages_writeback(sbi, ino);
		if (ret)
			goto out;
	}

	/* once recovery info is written, don't need to track this */
	remove_ino_entry(sbi, ino, APPEND_INO);
	clear_inode_flag(inode, FI_APPEND_WRITE);
flush_out:
	if (!atomic && F2FS_OPTION(sbi).fsync_mode != FSYNC_MODE_NOBARRIER)
		ret = f2fs_issue_flush(sbi, inode->i_ino);
	if (!ret) {
		remove_ino_entry(sbi, ino, UPDATE_INO);
		clear_inode_flag(inode, FI_UPDATE_WRITE);
		remove_ino_entry(sbi, ino, FLUSH_INO);
	}
	f2fs_update_time(sbi, REQ_TIME);
out:
	trace_f2fs_sync_file_exit(inode, cp_reason, datasync, ret);
	f2fs_trace_ios(NULL, 1);
	return ret;
}

int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
{
	if (unlikely(f2fs_cp_error(F2FS_I_SB(file_inode(file)))))
		return -EIO;
	return f2fs_do_sync_file(file, start, end, datasync, false);
}

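/* Find the first dirty page index at or after pgofs (SEEK_DATA only). */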
static pgoff_t __get_first_dirty_index(struct address_space *mapping,
						pgoff_t pgofs, int whence)
{
	struct page *page;
	int nr_pages;

	if (whence != SEEK_DATA)
		return 0;

	/* find first dirty page index */
	nr_pages = find_get_pages_tag(mapping, &pgofs, PAGECACHE_TAG_DIRTY,
				      1, &page);
	if (!nr_pages)
		return ULONG_MAX;
	pgofs = page->index;
	put_page(page);
	return pgofs;
}

static bool __found_offset(block_t blkaddr, pgoff_t dirty, pgoff_t pgofs,
							int whence)
{
	switch (whence) {
	case SEEK_DATA:
		if ((blkaddr == NEW_ADDR && dirty == pgofs) ||
			(blkaddr != NEW_ADDR && blkaddr != NULL_ADDR))
			return true;
		break;
	case SEEK_HOLE:
		if (blkaddr == NULL_ADDR)
			return true;
		break;
	}
	return false;
}

static loff_t f2fs_seek_block(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file->f_mapping->host;
	loff_t maxbytes = inode->i_sb->s_maxbytes;
	struct dnode_of_data dn;
	pgoff_t pgofs, end_offset, dirty;
	loff_t data_ofs = offset;
	loff_t isize;
	int err = 0;

	inode_lock(inode);

	isize = i_size_read(inode);
	if (offset >= isize)
		goto fail;

	/* handle inline data case */
	if (f2fs_has_inline_data(inode) || f2fs_has_inline_dentry(inode)) {
		if (whence == SEEK_HOLE)
			data_ofs = isize;
		goto found;
	}

	pgofs = (pgoff_t)(offset >> PAGE_SHIFT);

	dirty = __get_first_dirty_index(inode->i_mapping, pgofs, whence);

	for (; data_ofs < isize; data_ofs = (loff_t)pgofs << PAGE_SHIFT) {
		set_new_dnode(&dn, inode, NULL, NULL, 0);
		err = get_dnode_of_data(&dn, pgofs, LOOKUP_NODE);
		if (err && err != -ENOENT) {
			goto fail;
		} else if (err == -ENOENT) {
			/* direct node does not exist */
			if (whence == SEEK_DATA) {
				pgofs = get_next_page_offset(&dn, pgofs);
				continue;
			} else {
				goto found;
			}
		}

		end_offset = ADDRS_PER_PAGE(dn.node_page, inode);

		/* find data/hole in dnode block */
		for (; dn.ofs_in_node < end_offset;
				dn.ofs_in_node++, pgofs++,
				data_ofs = (loff_t)pgofs << PAGE_SHIFT) {
			block_t blkaddr;
			blkaddr = datablock_addr(dn.inode,
					dn.node_page, dn.ofs_in_node);

			if (__found_offset(blkaddr, dirty, pgofs, whence)) {
				f2fs_put_dnode(&dn);
				goto found;
			}
		}
		f2fs_put_dnode(&dn);
	}

	if (whence == SEEK_DATA)
		goto fail;
found:
	if (whence == SEEK_HOLE && data_ofs > isize)
		data_ofs = isize;
	inode_unlock(inode);
	return vfs_setpos(file, data_ofs, maxbytes);
fail:
	inode_unlock(inode);
	return -ENXIO;
}

static loff_t f2fs_llseek(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file->f_mapping->host;
	loff_t maxbytes = inode->i_sb->s_maxbytes;

	switch (whence) {
	case SEEK_SET:
	case SEEK_CUR:
	case SEEK_END:
		return generic_file_llseek_size(file, offset, whence,
						maxbytes, i_size_read(inode));
	case SEEK_DATA:
	case SEEK_HOLE:
		if (offset < 0)
			return -ENXIO;
		return f2fs_seek_block(file, offset, whence);
	}

	return -EINVAL;
}

static int f2fs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct inode *inode = file_inode(file);
	int err;

	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
		return -EIO;

	/* we don't need to use inline_data strictly */
	err = f2fs_convert_inline_inode(inode);
	if (err)
		return err;

	file_accessed(file);
	vma->vm_ops = &f2fs_file_vm_ops;
	return 0;
}

static int f2fs_file_open(struct inode *inode, struct file *filp)
{
	int err = fscrypt_file_open(inode, filp);

	if (err)
		return err;

	filp->f_mode |= FMODE_NOWAIT;

	return dquot_file_open(inode, filp);
}

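/*
 * Invalidate up to @count block addresses in the dnode starting at
 * dn->ofs_in_node, then fix up the extent cache and block counters.
 */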
void truncate_data_blocks_range(struct dnode_of_data *dn, int count)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	struct f2fs_node *raw_node;
	int nr_free = 0, ofs = dn->ofs_in_node, len = count;
	__le32 *addr;
	int base = 0;

	if (IS_INODE(dn->node_page) && f2fs_has_extra_attr(dn->inode))
		base = get_extra_isize(dn->inode);

	raw_node = F2FS_NODE(dn->node_page);
	addr = blkaddr_in_node(raw_node) + base + ofs;

	for (; count > 0; count--, addr++, dn->ofs_in_node++) {
		block_t blkaddr = le32_to_cpu(*addr);
		if (blkaddr == NULL_ADDR)
			continue;

		dn->data_blkaddr = NULL_ADDR;
		set_data_blkaddr(dn);
		invalidate_blocks(sbi, blkaddr);
		if (dn->ofs_in_node == 0 && IS_INODE(dn->node_page))
			clear_inode_flag(dn->inode, FI_FIRST_BLOCK_WRITTEN);
		nr_free++;
	}

	if (nr_free) {
		pgoff_t fofs;
		/*
		 * once we invalidate valid blkaddr in range [ofs, ofs + count],
		 * we will invalidate all blkaddr in the whole range.
		 */
		fofs = start_bidx_of_node(ofs_of_node(dn->node_page),
							dn->inode) + ofs;
		f2fs_update_extent_cache_range(dn, fofs, 0, len);
		dec_valid_block_count(sbi, dn->inode, nr_free);
	}
	dn->ofs_in_node = ofs;

	f2fs_update_time(sbi, REQ_TIME);
	trace_f2fs_truncate_data_blocks_range(dn->inode, dn->nid,
					 dn->ofs_in_node, nr_free);
}

void truncate_data_blocks(struct dnode_of_data *dn)
{
	truncate_data_blocks_range(dn, ADDRS_PER_BLOCK);
}

static int truncate_partial_data_page(struct inode *inode, u64 from,
								bool cache_only)
{
	unsigned offset = from & (PAGE_SIZE - 1);
	pgoff_t index = from >> PAGE_SHIFT;
	struct address_space *mapping = inode->i_mapping;
	struct page *page;

	if (!offset && !cache_only)
		return 0;

	if (cache_only) {
		page = find_lock_page(mapping, index);
		if (page && PageUptodate(page))
			goto truncate_out;
		f2fs_put_page(page, 1);
		return 0;
	}

	page = get_lock_data_page(inode, index, true);
	if (IS_ERR(page))
		return PTR_ERR(page) == -ENOENT ? 0 : PTR_ERR(page);
truncate_out:
	f2fs_wait_on_page_writeback(page, DATA, true);
	zero_user(page, offset, PAGE_SIZE - offset);

	/* An encrypted inode should have a key and truncate the last page. */
	f2fs_bug_on(F2FS_I_SB(inode), cache_only && f2fs_encrypted_inode(inode));
	if (!cache_only)
		set_page_dirty(page);
	f2fs_put_page(page, 1);
	return 0;
}

int truncate_blocks(struct inode *inode, u64 from, bool lock)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn;
	pgoff_t free_from;
	int count = 0, err = 0;
	struct page *ipage;
	bool truncate_page = false;

	trace_f2fs_truncate_blocks_enter(inode, from);

	free_from = (pgoff_t)F2FS_BLK_ALIGN(from);

	if (free_from >= sbi->max_file_blocks)
		goto free_partial;

	if (lock)
		f2fs_lock_op(sbi);

	ipage = get_node_page(sbi, inode->i_ino);
	if (IS_ERR(ipage)) {
		err = PTR_ERR(ipage);
		goto out;
	}

	if (f2fs_has_inline_data(inode)) {
		truncate_inline_inode(inode, ipage, from);
		f2fs_put_page(ipage, 1);
		truncate_page = true;
		goto out;
	}

	set_new_dnode(&dn, inode, ipage, NULL, 0);
	err = get_dnode_of_data(&dn, free_from, LOOKUP_NODE_RA);
	if (err) {
		if (err == -ENOENT)
			goto free_next;
		goto out;
	}

	count = ADDRS_PER_PAGE(dn.node_page, inode);

	count -= dn.ofs_in_node;
	f2fs_bug_on(sbi, count < 0);

	if (dn.ofs_in_node || IS_INODE(dn.node_page)) {
		truncate_data_blocks_range(&dn, count);
		free_from += count;
	}

	f2fs_put_dnode(&dn);
free_next:
	err = truncate_inode_blocks(inode, free_from);
out:
	if (lock)
		f2fs_unlock_op(sbi);
free_partial:
	/* lastly zero out the first data page */
	if (!err)
		err = truncate_partial_data_page(inode, from, truncate_page);

	trace_f2fs_truncate_blocks_exit(inode, err);
	return err;
}

int f2fs_truncate(struct inode *inode)
{
	int err;

	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
		return -EIO;

	if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
				S_ISLNK(inode->i_mode)))
		return 0;

	trace_f2fs_truncate(inode);

#ifdef CONFIG_F2FS_FAULT_INJECTION
	if (time_to_inject(F2FS_I_SB(inode), FAULT_TRUNCATE)) {
		f2fs_show_injection_info(FAULT_TRUNCATE);
		return -EIO;
	}
#endif
	/* we should check inline_data size */
	if (!f2fs_may_inline_data(inode)) {
		err = f2fs_convert_inline_inode(inode);
		if (err)
			return err;
	}

	err = truncate_blocks(inode, i_size_read(inode), true);
	if (err)
		return err;

	inode->i_mtime = inode->i_ctime = current_time(inode);
	f2fs_mark_inode_dirty_sync(inode, false);
	return 0;
}

int f2fs_getattr(const struct path *path, struct kstat *stat,
		 u32 request_mask, unsigned int query_flags)
{
	struct inode *inode = d_inode(path->dentry);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct f2fs_inode *ri;
	unsigned int flags;

	if (f2fs_has_extra_attr(inode) &&
			f2fs_sb_has_inode_crtime(inode->i_sb) &&
			F2FS_FITS_IN_INODE(ri, fi->i_extra_isize, i_crtime)) {
		stat->result_mask |= STATX_BTIME;
		stat->btime.tv_sec = fi->i_crtime.tv_sec;
		stat->btime.tv_nsec = fi->i_crtime.tv_nsec;
	}

	flags = fi->i_flags & F2FS_FL_USER_VISIBLE;
	if (flags & F2FS_APPEND_FL)
		stat->attributes |= STATX_ATTR_APPEND;
	if (flags & F2FS_COMPR_FL)
		stat->attributes |= STATX_ATTR_COMPRESSED;
	if (f2fs_encrypted_inode(inode))
		stat->attributes |= STATX_ATTR_ENCRYPTED;
	if (flags & F2FS_IMMUTABLE_FL)
		stat->attributes |= STATX_ATTR_IMMUTABLE;
	if (flags & F2FS_NODUMP_FL)
		stat->attributes |= STATX_ATTR_NODUMP;

	stat->attributes_mask |= (STATX_ATTR_APPEND |
				  STATX_ATTR_COMPRESSED |
				  STATX_ATTR_ENCRYPTED |
				  STATX_ATTR_IMMUTABLE |
				  STATX_ATTR_NODUMP);

	generic_fillattr(inode, stat);

	/* we need to show initial sectors used for inline_data/dentries */
	if ((S_ISREG(inode->i_mode) && f2fs_has_inline_data(inode)) ||
					f2fs_has_inline_dentry(inode))
		stat->blocks += (stat->size + 511) >> 9;

	return 0;
}

#ifdef CONFIG_F2FS_FS_POSIX_ACL
static void __setattr_copy(struct inode *inode, const struct iattr *attr)
{
	unsigned int ia_valid = attr->ia_valid;

	if (ia_valid & ATTR_UID)
		inode->i_uid = attr->ia_uid;
	if (ia_valid & ATTR_GID)
		inode->i_gid = attr->ia_gid;
	if (ia_valid & ATTR_ATIME)
		inode->i_atime = timespec_trunc(attr->ia_atime,
						inode->i_sb->s_time_gran);
	if (ia_valid & ATTR_MTIME)
		inode->i_mtime = timespec_trunc(attr->ia_mtime,
						inode->i_sb->s_time_gran);
	if (ia_valid & ATTR_CTIME)
		inode->i_ctime = timespec_trunc(attr->ia_ctime,
						inode->i_sb->s_time_gran);
	if (ia_valid & ATTR_MODE) {
		umode_t mode = attr->ia_mode;

		if (!in_group_p(inode->i_gid) && !capable(CAP_FSETID))
			mode &= ~S_ISGID;
		set_acl_inode(inode, mode);
	}
}
#else
#define __setattr_copy setattr_copy
#endif

int f2fs_setattr(struct dentry *dentry, struct iattr *attr)
{
749
	struct inode *inode = d_inode(dentry);
J
Jaegeuk Kim 已提交
750
	int err;
751
	bool size_changed = false;
J
Jaegeuk Kim 已提交
752

753 754 755
	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
		return -EIO;

756
	err = setattr_prepare(dentry, attr);
J
Jaegeuk Kim 已提交
757 758 759
	if (err)
		return err;

760 761 762 763
	err = fscrypt_prepare_setattr(dentry, attr);
	if (err)
		return err;

	if (is_quota_modification(inode, attr)) {
		err = dquot_initialize(inode);
		if (err)
			return err;
	}
	if ((attr->ia_valid & ATTR_UID &&
		!uid_eq(attr->ia_uid, inode->i_uid)) ||
		(attr->ia_valid & ATTR_GID &&
		!gid_eq(attr->ia_gid, inode->i_gid))) {
		err = dquot_transfer(inode, attr);
		if (err)
			return err;
	}

	if (attr->ia_valid & ATTR_SIZE) {
		if (attr->ia_size <= i_size_read(inode)) {
			down_write(&F2FS_I(inode)->i_mmap_sem);
			truncate_setsize(inode, attr->ia_size);
			err = f2fs_truncate(inode);
			up_write(&F2FS_I(inode)->i_mmap_sem);
			if (err)
				return err;
		} else {
			/*
			 * do not trim all blocks after i_size if target size is
			 * larger than i_size.
			 */
			down_write(&F2FS_I(inode)->i_mmap_sem);
			truncate_setsize(inode, attr->ia_size);
			up_write(&F2FS_I(inode)->i_mmap_sem);

			/* should convert inline inode here */
			if (!f2fs_may_inline_data(inode)) {
				err = f2fs_convert_inline_inode(inode);
				if (err)
					return err;
			}
			inode->i_mtime = inode->i_ctime = current_time(inode);
		}

		down_write(&F2FS_I(inode)->i_sem);
		F2FS_I(inode)->last_disk_size = i_size_read(inode);
		up_write(&F2FS_I(inode)->i_sem);

		size_changed = true;
	}

	__setattr_copy(inode, attr);

	if (attr->ia_valid & ATTR_MODE) {
		err = posix_acl_chmod(inode, get_inode_mode(inode));
		if (err || is_inode_flag_set(inode, FI_ACL_MODE)) {
			inode->i_mode = F2FS_I(inode)->i_acl_mode;
			clear_inode_flag(inode, FI_ACL_MODE);
		}
	}

	/* file size may have changed here */
	f2fs_mark_inode_dirty_sync(inode, size_changed);

	/* inode change will produce dirty node pages flushed by checkpoint */
	f2fs_balance_fs(F2FS_I_SB(inode), true);

	return err;
}

const struct inode_operations f2fs_file_inode_operations = {
	.getattr	= f2fs_getattr,
	.setattr	= f2fs_setattr,
	.get_acl	= f2fs_get_acl,
	.set_acl	= f2fs_set_acl,
#ifdef CONFIG_F2FS_FS_XATTR
	.listxattr	= f2fs_listxattr,
#endif
	.fiemap		= f2fs_fiemap,
};

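/* Zero out @len bytes of the block at @index, starting at offset @start. */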
static int fill_zero(struct inode *inode, pgoff_t index,
					loff_t start, loff_t len)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct page *page;

	if (!len)
		return 0;

	f2fs_balance_fs(sbi, true);

	f2fs_lock_op(sbi);
	page = get_new_data_page(inode, NULL, index, false);
	f2fs_unlock_op(sbi);

	if (IS_ERR(page))
		return PTR_ERR(page);

	f2fs_wait_on_page_writeback(page, DATA, true);
	zero_user(page, start, len);
	set_page_dirty(page);
	f2fs_put_page(page, 1);
	return 0;
}

int truncate_hole(struct inode *inode, pgoff_t pg_start, pgoff_t pg_end)
{
	int err;

	while (pg_start < pg_end) {
		struct dnode_of_data dn;
		pgoff_t end_offset, count;

		set_new_dnode(&dn, inode, NULL, NULL, 0);
		err = get_dnode_of_data(&dn, pg_start, LOOKUP_NODE);
		if (err) {
			if (err == -ENOENT) {
				pg_start = get_next_page_offset(&dn, pg_start);
				continue;
			}
			return err;
		}

		end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
		count = min(end_offset - dn.ofs_in_node, pg_end - pg_start);

		f2fs_bug_on(F2FS_I_SB(inode), count == 0 || count > end_offset);

		truncate_data_blocks_range(&dn, count);
		f2fs_put_dnode(&dn);

		pg_start += count;
	}
	return 0;
}

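/*
 * Zero the partial head/tail blocks of the hole and truncate the fully
 * covered blocks in between.
 */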
static int punch_hole(struct inode *inode, loff_t offset, loff_t len)
{
	pgoff_t pg_start, pg_end;
	loff_t off_start, off_end;
	int ret;

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		return ret;

	pg_start = ((unsigned long long) offset) >> PAGE_SHIFT;
	pg_end = ((unsigned long long) offset + len) >> PAGE_SHIFT;

	off_start = offset & (PAGE_SIZE - 1);
	off_end = (offset + len) & (PAGE_SIZE - 1);

	if (pg_start == pg_end) {
		ret = fill_zero(inode, pg_start, off_start,
						off_end - off_start);
		if (ret)
			return ret;
	} else {
		if (off_start) {
			ret = fill_zero(inode, pg_start++, off_start,
						PAGE_SIZE - off_start);
			if (ret)
				return ret;
		}
		if (off_end) {
			ret = fill_zero(inode, pg_end, 0, off_end);
			if (ret)
				return ret;
		}

		if (pg_start < pg_end) {
			struct address_space *mapping = inode->i_mapping;
			loff_t blk_start, blk_end;
			struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

			f2fs_balance_fs(sbi, true);

			blk_start = (loff_t)pg_start << PAGE_SHIFT;
			blk_end = (loff_t)pg_end << PAGE_SHIFT;
			down_write(&F2FS_I(inode)->i_mmap_sem);
			truncate_inode_pages_range(mapping, blk_start,
					blk_end - 1);

			f2fs_lock_op(sbi);
			ret = truncate_hole(inode, pg_start, pg_end);
			f2fs_unlock_op(sbi);
			up_write(&F2FS_I(inode)->i_mmap_sem);
		}
	}

	return ret;
}

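/*
 * Read out the block addresses of @len blocks starting at @off and mark
 * the ones that can be moved by address replacement (do_replace) instead
 * of being copied through the page cache.
 */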
static int __read_out_blkaddrs(struct inode *inode, block_t *blkaddr,
				int *do_replace, pgoff_t off, pgoff_t len)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn;
	int ret, done, i;

next_dnode:
	set_new_dnode(&dn, inode, NULL, NULL, 0);
	ret = get_dnode_of_data(&dn, off, LOOKUP_NODE_RA);
	if (ret && ret != -ENOENT) {
		return ret;
	} else if (ret == -ENOENT) {
		if (dn.max_level == 0)
			return -ENOENT;
		done = min((pgoff_t)ADDRS_PER_BLOCK - dn.ofs_in_node, len);
		blkaddr += done;
		do_replace += done;
		goto next;
	}

	done = min((pgoff_t)ADDRS_PER_PAGE(dn.node_page, inode) -
							dn.ofs_in_node, len);
	for (i = 0; i < done; i++, blkaddr++, do_replace++, dn.ofs_in_node++) {
		*blkaddr = datablock_addr(dn.inode,
					dn.node_page, dn.ofs_in_node);
		if (!is_checkpointed_data(sbi, *blkaddr)) {

			if (test_opt(sbi, LFS)) {
				f2fs_put_dnode(&dn);
				return -ENOTSUPP;
			}

			/* do not invalidate this block address */
			f2fs_update_data_blkaddr(&dn, NULL_ADDR);
			*do_replace = 1;
		}
	}
	f2fs_put_dnode(&dn);
next:
	len -= done;
	off += done;
	if (len)
		goto next_dnode;
	return 0;
}

static int __roll_back_blkaddrs(struct inode *inode, block_t *blkaddr,
				int *do_replace, pgoff_t off, int len)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn;
	int ret, i;

	for (i = 0; i < len; i++, do_replace++, blkaddr++) {
		if (*do_replace == 0)
			continue;

		set_new_dnode(&dn, inode, NULL, NULL, 0);
		ret = get_dnode_of_data(&dn, off + i, LOOKUP_NODE_RA);
		if (ret) {
			dec_valid_block_count(sbi, inode, 1);
			invalidate_blocks(sbi, *blkaddr);
		} else {
			f2fs_update_data_blkaddr(&dn, *blkaddr);
		}
		f2fs_put_dnode(&dn);
	}
	return 0;
}

static int __clone_blkaddrs(struct inode *src_inode, struct inode *dst_inode,
			block_t *blkaddr, int *do_replace,
			pgoff_t src, pgoff_t dst, pgoff_t len, bool full)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(src_inode);
	pgoff_t i = 0;
	int ret;

	while (i < len) {
		if (blkaddr[i] == NULL_ADDR && !full) {
			i++;
			continue;
		}

		if (do_replace[i] || blkaddr[i] == NULL_ADDR) {
			struct dnode_of_data dn;
			struct node_info ni;
			size_t new_size;
			pgoff_t ilen;

			set_new_dnode(&dn, dst_inode, NULL, NULL, 0);
			ret = get_dnode_of_data(&dn, dst + i, ALLOC_NODE);
			if (ret)
				return ret;

			get_node_info(sbi, dn.nid, &ni);
			ilen = min((pgoff_t)
				ADDRS_PER_PAGE(dn.node_page, dst_inode) -
						dn.ofs_in_node, len - i);
			do {
				dn.data_blkaddr = datablock_addr(dn.inode,
						dn.node_page, dn.ofs_in_node);
				truncate_data_blocks_range(&dn, 1);

				if (do_replace[i]) {
					f2fs_i_blocks_write(src_inode,
							1, false, false);
					f2fs_i_blocks_write(dst_inode,
							1, true, false);
					f2fs_replace_block(sbi, &dn, dn.data_blkaddr,
					blkaddr[i], ni.version, true, false);

					do_replace[i] = 0;
				}
				dn.ofs_in_node++;
				i++;
				new_size = (dst + i) << PAGE_SHIFT;
				if (dst_inode->i_size < new_size)
					f2fs_i_size_write(dst_inode, new_size);
			} while (--ilen && (do_replace[i] || blkaddr[i] == NULL_ADDR));

			f2fs_put_dnode(&dn);
		} else {
			struct page *psrc, *pdst;

			psrc = get_lock_data_page(src_inode, src + i, true);
			if (IS_ERR(psrc))
				return PTR_ERR(psrc);
			pdst = get_new_data_page(dst_inode, NULL, dst + i,
								true);
			if (IS_ERR(pdst)) {
				f2fs_put_page(psrc, 1);
				return PTR_ERR(pdst);
			}
			f2fs_copy_page(psrc, pdst);
			set_page_dirty(pdst);
			f2fs_put_page(pdst, 1);
			f2fs_put_page(psrc, 1);

			ret = truncate_hole(src_inode, src + i, src + i + 1);
			if (ret)
				return ret;
			i++;
		}
	}
	return 0;
}

static int __exchange_data_block(struct inode *src_inode,
			struct inode *dst_inode, pgoff_t src, pgoff_t dst,
			pgoff_t len, bool full)
{
	block_t *src_blkaddr;
	int *do_replace;
	pgoff_t olen;
	int ret;

	while (len) {
		olen = min((pgoff_t)4 * ADDRS_PER_BLOCK, len);

		src_blkaddr = f2fs_kvzalloc(F2FS_I_SB(src_inode),
					sizeof(block_t) * olen, GFP_KERNEL);
		if (!src_blkaddr)
			return -ENOMEM;

		do_replace = f2fs_kvzalloc(F2FS_I_SB(src_inode),
					sizeof(int) * olen, GFP_KERNEL);
		if (!do_replace) {
			kvfree(src_blkaddr);
			return -ENOMEM;
		}

		ret = __read_out_blkaddrs(src_inode, src_blkaddr,
					do_replace, src, olen);
		if (ret)
			goto roll_back;

		ret = __clone_blkaddrs(src_inode, dst_inode, src_blkaddr,
					do_replace, src, dst, olen, full);
		if (ret)
			goto roll_back;

		src += olen;
		dst += olen;
		len -= olen;

		kvfree(src_blkaddr);
		kvfree(do_replace);
	}
	return 0;

roll_back:
	__roll_back_blkaddrs(src_inode, src_blkaddr, do_replace, src, len);
	kvfree(src_blkaddr);
	kvfree(do_replace);
	return ret;
}

static int f2fs_do_collapse(struct inode *inode, pgoff_t start, pgoff_t end)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	pgoff_t nrpages = (i_size_read(inode) + PAGE_SIZE - 1) / PAGE_SIZE;
	int ret;

	f2fs_balance_fs(sbi, true);
	f2fs_lock_op(sbi);

	f2fs_drop_extent_tree(inode);

	ret = __exchange_data_block(inode, inode, end, start, nrpages - end, true);
	f2fs_unlock_op(sbi);
	return ret;
}

static int f2fs_collapse_range(struct inode *inode, loff_t offset, loff_t len)
{
	pgoff_t pg_start, pg_end;
	loff_t new_size;
	int ret;

	if (offset + len >= i_size_read(inode))
		return -EINVAL;

	/* collapse range should be aligned to block size of f2fs. */
	if (offset & (F2FS_BLKSIZE - 1) || len & (F2FS_BLKSIZE - 1))
		return -EINVAL;

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		return ret;

	pg_start = offset >> PAGE_SHIFT;
	pg_end = (offset + len) >> PAGE_SHIFT;

	/* avoid gc operation during block exchange */
	down_write(&F2FS_I(inode)->dio_rwsem[WRITE]);

	down_write(&F2FS_I(inode)->i_mmap_sem);
	/* write out all dirty pages from offset */
	ret = filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
	if (ret)
		goto out_unlock;

	truncate_pagecache(inode, offset);

	ret = f2fs_do_collapse(inode, pg_start, pg_end);
	if (ret)
		goto out_unlock;

	/* write out all moved pages, if possible */
	filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
	truncate_pagecache(inode, offset);

	new_size = i_size_read(inode) - len;
	truncate_pagecache(inode, new_size);

	ret = truncate_blocks(inode, new_size, true);
	if (!ret)
		f2fs_i_size_write(inode, new_size);
out_unlock:
	up_write(&F2FS_I(inode)->i_mmap_sem);
	up_write(&F2FS_I(inode)->dio_rwsem[WRITE]);
	return ret;
}

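/*
 * Reserve blocks for [start, end) in this dnode and convert any existing
 * block addresses to NEW_ADDR so the range reads back as zeroes.
 */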
static int f2fs_do_zero_range(struct dnode_of_data *dn, pgoff_t start,
								pgoff_t end)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	pgoff_t index = start;
	unsigned int ofs_in_node = dn->ofs_in_node;
	blkcnt_t count = 0;
	int ret;

	for (; index < end; index++, dn->ofs_in_node++) {
		if (datablock_addr(dn->inode, dn->node_page,
					dn->ofs_in_node) == NULL_ADDR)
			count++;
	}

	dn->ofs_in_node = ofs_in_node;
	ret = reserve_new_blocks(dn, count);
	if (ret)
		return ret;

	dn->ofs_in_node = ofs_in_node;
	for (index = start; index < end; index++, dn->ofs_in_node++) {
		dn->data_blkaddr = datablock_addr(dn->inode,
					dn->node_page, dn->ofs_in_node);
		/*
		 * reserve_new_blocks will not guarantee entire block
		 * allocation.
		 */
		if (dn->data_blkaddr == NULL_ADDR) {
			ret = -ENOSPC;
			break;
		}
		if (dn->data_blkaddr != NEW_ADDR) {
			invalidate_blocks(sbi, dn->data_blkaddr);
			dn->data_blkaddr = NEW_ADDR;
			set_data_blkaddr(dn);
		}
	}

	f2fs_update_extent_cache_range(dn, start, 0, index - start);

	return ret;
}

static int f2fs_zero_range(struct inode *inode, loff_t offset, loff_t len,
								int mode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct address_space *mapping = inode->i_mapping;
	pgoff_t index, pg_start, pg_end;
	loff_t new_size = i_size_read(inode);
	loff_t off_start, off_end;
	int ret = 0;

	ret = inode_newsize_ok(inode, (len + offset));
	if (ret)
		return ret;

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		return ret;

	down_write(&F2FS_I(inode)->i_mmap_sem);
	ret = filemap_write_and_wait_range(mapping, offset, offset + len - 1);
	if (ret)
		goto out_sem;

	truncate_pagecache_range(inode, offset, offset + len - 1);

	pg_start = ((unsigned long long) offset) >> PAGE_SHIFT;
	pg_end = ((unsigned long long) offset + len) >> PAGE_SHIFT;

	off_start = offset & (PAGE_SIZE - 1);
	off_end = (offset + len) & (PAGE_SIZE - 1);

	if (pg_start == pg_end) {
		ret = fill_zero(inode, pg_start, off_start,
						off_end - off_start);
		if (ret)
			goto out_sem;

		new_size = max_t(loff_t, new_size, offset + len);
	} else {
		if (off_start) {
			ret = fill_zero(inode, pg_start++, off_start,
						PAGE_SIZE - off_start);
			if (ret)
				goto out_sem;

			new_size = max_t(loff_t, new_size,
					(loff_t)pg_start << PAGE_SHIFT);
		}

		for (index = pg_start; index < pg_end;) {
			struct dnode_of_data dn;
			unsigned int end_offset;
			pgoff_t end;

			f2fs_lock_op(sbi);

			set_new_dnode(&dn, inode, NULL, NULL, 0);
			ret = get_dnode_of_data(&dn, index, ALLOC_NODE);
			if (ret) {
				f2fs_unlock_op(sbi);
				goto out;
			}

			end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
			end = min(pg_end, end_offset - dn.ofs_in_node + index);

			ret = f2fs_do_zero_range(&dn, index, end);
			f2fs_put_dnode(&dn);
			f2fs_unlock_op(sbi);

			f2fs_balance_fs(sbi, dn.node_changed);

			if (ret)
				goto out;

			index = end;
			new_size = max_t(loff_t, new_size,
					(loff_t)index << PAGE_SHIFT);
		}

		if (off_end) {
			ret = fill_zero(inode, pg_end, 0, off_end);
			if (ret)
				goto out;

			new_size = max_t(loff_t, new_size, offset + len);
		}
	}

out:
	if (new_size > i_size_read(inode)) {
		if (mode & FALLOC_FL_KEEP_SIZE)
			file_set_keep_isize(inode);
		else
			f2fs_i_size_write(inode, new_size);
	}
out_sem:
	up_write(&F2FS_I(inode)->i_mmap_sem);

	return ret;
}

static int f2fs_insert_range(struct inode *inode, loff_t offset, loff_t len)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	pgoff_t nr, pg_start, pg_end, delta, idx;
	loff_t new_size;
	int ret = 0;

	new_size = i_size_read(inode) + len;
	ret = inode_newsize_ok(inode, new_size);
	if (ret)
		return ret;

	if (offset >= i_size_read(inode))
		return -EINVAL;

	/* insert range should be aligned to block size of f2fs. */
	if (offset & (F2FS_BLKSIZE - 1) || len & (F2FS_BLKSIZE - 1))
		return -EINVAL;

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		return ret;

	f2fs_balance_fs(sbi, true);

	/* avoid gc operation during block exchange */
	down_write(&F2FS_I(inode)->dio_rwsem[WRITE]);

	down_write(&F2FS_I(inode)->i_mmap_sem);
	ret = truncate_blocks(inode, i_size_read(inode), true);
	if (ret)
		goto out;

	/* write out all dirty pages from offset */
	ret = filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
	if (ret)
		goto out;

	truncate_pagecache(inode, offset);

	pg_start = offset >> PAGE_SHIFT;
	pg_end = (offset + len) >> PAGE_SHIFT;
	delta = pg_end - pg_start;
	idx = (i_size_read(inode) + PAGE_SIZE - 1) / PAGE_SIZE;

	while (!ret && idx > pg_start) {
		nr = idx - pg_start;
		if (nr > delta)
			nr = delta;
		idx -= nr;

		f2fs_lock_op(sbi);
		f2fs_drop_extent_tree(inode);

		ret = __exchange_data_block(inode, inode, idx,
					idx + delta, nr, false);
		f2fs_unlock_op(sbi);
	}

	/* write out all moved pages, if possible */
	filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
	truncate_pagecache(inode, offset);

	if (!ret)
		f2fs_i_size_write(inode, new_size);
out:
	up_write(&F2FS_I(inode)->i_mmap_sem);
	up_write(&F2FS_I(inode)->dio_rwsem[WRITE]);
	return ret;
}

J
					loff_t len, int mode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_map_blocks map = { .m_next_pgofs = NULL,
			.m_next_extent = NULL, .m_seg_type = NO_CHECK_TYPE };
	pgoff_t pg_end;
	loff_t new_size = i_size_read(inode);
	loff_t off_end;
	int err;

	err = inode_newsize_ok(inode, (len + offset));
	if (err)
		return err;

	err = f2fs_convert_inline_inode(inode);
	if (err)
		return err;

	f2fs_balance_fs(sbi, true);

	pg_end = ((unsigned long long)offset + len) >> PAGE_SHIFT;
	off_end = (offset + len) & (PAGE_SIZE - 1);

	map.m_lblk = ((unsigned long long)offset) >> PAGE_SHIFT;
	map.m_len = pg_end - map.m_lblk;
	if (off_end)
		map.m_len++;

	err = f2fs_map_blocks(inode, &map, 1, F2FS_GET_BLOCK_PRE_AIO);
	if (err) {
		pgoff_t last_off;

		if (!map.m_len)
			return err;

		last_off = map.m_lblk + map.m_len - 1;

		/* update new size to the failed position */
		new_size = (last_off == pg_end) ? offset + len:
					(loff_t)(last_off + 1) << PAGE_SHIFT;
	} else {
		new_size = ((loff_t)pg_end << PAGE_SHIFT) + off_end;
	}

	if (new_size > i_size_read(inode)) {
		if (mode & FALLOC_FL_KEEP_SIZE)
			file_set_keep_isize(inode);
		else
			f2fs_i_size_write(inode, new_size);
	}

	return err;
}

static long f2fs_fallocate(struct file *file, int mode,
				loff_t offset, loff_t len)
{
	struct inode *inode = file_inode(file);
	long ret = 0;

	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
		return -EIO;

	/* f2fs only support ->fallocate for regular file */
	if (!S_ISREG(inode->i_mode))
		return -EINVAL;

	if (f2fs_encrypted_inode(inode) &&
		(mode & (FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_INSERT_RANGE)))
		return -EOPNOTSUPP;

	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |
			FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_ZERO_RANGE |
			FALLOC_FL_INSERT_RANGE))
		return -EOPNOTSUPP;

	inode_lock(inode);

	if (mode & FALLOC_FL_PUNCH_HOLE) {
		if (offset >= inode->i_size)
			goto out;

		ret = punch_hole(inode, offset, len);
	} else if (mode & FALLOC_FL_COLLAPSE_RANGE) {
		ret = f2fs_collapse_range(inode, offset, len);
	} else if (mode & FALLOC_FL_ZERO_RANGE) {
		ret = f2fs_zero_range(inode, offset, len, mode);
	} else if (mode & FALLOC_FL_INSERT_RANGE) {
		ret = f2fs_insert_range(inode, offset, len);
	} else {
		ret = expand_inode_data(inode, offset, len, mode);
	}

	if (!ret) {
		inode->i_mtime = inode->i_ctime = current_time(inode);
		f2fs_mark_inode_dirty_sync(inode, false);
		f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
	}

out:
	inode_unlock(inode);

	trace_f2fs_fallocate(inode, mode, offset, len, ret);
	return ret;
}

static int f2fs_release_file(struct inode *inode, struct file *filp)
{
	/*
	 * f2fs_release_file is called on every close call. So we should
	 * not drop any inmemory pages by close called by other process.
	 */
	if (!(filp->f_mode & FMODE_WRITE) ||
			atomic_read(&inode->i_writecount) != 1)
		return 0;

	/* some remained atomic pages should discarded */
	if (f2fs_is_atomic_file(inode))
		drop_inmem_pages(inode);
	if (f2fs_is_volatile_file(inode)) {
		clear_inode_flag(inode, FI_VOLATILE_FILE);
		stat_dec_volatile_write(inode);
		set_inode_flag(inode, FI_DROP_CACHE);
		filemap_fdatawrite(inode->i_mapping);
		clear_inode_flag(inode, FI_DROP_CACHE);
	}
	return 0;
}

static int f2fs_file_flush(struct file *file, fl_owner_t id)
{
	struct inode *inode = file_inode(file);

	/*
	 * If the process doing a transaction is crashed, we should do
	 * roll-back. Otherwise, other reader/write can see corrupted database
	 * until all the writers close its file. Since this should be done
	 * before dropping file lock, it needs to do in ->flush.
	 */
	if (f2fs_is_atomic_file(inode) &&
			F2FS_I(inode)->inmem_task == current)
		drop_inmem_pages(inode);
	return 0;
}

static int f2fs_ioc_getflags(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	unsigned int flags = fi->i_flags;

	if (file_is_encrypt(inode))
		flags |= F2FS_ENCRYPT_FL;
	if (f2fs_has_inline_data(inode) || f2fs_has_inline_dentry(inode))
		flags |= F2FS_INLINE_DATA_FL;

	flags &= F2FS_FL_USER_VISIBLE;

	return put_user(flags, (int __user *)arg);
}

static int __f2fs_ioc_setflags(struct inode *inode, unsigned int flags)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);
	unsigned int oldflags;

	/* Is it quota file? Do not allow user to mess with it */
	if (IS_NOQUOTA(inode))
		return -EPERM;

	flags = f2fs_mask_flags(inode->i_mode, flags);

	oldflags = fi->i_flags;

	if ((flags ^ oldflags) & (F2FS_APPEND_FL | F2FS_IMMUTABLE_FL))
		if (!capable(CAP_LINUX_IMMUTABLE))
			return -EPERM;

	flags = flags & F2FS_FL_USER_MODIFIABLE;
	flags |= oldflags & ~F2FS_FL_USER_MODIFIABLE;
	fi->i_flags = flags;

	if (fi->i_flags & F2FS_PROJINHERIT_FL)
		set_inode_flag(inode, FI_PROJ_INHERIT);
	else
		clear_inode_flag(inode, FI_PROJ_INHERIT);

	inode->i_ctime = current_time(inode);
	f2fs_set_inode_flags(inode);
	f2fs_mark_inode_dirty_sync(inode, false);
	return 0;
}

static int f2fs_ioc_setflags(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	unsigned int flags;
	int ret;

	if (!inode_owner_or_capable(inode))
		return -EACCES;

	if (get_user(flags, (int __user *)arg))
		return -EFAULT;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	inode_lock(inode);

	ret = __f2fs_ioc_setflags(inode, flags);

	inode_unlock(inode);
	mnt_drop_write_file(filp);
	return ret;
}

static int f2fs_ioc_getversion(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);

	return put_user(inode->i_generation, (int __user *)arg);
}

static int f2fs_ioc_start_atomic_write(struct file *filp)
{
	struct inode *inode = file_inode(filp);
	int ret;

	if (!inode_owner_or_capable(inode))
		return -EACCES;

	if (!S_ISREG(inode->i_mode))
		return -EINVAL;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	inode_lock(inode);

	down_write(&F2FS_I(inode)->dio_rwsem[WRITE]);

	if (f2fs_is_atomic_file(inode))
		goto out;

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		goto out;

	if (!get_dirty_pages(inode))
		goto skip_flush;

	f2fs_msg(F2FS_I_SB(inode)->sb, KERN_WARNING,
		"Unexpected flush for atomic writes: ino=%lu, npages=%u",
					inode->i_ino, get_dirty_pages(inode));
	ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX);
	if (ret)
		goto out;
skip_flush:
	set_inode_flag(inode, FI_HOT_DATA);
	set_inode_flag(inode, FI_ATOMIC_FILE);
	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);

	F2FS_I(inode)->inmem_task = current;
	stat_inc_atomic_write(inode);
	stat_update_max_atomic_write(inode);
out:
	up_write(&F2FS_I(inode)->dio_rwsem[WRITE]);
	inode_unlock(inode);
	mnt_drop_write_file(filp);
	return ret;
}

static int f2fs_ioc_commit_atomic_write(struct file *filp)
{
	struct inode *inode = file_inode(filp);
	int ret;

	if (!inode_owner_or_capable(inode))
		return -EACCES;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	inode_lock(inode);

	down_write(&F2FS_I(inode)->dio_rwsem[WRITE]);

	if (f2fs_is_volatile_file(inode))
		goto err_out;

	if (f2fs_is_atomic_file(inode)) {
		ret = commit_inmem_pages(inode);
		if (ret)
			goto err_out;

		ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 0, true);
		if (!ret) {
			clear_inode_flag(inode, FI_ATOMIC_FILE);
			clear_inode_flag(inode, FI_HOT_DATA);
			stat_dec_atomic_write(inode);
		}
	} else {
		ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 1, false);
	}
err_out:
	up_write(&F2FS_I(inode)->dio_rwsem[WRITE]);
	inode_unlock(inode);
	mnt_drop_write_file(filp);
	return ret;
}

static int f2fs_ioc_start_volatile_write(struct file *filp)
{
	struct inode *inode = file_inode(filp);
	int ret;

	if (!inode_owner_or_capable(inode))
		return -EACCES;

	if (!S_ISREG(inode->i_mode))
		return -EINVAL;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	inode_lock(inode);

	if (f2fs_is_volatile_file(inode))
		goto out;

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		goto out;

	stat_inc_volatile_write(inode);
	stat_update_max_volatile_write(inode);

	set_inode_flag(inode, FI_VOLATILE_FILE);
	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
out:
	inode_unlock(inode);
	mnt_drop_write_file(filp);
	return ret;
}

static int f2fs_ioc_release_volatile_write(struct file *filp)
{
	struct inode *inode = file_inode(filp);
	int ret;

	if (!inode_owner_or_capable(inode))
		return -EACCES;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	inode_lock(inode);

	if (!f2fs_is_volatile_file(inode))
		goto out;

	if (!f2fs_is_first_block_written(inode)) {
		ret = truncate_partial_data_page(inode, 0, true);
		goto out;
	}

	ret = punch_hole(inode, 0, F2FS_BLKSIZE);
out:
	inode_unlock(inode);
	mnt_drop_write_file(filp);
	return ret;
}

static int f2fs_ioc_abort_volatile_write(struct file *filp)
{
	struct inode *inode = file_inode(filp);
	int ret;

	if (!inode_owner_or_capable(inode))
		return -EACCES;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	inode_lock(inode);

	if (f2fs_is_atomic_file(inode))
		drop_inmem_pages(inode);
	if (f2fs_is_volatile_file(inode)) {
		clear_inode_flag(inode, FI_VOLATILE_FILE);
		stat_dec_volatile_write(inode);
		ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 0, true);
	}

	inode_unlock(inode);

	mnt_drop_write_file(filp);
	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
	return ret;
}

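/*
 * F2FS_IOC_SHUTDOWN: take the filesystem down according to the requested
 * F2FS_GOING_DOWN_* mode, then stop the GC and discard threads.  Checkpointing
 * is disabled in every mode; only the amount of data synced first differs.
 */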
static int f2fs_ioc_shutdown(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct super_block *sb = sbi->sb;
	__u32 in;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (get_user(in, (__u32 __user *)arg))
		return -EFAULT;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	switch (in) {
	case F2FS_GOING_DOWN_FULLSYNC:
		sb = freeze_bdev(sb->s_bdev);
		if (IS_ERR(sb)) {
			ret = PTR_ERR(sb);
			goto out;
		}
		if (sb) {
			f2fs_stop_checkpoint(sbi, false);
			thaw_bdev(sb->s_bdev, sb);
		}
		break;
	case F2FS_GOING_DOWN_METASYNC:
		/* do checkpoint only */
		ret = f2fs_sync_fs(sb, 1);
		if (ret)
			goto out;
		f2fs_stop_checkpoint(sbi, false);
		break;
	case F2FS_GOING_DOWN_NOSYNC:
		f2fs_stop_checkpoint(sbi, false);
		break;
	case F2FS_GOING_DOWN_METAFLUSH:
		sync_meta_pages(sbi, META, LONG_MAX, FS_META_IO);
		f2fs_stop_checkpoint(sbi, false);
		break;
	default:
		ret = -EINVAL;
		goto out;
	}

	stop_gc_thread(sbi);
	stop_discard_thread(sbi);

	drop_discard_cmd(sbi);
	clear_opt(sbi, DISCARD);

	f2fs_update_time(sbi, REQ_TIME);
out:
	mnt_drop_write_file(filp);
	return ret;
}

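/*
 * FITRIM: discard free space in the given range.  minlen is raised to at
 * least the block device's discard granularity before calling f2fs_trim_fs().
 */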
static int f2fs_ioc_fitrim(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct super_block *sb = inode->i_sb;
	struct request_queue *q = bdev_get_queue(sb->s_bdev);
	struct fstrim_range range;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (!blk_queue_discard(q))
		return -EOPNOTSUPP;

	if (copy_from_user(&range, (struct fstrim_range __user *)arg,
				sizeof(range)))
		return -EFAULT;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	range.minlen = max((unsigned int)range.minlen,
				q->limits.discard_granularity);
	ret = f2fs_trim_fs(F2FS_SB(sb), &range);
	mnt_drop_write_file(filp);
	if (ret < 0)
		return ret;

	if (copy_to_user((struct fstrim_range __user *)arg, &range,
				sizeof(range)))
		return -EFAULT;
	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
	return 0;
}

static bool uuid_is_nonzero(__u8 u[16])
{
	int i;

	for (i = 0; i < 16; i++)
		if (u[i])
			return true;
	return false;
}

static int f2fs_ioc_set_encryption_policy(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);

	if (!f2fs_sb_has_encrypt(inode->i_sb))
		return -EOPNOTSUPP;

	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);

	return fscrypt_ioctl_set_policy(filp, (const void __user *)arg);
}

static int f2fs_ioc_get_encryption_policy(struct file *filp, unsigned long arg)
{
	if (!f2fs_sb_has_encrypt(file_inode(filp)->i_sb))
		return -EOPNOTSUPP;
	return fscrypt_ioctl_get_policy(filp, (void __user *)arg);
}

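/*
 * F2FS_IOC_GET_ENCRYPTION_PWSALT: return the per-filesystem password salt,
 * generating one and persisting it in the superblock on first use.
 */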
static int f2fs_ioc_get_encryption_pwsalt(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	int err;

	if (!f2fs_sb_has_encrypt(inode->i_sb))
		return -EOPNOTSUPP;

	err = mnt_want_write_file(filp);
	if (err)
		return err;

	down_write(&sbi->sb_lock);

	if (uuid_is_nonzero(sbi->raw_super->encrypt_pw_salt))
		goto got_it;

	/* update superblock with uuid */
	generate_random_uuid(sbi->raw_super->encrypt_pw_salt);

	err = f2fs_commit_super(sbi, false);
	if (err) {
		/* undo new data */
		memset(sbi->raw_super->encrypt_pw_salt, 0, 16);
		goto out_err;
	}
got_it:
	if (copy_to_user((__u8 __user *)arg, sbi->raw_super->encrypt_pw_salt,
									16))
		err = -EFAULT;
out_err:
	up_write(&sbi->sb_lock);
	mnt_drop_write_file(filp);
	return err;
}

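/*
 * F2FS_IOC_GARBAGE_COLLECT: run one round of garbage collection.  A sync
 * request waits for gc_mutex; a non-sync request returns -EBUSY if GC is
 * already running.
 */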
static int f2fs_ioc_gc(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	__u32 sync;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (get_user(sync, (__u32 __user *)arg))
		return -EFAULT;

	if (f2fs_readonly(sbi->sb))
		return -EROFS;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	if (!sync) {
		if (!mutex_trylock(&sbi->gc_mutex)) {
			ret = -EBUSY;
			goto out;
		}
	} else {
		mutex_lock(&sbi->gc_mutex);
	}

	ret = f2fs_gc(sbi, sync, true, NULL_SEGNO);
out:
	mnt_drop_write_file(filp);
	return ret;
}

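/*
 * F2FS_IOC_GARBAGE_COLLECT_RANGE: garbage collect the segments covering the
 * caller-supplied block range, advancing one segment per f2fs_gc() call.
 */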
static int f2fs_ioc_gc_range(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_gc_range range;
	u64 end;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (copy_from_user(&range, (struct f2fs_gc_range __user *)arg,
							sizeof(range)))
		return -EFAULT;

	if (f2fs_readonly(sbi->sb))
		return -EROFS;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	end = range.start + range.len;
	if (range.start < MAIN_BLKADDR(sbi) || end >= MAX_BLKADDR(sbi)) {
		ret = -EINVAL;
		goto out;
	}
do_more:
	if (!range.sync) {
		if (!mutex_trylock(&sbi->gc_mutex)) {
			ret = -EBUSY;
			goto out;
		}
	} else {
		mutex_lock(&sbi->gc_mutex);
	}

	ret = f2fs_gc(sbi, range.sync, true, GET_SEGNO(sbi, range.start));
	range.start += sbi->blocks_per_seg;
	if (range.start <= end)
		goto do_more;
out:
	mnt_drop_write_file(filp);
	return ret;
}

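/* F2FS_IOC_WRITE_CHECKPOINT: force a checkpoint via f2fs_sync_fs(). */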
static int f2fs_ioc_write_checkpoint(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (f2fs_readonly(sbi->sb))
		return -EROFS;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	ret = f2fs_sync_fs(sbi->sb, 1);

	mnt_drop_write_file(filp);
	return ret;
}

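/*
 * Core of F2FS_IOC_DEFRAGMENT: detect whether the range is fragmented using
 * the extent cache and f2fs_map_blocks(), then redirty the affected pages a
 * segment's worth at a time so writeback reallocates them contiguously.
 */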
static int f2fs_defragment_range(struct f2fs_sb_info *sbi,
					struct file *filp,
					struct f2fs_defragment *range)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_map_blocks map = { .m_next_extent = NULL,
					.m_seg_type = NO_CHECK_TYPE };
	struct extent_info ei = {0, 0, 0};
	pgoff_t pg_start, pg_end, next_pgofs;
	unsigned int blk_per_seg = sbi->blocks_per_seg;
	unsigned int total = 0, sec_num;
	block_t blk_end = 0;
	bool fragmented = false;
	int err;

	/* if in-place-update policy is enabled, don't waste time here */
	if (should_update_inplace(inode, NULL))
		return -EINVAL;

	pg_start = range->start >> PAGE_SHIFT;
	pg_end = (range->start + range->len) >> PAGE_SHIFT;

	f2fs_balance_fs(sbi, true);

	inode_lock(inode);

	/* writeback all dirty pages in the range */
	err = filemap_write_and_wait_range(inode->i_mapping, range->start,
						range->start + range->len - 1);
	if (err)
		goto out;

	/*
	 * lookup mapping info in extent cache, skip defragmenting if physical
	 * block addresses are continuous.
	 */
	if (f2fs_lookup_extent_cache(inode, pg_start, &ei)) {
		if (ei.fofs + ei.len >= pg_end)
			goto out;
	}

	map.m_lblk = pg_start;
	map.m_next_pgofs = &next_pgofs;

	/*
	 * lookup mapping info in dnode page cache, skip defragmenting if all
	 * physical block addresses are continuous even if there are hole(s)
	 * in logical blocks.
	 */
	while (map.m_lblk < pg_end) {
		map.m_len = pg_end - map.m_lblk;
		err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_DEFAULT);
		if (err)
			goto out;

		if (!(map.m_flags & F2FS_MAP_FLAGS)) {
			map.m_lblk = next_pgofs;
			continue;
		}

		if (blk_end && blk_end != map.m_pblk)
			fragmented = true;

		/* record total count of block that we're going to move */
		total += map.m_len;

		blk_end = map.m_pblk + map.m_len;

		map.m_lblk += map.m_len;
	}

	if (!fragmented)
		goto out;

	sec_num = (total + BLKS_PER_SEC(sbi) - 1) / BLKS_PER_SEC(sbi);

	/*
	 * make sure there are enough free sections for LFS allocation, this
	 * can avoid defragment running in SSR mode when free sections are
	 * allocated intensively
	 */
	if (has_not_enough_free_secs(sbi, 0, sec_num)) {
		err = -EAGAIN;
		goto out;
	}

	map.m_lblk = pg_start;
	map.m_len = pg_end - pg_start;
	total = 0;

	while (map.m_lblk < pg_end) {
		pgoff_t idx;
		int cnt = 0;

do_map:
		map.m_len = pg_end - map.m_lblk;
		err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_DEFAULT);
		if (err)
			goto clear_out;

		if (!(map.m_flags & F2FS_MAP_FLAGS)) {
			map.m_lblk = next_pgofs;
			continue;
		}

		set_inode_flag(inode, FI_DO_DEFRAG);

		idx = map.m_lblk;
		while (idx < map.m_lblk + map.m_len && cnt < blk_per_seg) {
			struct page *page;

			page = get_lock_data_page(inode, idx, true);
			if (IS_ERR(page)) {
				err = PTR_ERR(page);
				goto clear_out;
			}

			set_page_dirty(page);
			f2fs_put_page(page, 1);

			idx++;
			cnt++;
			total++;
		}

		map.m_lblk = idx;

		if (idx < pg_end && cnt < blk_per_seg)
			goto do_map;

		clear_inode_flag(inode, FI_DO_DEFRAG);

		err = filemap_fdatawrite(inode->i_mapping);
		if (err)
			goto out;
	}
clear_out:
	clear_inode_flag(inode, FI_DO_DEFRAG);
out:
	inode_unlock(inode);
	if (!err)
		range->len = (u64)total << PAGE_SHIFT;
	return err;
}

static int f2fs_ioc_defragment(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_defragment range;
	int err;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (!S_ISREG(inode->i_mode) || f2fs_is_atomic_file(inode))
		return -EINVAL;

	if (f2fs_readonly(sbi->sb))
		return -EROFS;

	if (copy_from_user(&range, (struct f2fs_defragment __user *)arg,
							sizeof(range)))
		return -EFAULT;

	/* verify alignment of offset & size */
	if (range.start & (F2FS_BLKSIZE - 1) || range.len & (F2FS_BLKSIZE - 1))
		return -EINVAL;

	if (unlikely((range.start + range.len) >> PAGE_SHIFT >
					sbi->max_file_blocks))
		return -EINVAL;

	err = mnt_want_write_file(filp);
	if (err)
		return err;

	err = f2fs_defragment_range(sbi, filp, &range);
	mnt_drop_write_file(filp);

	f2fs_update_time(sbi, REQ_TIME);
	if (err < 0)
		return err;

	if (copy_to_user((struct f2fs_defragment __user *)arg, &range,
							sizeof(range)))
		return -EFAULT;

	return 0;
}

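/*
 * Back end of F2FS_IOC_MOVE_RANGE: exchange a block-aligned range between two
 * regular, unencrypted f2fs files on the same mount, updating the destination
 * size when the moved range extends it.
 */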
static int f2fs_move_file_range(struct file *file_in, loff_t pos_in,
			struct file *file_out, loff_t pos_out, size_t len)
{
	struct inode *src = file_inode(file_in);
	struct inode *dst = file_inode(file_out);
	struct f2fs_sb_info *sbi = F2FS_I_SB(src);
	size_t olen = len, dst_max_i_size = 0;
	size_t dst_osize;
	int ret;

	if (file_in->f_path.mnt != file_out->f_path.mnt ||
				src->i_sb != dst->i_sb)
		return -EXDEV;

	if (unlikely(f2fs_readonly(src->i_sb)))
		return -EROFS;

	if (!S_ISREG(src->i_mode) || !S_ISREG(dst->i_mode))
		return -EINVAL;

	if (f2fs_encrypted_inode(src) || f2fs_encrypted_inode(dst))
		return -EOPNOTSUPP;

	if (src == dst) {
		if (pos_in == pos_out)
			return 0;
		if (pos_out > pos_in && pos_out < pos_in + len)
			return -EINVAL;
	}

	inode_lock(src);
	down_write(&F2FS_I(src)->dio_rwsem[WRITE]);
	if (src != dst) {
		ret = -EBUSY;
		if (!inode_trylock(dst))
			goto out;
		if (!down_write_trylock(&F2FS_I(dst)->dio_rwsem[WRITE])) {
			inode_unlock(dst);
			goto out;
		}
	}

	ret = -EINVAL;
	if (pos_in + len > src->i_size || pos_in + len < pos_in)
		goto out_unlock;
	if (len == 0)
		olen = len = src->i_size - pos_in;
	if (pos_in + len == src->i_size)
		len = ALIGN(src->i_size, F2FS_BLKSIZE) - pos_in;
	if (len == 0) {
		ret = 0;
		goto out_unlock;
	}

	dst_osize = dst->i_size;
	if (pos_out + olen > dst->i_size)
		dst_max_i_size = pos_out + olen;

	/* verify the end result is block aligned */
	if (!IS_ALIGNED(pos_in, F2FS_BLKSIZE) ||
			!IS_ALIGNED(pos_in + len, F2FS_BLKSIZE) ||
			!IS_ALIGNED(pos_out, F2FS_BLKSIZE))
		goto out_unlock;

	ret = f2fs_convert_inline_inode(src);
	if (ret)
		goto out_unlock;

	ret = f2fs_convert_inline_inode(dst);
	if (ret)
		goto out_unlock;

	/* write out all dirty pages from offset */
	ret = filemap_write_and_wait_range(src->i_mapping,
					pos_in, pos_in + len);
	if (ret)
		goto out_unlock;

	ret = filemap_write_and_wait_range(dst->i_mapping,
					pos_out, pos_out + len);
	if (ret)
		goto out_unlock;

	f2fs_balance_fs(sbi, true);
	f2fs_lock_op(sbi);
	ret = __exchange_data_block(src, dst, pos_in >> F2FS_BLKSIZE_BITS,
				pos_out >> F2FS_BLKSIZE_BITS,
				len >> F2FS_BLKSIZE_BITS, false);

	if (!ret) {
		if (dst_max_i_size)
			f2fs_i_size_write(dst, dst_max_i_size);
		else if (dst_osize != dst->i_size)
			f2fs_i_size_write(dst, dst_osize);
	}
	f2fs_unlock_op(sbi);
out_unlock:
	if (src != dst) {
		up_write(&F2FS_I(dst)->dio_rwsem[WRITE]);
		inode_unlock(dst);
	}
out:
	up_write(&F2FS_I(src)->dio_rwsem[WRITE]);
	inode_unlock(src);
	return ret;
}

static int f2fs_ioc_move_range(struct file *filp, unsigned long arg)
{
	struct f2fs_move_range range;
	struct fd dst;
	int err;

	if (!(filp->f_mode & FMODE_READ) ||
			!(filp->f_mode & FMODE_WRITE))
		return -EBADF;

	if (copy_from_user(&range, (struct f2fs_move_range __user *)arg,
							sizeof(range)))
		return -EFAULT;

	dst = fdget(range.dst_fd);
	if (!dst.file)
		return -EBADF;

	if (!(dst.file->f_mode & FMODE_WRITE)) {
		err = -EBADF;
		goto err_out;
	}

	err = mnt_want_write_file(filp);
	if (err)
		goto err_out;

	err = f2fs_move_file_range(filp, range.pos_in, dst.file,
					range.pos_out, range.len);

	mnt_drop_write_file(filp);
	if (err)
		goto err_out;

	if (copy_to_user((struct f2fs_move_range __user *)arg,
						&range, sizeof(range)))
		err = -EFAULT;
err_out:
	fdput(dst);
	return err;
}

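/*
 * F2FS_IOC_FLUSH_DEVICE: migrate data off one device of a multi-device
 * filesystem by garbage collecting up to range.segments segments of it.
 */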
static int f2fs_ioc_flush_device(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct sit_info *sm = SIT_I(sbi);
	unsigned int start_segno = 0, end_segno = 0;
	unsigned int dev_start_segno = 0, dev_end_segno = 0;
	struct f2fs_flush_device range;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (f2fs_readonly(sbi->sb))
		return -EROFS;

	if (copy_from_user(&range, (struct f2fs_flush_device __user *)arg,
							sizeof(range)))
		return -EFAULT;

	if (sbi->s_ndevs <= 1 || sbi->s_ndevs - 1 <= range.dev_num ||
			sbi->segs_per_sec != 1) {
		f2fs_msg(sbi->sb, KERN_WARNING,
			"Can't flush %u in %d for segs_per_sec %u != 1\n",
				range.dev_num, sbi->s_ndevs,
				sbi->segs_per_sec);
		return -EINVAL;
	}

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	if (range.dev_num != 0)
		dev_start_segno = GET_SEGNO(sbi, FDEV(range.dev_num).start_blk);
	dev_end_segno = GET_SEGNO(sbi, FDEV(range.dev_num).end_blk);

	start_segno = sm->last_victim[FLUSH_DEVICE];
	if (start_segno < dev_start_segno || start_segno >= dev_end_segno)
		start_segno = dev_start_segno;
	end_segno = min(start_segno + range.segments, dev_end_segno);

	while (start_segno < end_segno) {
		if (!mutex_trylock(&sbi->gc_mutex)) {
			ret = -EBUSY;
			goto out;
		}
		sm->last_victim[GC_CB] = end_segno + 1;
		sm->last_victim[GC_GREEDY] = end_segno + 1;
		sm->last_victim[ALLOC_NEXT] = end_segno + 1;
		ret = f2fs_gc(sbi, true, true, start_segno);
		if (ret == -EAGAIN)
			ret = 0;
		else if (ret < 0)
			break;
		start_segno++;
	}
out:
	mnt_drop_write_file(filp);
	return ret;
}

static int f2fs_ioc_get_features(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	u32 sb_feature = le32_to_cpu(F2FS_I_SB(inode)->raw_super->feature);

	/* Always advertise atomic write support; SQLite on Android relies on it. */
	sb_feature |= F2FS_FEATURE_ATOMIC_WRITE;

	return put_user(sb_feature, (u32 __user *)arg);
}

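/*
 * Assign a new project ID for F2FS_IOC_FSSETXATTR, transferring project quota
 * charges when project quota is enabled; without CONFIG_QUOTA only the
 * default project ID is accepted.
 */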
#ifdef CONFIG_QUOTA
static int f2fs_ioc_setproject(struct file *filp, __u32 projid)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct super_block *sb = sbi->sb;
	struct dquot *transfer_to[MAXQUOTAS] = {};
	struct page *ipage;
	kprojid_t kprojid;
	int err;

	if (!f2fs_sb_has_project_quota(sb)) {
		if (projid != F2FS_DEF_PROJID)
			return -EOPNOTSUPP;
		else
			return 0;
	}

	if (!f2fs_has_extra_attr(inode))
		return -EOPNOTSUPP;

	kprojid = make_kprojid(&init_user_ns, (projid_t)projid);

	if (projid_eq(kprojid, F2FS_I(inode)->i_projid))
		return 0;

	err = mnt_want_write_file(filp);
	if (err)
		return err;

	err = -EPERM;
	inode_lock(inode);

	/* Is it quota file? Do not allow user to mess with it */
	if (IS_NOQUOTA(inode))
		goto out_unlock;

	ipage = get_node_page(sbi, inode->i_ino);
	if (IS_ERR(ipage)) {
		err = PTR_ERR(ipage);
		goto out_unlock;
	}

	if (!F2FS_FITS_IN_INODE(F2FS_INODE(ipage), fi->i_extra_isize,
								i_projid)) {
		err = -EOVERFLOW;
		f2fs_put_page(ipage, 1);
		goto out_unlock;
	}
	f2fs_put_page(ipage, 1);

	dquot_initialize(inode);

	transfer_to[PRJQUOTA] = dqget(sb, make_kqid_projid(kprojid));
	if (!IS_ERR(transfer_to[PRJQUOTA])) {
		err = __dquot_transfer(inode, transfer_to);
		dqput(transfer_to[PRJQUOTA]);
		if (err)
			goto out_dirty;
	}

	F2FS_I(inode)->i_projid = kprojid;
	inode->i_ctime = current_time(inode);
out_dirty:
	f2fs_mark_inode_dirty_sync(inode, true);
out_unlock:
	inode_unlock(inode);
	mnt_drop_write_file(filp);
	return err;
}
#else
static int f2fs_ioc_setproject(struct file *filp, __u32 projid)
{
	if (projid != F2FS_DEF_PROJID)
		return -EOPNOTSUPP;
	return 0;
}
#endif

/* Transfer internal flags to xflags */
static inline __u32 f2fs_iflags_to_xflags(unsigned long iflags)
{
	__u32 xflags = 0;

	if (iflags & F2FS_SYNC_FL)
		xflags |= FS_XFLAG_SYNC;
	if (iflags & F2FS_IMMUTABLE_FL)
		xflags |= FS_XFLAG_IMMUTABLE;
	if (iflags & F2FS_APPEND_FL)
		xflags |= FS_XFLAG_APPEND;
	if (iflags & F2FS_NODUMP_FL)
		xflags |= FS_XFLAG_NODUMP;
	if (iflags & F2FS_NOATIME_FL)
		xflags |= FS_XFLAG_NOATIME;
	if (iflags & F2FS_PROJINHERIT_FL)
		xflags |= FS_XFLAG_PROJINHERIT;
	return xflags;
}

#define F2FS_SUPPORTED_FS_XFLAGS (FS_XFLAG_SYNC | FS_XFLAG_IMMUTABLE | \
				  FS_XFLAG_APPEND | FS_XFLAG_NODUMP | \
				  FS_XFLAG_NOATIME | FS_XFLAG_PROJINHERIT)

/* Transfer xflags flags to internal */
static inline unsigned long f2fs_xflags_to_iflags(__u32 xflags)
{
	unsigned long iflags = 0;

	if (xflags & FS_XFLAG_SYNC)
		iflags |= F2FS_SYNC_FL;
	if (xflags & FS_XFLAG_IMMUTABLE)
		iflags |= F2FS_IMMUTABLE_FL;
	if (xflags & FS_XFLAG_APPEND)
		iflags |= F2FS_APPEND_FL;
	if (xflags & FS_XFLAG_NODUMP)
		iflags |= F2FS_NODUMP_FL;
	if (xflags & FS_XFLAG_NOATIME)
		iflags |= F2FS_NOATIME_FL;
	if (xflags & FS_XFLAG_PROJINHERIT)
		iflags |= F2FS_PROJINHERIT_FL;

	return iflags;
}

static int f2fs_ioc_fsgetxattr(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct fsxattr fa;

	memset(&fa, 0, sizeof(struct fsxattr));
	fa.fsx_xflags = f2fs_iflags_to_xflags(fi->i_flags &
				F2FS_FL_USER_VISIBLE);

	if (f2fs_sb_has_project_quota(inode->i_sb))
		fa.fsx_projid = (__u32)from_kprojid(&init_user_ns,
							fi->i_projid);

	if (copy_to_user((struct fsxattr __user *)arg, &fa, sizeof(fa)))
		return -EFAULT;
	return 0;
}

static int f2fs_ioc_fssetxattr(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct fsxattr fa;
	unsigned int flags;
	int err;

	if (copy_from_user(&fa, (struct fsxattr __user *)arg, sizeof(fa)))
		return -EFAULT;

	/* Make sure caller has proper permission */
	if (!inode_owner_or_capable(inode))
		return -EACCES;

	if (fa.fsx_xflags & ~F2FS_SUPPORTED_FS_XFLAGS)
		return -EOPNOTSUPP;

	flags = f2fs_xflags_to_iflags(fa.fsx_xflags);
	if (f2fs_mask_flags(inode->i_mode, flags) != flags)
		return -EOPNOTSUPP;

	err = mnt_want_write_file(filp);
	if (err)
		return err;

	inode_lock(inode);
	flags = (fi->i_flags & ~F2FS_FL_XFLAG_VISIBLE) |
				(flags & F2FS_FL_XFLAG_VISIBLE);
	err = __f2fs_ioc_setflags(inode, flags);
	inode_unlock(inode);
	mnt_drop_write_file(filp);
	if (err)
		return err;

	err = f2fs_ioc_setproject(filp, fa.fsx_projid);
	if (err)
		return err;

	return 0;
}

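/*
 * Pinned files are kept in place across GC; i_gc_failures counts how often GC
 * had to skip the file, and the pin is dropped once it exceeds
 * gc_pin_file_threshold.
 */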
int f2fs_pin_file_control(struct inode *inode, bool inc)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

	/* Use i_gc_failures for normal file as a risk signal. */
	if (inc)
		f2fs_i_gc_failures_write(inode, fi->i_gc_failures + 1);

	if (fi->i_gc_failures > sbi->gc_pin_file_threshold) {
		f2fs_msg(sbi->sb, KERN_WARNING,
			"%s: Enable GC = ino %lx after %x GC trials\n",
			__func__, inode->i_ino, fi->i_gc_failures);
		clear_inode_flag(inode, FI_PIN_FILE);
		return -EAGAIN;
	}
	return 0;
}

static int f2fs_ioc_set_pin_file(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	__u32 pin;
	int ret = 0;

	if (!inode_owner_or_capable(inode))
		return -EACCES;

	if (get_user(pin, (__u32 __user *)arg))
		return -EFAULT;

	if (!S_ISREG(inode->i_mode))
		return -EINVAL;

	if (f2fs_readonly(F2FS_I_SB(inode)->sb))
		return -EROFS;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	inode_lock(inode);

	if (should_update_outplace(inode, NULL)) {
		ret = -EINVAL;
		goto out;
	}

	if (!pin) {
		clear_inode_flag(inode, FI_PIN_FILE);
		F2FS_I(inode)->i_gc_failures = 1;
		goto done;
	}

	if (f2fs_pin_file_control(inode, false)) {
		ret = -EAGAIN;
		goto out;
	}
	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		goto out;

	set_inode_flag(inode, FI_PIN_FILE);
	ret = F2FS_I(inode)->i_gc_failures;
done:
	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
out:
	inode_unlock(inode);
	mnt_drop_write_file(filp);
	return ret;
}

static int f2fs_ioc_get_pin_file(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	__u32 pin = 0;

	if (is_inode_flag_set(inode, FI_PIN_FILE))
		pin = F2FS_I(inode)->i_gc_failures;
	return put_user(pin, (u32 __user *)arg);
}

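/*
 * F2FS_IOC_PRECACHE_EXTENTS: walk the whole file with F2FS_GET_BLOCK_PRECACHE
 * so the block mappings get loaded into the extent cache up front.
 */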
int f2fs_precache_extents(struct inode *inode)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct f2fs_map_blocks map;
	pgoff_t m_next_extent;
	loff_t end;
	int err;

	if (is_inode_flag_set(inode, FI_NO_EXTENT))
		return -EOPNOTSUPP;

	map.m_lblk = 0;
	map.m_next_pgofs = NULL;
	map.m_next_extent = &m_next_extent;
	map.m_seg_type = NO_CHECK_TYPE;
	end = F2FS_I_SB(inode)->max_file_blocks;

	while (map.m_lblk < end) {
		map.m_len = end - map.m_lblk;

		down_write(&fi->dio_rwsem[WRITE]);
		err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_PRECACHE);
		up_write(&fi->dio_rwsem[WRITE]);
		if (err)
			return err;

		map.m_lblk = m_next_extent;
	}

	return err;
}

static int f2fs_ioc_precache_extents(struct file *filp, unsigned long arg)
{
	return f2fs_precache_extents(file_inode(filp));
}

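/*
 * Top-level ioctl dispatcher.  Userspace drives these handlers through
 * ioctl(2) on a file opened on an f2fs mount, e.g. (illustrative sketch):
 *
 *	__u32 sync = 1;
 *	ioctl(fd, F2FS_IOC_GARBAGE_COLLECT, &sync);
 */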
long f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	if (unlikely(f2fs_cp_error(F2FS_I_SB(file_inode(filp)))))
		return -EIO;

	switch (cmd) {
	case F2FS_IOC_GETFLAGS:
		return f2fs_ioc_getflags(filp, arg);
	case F2FS_IOC_SETFLAGS:
		return f2fs_ioc_setflags(filp, arg);
	case F2FS_IOC_GETVERSION:
		return f2fs_ioc_getversion(filp, arg);
	case F2FS_IOC_START_ATOMIC_WRITE:
		return f2fs_ioc_start_atomic_write(filp);
	case F2FS_IOC_COMMIT_ATOMIC_WRITE:
		return f2fs_ioc_commit_atomic_write(filp);
	case F2FS_IOC_START_VOLATILE_WRITE:
		return f2fs_ioc_start_volatile_write(filp);
	case F2FS_IOC_RELEASE_VOLATILE_WRITE:
		return f2fs_ioc_release_volatile_write(filp);
	case F2FS_IOC_ABORT_VOLATILE_WRITE:
		return f2fs_ioc_abort_volatile_write(filp);
	case F2FS_IOC_SHUTDOWN:
		return f2fs_ioc_shutdown(filp, arg);
	case FITRIM:
		return f2fs_ioc_fitrim(filp, arg);
	case F2FS_IOC_SET_ENCRYPTION_POLICY:
		return f2fs_ioc_set_encryption_policy(filp, arg);
	case F2FS_IOC_GET_ENCRYPTION_POLICY:
		return f2fs_ioc_get_encryption_policy(filp, arg);
	case F2FS_IOC_GET_ENCRYPTION_PWSALT:
		return f2fs_ioc_get_encryption_pwsalt(filp, arg);
	case F2FS_IOC_GARBAGE_COLLECT:
		return f2fs_ioc_gc(filp, arg);
	case F2FS_IOC_GARBAGE_COLLECT_RANGE:
		return f2fs_ioc_gc_range(filp, arg);
	case F2FS_IOC_WRITE_CHECKPOINT:
		return f2fs_ioc_write_checkpoint(filp, arg);
	case F2FS_IOC_DEFRAGMENT:
		return f2fs_ioc_defragment(filp, arg);
	case F2FS_IOC_MOVE_RANGE:
		return f2fs_ioc_move_range(filp, arg);
	case F2FS_IOC_FLUSH_DEVICE:
		return f2fs_ioc_flush_device(filp, arg);
	case F2FS_IOC_GET_FEATURES:
		return f2fs_ioc_get_features(filp, arg);
	case F2FS_IOC_FSGETXATTR:
		return f2fs_ioc_fsgetxattr(filp, arg);
	case F2FS_IOC_FSSETXATTR:
		return f2fs_ioc_fssetxattr(filp, arg);
	case F2FS_IOC_GET_PIN_FILE:
		return f2fs_ioc_get_pin_file(filp, arg);
	case F2FS_IOC_SET_PIN_FILE:
		return f2fs_ioc_set_pin_file(filp, arg);
	case F2FS_IOC_PRECACHE_EXTENTS:
		return f2fs_ioc_precache_extents(filp, arg);
	default:
		return -ENOTTY;
	}
}

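/*
 * Buffered/direct write entry point: honours IOCB_NOWAIT, preallocates blocks
 * for ordinary writes, and trims the preallocation again if the write falls
 * short of the intended size.
 */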
static ssize_t f2fs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	ssize_t ret;

	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
		return -EIO;

	if ((iocb->ki_flags & IOCB_NOWAIT) && !(iocb->ki_flags & IOCB_DIRECT))
		return -EINVAL;

	if (!inode_trylock(inode)) {
		if (iocb->ki_flags & IOCB_NOWAIT)
			return -EAGAIN;
		inode_lock(inode);
	}

	ret = generic_write_checks(iocb, from);
	if (ret > 0) {
		bool preallocated = false;
		size_t target_size = 0;
		int err;

		if (iov_iter_fault_in_readable(from, iov_iter_count(from)))
			set_inode_flag(inode, FI_NO_PREALLOC);

		if ((iocb->ki_flags & IOCB_NOWAIT) &&
			(iocb->ki_flags & IOCB_DIRECT)) {
				if (!f2fs_overwrite_io(inode, iocb->ki_pos,
						iov_iter_count(from)) ||
					f2fs_has_inline_data(inode) ||
					f2fs_force_buffered_io(inode, WRITE)) {
						inode_unlock(inode);
						return -EAGAIN;
				}

		} else {
			preallocated = true;
			target_size = iocb->ki_pos + iov_iter_count(from);

			err = f2fs_preallocate_blocks(iocb, from);
			if (err) {
				clear_inode_flag(inode, FI_NO_PREALLOC);
				inode_unlock(inode);
				return err;
			}
		}
		ret = __generic_file_write_iter(iocb, from);
		clear_inode_flag(inode, FI_NO_PREALLOC);

		/* if we couldn't write data, we should deallocate blocks. */
		if (preallocated && i_size_read(inode) < target_size)
			f2fs_truncate(inode);

		if (ret > 0)
			f2fs_update_iostat(F2FS_I_SB(inode), APP_WRITE_IO, ret);
	}
	inode_unlock(inode);

	if (ret > 0)
		ret = generic_write_sync(iocb, ret);
	return ret;
}

#ifdef CONFIG_COMPAT
long f2fs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	switch (cmd) {
	case F2FS_IOC32_GETFLAGS:
		cmd = F2FS_IOC_GETFLAGS;
		break;
	case F2FS_IOC32_SETFLAGS:
		cmd = F2FS_IOC_SETFLAGS;
		break;
	case F2FS_IOC32_GETVERSION:
		cmd = F2FS_IOC_GETVERSION;
		break;
	case F2FS_IOC_START_ATOMIC_WRITE:
	case F2FS_IOC_COMMIT_ATOMIC_WRITE:
	case F2FS_IOC_START_VOLATILE_WRITE:
	case F2FS_IOC_RELEASE_VOLATILE_WRITE:
	case F2FS_IOC_ABORT_VOLATILE_WRITE:
	case F2FS_IOC_SHUTDOWN:
	case F2FS_IOC_SET_ENCRYPTION_POLICY:
	case F2FS_IOC_GET_ENCRYPTION_PWSALT:
	case F2FS_IOC_GET_ENCRYPTION_POLICY:
	case F2FS_IOC_GARBAGE_COLLECT:
	case F2FS_IOC_GARBAGE_COLLECT_RANGE:
	case F2FS_IOC_WRITE_CHECKPOINT:
	case F2FS_IOC_DEFRAGMENT:
	case F2FS_IOC_MOVE_RANGE:
	case F2FS_IOC_FLUSH_DEVICE:
	case F2FS_IOC_GET_FEATURES:
	case F2FS_IOC_FSGETXATTR:
	case F2FS_IOC_FSSETXATTR:
	case F2FS_IOC_GET_PIN_FILE:
	case F2FS_IOC_SET_PIN_FILE:
	case F2FS_IOC_PRECACHE_EXTENTS:
		break;
	default:
		return -ENOIOCTLCMD;
	}
	return f2fs_ioctl(file, cmd, (unsigned long) compat_ptr(arg));
}
#endif

const struct file_operations f2fs_file_operations = {
	.llseek		= f2fs_llseek,
	.read_iter	= generic_file_read_iter,
	.write_iter	= f2fs_file_write_iter,
	.open		= f2fs_file_open,
	.release	= f2fs_release_file,
	.mmap		= f2fs_file_mmap,
	.flush		= f2fs_file_flush,
	.fsync		= f2fs_sync_file,
	.fallocate	= f2fs_fallocate,
	.unlocked_ioctl	= f2fs_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= f2fs_compat_ioctl,
#endif
	.splice_read	= generic_file_splice_read,
	.splice_write	= iter_file_splice_write,
};