/*
 * fs/f2fs/data.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/buffer_head.h>
#include <linux/mpage.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/pagevec.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/prefetch.h>
#include <linux/uio.h>
#include <linux/mm.h>
#include <linux/memcontrol.h>
#include <linux/cleancache.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "trace.h"
#include <trace/events/f2fs.h>

static void f2fs_read_end_io(struct bio *bio)
{
	struct bio_vec *bvec;
	int i;

#ifdef CONFIG_F2FS_FAULT_INJECTION
	if (time_to_inject(F2FS_P_SB(bio->bi_io_vec->bv_page), FAULT_IO))
		bio->bi_error = -EIO;
#endif

	if (f2fs_bio_encrypted(bio)) {
		if (bio->bi_error) {
			fscrypt_release_ctx(bio->bi_private);
		} else {
			fscrypt_decrypt_bio_pages(bio->bi_private, bio);
			return;
		}
	}

	bio_for_each_segment_all(bvec, bio, i) {
		struct page *page = bvec->bv_page;

		if (!bio->bi_error) {
			if (!PageUptodate(page))
				SetPageUptodate(page);
		} else {
			ClearPageUptodate(page);
			SetPageError(page);
		}
		unlock_page(page);
	}
	bio_put(bio);
}

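/*
 * Completion callback for write bios: record per-page errors, end page
 * writeback, and wake up a checkpoint waiter once all writeback bios have
 * drained.
 */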
static void f2fs_write_end_io(struct bio *bio)
{
	struct f2fs_sb_info *sbi = bio->bi_private;
	struct bio_vec *bvec;
	int i;

	bio_for_each_segment_all(bvec, bio, i) {
		struct page *page = bvec->bv_page;

		fscrypt_pullback_bio_page(&page, true);

		if (unlikely(bio->bi_error)) {
			mapping_set_error(page->mapping, -EIO);
			f2fs_stop_checkpoint(sbi, true);
		}
		end_page_writeback(page);
	}
	if (atomic_dec_and_test(&sbi->nr_wb_bios) &&
				wq_has_sleeper(&sbi->cp_wait))
		wake_up(&sbi->cp_wait);

	bio_put(bio);
}

/*
 * Low-level block read/write IO operations.
 */
static struct bio *__bio_alloc(struct f2fs_sb_info *sbi, block_t blk_addr,
				int npages, bool is_read)
{
	struct bio *bio;

	bio = f2fs_bio_alloc(npages);

	bio->bi_bdev = sbi->sb->s_bdev;
	bio->bi_iter.bi_sector = SECTOR_FROM_BLOCK(blk_addr);
	bio->bi_end_io = is_read ? f2fs_read_end_io : f2fs_write_end_io;
	bio->bi_private = is_read ? NULL : sbi;

	return bio;
}

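/*
 * Hand a bio to the block layer; for write IO this also bumps the in-flight
 * writeback counter and, on host-managed SMR mounts, finishes the current
 * plug before DATA/NODE bios are submitted.
 */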
static inline void __submit_bio(struct f2fs_sb_info *sbi,
				struct bio *bio, enum page_type type)
{
	if (!is_read_io(bio_op(bio))) {
		atomic_inc(&sbi->nr_wb_bios);
		if (f2fs_sb_mounted_hmsmr(sbi->sb) &&
			current->plug && (type == DATA || type == NODE))
			blk_finish_plug(current->plug);
	}
	submit_bio(bio);
}

static void __submit_merged_bio(struct f2fs_bio_info *io)
{
	struct f2fs_io_info *fio = &io->fio;

	if (!io->bio)
		return;

	if (is_read_io(fio->op))
		trace_f2fs_submit_read_bio(io->sbi->sb, fio, io->bio);
	else
		trace_f2fs_submit_write_bio(io->sbi->sb, fio, io->bio);

	bio_set_op_attrs(io->bio, fio->op, fio->op_flags);

	__submit_bio(io->sbi, io->bio, fio->type);
	io->bio = NULL;
}

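/*
 * Check whether the bio pending in @io already carries a page that belongs
 * to the given inode, matches the given page, or comes from the given
 * node ino.
 */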
static bool __has_merged_page(struct f2fs_bio_info *io, struct inode *inode,
						struct page *page, nid_t ino)
{
	struct bio_vec *bvec;
	struct page *target;
	int i;

	if (!io->bio)
		return false;

	if (!inode && !page && !ino)
		return true;

	bio_for_each_segment_all(bvec, io->bio, i) {

		if (bvec->bv_page->mapping)
			target = bvec->bv_page;
		else
			target = fscrypt_control_page(bvec->bv_page);

		if (inode && inode == target->mapping->host)
			return true;
		if (page && page == target)
			return true;
		if (ino && ino == ino_of_node(target))
			return true;
	}

	return false;
}

static bool has_merged_page(struct f2fs_sb_info *sbi, struct inode *inode,
						struct page *page, nid_t ino,
						enum page_type type)
{
	enum page_type btype = PAGE_TYPE_OF_BIO(type);
	struct f2fs_bio_info *io = &sbi->write_io[btype];
	bool ret;

	down_read(&io->io_rwsem);
	ret = __has_merged_page(io, inode, page, ino);
	up_read(&io->io_rwsem);
	return ret;
}

static void __f2fs_submit_merged_bio(struct f2fs_sb_info *sbi,
				struct inode *inode, struct page *page,
				nid_t ino, enum page_type type, int rw)
{
	enum page_type btype = PAGE_TYPE_OF_BIO(type);
	struct f2fs_bio_info *io;

	io = is_read_io(rw) ? &sbi->read_io : &sbi->write_io[btype];

	down_write(&io->io_rwsem);

	if (!__has_merged_page(io, inode, page, ino))
		goto out;

	/* change META to META_FLUSH in the checkpoint procedure */
	if (type >= META_FLUSH) {
		io->fio.type = META_FLUSH;
		io->fio.op = REQ_OP_WRITE;
		if (test_opt(sbi, NOBARRIER))
			io->fio.op_flags = WRITE_FLUSH | REQ_META | REQ_PRIO;
		else
			io->fio.op_flags = WRITE_FLUSH_FUA | REQ_META |
								REQ_PRIO;
	}
	__submit_merged_bio(io);
out:
	up_write(&io->io_rwsem);
}

void f2fs_submit_merged_bio(struct f2fs_sb_info *sbi, enum page_type type,
									int rw)
{
	__f2fs_submit_merged_bio(sbi, NULL, NULL, 0, type, rw);
}

void f2fs_submit_merged_bio_cond(struct f2fs_sb_info *sbi,
				struct inode *inode, struct page *page,
				nid_t ino, enum page_type type, int rw)
{
	if (has_merged_page(sbi, inode, page, ino, type))
		__f2fs_submit_merged_bio(sbi, inode, page, ino, type, rw);
}

void f2fs_flush_merged_bios(struct f2fs_sb_info *sbi)
{
	f2fs_submit_merged_bio(sbi, DATA, WRITE);
	f2fs_submit_merged_bio(sbi, NODE, WRITE);
	f2fs_submit_merged_bio(sbi, META, WRITE);
}

/*
 * Fill the locked page with data located in the block address.
 * Return unlocked page.
 */
int f2fs_submit_page_bio(struct f2fs_io_info *fio)
{
	struct bio *bio;
	struct page *page = fio->encrypted_page ?
			fio->encrypted_page : fio->page;

	trace_f2fs_submit_page_bio(page, fio);
	f2fs_trace_ios(fio, 0);

	/* Allocate a new bio */
	bio = __bio_alloc(fio->sbi, fio->new_blkaddr, 1, is_read_io(fio->op));

	if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
		bio_put(bio);
		return -EFAULT;
	}
	bio_set_op_attrs(bio, fio->op, fio->op_flags);

	__submit_bio(fio->sbi, bio, fio->type);
	return 0;
}

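/*
 * Merged-write path: append the page to the per-type pending bio, first
 * flushing that bio whenever the new block is not contiguous with it or the
 * requested operation/flags differ.
 */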
void f2fs_submit_page_mbio(struct f2fs_io_info *fio)
{
	struct f2fs_sb_info *sbi = fio->sbi;
	enum page_type btype = PAGE_TYPE_OF_BIO(fio->type);
	struct f2fs_bio_info *io;
	bool is_read = is_read_io(fio->op);
	struct page *bio_page;

	io = is_read ? &sbi->read_io : &sbi->write_io[btype];

	if (fio->old_blkaddr != NEW_ADDR)
		verify_block_addr(sbi, fio->old_blkaddr);
	verify_block_addr(sbi, fio->new_blkaddr);

	down_write(&io->io_rwsem);

	if (io->bio && (io->last_block_in_bio != fio->new_blkaddr - 1 ||
	    (io->fio.op != fio->op || io->fio.op_flags != fio->op_flags)))
		__submit_merged_bio(io);
alloc_new:
	if (io->bio == NULL) {
		int bio_blocks = MAX_BIO_BLOCKS(sbi);

		io->bio = __bio_alloc(sbi, fio->new_blkaddr,
						bio_blocks, is_read);
		io->fio = *fio;
	}

	bio_page = fio->encrypted_page ? fio->encrypted_page : fio->page;

	if (bio_add_page(io->bio, bio_page, PAGE_SIZE, 0) <
							PAGE_SIZE) {
		__submit_merged_bio(io);
		goto alloc_new;
	}

	io->last_block_in_bio = fio->new_blkaddr;
	f2fs_trace_ios(fio, 0);

	up_write(&io->io_rwsem);
	trace_f2fs_submit_page_mbio(fio->page, fio);
}

static void __set_data_blkaddr(struct dnode_of_data *dn)
{
	struct f2fs_node *rn = F2FS_NODE(dn->node_page);
	__le32 *addr_array;

	/* Get physical address of data block */
	addr_array = blkaddr_in_node(rn);
	addr_array[dn->ofs_in_node] = cpu_to_le32(dn->data_blkaddr);
}

/*
 * Lock ordering for the change of data block address:
 * ->data_page
 *  ->node_page
 *    update block addresses in the node page
 */
void set_data_blkaddr(struct dnode_of_data *dn)
{
	f2fs_wait_on_page_writeback(dn->node_page, NODE, true);
	__set_data_blkaddr(dn);
	if (set_page_dirty(dn->node_page))
		dn->node_changed = true;
}

void f2fs_update_data_blkaddr(struct dnode_of_data *dn, block_t blkaddr)
{
	dn->data_blkaddr = blkaddr;
	set_data_blkaddr(dn);
	f2fs_update_extent_cache(dn);
}

/* dn->ofs_in_node will be returned with up-to-date last block pointer */
int reserve_new_blocks(struct dnode_of_data *dn, blkcnt_t count)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);

	if (!count)
		return 0;

	if (unlikely(is_inode_flag_set(dn->inode, FI_NO_ALLOC)))
		return -EPERM;
	if (unlikely(!inc_valid_block_count(sbi, dn->inode, &count)))
		return -ENOSPC;

	trace_f2fs_reserve_new_blocks(dn->inode, dn->nid,
						dn->ofs_in_node, count);

	f2fs_wait_on_page_writeback(dn->node_page, NODE, true);

	for (; count > 0; dn->ofs_in_node++) {
		block_t blkaddr =
			datablock_addr(dn->node_page, dn->ofs_in_node);
		if (blkaddr == NULL_ADDR) {
			dn->data_blkaddr = NEW_ADDR;
			__set_data_blkaddr(dn);
			count--;
		}
	}

	if (set_page_dirty(dn->node_page))
		dn->node_changed = true;
	return 0;
}

/* Should keep dn->ofs_in_node unchanged */
int reserve_new_block(struct dnode_of_data *dn)
{
	unsigned int ofs_in_node = dn->ofs_in_node;
	int ret;

	ret = reserve_new_blocks(dn, 1);
	dn->ofs_in_node = ofs_in_node;
	return ret;
}

int f2fs_reserve_block(struct dnode_of_data *dn, pgoff_t index)
{
	bool need_put = dn->inode_page ? false : true;
	int err;

	err = get_dnode_of_data(dn, index, ALLOC_NODE);
	if (err)
		return err;

	if (dn->data_blkaddr == NULL_ADDR)
		err = reserve_new_block(dn);
	if (err || need_put)
		f2fs_put_dnode(dn);
	return err;
}

int f2fs_get_block(struct dnode_of_data *dn, pgoff_t index)
{
	struct extent_info ei;
	struct inode *inode = dn->inode;

	if (f2fs_lookup_extent_cache(inode, index, &ei)) {
		dn->data_blkaddr = ei.blk + index - ei.fofs;
		return 0;
	}

	return f2fs_reserve_block(dn, index);
}

struct page *get_read_data_page(struct inode *inode, pgoff_t index,
						int op_flags, bool for_write)
{
	struct address_space *mapping = inode->i_mapping;
	struct dnode_of_data dn;
	struct page *page;
	struct extent_info ei;
	int err;
	struct f2fs_io_info fio = {
		.sbi = F2FS_I_SB(inode),
		.type = DATA,
		.op = REQ_OP_READ,
		.op_flags = op_flags,
		.encrypted_page = NULL,
	};

	if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode))
		return read_mapping_page(mapping, index, NULL);

	page = f2fs_grab_cache_page(mapping, index, for_write);
	if (!page)
		return ERR_PTR(-ENOMEM);

	if (f2fs_lookup_extent_cache(inode, index, &ei)) {
		dn.data_blkaddr = ei.blk + index - ei.fofs;
		goto got_it;
	}

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, index, LOOKUP_NODE);
	if (err)
		goto put_err;
	f2fs_put_dnode(&dn);

	if (unlikely(dn.data_blkaddr == NULL_ADDR)) {
		err = -ENOENT;
		goto put_err;
	}
got_it:
	if (PageUptodate(page)) {
		unlock_page(page);
		return page;
	}

	/*
	 * A new dentry page is allocated but not able to be written, since its
	 * new inode page couldn't be allocated due to -ENOSPC.
	 * In such a case, its blkaddr can remain NEW_ADDR;
	 * see f2fs_add_link -> get_new_data_page -> init_inode_metadata.
	 */
	if (dn.data_blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_SIZE);
		if (!PageUptodate(page))
			SetPageUptodate(page);
		unlock_page(page);
		return page;
	}

	fio.new_blkaddr = fio.old_blkaddr = dn.data_blkaddr;
	fio.page = page;
	err = f2fs_submit_page_bio(&fio);
	if (err)
		goto put_err;
	return page;

put_err:
	f2fs_put_page(page, 1);
	return ERR_PTR(err);
}

struct page *find_data_page(struct inode *inode, pgoff_t index)
{
	struct address_space *mapping = inode->i_mapping;
	struct page *page;

	page = find_get_page(mapping, index);
	if (page && PageUptodate(page))
		return page;
	f2fs_put_page(page, 0);

486
	page = get_read_data_page(inode, index, READ_SYNC, false);
487 488 489 490 491 492 493 494 495 496 497 498 499 500 501 502 503 504 505
	if (IS_ERR(page))
		return page;

	if (PageUptodate(page))
		return page;

	wait_on_page_locked(page);
	if (unlikely(!PageUptodate(page))) {
		f2fs_put_page(page, 0);
		return ERR_PTR(-EIO);
	}
	return page;
}

/*
 * If it tries to access a hole, return an error.
 * Because, the callers, functions in dir.c and GC, should be able to know
 * whether this page exists or not.
 */
506 507
struct page *get_lock_data_page(struct inode *inode, pgoff_t index,
							bool for_write)
508 509 510 511
{
	struct address_space *mapping = inode->i_mapping;
	struct page *page;
repeat:
512
	page = get_read_data_page(inode, index, READ_SYNC, for_write);
513 514
	if (IS_ERR(page))
		return page;
515

516
	/* wait for read completion */
517
	lock_page(page);
518
	if (unlikely(page->mapping != mapping)) {
519 520
		f2fs_put_page(page, 1);
		goto repeat;
521
	}
522 523 524 525
	if (unlikely(!PageUptodate(page))) {
		f2fs_put_page(page, 1);
		return ERR_PTR(-EIO);
	}
526 527 528
	return page;
}

/*
530 531
 * Caller ensures that this data page is never allocated.
 * A new zero-filled data page is allocated in the page cache.
532
 *
 * Also, caller should grab and release a rwsem by calling f2fs_lock_op() and
 * f2fs_unlock_op().
535 536
 * Note that, ipage is set only by make_empty_dir, and if any error occur,
 * ipage should be released by this function.
537
 */
538
struct page *get_new_data_page(struct inode *inode,
539
		struct page *ipage, pgoff_t index, bool new_i_size)
540 541 542 543 544
{
	struct address_space *mapping = inode->i_mapping;
	struct page *page;
	struct dnode_of_data dn;
	int err;
545

546
	page = f2fs_grab_cache_page(mapping, index, true);
547 548 549 550 551 552
	if (!page) {
		/*
		 * before exiting, we should make sure ipage will be released
		 * if any error occur.
		 */
		f2fs_put_page(ipage, 1);
553
		return ERR_PTR(-ENOMEM);
554
	}
555

556
	set_new_dnode(&dn, inode, ipage, NULL, 0);
557
	err = f2fs_reserve_block(&dn, index);
558 559
	if (err) {
		f2fs_put_page(page, 1);
560
		return ERR_PTR(err);
561
	}
562 563
	if (!ipage)
		f2fs_put_dnode(&dn);
564 565

	if (PageUptodate(page))
566
		goto got_it;
567 568

	if (dn.data_blkaddr == NEW_ADDR) {
569
		zero_user_segment(page, 0, PAGE_SIZE);
570 571
		if (!PageUptodate(page))
			SetPageUptodate(page);
572
	} else {
573
		f2fs_put_page(page, 1);
574

575 576 577
		/* if ipage exists, blkaddr should be NEW_ADDR */
		f2fs_bug_on(F2FS_I_SB(inode), ipage);
		page = get_lock_data_page(inode, index, true);
578
		if (IS_ERR(page))
579
			return page;
580
	}
581
got_it:
	if (new_i_size && i_size_read(inode) <
583
				((loff_t)(index + 1) << PAGE_SHIFT))
584
		f2fs_i_size_write(inode, ((loff_t)(index + 1) << PAGE_SHIFT));
585 586 587
	return page;
}

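/*
 * Allocate a new on-disk block for the current dnode entry and push i_size
 * forward when the newly allocated block extends the file.
 */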
static int __allocate_data_block(struct dnode_of_data *dn)
{
590
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
591 592
	struct f2fs_summary sum;
	struct node_info ni;
593
	int seg = CURSEG_WARM_DATA;
594
	pgoff_t fofs;
595
	blkcnt_t count = 1;
596

597
	if (unlikely(is_inode_flag_set(dn->inode, FI_NO_ALLOC)))
598
		return -EPERM;
599 600 601 602 603

	dn->data_blkaddr = datablock_addr(dn->node_page, dn->ofs_in_node);
	if (dn->data_blkaddr == NEW_ADDR)
		goto alloc;

604
	if (unlikely(!inc_valid_block_count(sbi, dn->inode, &count)))
605 606
		return -ENOSPC;

607
alloc:
608 609 610
	get_node_info(sbi, dn->nid, &ni);
	set_summary(&sum, dn->nid, dn->ofs_in_node, ni.version);

611 612 613
	if (dn->ofs_in_node == 0 && dn->inode_page == dn->node_page)
		seg = CURSEG_DIRECT_IO;

614 615
	allocate_data_block(sbi, NULL, dn->data_blkaddr, &dn->data_blkaddr,
								&sum, seg);
616
	set_data_blkaddr(dn);
617

618
	/* update i_size */
619
	fofs = start_bidx_of_node(ofs_of_node(dn->node_page), dn->inode) +
620
							dn->ofs_in_node;
621
	if (i_size_read(dn->inode) < ((loff_t)(fofs + 1) << PAGE_SHIFT))
622
		f2fs_i_size_write(dn->inode,
623
				((loff_t)(fofs + 1) << PAGE_SHIFT));
624 625 626
	return 0;
}

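/*
 * Preallocate the blocks covered by an upcoming write: convert inline data
 * if necessary, then map the range with F2FS_GET_BLOCK_PRE_DIO or
 * F2FS_GET_BLOCK_PRE_AIO depending on whether the write is direct IO.
 */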
ssize_t f2fs_preallocate_blocks(struct kiocb *iocb, struct iov_iter *from)
628
{
629
	struct inode *inode = file_inode(iocb->ki_filp);
	struct f2fs_map_blocks map;
631
	ssize_t ret = 0;
632

633
	map.m_lblk = F2FS_BLK_ALIGN(iocb->ki_pos);
634 635 636 637 638 639
	map.m_len = F2FS_BYTES_TO_BLK(iocb->ki_pos + iov_iter_count(from));
	if (map.m_len > map.m_lblk)
		map.m_len -= map.m_lblk;
	else
		map.m_len = 0;

640
	map.m_next_pgofs = NULL;
641

642 643 644 645 646 647 648
	if (iocb->ki_flags & IOCB_DIRECT) {
		ret = f2fs_convert_inline_inode(inode);
		if (ret)
			return ret;
		return f2fs_map_blocks(inode, &map, 1, F2FS_GET_BLOCK_PRE_DIO);
	}
	if (iocb->ki_pos + iov_iter_count(from) > MAX_INLINE_DATA) {
649 650 651 652
		ret = f2fs_convert_inline_inode(inode);
		if (ret)
			return ret;
	}
653 654
	if (!f2fs_has_inline_data(inode))
		return f2fs_map_blocks(inode, &map, 1, F2FS_GET_BLOCK_PRE_AIO);
655
	return ret;
656 657
}

/*
 * f2fs_map_blocks() now supported readahead/bmap/rw direct_IO with
 * f2fs_map_blocks structure.
 * If original data blocks are allocated, then give them to blockdev.
 * Otherwise,
 *     a. preallocate requested block addresses
 *     b. do not use extent cache for better performance
 *     c. give the block addresses to blockdev
666
 */
int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map,
						int create, int flag)
669
{
	unsigned int maxblocks = map->m_len;
671
	struct dnode_of_data dn;
672
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
673
	int mode = create ? ALLOC_NODE : LOOKUP_NODE;
674
	pgoff_t pgofs, end_offset, end;
675
	int err = 0, ofs = 1;
676 677
	unsigned int ofs_in_node, last_ofs_in_node;
	blkcnt_t prealloc;
678
	struct extent_info ei;
679
	bool allocated = false;
680
	block_t blkaddr;
681

682 683 684
	if (!maxblocks)
		return 0;

	map->m_len = 0;
	map->m_flags = 0;

	/* it only supports block size == page size */
	pgofs =	(pgoff_t)map->m_lblk;
690
	end = pgofs + maxblocks;
691

692
	if (!create && f2fs_lookup_extent_cache(inode, pgofs, &ei)) {
		map->m_pblk = ei.blk + pgofs - ei.fofs;
		map->m_len = min((pgoff_t)maxblocks, ei.fofs + ei.len - pgofs);
		map->m_flags = F2FS_MAP_MAPPED;
696
		goto out;
697
	}
698

next_dnode:
700
	if (create)
701
		f2fs_lock_op(sbi);
702 703 704

	/* When reading holes, we need its node page */
	set_new_dnode(&dn, inode, NULL, NULL, 0);
705
	err = get_dnode_of_data(&dn, pgofs, mode);
706
	if (err) {
		if (flag == F2FS_GET_BLOCK_BMAP)
			map->m_pblk = 0;
709
		if (err == -ENOENT) {
710
			err = 0;
711 712 713 714
			if (map->m_next_pgofs)
				*map->m_next_pgofs =
					get_next_page_offset(&dn, pgofs);
		}
715
		goto unlock_out;
716
	}

718 719
	prealloc = 0;
	ofs_in_node = dn.ofs_in_node;
720
	end_offset = ADDRS_PER_PAGE(dn.node_page, inode);

next_block:
	blkaddr = datablock_addr(dn.node_page, dn.ofs_in_node);

	if (blkaddr == NEW_ADDR || blkaddr == NULL_ADDR) {
		if (create) {
727 728
			if (unlikely(f2fs_cp_error(sbi))) {
				err = -EIO;
				goto sync_out;
730
			}
731
			if (flag == F2FS_GET_BLOCK_PRE_AIO) {
732 733 734 735
				if (blkaddr == NULL_ADDR) {
					prealloc++;
					last_ofs_in_node = dn.ofs_in_node;
				}
736 737
			} else {
				err = __allocate_data_block(&dn);
738
				if (!err) {
739
					set_inode_flag(inode, FI_APPEND_WRITE);
740 741
					allocated = true;
				}
742
			}
			if (err)
				goto sync_out;
			map->m_flags = F2FS_MAP_NEW;
			blkaddr = dn.data_blkaddr;
		} else {
			if (flag == F2FS_GET_BLOCK_BMAP) {
				map->m_pblk = 0;
				goto sync_out;
			}
752 753 754 755 756
			if (flag == F2FS_GET_BLOCK_FIEMAP &&
						blkaddr == NULL_ADDR) {
				if (map->m_next_pgofs)
					*map->m_next_pgofs = pgofs + 1;
			}
			if (flag != F2FS_GET_BLOCK_FIEMAP ||
						blkaddr != NEW_ADDR)
				goto sync_out;
		}
	}
762

763 764 765
	if (flag == F2FS_GET_BLOCK_PRE_AIO)
		goto skip;

	if (map->m_len == 0) {
		/* preallocated unwritten block should be mapped for fiemap. */
		if (blkaddr == NEW_ADDR)
			map->m_flags |= F2FS_MAP_UNWRITTEN;
		map->m_flags |= F2FS_MAP_MAPPED;

		map->m_pblk = blkaddr;
		map->m_len = 1;
	} else if ((map->m_pblk != NEW_ADDR &&
			blkaddr == (map->m_pblk + ofs)) ||
776
			(map->m_pblk == NEW_ADDR && blkaddr == NEW_ADDR) ||
777
			flag == F2FS_GET_BLOCK_PRE_DIO) {
		ofs++;
		map->m_len++;
	} else {
		goto sync_out;
	}
783

784
skip:
785 786 787
	dn.ofs_in_node++;
	pgofs++;

788 789 790
	/* preallocate blocks in batch for one dnode page */
	if (flag == F2FS_GET_BLOCK_PRE_AIO &&
			(pgofs == end || dn.ofs_in_node == end_offset)) {
791

792 793 794 795
		dn.ofs_in_node = ofs_in_node;
		err = reserve_new_blocks(&dn, prealloc);
		if (err)
			goto sync_out;
796
		allocated = dn.node_changed;
797

798 799 800 801
		map->m_len += dn.ofs_in_node - ofs_in_node;
		if (prealloc && dn.ofs_in_node != last_ofs_in_node + 1) {
			err = -ENOSPC;
			goto sync_out;
802
		}
803 804 805 806 807 808 809 810 811 812 813 814 815
		dn.ofs_in_node = end_offset;
	}

	if (pgofs >= end)
		goto sync_out;
	else if (dn.ofs_in_node < end_offset)
		goto next_block;

	f2fs_put_dnode(&dn);

	if (create) {
		f2fs_unlock_op(sbi);
		f2fs_balance_fs(sbi, allocated);
816
	}
817 818
	allocated = false;
	goto next_dnode;
819

820
sync_out:
821
	f2fs_put_dnode(&dn);
822
unlock_out:
823
	if (create) {
824
		f2fs_unlock_op(sbi);
825
		f2fs_balance_fs(sbi, allocated);
826
	}
827
out:
	trace_f2fs_map_blocks(inode, map, err);
829
	return err;
830 831
}

static int __get_data_block(struct inode *inode, sector_t iblock,
833 834
			struct buffer_head *bh, int create, int flag,
			pgoff_t *next_pgofs)
{
	struct f2fs_map_blocks map;
	int ret;

	map.m_lblk = iblock;
	map.m_len = bh->b_size >> inode->i_blkbits;
841
	map.m_next_pgofs = next_pgofs;

	ret = f2fs_map_blocks(inode, &map, create, flag);
	if (!ret) {
		map_bh(bh, inode->i_sb, map.m_pblk);
		bh->b_state = (bh->b_state & ~F2FS_MAP_FLAGS) | map.m_flags;
		bh->b_size = map.m_len << inode->i_blkbits;
	}
	return ret;
}

852
static int get_data_block(struct inode *inode, sector_t iblock,
853 854
			struct buffer_head *bh_result, int create, int flag,
			pgoff_t *next_pgofs)
{
856 857
	return __get_data_block(inode, iblock, bh_result, create,
							flag, next_pgofs);
}

static int get_data_block_dio(struct inode *inode, sector_t iblock,
861 862
			struct buffer_head *bh_result, int create)
{
	return __get_data_block(inode, iblock, bh_result, create,
864
						F2FS_GET_BLOCK_DIO, NULL);
865 866
}

static int get_data_block_bmap(struct inode *inode, sector_t iblock,
868 869
			struct buffer_head *bh_result, int create)
{
870
	/* Block number less than F2FS MAX BLOCKS */
	if (unlikely(iblock >= F2FS_I_SB(inode)->max_file_blocks))
872 873
		return -EFBIG;

	return __get_data_block(inode, iblock, bh_result, create,
875
						F2FS_GET_BLOCK_BMAP, NULL);
876 877
}

static inline sector_t logical_to_blk(struct inode *inode, loff_t offset)
{
	return (offset >> inode->i_blkbits);
}

static inline loff_t blk_to_logical(struct inode *inode, sector_t blk)
{
	return (blk << inode->i_blkbits);
}

int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
		u64 start, u64 len)
{
891 892
	struct buffer_head map_bh;
	sector_t start_blk, last_blk;
893
	pgoff_t next_pgofs;
894
	loff_t isize;
	u64 logical = 0, phys = 0, size = 0;
	u32 flags = 0;
	int ret = 0;

	ret = fiemap_check_flags(fieinfo, FIEMAP_FLAG_SYNC);
	if (ret)
		return ret;

	if (f2fs_has_inline_data(inode)) {
		ret = f2fs_inline_data_fiemap(inode, fieinfo, start, len);
		if (ret != -EAGAIN)
			return ret;
	}

	inode_lock(inode);
910 911

	isize = i_size_read(inode);
912 913
	if (start >= isize)
		goto out;
914

915 916
	if (start + len > isize)
		len = isize - start;
917 918 919 920 921 922

	if (logical_to_blk(inode, len) == 0)
		len = blk_to_logical(inode, 1);

	start_blk = logical_to_blk(inode, start);
	last_blk = logical_to_blk(inode, start + len - 1);
923

924 925 926 927
next:
	memset(&map_bh, 0, sizeof(struct buffer_head));
	map_bh.b_size = len;

	ret = get_data_block(inode, start_blk, &map_bh, 0,
929
					F2FS_GET_BLOCK_FIEMAP, &next_pgofs);
930 931 932 933 934
	if (ret)
		goto out;

	/* HOLE */
	if (!buffer_mapped(&map_bh)) {
935
		start_blk = next_pgofs;
936
		/* Go through holes until passing the EOF */
937
		if (blk_to_logical(inode, start_blk) < isize)
938 939 940 941 942 943 944
			goto prep_next;
		/* Found a hole beyond isize means no more extents.
		 * Note that the premise is that filesystems don't
		 * punch holes beyond isize and keep size unchanged.
		 */
		flags |= FIEMAP_EXTENT_LAST;
	}
945

946 947 948 949
	if (size) {
		if (f2fs_encrypted_inode(inode))
			flags |= FIEMAP_EXTENT_DATA_ENCRYPTED;

950 951
		ret = fiemap_fill_next_extent(fieinfo, logical,
				phys, size, flags);
952
	}
953

954 955
	if (start_blk > last_blk || ret)
		goto out;
956

957 958 959 960 961 962
	logical = blk_to_logical(inode, start_blk);
	phys = blk_to_logical(inode, map_bh.b_blocknr);
	size = map_bh.b_size;
	flags = 0;
	if (buffer_unwritten(&map_bh))
		flags = FIEMAP_EXTENT_UNWRITTEN;
963

964
	start_blk += logical_to_blk(inode, size);
965

966
prep_next:
967 968 969 970 971 972 973 974 975
	cond_resched();
	if (fatal_signal_pending(current))
		ret = -EINTR;
	else
		goto next;
out:
	if (ret == 1)
		ret = 0;

	inode_unlock(inode);
977
	return ret;
}

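/*
 * Allocate a read bio for the given block address, attaching an fscrypt
 * context when the inode is an encrypted regular file.
 */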
static struct bio *f2fs_grab_bio(struct inode *inode, block_t blkaddr,
				 unsigned nr_pages)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct fscrypt_ctx *ctx = NULL;
	struct block_device *bdev = sbi->sb->s_bdev;
	struct bio *bio;

	if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode)) {
		ctx = fscrypt_get_ctx(inode, GFP_NOFS);
		if (IS_ERR(ctx))
			return ERR_CAST(ctx);

		/* wait the page to be moved by cleaning */
		f2fs_wait_on_encrypted_page_writeback(sbi, blkaddr);
	}

	bio = bio_alloc(GFP_KERNEL, min_t(int, nr_pages, BIO_MAX_PAGES));
	if (!bio) {
		if (ctx)
			fscrypt_release_ctx(ctx);
		return ERR_PTR(-ENOMEM);
	}
	bio->bi_bdev = bdev;
	bio->bi_iter.bi_sector = SECTOR_FROM_BLOCK(blkaddr);
	bio->bi_end_io = f2fs_read_end_io;
	bio->bi_private = ctx;

	return bio;
}

/*
 * This function was originally taken from fs/mpage.c, and customized for f2fs.
 * Major change was from block_size == page_size in f2fs by default.
 */
static int f2fs_mpage_readpages(struct address_space *mapping,
			struct list_head *pages, struct page *page,
			unsigned nr_pages)
{
	struct bio *bio = NULL;
	unsigned page_idx;
	sector_t last_block_in_bio = 0;
	struct inode *inode = mapping->host;
	const unsigned blkbits = inode->i_blkbits;
	const unsigned blocksize = 1 << blkbits;
	sector_t block_in_file;
	sector_t last_block;
	sector_t last_block_in_file;
	sector_t block_nr;
	struct f2fs_map_blocks map;

	map.m_pblk = 0;
	map.m_lblk = 0;
	map.m_len = 0;
	map.m_flags = 0;
1035
	map.m_next_pgofs = NULL;

	for (page_idx = 0; nr_pages; page_idx++, nr_pages--) {

		prefetchw(&page->flags);
		if (pages) {
			page = list_entry(pages->prev, struct page, lru);
			list_del(&page->lru);
			if (add_to_page_cache_lru(page, mapping,
1044 1045
						  page->index,
						  readahead_gfp_mask(mapping)))
				goto next_page;
		}

		block_in_file = (sector_t)page->index;
		last_block = block_in_file + nr_pages;
		last_block_in_file = (i_size_read(inode) + blocksize - 1) >>
								blkbits;
		if (last_block > last_block_in_file)
			last_block = last_block_in_file;

		/*
		 * Map blocks using the previous result first.
		 */
		if ((map.m_flags & F2FS_MAP_MAPPED) &&
				block_in_file > map.m_lblk &&
				block_in_file < (map.m_lblk + map.m_len))
			goto got_it;

		/*
		 * Then do more f2fs_map_blocks() calls until we are
		 * done with this page.
		 */
		map.m_flags = 0;

		if (block_in_file < last_block) {
			map.m_lblk = block_in_file;
			map.m_len = last_block - block_in_file;

1074
			if (f2fs_map_blocks(inode, &map, 0,
1075
						F2FS_GET_BLOCK_READ))
				goto set_error_page;
		}
got_it:
		if ((map.m_flags & F2FS_MAP_MAPPED)) {
			block_nr = map.m_pblk + block_in_file - map.m_lblk;
			SetPageMappedToDisk(page);

			if (!PageUptodate(page) && !cleancache_get_page(page)) {
				SetPageUptodate(page);
				goto confused;
			}
		} else {
1088
			zero_user_segment(page, 0, PAGE_SIZE);
1089 1090
			if (!PageUptodate(page))
				SetPageUptodate(page);
			unlock_page(page);
			goto next_page;
		}

		/*
		 * This page will go to BIO.  Do we need to send this
		 * BIO off first?
		 */
		if (bio && (last_block_in_bio != block_nr - 1)) {
submit_and_realloc:
1101
			__submit_bio(F2FS_I_SB(inode), bio, DATA);
			bio = NULL;
		}
		if (bio == NULL) {
1105
			bio = f2fs_grab_bio(inode, block_nr, nr_pages);
			if (IS_ERR(bio)) {
				bio = NULL;
				goto set_error_page;
1109
			}
			bio_set_op_attrs(bio, REQ_OP_READ, 0);
		}

		if (bio_add_page(bio, page, blocksize, 0) < blocksize)
			goto submit_and_realloc;

		last_block_in_bio = block_nr;
		goto next_page;
set_error_page:
		SetPageError(page);
1120
		zero_user_segment(page, 0, PAGE_SIZE);
		unlock_page(page);
		goto next_page;
confused:
		if (bio) {
1125
			__submit_bio(F2FS_I_SB(inode), bio, DATA);
			bio = NULL;
		}
		unlock_page(page);
next_page:
		if (pages)
1131
			put_page(page);
	}
	BUG_ON(pages && !list_empty(pages));
	if (bio)
1135
		__submit_bio(F2FS_I_SB(inode), bio, DATA);
	return 0;
}

1139 1140
static int f2fs_read_data_page(struct file *file, struct page *page)
{
	struct inode *inode = page->mapping->host;
1142
	int ret = -EAGAIN;

1144 1145
	trace_f2fs_readpage(page, DATA);

	/* If the file has inline data, try to read it directly */
	if (f2fs_has_inline_data(inode))
		ret = f2fs_read_inline_data(inode, page);
1149
	if (ret == -EAGAIN)
		ret = f2fs_mpage_readpages(page->mapping, NULL, page, 1);
	return ret;
}

static int f2fs_read_data_pages(struct file *file,
			struct address_space *mapping,
			struct list_head *pages, unsigned nr_pages)
{
	struct inode *inode = file->f_mapping->host;
1159 1160 1161
	struct page *page = list_entry(pages->prev, struct page, lru);

	trace_f2fs_readpages(inode, page, nr_pages);

	/* If the file has inline data, skip readpages */
	if (f2fs_has_inline_data(inode))
		return 0;

	return f2fs_mpage_readpages(mapping, pages, NULL, nr_pages);
1168 1169
}

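/*
 * Write one data page: encrypt it when needed, then either rewrite the block
 * in place (IPU) or allocate a new block and write out of place (OPU).
 */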
int do_write_data_page(struct f2fs_io_info *fio)
1171
{
1172
	struct page *page = fio->page;
1173 1174 1175 1176 1177
	struct inode *inode = page->mapping->host;
	struct dnode_of_data dn;
	int err = 0;

	set_new_dnode(&dn, inode, NULL, NULL, 0);
1178
	err = get_dnode_of_data(&dn, page->index, LOOKUP_NODE);
1179 1180 1181
	if (err)
		return err;

1182
	fio->old_blkaddr = dn.data_blkaddr;
1183 1184

	/* This page is already truncated */
1185
	if (fio->old_blkaddr == NULL_ADDR) {
1186
		ClearPageUptodate(page);
1187
		goto out_writepage;
1188
	}
1189

1190
	if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode)) {
1191
		gfp_t gfp_flags = GFP_NOFS;
1192 1193 1194

		/* wait for GCed encrypted page writeback */
		f2fs_wait_on_encrypted_page_writeback(F2FS_I_SB(inode),
1195
							fio->old_blkaddr);
1196 1197 1198
retry_encrypt:
		fio->encrypted_page = fscrypt_encrypt_page(inode, fio->page,
								gfp_flags);
1199 1200
		if (IS_ERR(fio->encrypted_page)) {
			err = PTR_ERR(fio->encrypted_page);
1201 1202 1203 1204 1205 1206 1207 1208
			if (err == -ENOMEM) {
				/* flush pending ios and wait for a while */
				f2fs_flush_merged_bios(F2FS_I_SB(inode));
				congestion_wait(BLK_RW_ASYNC, HZ/50);
				gfp_flags |= __GFP_NOFAIL;
				err = 0;
				goto retry_encrypt;
			}
1209 1210 1211 1212
			goto out_writepage;
		}
	}

1213 1214 1215 1216 1217 1218
	set_page_writeback(page);

	/*
	 * If current allocation needs SSR,
	 * it had better in-place writes for updated data.
	 */
1219
	if (unlikely(fio->old_blkaddr != NEW_ADDR &&
1220
			!is_cold_data(page) &&
1221
			!IS_ATOMIC_WRITTEN_PAGE(page) &&
1222
			need_inplace_update(inode))) {
1223
		rewrite_data_page(fio);
1224
		set_inode_flag(inode, FI_UPDATE_WRITE);
1225
		trace_f2fs_do_write_data_page(page, IPU);
1226
	} else {
1227
		write_data_page(&dn, fio);
1228
		trace_f2fs_do_write_data_page(page, OPU);
1229
		set_inode_flag(inode, FI_APPEND_WRITE);
1230
		if (page->index == 0)
1231
			set_inode_flag(inode, FI_FIRST_BLOCK_WRITTEN);
	}
out_writepage:
	f2fs_put_dnode(&dn);
	return err;
}

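/* Write back a single data page, zeroing the tail beyond i_size first. */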
static int f2fs_write_data_page(struct page *page,
					struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
1242
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1243 1244
	loff_t i_size = i_size_read(inode);
	const pgoff_t end_index = ((unsigned long long) i_size)
1245
							>> PAGE_SHIFT;
1246
	loff_t psize = (page->index + 1) << PAGE_SHIFT;
	unsigned offset = 0;
1248
	bool need_balance_fs = false;
1249
	int err = 0;
	struct f2fs_io_info fio = {
1251
		.sbi = sbi,
		.type = DATA,
		.op = REQ_OP_WRITE,
		.op_flags = (wbc->sync_mode == WB_SYNC_ALL) ? WRITE_SYNC : 0,
1255
		.page = page,
1256
		.encrypted_page = NULL,
	};
1258

1259 1260
	trace_f2fs_writepage(page, DATA);

1261
	if (page->index < end_index)
1262
		goto write;
1263 1264 1265 1266 1267

	/*
	 * If the offset is out-of-range of file size,
	 * this page does not have to be written to disk.
	 */
1268
	offset = i_size & (PAGE_SIZE - 1);
1269
	if ((page->index >= end_index + 1) || !offset)
1270
		goto out;
1271

1272
	zero_user_segment(page, offset, PAGE_SIZE);
1273
write:
1274
	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
1275
		goto redirty_out;
1276 1277
	if (f2fs_is_drop_cache(inode))
		goto out;
1278 1279 1280 1281
	/* we should not write 0'th page having journal header */
	if (f2fs_is_volatile_file(inode) && (!page->index ||
			(!wbc->for_reclaim &&
			available_free_memory(sbi, BASE_CHECK))))
1282
		goto redirty_out;
1283

1284 1285
	/* we should bypass data pages to let the kworker jobs proceed */
	if (unlikely(f2fs_cp_error(sbi))) {
		mapping_set_error(page->mapping, -EIO);
1287
		goto out;
1288 1289
	}

1290
	/* Dentry blocks are controlled by checkpoint */
1291
	if (S_ISDIR(inode->i_mode)) {
1292
		err = do_write_data_page(&fio);
1293 1294
		goto done;
	}

1296
	if (!wbc->for_reclaim)
1297
		need_balance_fs = true;
1298
	else if (has_not_enough_free_secs(sbi, 0, 0))
1299
		goto redirty_out;
1300

1301
	err = -EAGAIN;
1302
	f2fs_lock_op(sbi);
1303 1304 1305
	if (f2fs_has_inline_data(inode))
		err = f2fs_write_inline_data(inode, page);
	if (err == -EAGAIN)
1306
		err = do_write_data_page(&fio);
1307 1308
	if (F2FS_I(inode)->last_disk_size < psize)
		F2FS_I(inode)->last_disk_size = psize;
1309 1310 1311 1312
	f2fs_unlock_op(sbi);
done:
	if (err && err != -ENOENT)
		goto redirty_out;
1313 1314

	clear_cold_data(page);
1315
out:
1316
	inode_dec_dirty_pages(inode);
1317 1318
	if (err)
		ClearPageUptodate(page);
1319 1320 1321 1322 1323 1324

	if (wbc->for_reclaim) {
		f2fs_submit_merged_bio_cond(sbi, NULL, page, 0, DATA, WRITE);
		remove_dirty_inode(inode);
	}

1325
	unlock_page(page);
	f2fs_balance_fs(sbi, need_balance_fs);
1327 1328

	if (unlikely(f2fs_cp_error(sbi)))
1329
		f2fs_submit_merged_bio(sbi, DATA, WRITE);
1330

1331 1332 1333
	return 0;

redirty_out:
1334
	redirty_page_for_writepage(wbc, page);
	unlock_page(page);
	return err;
1337 1338
}

/*
 * This function was copied from write_cache_pages in mm/page-writeback.c.
 * The major change is that it writes cold data pages separately from
 * warm/hot data pages.
 */
static int f2fs_write_cache_pages(struct address_space *mapping,
					struct writeback_control *wbc)
{
	int ret = 0;
	int done = 0;
	struct pagevec pvec;
	int nr_pages;
	pgoff_t uninitialized_var(writeback_index);
	pgoff_t index;
	pgoff_t end;		/* Inclusive */
	pgoff_t done_index;
	int cycled;
	int range_whole = 0;
	int tag;
	int nwritten = 0;

	pagevec_init(&pvec, 0);
1361

	if (wbc->range_cyclic) {
		writeback_index = mapping->writeback_index; /* prev offset */
		index = writeback_index;
		if (index == 0)
			cycled = 1;
		else
			cycled = 0;
		end = -1;
	} else {
1371 1372
		index = wbc->range_start >> PAGE_SHIFT;
		end = wbc->range_end >> PAGE_SHIFT;
		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
			range_whole = 1;
		cycled = 1; /* ignore range_cyclic tests */
	}
	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
		tag = PAGECACHE_TAG_TOWRITE;
	else
		tag = PAGECACHE_TAG_DIRTY;
retry:
	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
		tag_pages_for_writeback(mapping, index, end);
	done_index = index;
	while (!done && (index <= end)) {
		int i;

		nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag,
			      min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1);
		if (nr_pages == 0)
			break;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];

			if (page->index > end) {
				done = 1;
				break;
			}

			done_index = page->index;

			lock_page(page);

			if (unlikely(page->mapping != mapping)) {
continue_unlock:
				unlock_page(page);
				continue;
			}

			if (!PageDirty(page)) {
				/* someone wrote it for us */
				goto continue_unlock;
			}

			if (PageWriteback(page)) {
				if (wbc->sync_mode != WB_SYNC_NONE)
1418 1419
					f2fs_wait_on_page_writeback(page,
								DATA, true);
				else
					goto continue_unlock;
			}

			BUG_ON(PageWriteback(page));
			if (!clear_page_dirty_for_io(page))
				goto continue_unlock;

			ret = mapping->a_ops->writepage(page, wbc);
			if (unlikely(ret)) {
				done_index = page->index + 1;
				done = 1;
				break;
			} else {
				nwritten++;
			}

			if (--wbc->nr_to_write <= 0 &&
			    wbc->sync_mode == WB_SYNC_NONE) {
				done = 1;
				break;
			}
		}
		pagevec_release(&pvec);
		cond_resched();
	}

	if (!cycled && !done) {
		cycled = 1;
		index = 0;
		end = writeback_index - 1;
		goto retry;
	}
	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
		mapping->writeback_index = done_index;

	if (nwritten)
		f2fs_submit_merged_bio_cond(F2FS_M_SB(mapping), mapping->host,
							NULL, 0, DATA, WRITE);

	return ret;
}

1463
static int f2fs_write_data_pages(struct address_space *mapping,
1464 1465 1466
			    struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;
1467
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1468
	struct blk_plug plug;
1469 1470
	int ret;

	/* deal with chardevs and other special file */
	if (!mapping->a_ops->writepage)
		return 0;

1475 1476 1477 1478
	/* skip writing if there is no dirty page in this inode */
	if (!get_dirty_pages(inode) && wbc->sync_mode == WB_SYNC_NONE)
		return 0;

1479 1480 1481 1482 1483
	if (S_ISDIR(inode->i_mode) && wbc->sync_mode == WB_SYNC_NONE &&
			get_dirty_pages(inode) < nr_pages_to_skip(sbi, DATA) &&
			available_free_memory(sbi, DIRTY_DENTS))
		goto skip_write;

	/* skip writing during file defragment */
1485
	if (is_inode_flag_set(inode, FI_DO_DEFRAG))
		goto skip_write;

1488 1489 1490 1491
	/* during POR, we don't need to trigger writepage at all. */
	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
		goto skip_write;

	trace_f2fs_writepages(mapping->host, wbc, DATA);

1494
	blk_start_plug(&plug);
	ret = f2fs_write_cache_pages(mapping, wbc);
1496
	blk_finish_plug(&plug);
1497 1498 1499 1500
	/*
	 * if some pages were truncated, we cannot guarantee its mapping->host
	 * to detect pending bios.
	 */

1502
	remove_dirty_inode(inode);
1503
	return ret;
1504 1505

skip_write:
1506
	wbc->pages_skipped += get_dirty_pages(inode);
	trace_f2fs_writepages(mapping->host, wbc, DATA);
1508
	return 0;
1509 1510
}

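/* Roll back page cache and blocks beyond i_size after a failed write. */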
static void f2fs_write_failed(struct address_space *mapping, loff_t to)
{
	struct inode *inode = mapping->host;
	loff_t i_size = i_size_read(inode);
1515

	if (to > i_size) {
		truncate_pagecache(inode, i_size);
		truncate_blocks(inode, i_size, true);
1519 1520 1521
	}
}

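/*
 * Find or reserve the block backing the page being written, converting
 * inline data when the write can no longer stay inline; f2fs_lock_op() is
 * taken for the cases that may allocate.
 */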
static int prepare_write_begin(struct f2fs_sb_info *sbi,
			struct page *page, loff_t pos, unsigned len,
			block_t *blk_addr, bool *node_changed)
{
	struct inode *inode = page->mapping->host;
	pgoff_t index = page->index;
	struct dnode_of_data dn;
	struct page *ipage;
1530 1531
	bool locked = false;
	struct extent_info ei;
1532 1533
	int err = 0;

1534 1535 1536 1537
	/*
	 * we already allocated all the blocks, so we don't need to get
	 * the block addresses when there is no need to fill the page.
	 */
1538
	if (!f2fs_has_inline_data(inode) && len == PAGE_SIZE)
1539 1540
		return 0;

1541
	if (f2fs_has_inline_data(inode) ||
1542
			(pos & PAGE_MASK) >= i_size_read(inode)) {
1543 1544 1545 1546
		f2fs_lock_op(sbi);
		locked = true;
	}
restart:
1547 1548 1549 1550 1551 1552 1553 1554 1555 1556 1557 1558
	/* check inline_data */
	ipage = get_node_page(sbi, inode->i_ino);
	if (IS_ERR(ipage)) {
		err = PTR_ERR(ipage);
		goto unlock_out;
	}

	set_new_dnode(&dn, inode, ipage, ipage, 0);

	if (f2fs_has_inline_data(inode)) {
		if (pos + len <= MAX_INLINE_DATA) {
			read_inline_data(page, ipage);
1559
			set_inode_flag(inode, FI_DATA_EXIST);
1560 1561
			if (inode->i_nlink)
				set_inline_node(ipage);
1562 1563 1564
		} else {
			err = f2fs_convert_inline_page(&dn, page);
			if (err)
1565 1566 1567 1568 1569 1570 1571 1572 1573 1574 1575 1576
				goto out;
			if (dn.data_blkaddr == NULL_ADDR)
				err = f2fs_get_block(&dn, index);
		}
	} else if (locked) {
		err = f2fs_get_block(&dn, index);
	} else {
		if (f2fs_lookup_extent_cache(inode, index, &ei)) {
			dn.data_blkaddr = ei.blk + index - ei.fofs;
		} else {
			/* hole case */
			err = get_dnode_of_data(&dn, index, LOOKUP_NODE);
1577
			if (err || dn.data_blkaddr == NULL_ADDR) {
1578 1579 1580 1581 1582
				f2fs_put_dnode(&dn);
				f2fs_lock_op(sbi);
				locked = true;
				goto restart;
			}
1583 1584
		}
	}
1585

1586 1587 1588
	/* convert_inline_page can make node_changed */
	*blk_addr = dn.data_blkaddr;
	*node_changed = dn.node_changed;
1589
out:
1590 1591
	f2fs_put_dnode(&dn);
unlock_out:
1592 1593
	if (locked)
		f2fs_unlock_op(sbi);
1594 1595 1596
	return err;
}

1597 1598 1599 1600 1601
static int f2fs_write_begin(struct file *file, struct address_space *mapping,
		loff_t pos, unsigned len, unsigned flags,
		struct page **pagep, void **fsdata)
{
	struct inode *inode = mapping->host;
1602
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1603
	struct page *page = NULL;
1604
	pgoff_t index = ((unsigned long long) pos) >> PAGE_SHIFT;
1605 1606
	bool need_balance = false;
	block_t blkaddr = NULL_ADDR;
1607 1608
	int err = 0;

1609 1610
	trace_f2fs_write_begin(inode, pos, len, flags);

	/*
	 * We should check this at this moment to avoid deadlock on inode page
	 * and #0 page. The locking rule for inline_data conversion should be:
	 * lock_page(page #0) -> lock_page(inode_page)
	 */
	if (index != 0) {
		err = f2fs_convert_inline_inode(inode);
		if (err)
			goto fail;
	}
1621
repeat:
1622
	page = grab_cache_page_write_begin(mapping, index, flags);
1623 1624 1625 1626
	if (!page) {
		err = -ENOMEM;
		goto fail;
	}
1627

1628 1629
	*pagep = page;

1630 1631
	err = prepare_write_begin(sbi, page, pos, len,
					&blkaddr, &need_balance);
1632
	if (err)
1633
		goto fail;
1634

1635
	if (need_balance && has_not_enough_free_secs(sbi, 0, 0)) {
1636
		unlock_page(page);
		f2fs_balance_fs(sbi, true);
1638 1639 1640 1641 1642 1643 1644 1645
		lock_page(page);
		if (page->mapping != mapping) {
			/* The page got truncated from under us */
			f2fs_put_page(page, 1);
			goto repeat;
		}
	}

1646
	f2fs_wait_on_page_writeback(page, DATA, false);
1647

1648 1649
	/* wait for GCed encrypted page writeback */
	if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode))
1650
		f2fs_wait_on_encrypted_page_writeback(sbi, blkaddr);
1651

1652 1653
	if (len == PAGE_SIZE || PageUptodate(page))
		return 0;
1654

1655
	if (blkaddr == NEW_ADDR) {
1656
		zero_user_segment(page, 0, PAGE_SIZE);
1657
		SetPageUptodate(page);
1658
	} else {
1659
		struct bio *bio;
1660

1661 1662 1663
		bio = f2fs_grab_bio(inode, blkaddr, 1);
		if (IS_ERR(bio)) {
			err = PTR_ERR(bio);
1664
			goto fail;
1665
		}
1666
		bio_set_op_attrs(bio, REQ_OP_READ, READ_SYNC);
1667 1668 1669 1670 1671 1672
		if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
			bio_put(bio);
			err = -EFAULT;
			goto fail;
		}

1673
		__submit_bio(sbi, bio, DATA);
1674

1675
		lock_page(page);
1676
		if (unlikely(page->mapping != mapping)) {
1677 1678
			f2fs_put_page(page, 1);
			goto repeat;
1679
		}
1680 1681 1682
		if (unlikely(!PageUptodate(page))) {
			err = -EIO;
			goto fail;
1683
		}
1684 1685
	}
	return 0;
1686

1687
fail:
1688
	f2fs_put_page(page, 1);
1689 1690
	f2fs_write_failed(mapping, pos + len);
	return err;
1691 1692
}

1693 1694 1695 1696 1697 1698 1699
static int f2fs_write_end(struct file *file,
			struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	struct inode *inode = page->mapping->host;

1700 1701
	trace_f2fs_write_end(inode, pos, len, copied);

	/*
	 * This should be come from len == PAGE_SIZE, and we expect copied
	 * should be PAGE_SIZE. Otherwise, we treat it with zero copied and
	 * let generic_perform_write() try to copy data again through copied=0.
	 */
	if (!PageUptodate(page)) {
		if (unlikely(copied != PAGE_SIZE))
			copied = 0;
		else
			SetPageUptodate(page);
	}
	if (!copied)
		goto unlock_out;

1716
	set_page_dirty(page);
1717
	clear_cold_data(page);
1718

1719 1720
	if (pos + copied > i_size_read(inode))
		f2fs_i_size_write(inode, pos + copied);
1721
unlock_out:
1722
	f2fs_put_page(page, 1);
1723
	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
1724 1725 1726
	return copied;
}

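/* Direct IO requires the offset and the iovec to be block-size aligned. */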
static int check_direct_IO(struct inode *inode, struct iov_iter *iter,
			   loff_t offset)
1729 1730 1731 1732 1733 1734
{
	unsigned blocksize_mask = inode->i_sb->s_blocksize - 1;

	if (offset & blocksize_mask)
		return -EINVAL;

	if (iov_iter_alignment(iter) & blocksize_mask)
		return -EINVAL;

1738 1739 1740
	return 0;
}

1741
static ssize_t f2fs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
1742
{
1743
	struct address_space *mapping = iocb->ki_filp->f_mapping;
1744 1745
	struct inode *inode = mapping->host;
	size_t count = iov_iter_count(iter);
1746
	loff_t offset = iocb->ki_pos;
1747
	int rw = iov_iter_rw(iter);
1748
	int err;
1749

1750
	err = check_direct_IO(inode, iter, offset);
1751 1752
	if (err)
		return err;

1754 1755
	if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode))
		return 0;
1756 1757
	if (test_opt(F2FS_I_SB(inode), LFS))
		return 0;
1758

1759
	trace_f2fs_direct_IO_enter(inode, offset, count, rw);
1760

1761
	down_read(&F2FS_I(inode)->dio_rwsem[rw]);
1762
	err = blockdev_direct_IO(iocb, inode, iter, get_data_block_dio);
1763 1764 1765
	up_read(&F2FS_I(inode)->dio_rwsem[rw]);

	if (rw == WRITE) {
1766
		if (err > 0)
1767
			set_inode_flag(inode, FI_UPDATE_WRITE);
1768 1769 1770
		else if (err < 0)
			f2fs_write_failed(mapping, offset + count);
	}
1771

1772
	trace_f2fs_direct_IO_exit(inode, offset, count, rw, err);
1773

1774
	return err;
1775 1776
}

1777 1778
void f2fs_invalidate_page(struct page *page, unsigned int offset,
							unsigned int length)
1779 1780
{
	struct inode *inode = page->mapping->host;
1781
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1782

1783
	if (inode->i_ino >= F2FS_ROOT_INO(sbi) &&
1784
		(offset % PAGE_SIZE || length != PAGE_SIZE))
1785 1786
		return;

1787 1788 1789 1790 1791 1792 1793 1794
	if (PageDirty(page)) {
		if (inode->i_ino == F2FS_META_INO(sbi))
			dec_page_count(sbi, F2FS_DIRTY_META);
		else if (inode->i_ino == F2FS_NODE_INO(sbi))
			dec_page_count(sbi, F2FS_DIRTY_NODES);
		else
			inode_dec_dirty_pages(inode);
	}

	/* This is atomic written page, keep Private */
	if (IS_ATOMIC_WRITTEN_PAGE(page))
		return;

1800
	set_page_private(page, 0);
1801 1802 1803
	ClearPagePrivate(page);
}

1804
int f2fs_release_page(struct page *page, gfp_t wait)
1805
{
1806 1807 1808 1809
	/* If this is dirty page, keep PagePrivate */
	if (PageDirty(page))
		return 0;

	/* This is atomic written page, keep Private */
	if (IS_ATOMIC_WRITTEN_PAGE(page))
		return 0;

1814
	set_page_private(page, 0);
1815
	ClearPagePrivate(page);
1816
	return 1;
1817 1818
}

/*
 * This was copied from __set_page_dirty_buffers which gives higher performance
 * in very high speed storages. (e.g., pmem)
 */
void f2fs_set_page_dirty_nobuffers(struct page *page)
{
	struct address_space *mapping = page->mapping;
	unsigned long flags;

	if (unlikely(!mapping))
		return;

	spin_lock(&mapping->private_lock);
	lock_page_memcg(page);
	SetPageDirty(page);
	spin_unlock(&mapping->private_lock);

	spin_lock_irqsave(&mapping->tree_lock, flags);
	WARN_ON_ONCE(!PageUptodate(page));
	account_page_dirtied(page, mapping);
	radix_tree_tag_set(&mapping->page_tree,
			page_index(page), PAGECACHE_TAG_DIRTY);
	spin_unlock_irqrestore(&mapping->tree_lock, flags);
	unlock_page_memcg(page);

	__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
	return;
}

1848 1849 1850 1851 1852
static int f2fs_set_data_page_dirty(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct inode *inode = mapping->host;

1853 1854
	trace_f2fs_set_page_dirty(page, DATA);

1855 1856
	if (!PageUptodate(page))
		SetPageUptodate(page);
1857

1858
	if (f2fs_is_atomic_file(inode)) {
		if (!IS_ATOMIC_WRITTEN_PAGE(page)) {
			register_inmem_page(inode, page);
			return 1;
		}
		/*
		 * Previously, this page has been registered, we just
		 * return here.
		 */
		return 0;
1868 1869
	}

1870
	if (!PageDirty(page)) {
1871
		f2fs_set_page_dirty_nobuffers(page);
1872
		update_dirty_page(inode, page);
1873 1874 1875 1876 1877
		return 1;
	}
	return 0;
}

static sector_t f2fs_bmap(struct address_space *mapping, sector_t block)
{
1880 1881
	struct inode *inode = mapping->host;

	if (f2fs_has_inline_data(inode))
		return 0;

	/* make sure allocating whole blocks */
	if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
		filemap_write_and_wait(mapping);

	return generic_block_bmap(mapping, block, get_data_block_bmap);
1890 1891
}

#ifdef CONFIG_MIGRATION
#include <linux/migrate.h>

int f2fs_migrate_page(struct address_space *mapping,
		struct page *newpage, struct page *page, enum migrate_mode mode)
{
	int rc, extra_count;
	struct f2fs_inode_info *fi = F2FS_I(mapping->host);
	bool atomic_written = IS_ATOMIC_WRITTEN_PAGE(page);

	BUG_ON(PageWriteback(page));

	/* migrating an atomic written page is safe with the inmem_lock hold */
	if (atomic_written && !mutex_trylock(&fi->inmem_lock))
		return -EAGAIN;

	/*
	 * A reference is expected if PagePrivate set when move mapping,
	 * however F2FS breaks this for maintaining dirty page counts when
	 * truncating pages. So here adjusting the 'extra_count' make it work.
	 */
	extra_count = (atomic_written ? 1 : 0) - page_has_private(page);
	rc = migrate_page_move_mapping(mapping, newpage,
				page, NULL, mode, extra_count);
	if (rc != MIGRATEPAGE_SUCCESS) {
		if (atomic_written)
			mutex_unlock(&fi->inmem_lock);
		return rc;
	}

	if (atomic_written) {
		struct inmem_pages *cur;
		list_for_each_entry(cur, &fi->inmem_pages, list)
			if (cur->page == page) {
				cur->page = newpage;
				break;
			}
		mutex_unlock(&fi->inmem_lock);
		put_page(page);
		get_page(newpage);
	}

	if (PagePrivate(page))
		SetPagePrivate(newpage);
	set_page_private(newpage, page_private(page));

	migrate_page_copy(newpage, page);

	return MIGRATEPAGE_SUCCESS;
}
#endif

const struct address_space_operations f2fs_dblock_aops = {
	.readpage	= f2fs_read_data_page,
	.readpages	= f2fs_read_data_pages,
	.writepage	= f2fs_write_data_page,
	.writepages	= f2fs_write_data_pages,
	.write_begin	= f2fs_write_begin,
1950
	.write_end	= f2fs_write_end,
1951
	.set_page_dirty	= f2fs_set_data_page_dirty,
1952 1953
	.invalidatepage	= f2fs_invalidate_page,
	.releasepage	= f2fs_release_page,
1954
	.direct_IO	= f2fs_direct_IO,
	.bmap		= f2fs_bmap,
1956 1957 1958
#ifdef CONFIG_MIGRATION
	.migratepage    = f2fs_migrate_page,
#endif
1959
};