/*
 * fs/f2fs/data.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/buffer_head.h>
#include <linux/mpage.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/pagevec.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/prefetch.h>
#include <linux/uio.h>
#include <linux/cleancache.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "trace.h"
#include <trace/events/f2fs.h>

static void f2fs_read_end_io(struct bio *bio)
{
	struct bio_vec *bvec;
	int i;

	if (f2fs_bio_encrypted(bio)) {
		if (bio->bi_error) {
			fscrypt_release_ctx(bio->bi_private);
		} else {
			fscrypt_decrypt_bio_pages(bio->bi_private, bio);
			return;
		}
	}

	bio_for_each_segment_all(bvec, bio, i) {
		struct page *page = bvec->bv_page;

		if (!bio->bi_error) {
			SetPageUptodate(page);
		} else {
			ClearPageUptodate(page);
			SetPageError(page);
		}
		unlock_page(page);
	}
	bio_put(bio);
}

static void f2fs_write_end_io(struct bio *bio)
{
	struct f2fs_sb_info *sbi = bio->bi_private;
	struct bio_vec *bvec;
	int i;

	bio_for_each_segment_all(bvec, bio, i) {
		struct page *page = bvec->bv_page;

		fscrypt_pullback_bio_page(&page, true);

		if (unlikely(bio->bi_error)) {
			set_bit(AS_EIO, &page->mapping->flags);
			f2fs_stop_checkpoint(sbi, true);
		}
		end_page_writeback(page);
	}
	if (atomic_dec_and_test(&sbi->nr_wb_bios) &&
				wq_has_sleeper(&sbi->cp_wait))
		wake_up(&sbi->cp_wait);

	bio_put(bio);
}

/*
 * Low-level block read/write IO operations.
 */
static struct bio *__bio_alloc(struct f2fs_sb_info *sbi, block_t blk_addr,
				int npages, bool is_read)
{
	struct bio *bio;

	bio = f2fs_bio_alloc(npages);

	bio->bi_bdev = sbi->sb->s_bdev;
	bio->bi_iter.bi_sector = SECTOR_FROM_BLOCK(blk_addr);
	bio->bi_end_io = is_read ? f2fs_read_end_io : f2fs_write_end_io;
	bio->bi_private = is_read ? NULL : sbi;

	return bio;
}

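/*
 * Writes submitted through __submit_bio() are counted in sbi->nr_wb_bios;
 * f2fs_write_end_io() drops the count and wakes cp_wait when it reaches
 * zero, which lets the checkpoint path wait for all write bios to drain.
 */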
static inline void __submit_bio(struct f2fs_sb_info *sbi, int rw,
						struct bio *bio)
{
	if (!is_read_io(rw))
		atomic_inc(&sbi->nr_wb_bios);
	submit_bio(rw, bio);
}

static void __submit_merged_bio(struct f2fs_bio_info *io)
{
	struct f2fs_io_info *fio = &io->fio;

	if (!io->bio)
		return;

	if (is_read_io(fio->rw))
		trace_f2fs_submit_read_bio(io->sbi->sb, fio, io->bio);
	else
		trace_f2fs_submit_write_bio(io->sbi->sb, fio, io->bio);

	__submit_bio(io->sbi, fio->rw, io->bio);
	io->bio = NULL;
}

static bool __has_merged_page(struct f2fs_bio_info *io, struct inode *inode,
						struct page *page, nid_t ino)
{
	struct bio_vec *bvec;
	struct page *target;
	int i;

	if (!io->bio)
		return false;

	if (!inode && !page && !ino)
		return true;

	bio_for_each_segment_all(bvec, io->bio, i) {

		if (bvec->bv_page->mapping)
			target = bvec->bv_page;
		else
			target = fscrypt_control_page(bvec->bv_page);

		if (inode && inode == target->mapping->host)
			return true;
		if (page && page == target)
			return true;
		if (ino && ino == ino_of_node(target))
			return true;
	}

	return false;
}

static bool has_merged_page(struct f2fs_sb_info *sbi, struct inode *inode,
						struct page *page, nid_t ino,
						enum page_type type)
{
	enum page_type btype = PAGE_TYPE_OF_BIO(type);
	struct f2fs_bio_info *io = &sbi->write_io[btype];
	bool ret;

	down_read(&io->io_rwsem);
	ret = __has_merged_page(io, inode, page, ino);
	up_read(&io->io_rwsem);
	return ret;
}

static void __f2fs_submit_merged_bio(struct f2fs_sb_info *sbi,
				struct inode *inode, struct page *page,
				nid_t ino, enum page_type type, int rw)
{
	enum page_type btype = PAGE_TYPE_OF_BIO(type);
	struct f2fs_bio_info *io;

	io = is_read_io(rw) ? &sbi->read_io : &sbi->write_io[btype];

	down_write(&io->io_rwsem);

	if (!__has_merged_page(io, inode, page, ino))
		goto out;

	/* change META to META_FLUSH in the checkpoint procedure */
	if (type >= META_FLUSH) {
		io->fio.type = META_FLUSH;
		if (test_opt(sbi, NOBARRIER))
			io->fio.rw = WRITE_FLUSH | REQ_META | REQ_PRIO;
		else
			io->fio.rw = WRITE_FLUSH_FUA | REQ_META | REQ_PRIO;
	}
	__submit_merged_bio(io);
out:
	up_write(&io->io_rwsem);
}

void f2fs_submit_merged_bio(struct f2fs_sb_info *sbi, enum page_type type,
									int rw)
{
	__f2fs_submit_merged_bio(sbi, NULL, NULL, 0, type, rw);
}

void f2fs_submit_merged_bio_cond(struct f2fs_sb_info *sbi,
				struct inode *inode, struct page *page,
				nid_t ino, enum page_type type, int rw)
{
	if (has_merged_page(sbi, inode, page, ino, type))
		__f2fs_submit_merged_bio(sbi, inode, page, ino, type, rw);
}

void f2fs_flush_merged_bios(struct f2fs_sb_info *sbi)
{
	f2fs_submit_merged_bio(sbi, DATA, WRITE);
	f2fs_submit_merged_bio(sbi, NODE, WRITE);
	f2fs_submit_merged_bio(sbi, META, WRITE);
}

/*
 * Fill the locked page with data located in the block address.
 * Return unlocked page.
 */
int f2fs_submit_page_bio(struct f2fs_io_info *fio)
{
	struct bio *bio;
	struct page *page = fio->encrypted_page ?
			fio->encrypted_page : fio->page;

	trace_f2fs_submit_page_bio(page, fio);
	f2fs_trace_ios(fio, 0);

	/* Allocate a new bio */
	bio = __bio_alloc(fio->sbi, fio->new_blkaddr, 1, is_read_io(fio->rw));

	if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
		bio_put(bio);
		return -EFAULT;
	}

	__submit_bio(fio->sbi, fio->rw, bio);
	return 0;
}

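/*
 * Merged write path: consecutive blocks going to the same f2fs_bio_info are
 * accumulated into one bio and only submitted once the next block is no
 * longer contiguous or the rw mode differs.
 */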
void f2fs_submit_page_mbio(struct f2fs_io_info *fio)
{
	struct f2fs_sb_info *sbi = fio->sbi;
	enum page_type btype = PAGE_TYPE_OF_BIO(fio->type);
	struct f2fs_bio_info *io;
	bool is_read = is_read_io(fio->rw);
	struct page *bio_page;

	io = is_read ? &sbi->read_io : &sbi->write_io[btype];

	if (fio->old_blkaddr != NEW_ADDR)
		verify_block_addr(sbi, fio->old_blkaddr);
	verify_block_addr(sbi, fio->new_blkaddr);

	down_write(&io->io_rwsem);

	if (io->bio && (io->last_block_in_bio != fio->new_blkaddr - 1 ||
						io->fio.rw != fio->rw))
		__submit_merged_bio(io);
alloc_new:
	if (io->bio == NULL) {
		int bio_blocks = MAX_BIO_BLOCKS(sbi);

		io->bio = __bio_alloc(sbi, fio->new_blkaddr,
						bio_blocks, is_read);
		io->fio = *fio;
	}

	bio_page = fio->encrypted_page ? fio->encrypted_page : fio->page;

	if (bio_add_page(io->bio, bio_page, PAGE_SIZE, 0) <
							PAGE_SIZE) {
		__submit_merged_bio(io);
		goto alloc_new;
	}

	io->last_block_in_bio = fio->new_blkaddr;
	f2fs_trace_ios(fio, 0);

	up_write(&io->io_rwsem);
	trace_f2fs_submit_page_mbio(fio->page, fio);
}

static void __set_data_blkaddr(struct dnode_of_data *dn)
{
	struct f2fs_node *rn = F2FS_NODE(dn->node_page);
	__le32 *addr_array;

	/* Get physical address of data block */
	addr_array = blkaddr_in_node(rn);
	addr_array[dn->ofs_in_node] = cpu_to_le32(dn->data_blkaddr);
}

/*
 * Lock ordering for the change of data block address:
 * ->data_page
 *  ->node_page
 *    update block addresses in the node page
 */
void set_data_blkaddr(struct dnode_of_data *dn)
{
	f2fs_wait_on_page_writeback(dn->node_page, NODE, true);
	__set_data_blkaddr(dn);
	if (set_page_dirty(dn->node_page))
		dn->node_changed = true;
}

void f2fs_update_data_blkaddr(struct dnode_of_data *dn, block_t blkaddr)
{
	dn->data_blkaddr = blkaddr;
	set_data_blkaddr(dn);
	f2fs_update_extent_cache(dn);
}

/* dn->ofs_in_node will be returned with up-to-date last block pointer */
int reserve_new_blocks(struct dnode_of_data *dn, blkcnt_t count)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);

	if (!count)
		return 0;

	if (unlikely(is_inode_flag_set(F2FS_I(dn->inode), FI_NO_ALLOC)))
		return -EPERM;
	if (unlikely(!inc_valid_block_count(sbi, dn->inode, &count)))
		return -ENOSPC;

	trace_f2fs_reserve_new_blocks(dn->inode, dn->nid,
						dn->ofs_in_node, count);

	f2fs_wait_on_page_writeback(dn->node_page, NODE, true);

	for (; count > 0; dn->ofs_in_node++) {
		block_t blkaddr =
			datablock_addr(dn->node_page, dn->ofs_in_node);
		if (blkaddr == NULL_ADDR) {
			dn->data_blkaddr = NEW_ADDR;
			__set_data_blkaddr(dn);
			count--;
		}
	}

	if (set_page_dirty(dn->node_page))
		dn->node_changed = true;

	mark_inode_dirty(dn->inode);
	sync_inode_page(dn);
	return 0;
}

/* Should keep dn->ofs_in_node unchanged */
int reserve_new_block(struct dnode_of_data *dn)
{
	unsigned int ofs_in_node = dn->ofs_in_node;
	int ret;

	ret = reserve_new_blocks(dn, 1);
	dn->ofs_in_node = ofs_in_node;
	return ret;
}

int f2fs_reserve_block(struct dnode_of_data *dn, pgoff_t index)
{
	bool need_put = dn->inode_page ? false : true;
	int err;

	err = get_dnode_of_data(dn, index, ALLOC_NODE);
	if (err)
		return err;

	if (dn->data_blkaddr == NULL_ADDR)
		err = reserve_new_block(dn);
	if (err || need_put)
		f2fs_put_dnode(dn);
	return err;
}

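/*
 * Map the block at @index for @dn: reuse a cached extent mapping when one
 * covers the index, otherwise reserve a new block via f2fs_reserve_block().
 */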
int f2fs_get_block(struct dnode_of_data *dn, pgoff_t index)
{
	struct extent_info ei;
	struct inode *inode = dn->inode;

	if (f2fs_lookup_extent_cache(inode, index, &ei)) {
		dn->data_blkaddr = ei.blk + index - ei.fofs;
		return 0;
	}

	return f2fs_reserve_block(dn, index);
}

struct page *get_read_data_page(struct inode *inode, pgoff_t index,
						int rw, bool for_write)
{
	struct address_space *mapping = inode->i_mapping;
	struct dnode_of_data dn;
	struct page *page;
	struct extent_info ei;
	int err;
	struct f2fs_io_info fio = {
		.sbi = F2FS_I_SB(inode),
		.type = DATA,
		.rw = rw,
		.encrypted_page = NULL,
	};

	if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode))
		return read_mapping_page(mapping, index, NULL);

	page = f2fs_grab_cache_page(mapping, index, for_write);
	if (!page)
		return ERR_PTR(-ENOMEM);

	if (f2fs_lookup_extent_cache(inode, index, &ei)) {
		dn.data_blkaddr = ei.blk + index - ei.fofs;
		goto got_it;
	}

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, index, LOOKUP_NODE);
	if (err)
		goto put_err;
	f2fs_put_dnode(&dn);

	if (unlikely(dn.data_blkaddr == NULL_ADDR)) {
		err = -ENOENT;
		goto put_err;
	}
got_it:
	if (PageUptodate(page)) {
		unlock_page(page);
		return page;
	}

	/*
	 * A new dentry page is allocated but not able to be written, since its
	 * new inode page couldn't be allocated due to -ENOSPC.
	 * In such a case, its blkaddr remains NEW_ADDR.
	 * see, f2fs_add_link -> get_new_data_page -> init_inode_metadata.
	 */
	if (dn.data_blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_SIZE);
		SetPageUptodate(page);
		unlock_page(page);
		return page;
	}

	fio.new_blkaddr = fio.old_blkaddr = dn.data_blkaddr;
	fio.page = page;
	err = f2fs_submit_page_bio(&fio);
	if (err)
		goto put_err;
	return page;

put_err:
	f2fs_put_page(page, 1);
	return ERR_PTR(err);
}

struct page *find_data_page(struct inode *inode, pgoff_t index)
{
	struct address_space *mapping = inode->i_mapping;
	struct page *page;

	page = find_get_page(mapping, index);
	if (page && PageUptodate(page))
		return page;
	f2fs_put_page(page, 0);

	page = get_read_data_page(inode, index, READ_SYNC, false);
	if (IS_ERR(page))
		return page;

	if (PageUptodate(page))
		return page;

	wait_on_page_locked(page);
	if (unlikely(!PageUptodate(page))) {
		f2fs_put_page(page, 0);
		return ERR_PTR(-EIO);
	}
	return page;
}

/*
 * If it tries to access a hole, return an error.
 * Because, the callers, functions in dir.c and GC, should be able to know
 * whether this page exists or not.
 */
struct page *get_lock_data_page(struct inode *inode, pgoff_t index,
							bool for_write)
{
	struct address_space *mapping = inode->i_mapping;
	struct page *page;
repeat:
	page = get_read_data_page(inode, index, READ_SYNC, for_write);
	if (IS_ERR(page))
		return page;

	/* wait for read completion */
	lock_page(page);
	if (unlikely(!PageUptodate(page))) {
		f2fs_put_page(page, 1);
		return ERR_PTR(-EIO);
	}
	if (unlikely(page->mapping != mapping)) {
		f2fs_put_page(page, 1);
		goto repeat;
	}
	return page;
}

/*
 * Caller ensures that this data page is never allocated.
 * A new zero-filled data page is allocated in the page cache.
 *
 * Also, caller should grab and release a rwsem by calling f2fs_lock_op() and
 * f2fs_unlock_op().
 * Note that, ipage is set only by make_empty_dir, and if any error occurs,
 * ipage should be released by this function.
 */
struct page *get_new_data_page(struct inode *inode,
		struct page *ipage, pgoff_t index, bool new_i_size)
{
	struct address_space *mapping = inode->i_mapping;
	struct page *page;
	struct dnode_of_data dn;
	int err;

	page = f2fs_grab_cache_page(mapping, index, true);
	if (!page) {
		/*
		 * before exiting, we should make sure ipage will be released
		 * if any error occurs.
		 */
		f2fs_put_page(ipage, 1);
		return ERR_PTR(-ENOMEM);
	}

	set_new_dnode(&dn, inode, ipage, NULL, 0);
	err = f2fs_reserve_block(&dn, index);
	if (err) {
		f2fs_put_page(page, 1);
		return ERR_PTR(err);
	}
	if (!ipage)
		f2fs_put_dnode(&dn);

	if (PageUptodate(page))
		goto got_it;

	if (dn.data_blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_SIZE);
		SetPageUptodate(page);
	} else {
		f2fs_put_page(page, 1);

		/* if ipage exists, blkaddr should be NEW_ADDR */
		f2fs_bug_on(F2FS_I_SB(inode), ipage);
		page = get_lock_data_page(inode, index, true);
		if (IS_ERR(page))
			return page;
	}
got_it:
	if (new_i_size && i_size_read(inode) <
				((loff_t)(index + 1) << PAGE_SHIFT)) {
		i_size_write(inode, ((loff_t)(index + 1) << PAGE_SHIFT));
		/* Only the directory inode sets new_i_size */
		set_inode_flag(F2FS_I(inode), FI_UPDATE_DIR);
	}
	return page;
}

static int __allocate_data_block(struct dnode_of_data *dn)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	struct f2fs_summary sum;
	struct node_info ni;
	int seg = CURSEG_WARM_DATA;
	pgoff_t fofs;
	blkcnt_t count = 1;

	if (unlikely(is_inode_flag_set(F2FS_I(dn->inode), FI_NO_ALLOC)))
		return -EPERM;

	dn->data_blkaddr = datablock_addr(dn->node_page, dn->ofs_in_node);
	if (dn->data_blkaddr == NEW_ADDR)
		goto alloc;

	if (unlikely(!inc_valid_block_count(sbi, dn->inode, &count)))
		return -ENOSPC;

alloc:
	get_node_info(sbi, dn->nid, &ni);
	set_summary(&sum, dn->nid, dn->ofs_in_node, ni.version);

	if (dn->ofs_in_node == 0 && dn->inode_page == dn->node_page)
		seg = CURSEG_DIRECT_IO;

	allocate_data_block(sbi, NULL, dn->data_blkaddr, &dn->data_blkaddr,
								&sum, seg);
	set_data_blkaddr(dn);

	/* update i_size */
	fofs = start_bidx_of_node(ofs_of_node(dn->node_page), dn->inode) +
							dn->ofs_in_node;
	if (i_size_read(dn->inode) < ((loff_t)(fofs + 1) << PAGE_SHIFT))
		i_size_write(dn->inode,
				((loff_t)(fofs + 1) << PAGE_SHIFT));
	return 0;
}

ssize_t f2fs_preallocate_blocks(struct kiocb *iocb, struct iov_iter *from)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	struct f2fs_map_blocks map;
	ssize_t ret = 0;

	map.m_lblk = F2FS_BLK_ALIGN(iocb->ki_pos);
	map.m_len = F2FS_BYTES_TO_BLK(iov_iter_count(from));
	map.m_next_pgofs = NULL;

	if (f2fs_encrypted_inode(inode))
		return 0;

	if (iocb->ki_flags & IOCB_DIRECT) {
		ret = f2fs_convert_inline_inode(inode);
		if (ret)
			return ret;
		return f2fs_map_blocks(inode, &map, 1, F2FS_GET_BLOCK_PRE_DIO);
	}
	if (iocb->ki_pos + iov_iter_count(from) > MAX_INLINE_DATA) {
		ret = f2fs_convert_inline_inode(inode);
		if (ret)
			return ret;
	}
	if (!f2fs_has_inline_data(inode))
		return f2fs_map_blocks(inode, &map, 1, F2FS_GET_BLOCK_PRE_AIO);
	return ret;
}

/*
 * f2fs_map_blocks() now supports readahead/bmap/rw direct_IO with
 * f2fs_map_blocks structure.
 * If original data blocks are allocated, then give them to blockdev.
 * Otherwise,
 *     a. preallocate requested block addresses
 *     b. do not use extent cache for better performance
 *     c. give the block addresses to blockdev
 */
int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map,
						int create, int flag)
{
	unsigned int maxblocks = map->m_len;
	struct dnode_of_data dn;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	int mode = create ? ALLOC_NODE : LOOKUP_NODE_RA;
	pgoff_t pgofs, end_offset, end;
	int err = 0, ofs = 1;
	unsigned int ofs_in_node, last_ofs_in_node;
	blkcnt_t prealloc;
	struct extent_info ei;
	bool allocated = false;
	block_t blkaddr;

	map->m_len = 0;
	map->m_flags = 0;

	/* it only supports block size == page size */
	pgofs =	(pgoff_t)map->m_lblk;
	end = pgofs + maxblocks;

	if (!create && f2fs_lookup_extent_cache(inode, pgofs, &ei)) {
		map->m_pblk = ei.blk + pgofs - ei.fofs;
		map->m_len = min((pgoff_t)maxblocks, ei.fofs + ei.len - pgofs);
		map->m_flags = F2FS_MAP_MAPPED;
		goto out;
	}

next_dnode:
	if (create)
		f2fs_lock_op(sbi);

	/* When reading holes, we need its node page */
	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, pgofs, mode);
	if (err) {
		if (flag == F2FS_GET_BLOCK_BMAP)
			map->m_pblk = 0;
		if (err == -ENOENT) {
			err = 0;
			if (map->m_next_pgofs)
				*map->m_next_pgofs =
					get_next_page_offset(&dn, pgofs);
		}
		goto unlock_out;
	}

	prealloc = 0;
	ofs_in_node = dn.ofs_in_node;
	end_offset = ADDRS_PER_PAGE(dn.node_page, inode);

next_block:
	blkaddr = datablock_addr(dn.node_page, dn.ofs_in_node);

	if (blkaddr == NEW_ADDR || blkaddr == NULL_ADDR) {
		if (create) {
			if (unlikely(f2fs_cp_error(sbi))) {
				err = -EIO;
				goto sync_out;
			}
			if (flag == F2FS_GET_BLOCK_PRE_AIO) {
				if (blkaddr == NULL_ADDR) {
					prealloc++;
					last_ofs_in_node = dn.ofs_in_node;
				}
			} else {
				err = __allocate_data_block(&dn);
				if (!err) {
					set_inode_flag(F2FS_I(inode),
							FI_APPEND_WRITE);
					allocated = true;
				}
			}
			if (err)
				goto sync_out;
			map->m_flags = F2FS_MAP_NEW;
			blkaddr = dn.data_blkaddr;
		} else {
			if (flag == F2FS_GET_BLOCK_BMAP) {
				map->m_pblk = 0;
				goto sync_out;
			}
			if (flag == F2FS_GET_BLOCK_FIEMAP &&
						blkaddr == NULL_ADDR) {
				if (map->m_next_pgofs)
					*map->m_next_pgofs = pgofs + 1;
			}
			if (flag != F2FS_GET_BLOCK_FIEMAP ||
						blkaddr != NEW_ADDR)
				goto sync_out;
		}
	}

	if (flag == F2FS_GET_BLOCK_PRE_AIO)
		goto skip;

	if (map->m_len == 0) {
		/* preallocated unwritten block should be mapped for fiemap. */
		if (blkaddr == NEW_ADDR)
			map->m_flags |= F2FS_MAP_UNWRITTEN;
		map->m_flags |= F2FS_MAP_MAPPED;

		map->m_pblk = blkaddr;
		map->m_len = 1;
	} else if ((map->m_pblk != NEW_ADDR &&
			blkaddr == (map->m_pblk + ofs)) ||
			(map->m_pblk == NEW_ADDR && blkaddr == NEW_ADDR) ||
			flag == F2FS_GET_BLOCK_PRE_DIO) {
		ofs++;
		map->m_len++;
	} else {
		goto sync_out;
	}

skip:
	dn.ofs_in_node++;
	pgofs++;

	/* preallocate blocks in batch for one dnode page */
	if (flag == F2FS_GET_BLOCK_PRE_AIO &&
			(pgofs == end || dn.ofs_in_node == end_offset)) {

		dn.ofs_in_node = ofs_in_node;
		err = reserve_new_blocks(&dn, prealloc);
		if (err)
			goto sync_out;

		map->m_len += dn.ofs_in_node - ofs_in_node;
		if (prealloc && dn.ofs_in_node != last_ofs_in_node + 1) {
			err = -ENOSPC;
			goto sync_out;
		}
		dn.ofs_in_node = end_offset;
	}

	if (pgofs >= end)
		goto sync_out;
	else if (dn.ofs_in_node < end_offset)
		goto next_block;

	if (allocated)
		sync_inode_page(&dn);
	f2fs_put_dnode(&dn);

	if (create) {
		f2fs_unlock_op(sbi);
		f2fs_balance_fs(sbi, allocated);
	}
	allocated = false;
	goto next_dnode;

sync_out:
	if (allocated)
		sync_inode_page(&dn);
	f2fs_put_dnode(&dn);
unlock_out:
	if (create) {
		f2fs_unlock_op(sbi);
		f2fs_balance_fs(sbi, allocated);
	}
out:
	trace_f2fs_map_blocks(inode, map, err);
	return err;
}

static int __get_data_block(struct inode *inode, sector_t iblock,
			struct buffer_head *bh, int create, int flag,
			pgoff_t *next_pgofs)
{
	struct f2fs_map_blocks map;
	int ret;

	map.m_lblk = iblock;
	map.m_len = bh->b_size >> inode->i_blkbits;
	map.m_next_pgofs = next_pgofs;

	ret = f2fs_map_blocks(inode, &map, create, flag);
	if (!ret) {
		map_bh(bh, inode->i_sb, map.m_pblk);
		bh->b_state = (bh->b_state & ~F2FS_MAP_FLAGS) | map.m_flags;
		bh->b_size = map.m_len << inode->i_blkbits;
	}
	return ret;
}

static int get_data_block(struct inode *inode, sector_t iblock,
			struct buffer_head *bh_result, int create, int flag,
			pgoff_t *next_pgofs)
{
	return __get_data_block(inode, iblock, bh_result, create,
							flag, next_pgofs);
}

static int get_data_block_dio(struct inode *inode, sector_t iblock,
			struct buffer_head *bh_result, int create)
{
	return __get_data_block(inode, iblock, bh_result, create,
						F2FS_GET_BLOCK_DIO, NULL);
}

static int get_data_block_bmap(struct inode *inode, sector_t iblock,
			struct buffer_head *bh_result, int create)
{
	/* Block number less than F2FS MAX BLOCKS */
	if (unlikely(iblock >= F2FS_I_SB(inode)->max_file_blocks))
		return -EFBIG;

	return __get_data_block(inode, iblock, bh_result, create,
						F2FS_GET_BLOCK_BMAP, NULL);
}

static inline sector_t logical_to_blk(struct inode *inode, loff_t offset)
{
	return (offset >> inode->i_blkbits);
}

static inline loff_t blk_to_logical(struct inode *inode, sector_t blk)
{
	return (blk << inode->i_blkbits);
}

int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
		u64 start, u64 len)
{
	struct buffer_head map_bh;
	sector_t start_blk, last_blk;
	pgoff_t next_pgofs;
	loff_t isize;
	u64 logical = 0, phys = 0, size = 0;
	u32 flags = 0;
	int ret = 0;

	ret = fiemap_check_flags(fieinfo, FIEMAP_FLAG_SYNC);
	if (ret)
		return ret;

J
		ret = f2fs_inline_data_fiemap(inode, fieinfo, start, len);
		if (ret != -EAGAIN)
			return ret;
	}

	inode_lock(inode);

	isize = i_size_read(inode);
	if (start >= isize)
		goto out;

	if (start + len > isize)
		len = isize - start;

	if (logical_to_blk(inode, len) == 0)
		len = blk_to_logical(inode, 1);

	start_blk = logical_to_blk(inode, start);
	last_blk = logical_to_blk(inode, start + len - 1);

next:
	memset(&map_bh, 0, sizeof(struct buffer_head));
	map_bh.b_size = len;

	ret = get_data_block(inode, start_blk, &map_bh, 0,
					F2FS_GET_BLOCK_FIEMAP, &next_pgofs);
	if (ret)
		goto out;

	/* HOLE */
	if (!buffer_mapped(&map_bh)) {
		start_blk = next_pgofs;
		/* Go through holes until passing the EOF */
		if (blk_to_logical(inode, start_blk) < isize)
			goto prep_next;
		/* Found a hole beyond isize means no more extents.
		 * Note that the premise is that filesystems don't
		 * punch holes beyond isize and keep size unchanged.
		 */
		flags |= FIEMAP_EXTENT_LAST;
	}

	if (size) {
		if (f2fs_encrypted_inode(inode))
			flags |= FIEMAP_EXTENT_DATA_ENCRYPTED;

		ret = fiemap_fill_next_extent(fieinfo, logical,
				phys, size, flags);
	}

	if (start_blk > last_blk || ret)
		goto out;

	logical = blk_to_logical(inode, start_blk);
	phys = blk_to_logical(inode, map_bh.b_blocknr);
	size = map_bh.b_size;
	flags = 0;
	if (buffer_unwritten(&map_bh))
		flags = FIEMAP_EXTENT_UNWRITTEN;

	start_blk += logical_to_blk(inode, size);

prep_next:
	cond_resched();
	if (fatal_signal_pending(current))
		ret = -EINTR;
	else
		goto next;
out:
	if (ret == 1)
		ret = 0;

	inode_unlock(inode);
	return ret;
}

/*
 * This function was originally taken from fs/mpage.c, and customized for f2fs.
 * Major change was from block_size == page_size in f2fs by default.
 */
static int f2fs_mpage_readpages(struct address_space *mapping,
			struct list_head *pages, struct page *page,
			unsigned nr_pages)
{
	struct bio *bio = NULL;
	unsigned page_idx;
	sector_t last_block_in_bio = 0;
	struct inode *inode = mapping->host;
	const unsigned blkbits = inode->i_blkbits;
	const unsigned blocksize = 1 << blkbits;
	sector_t block_in_file;
	sector_t last_block;
	sector_t last_block_in_file;
	sector_t block_nr;
	struct block_device *bdev = inode->i_sb->s_bdev;
	struct f2fs_map_blocks map;

	map.m_pblk = 0;
	map.m_lblk = 0;
	map.m_len = 0;
	map.m_flags = 0;
	map.m_next_pgofs = NULL;

	for (page_idx = 0; nr_pages; page_idx++, nr_pages--) {

		prefetchw(&page->flags);
		if (pages) {
			page = list_entry(pages->prev, struct page, lru);
			list_del(&page->lru);
			if (add_to_page_cache_lru(page, mapping,
						  page->index, GFP_KERNEL))
				goto next_page;
		}

		block_in_file = (sector_t)page->index;
		last_block = block_in_file + nr_pages;
		last_block_in_file = (i_size_read(inode) + blocksize - 1) >>
								blkbits;
		if (last_block > last_block_in_file)
			last_block = last_block_in_file;

		/*
		 * Map blocks using the previous result first.
		 */
		if ((map.m_flags & F2FS_MAP_MAPPED) &&
				block_in_file > map.m_lblk &&
				block_in_file < (map.m_lblk + map.m_len))
			goto got_it;

		/*
		 * Then do more f2fs_map_blocks() calls until we are
		 * done with this page.
		 */
		map.m_flags = 0;

		if (block_in_file < last_block) {
			map.m_lblk = block_in_file;
			map.m_len = last_block - block_in_file;

			if (f2fs_map_blocks(inode, &map, 0,
						F2FS_GET_BLOCK_READ))
				goto set_error_page;
		}
got_it:
		if ((map.m_flags & F2FS_MAP_MAPPED)) {
			block_nr = map.m_pblk + block_in_file - map.m_lblk;
			SetPageMappedToDisk(page);

			if (!PageUptodate(page) && !cleancache_get_page(page)) {
				SetPageUptodate(page);
				goto confused;
			}
		} else {
			zero_user_segment(page, 0, PAGE_SIZE);
			SetPageUptodate(page);
			unlock_page(page);
			goto next_page;
		}

		/*
		 * This page will go to BIO.  Do we need to send this
		 * BIO off first?
		 */
		if (bio && (last_block_in_bio != block_nr - 1)) {
submit_and_realloc:
			__submit_bio(F2FS_I_SB(inode), READ, bio);
			bio = NULL;
		}
		if (bio == NULL) {
			struct fscrypt_ctx *ctx = NULL;

			if (f2fs_encrypted_inode(inode) &&
					S_ISREG(inode->i_mode)) {

				ctx = fscrypt_get_ctx(inode, GFP_NOFS);
				if (IS_ERR(ctx))
					goto set_error_page;

				/* wait the page to be moved by cleaning */
				f2fs_wait_on_encrypted_page_writeback(
						F2FS_I_SB(inode), block_nr);
			}

			bio = bio_alloc(GFP_KERNEL,
				min_t(int, nr_pages, BIO_MAX_PAGES));
			if (!bio) {
				if (ctx)
					fscrypt_release_ctx(ctx);
				goto set_error_page;
			}
			bio->bi_bdev = bdev;
			bio->bi_iter.bi_sector = SECTOR_FROM_BLOCK(block_nr);
			bio->bi_end_io = f2fs_read_end_io;
			bio->bi_private = ctx;
		}

		if (bio_add_page(bio, page, blocksize, 0) < blocksize)
			goto submit_and_realloc;

		last_block_in_bio = block_nr;
		goto next_page;
set_error_page:
		SetPageError(page);
		zero_user_segment(page, 0, PAGE_SIZE);
		unlock_page(page);
		goto next_page;
confused:
		if (bio) {
			__submit_bio(F2FS_I_SB(inode), READ, bio);
			bio = NULL;
		}
		unlock_page(page);
next_page:
		if (pages)
			put_page(page);
	}
	BUG_ON(pages && !list_empty(pages));
	if (bio)
		__submit_bio(F2FS_I_SB(inode), READ, bio);
	return 0;
}

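/* ->readpage: serve inline data in place, otherwise fall back to the mpage read path */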
static int f2fs_read_data_page(struct file *file, struct page *page)
{
	struct inode *inode = page->mapping->host;
	int ret = -EAGAIN;

	trace_f2fs_readpage(page, DATA);

	/* If the file has inline data, try to read it directly */
	if (f2fs_has_inline_data(inode))
		ret = f2fs_read_inline_data(inode, page);
	if (ret == -EAGAIN)
		ret = f2fs_mpage_readpages(page->mapping, NULL, page, 1);
	return ret;
}

static int f2fs_read_data_pages(struct file *file,
			struct address_space *mapping,
			struct list_head *pages, unsigned nr_pages)
{
	struct inode *inode = file->f_mapping->host;
	struct page *page = list_entry(pages->prev, struct page, lru);

	trace_f2fs_readpages(inode, page, nr_pages);

	/* If the file has inline data, skip readpages */
	if (f2fs_has_inline_data(inode))
		return 0;

	return f2fs_mpage_readpages(mapping, pages, NULL, nr_pages);
}

int do_write_data_page(struct f2fs_io_info *fio)
{
	struct page *page = fio->page;
	struct inode *inode = page->mapping->host;
	struct dnode_of_data dn;
	int err = 0;

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, page->index, LOOKUP_NODE);
	if (err)
		return err;

	fio->old_blkaddr = dn.data_blkaddr;

	/* This page is already truncated */
	if (fio->old_blkaddr == NULL_ADDR) {
		ClearPageUptodate(page);
		goto out_writepage;
	}

	if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode)) {
		gfp_t gfp_flags = GFP_NOFS;

		/* wait for GCed encrypted page writeback */
		f2fs_wait_on_encrypted_page_writeback(F2FS_I_SB(inode),
							fio->old_blkaddr);
retry_encrypt:
		fio->encrypted_page = fscrypt_encrypt_page(inode, fio->page,
								gfp_flags);
		if (IS_ERR(fio->encrypted_page)) {
			err = PTR_ERR(fio->encrypted_page);
			if (err == -ENOMEM) {
				/* flush pending ios and wait for a while */
				f2fs_flush_merged_bios(F2FS_I_SB(inode));
				congestion_wait(BLK_RW_ASYNC, HZ/50);
				gfp_flags |= __GFP_NOFAIL;
				err = 0;
				goto retry_encrypt;
			}
			goto out_writepage;
		}
	}

	set_page_writeback(page);

	/*
	 * If current allocation needs SSR,
	 * it had better in-place writes for updated data.
	 */
	if (unlikely(fio->old_blkaddr != NEW_ADDR &&
			!is_cold_data(page) &&
			!IS_ATOMIC_WRITTEN_PAGE(page) &&
			need_inplace_update(inode))) {
		rewrite_data_page(fio);
		set_inode_flag(F2FS_I(inode), FI_UPDATE_WRITE);
		trace_f2fs_do_write_data_page(page, IPU);
	} else {
		write_data_page(&dn, fio);
		trace_f2fs_do_write_data_page(page, OPU);
		set_inode_flag(F2FS_I(inode), FI_APPEND_WRITE);
		if (page->index == 0)
			set_inode_flag(F2FS_I(inode), FI_FIRST_BLOCK_WRITTEN);
	}
out_writepage:
	f2fs_put_dnode(&dn);
	return err;
}

static int f2fs_write_data_page(struct page *page,
					struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	loff_t i_size = i_size_read(inode);
	const pgoff_t end_index = ((unsigned long long) i_size)
							>> PAGE_SHIFT;
	unsigned offset = 0;
	bool need_balance_fs = false;
	int err = 0;
	struct f2fs_io_info fio = {
		.sbi = sbi,
		.type = DATA,
		.rw = (wbc->sync_mode == WB_SYNC_ALL) ? WRITE_SYNC : WRITE,
		.page = page,
		.encrypted_page = NULL,
	};

	trace_f2fs_writepage(page, DATA);

	if (page->index < end_index)
		goto write;

	/*
	 * If the offset is out-of-range of file size,
	 * this page does not have to be written to disk.
	 */
	offset = i_size & (PAGE_SIZE - 1);
	if ((page->index >= end_index + 1) || !offset)
		goto out;

	zero_user_segment(page, offset, PAGE_SIZE);
write:
	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
		goto redirty_out;
	if (f2fs_is_drop_cache(inode))
		goto out;
	/* we should not write 0'th page having journal header */
	if (f2fs_is_volatile_file(inode) && (!page->index ||
			(!wbc->for_reclaim &&
			available_free_memory(sbi, BASE_CHECK))))
		goto redirty_out;

	/* Dentry blocks are controlled by checkpoint */
	if (S_ISDIR(inode->i_mode)) {
		if (unlikely(f2fs_cp_error(sbi)))
			goto redirty_out;
		err = do_write_data_page(&fio);
		goto done;
	}

	/* we should bypass data pages to proceed the kworker jobs */
	if (unlikely(f2fs_cp_error(sbi))) {
		SetPageError(page);
		goto out;
	}

	if (!wbc->for_reclaim)
		need_balance_fs = true;
	else if (has_not_enough_free_secs(sbi, 0))
		goto redirty_out;

	err = -EAGAIN;
	f2fs_lock_op(sbi);
	if (f2fs_has_inline_data(inode))
		err = f2fs_write_inline_data(inode, page);
	if (err == -EAGAIN)
		err = do_write_data_page(&fio);
	f2fs_unlock_op(sbi);
done:
	if (err && err != -ENOENT)
		goto redirty_out;

	clear_cold_data(page);
out:
	inode_dec_dirty_pages(inode);
	if (err)
		ClearPageUptodate(page);

	if (wbc->for_reclaim) {
		f2fs_submit_merged_bio_cond(sbi, NULL, page, 0, DATA, WRITE);
		remove_dirty_inode(inode);
	}

	unlock_page(page);
	f2fs_balance_fs(sbi, need_balance_fs);

	if (unlikely(f2fs_cp_error(sbi)))
		f2fs_submit_merged_bio(sbi, DATA, WRITE);

	return 0;

redirty_out:
	redirty_page_for_writepage(wbc, page);
	return AOP_WRITEPAGE_ACTIVATE;
}

static int __f2fs_writepage(struct page *page, struct writeback_control *wbc,
			void *data)
{
	struct address_space *mapping = data;
	int ret = mapping->a_ops->writepage(page, wbc);
	mapping_set_error(mapping, ret);
	return ret;
}

/*
 * This function was copied from write_cache_pages in mm/page-writeback.c.
 * The major change is making write step of cold data page separately from
 * warm/hot data page.
 */
static int f2fs_write_cache_pages(struct address_space *mapping,
			struct writeback_control *wbc, writepage_t writepage,
			void *data)
{
	int ret = 0;
	int done = 0;
	struct pagevec pvec;
	int nr_pages;
	pgoff_t uninitialized_var(writeback_index);
	pgoff_t index;
	pgoff_t end;		/* Inclusive */
	pgoff_t done_index;
	int cycled;
	int range_whole = 0;
	int tag;
	int step = 0;

	pagevec_init(&pvec, 0);
next:
	if (wbc->range_cyclic) {
		writeback_index = mapping->writeback_index; /* prev offset */
		index = writeback_index;
		if (index == 0)
			cycled = 1;
		else
			cycled = 0;
		end = -1;
	} else {
		index = wbc->range_start >> PAGE_SHIFT;
		end = wbc->range_end >> PAGE_SHIFT;
		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
			range_whole = 1;
		cycled = 1; /* ignore range_cyclic tests */
	}
	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
		tag = PAGECACHE_TAG_TOWRITE;
	else
		tag = PAGECACHE_TAG_DIRTY;
retry:
	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
		tag_pages_for_writeback(mapping, index, end);
	done_index = index;
	while (!done && (index <= end)) {
		int i;

		nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag,
			      min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1);
		if (nr_pages == 0)
			break;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];

			if (page->index > end) {
				done = 1;
				break;
			}

			done_index = page->index;

			lock_page(page);

			if (unlikely(page->mapping != mapping)) {
continue_unlock:
				unlock_page(page);
				continue;
			}

			if (!PageDirty(page)) {
				/* someone wrote it for us */
				goto continue_unlock;
			}

			if (step == is_cold_data(page))
				goto continue_unlock;

			if (PageWriteback(page)) {
				if (wbc->sync_mode != WB_SYNC_NONE)
					f2fs_wait_on_page_writeback(page,
								DATA, true);
				else
					goto continue_unlock;
			}

			BUG_ON(PageWriteback(page));
			if (!clear_page_dirty_for_io(page))
				goto continue_unlock;

			ret = (*writepage)(page, wbc, data);
			if (unlikely(ret)) {
				if (ret == AOP_WRITEPAGE_ACTIVATE) {
					unlock_page(page);
					ret = 0;
				} else {
					done_index = page->index + 1;
					done = 1;
					break;
				}
			}

			if (--wbc->nr_to_write <= 0 &&
			    wbc->sync_mode == WB_SYNC_NONE) {
				done = 1;
				break;
			}
		}
		pagevec_release(&pvec);
		cond_resched();
	}

	if (step < 1) {
		step++;
		goto next;
	}

	if (!cycled && !done) {
		cycled = 1;
		index = 0;
		end = writeback_index - 1;
		goto retry;
	}
	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
		mapping->writeback_index = done_index;

	return ret;
}

static int f2fs_write_data_pages(struct address_space *mapping,
			    struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	bool locked = false;
	int ret;
	long diff;

	/* deal with chardevs and other special file */
	if (!mapping->a_ops->writepage)
		return 0;

	/* skip writing if there is no dirty page in this inode */
	if (!get_dirty_pages(inode) && wbc->sync_mode == WB_SYNC_NONE)
		return 0;

	if (S_ISDIR(inode->i_mode) && wbc->sync_mode == WB_SYNC_NONE &&
			get_dirty_pages(inode) < nr_pages_to_skip(sbi, DATA) &&
			available_free_memory(sbi, DIRTY_DENTS))
		goto skip_write;

	/* skip writing during file defragment */
	if (is_inode_flag_set(F2FS_I(inode), FI_DO_DEFRAG))
		goto skip_write;

	/* during POR, we don't need to trigger writepage at all. */
	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
		goto skip_write;

	trace_f2fs_writepages(mapping->host, wbc, DATA);

	diff = nr_pages_to_write(sbi, DATA, wbc);

	if (!S_ISDIR(inode->i_mode) && wbc->sync_mode == WB_SYNC_ALL) {
		mutex_lock(&sbi->writepages);
		locked = true;
	}
	ret = f2fs_write_cache_pages(mapping, wbc, __f2fs_writepage, mapping);
	f2fs_submit_merged_bio_cond(sbi, inode, NULL, 0, DATA, WRITE);
	if (locked)
		mutex_unlock(&sbi->writepages);

	remove_dirty_inode(inode);

	wbc->nr_to_write = max((long)0, wbc->nr_to_write - diff);
	return ret;

skip_write:
	wbc->pages_skipped += get_dirty_pages(inode);
	trace_f2fs_writepages(mapping->host, wbc, DATA);
	return 0;
}

static void f2fs_write_failed(struct address_space *mapping, loff_t to)
{
	struct inode *inode = mapping->host;
	loff_t i_size = i_size_read(inode);

	if (to > i_size) {
		truncate_pagecache(inode, i_size);
		truncate_blocks(inode, i_size, true);
	}
}

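/*
 * prepare_write_begin() finds (or reserves) the block backing the page being
 * written.  f2fs_lock_op() is taken only when inline data may need converting
 * or the page lies at or beyond i_size; *node_changed tells the caller whether
 * the dnode page was dirtied so it can rebalance afterwards.
 */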
static int prepare_write_begin(struct f2fs_sb_info *sbi,
			struct page *page, loff_t pos, unsigned len,
			block_t *blk_addr, bool *node_changed)
{
	struct inode *inode = page->mapping->host;
	pgoff_t index = page->index;
	struct dnode_of_data dn;
	struct page *ipage;
	bool locked = false;
	struct extent_info ei;
	int err = 0;

	/*
	 * we already allocated all the blocks, so we don't need to get
	 * the block addresses when there is no need to fill the page.
	 */
	if (!f2fs_has_inline_data(inode) && !f2fs_encrypted_inode(inode) &&
					len == PAGE_SIZE)
		return 0;

	if (f2fs_has_inline_data(inode) ||
			(pos & PAGE_MASK) >= i_size_read(inode)) {
		f2fs_lock_op(sbi);
		locked = true;
	}
restart:
	/* check inline_data */
	ipage = get_node_page(sbi, inode->i_ino);
	if (IS_ERR(ipage)) {
		err = PTR_ERR(ipage);
		goto unlock_out;
	}

	set_new_dnode(&dn, inode, ipage, ipage, 0);

	if (f2fs_has_inline_data(inode)) {
		if (pos + len <= MAX_INLINE_DATA) {
			read_inline_data(page, ipage);
			set_inode_flag(F2FS_I(inode), FI_DATA_EXIST);
			if (inode->i_nlink)
				set_inline_node(ipage);
		} else {
			err = f2fs_convert_inline_page(&dn, page);
			if (err)
				goto out;
			if (dn.data_blkaddr == NULL_ADDR)
				err = f2fs_get_block(&dn, index);
		}
	} else if (locked) {
		err = f2fs_get_block(&dn, index);
	} else {
		if (f2fs_lookup_extent_cache(inode, index, &ei)) {
			dn.data_blkaddr = ei.blk + index - ei.fofs;
		} else {
			/* hole case */
			err = get_dnode_of_data(&dn, index, LOOKUP_NODE);
			if (err || dn.data_blkaddr == NULL_ADDR) {
				f2fs_put_dnode(&dn);
				f2fs_lock_op(sbi);
				locked = true;
				goto restart;
			}
		}
	}

	/* convert_inline_page can make node_changed */
	*blk_addr = dn.data_blkaddr;
	*node_changed = dn.node_changed;
out:
	f2fs_put_dnode(&dn);
unlock_out:
	if (locked)
		f2fs_unlock_op(sbi);
	return err;
}

static int f2fs_write_begin(struct file *file, struct address_space *mapping,
		loff_t pos, unsigned len, unsigned flags,
		struct page **pagep, void **fsdata)
{
	struct inode *inode = mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct page *page = NULL;
	pgoff_t index = ((unsigned long long) pos) >> PAGE_SHIFT;
	bool need_balance = false;
	block_t blkaddr = NULL_ADDR;
	int err = 0;

	trace_f2fs_write_begin(inode, pos, len, flags);

	/*
	 * We should check this at this moment to avoid deadlock on inode page
	 * and #0 page. The locking rule for inline_data conversion should be:
	 * lock_page(page #0) -> lock_page(inode_page)
	 */
	if (index != 0) {
		err = f2fs_convert_inline_inode(inode);
		if (err)
			goto fail;
	}
repeat:
	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page) {
		err = -ENOMEM;
		goto fail;
	}

	*pagep = page;

	err = prepare_write_begin(sbi, page, pos, len,
					&blkaddr, &need_balance);
	if (err)
		goto fail;

	if (need_balance && has_not_enough_free_secs(sbi, 0)) {
		unlock_page(page);
		f2fs_balance_fs(sbi, true);
		lock_page(page);
		if (page->mapping != mapping) {
			/* The page got truncated from under us */
			f2fs_put_page(page, 1);
			goto repeat;
		}
	}

	f2fs_wait_on_page_writeback(page, DATA, false);

	/* wait for GCed encrypted page writeback */
	if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode))
		f2fs_wait_on_encrypted_page_writeback(sbi, blkaddr);

	if (len == PAGE_SIZE)
		goto out_update;
	if (PageUptodate(page))
		goto out_clear;

	if ((pos & PAGE_MASK) >= i_size_read(inode)) {
		unsigned start = pos & (PAGE_SIZE - 1);
		unsigned end = start + len;

		/* Reading beyond i_size is simple: memset to zero */
		zero_user_segments(page, 0, start, end, PAGE_SIZE);
		goto out_update;
	}

	if (blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_SIZE);
	} else {
		struct f2fs_io_info fio = {
			.sbi = sbi,
			.type = DATA,
			.rw = READ_SYNC,
			.old_blkaddr = blkaddr,
			.new_blkaddr = blkaddr,
			.page = page,
			.encrypted_page = NULL,
		};
		err = f2fs_submit_page_bio(&fio);
		if (err)
			goto fail;

		lock_page(page);
		if (unlikely(!PageUptodate(page))) {
			err = -EIO;
			goto fail;
		}
		if (unlikely(page->mapping != mapping)) {
			f2fs_put_page(page, 1);
			goto repeat;
		}

		/* avoid symlink page */
		if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode)) {
			err = fscrypt_decrypt_page(page);
			if (err)
				goto fail;
		}
	}
out_update:
	SetPageUptodate(page);
out_clear:
	clear_cold_data(page);
	return 0;

fail:
	f2fs_put_page(page, 1);
	f2fs_write_failed(mapping, pos + len);
	return err;
}

static int f2fs_write_end(struct file *file,
			struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	struct inode *inode = page->mapping->host;

	trace_f2fs_write_end(inode, pos, len, copied);

	set_page_dirty(page);

	if (pos + copied > i_size_read(inode)) {
		i_size_write(inode, pos + copied);
		mark_inode_dirty(inode);
	}

	f2fs_put_page(page, 1);
	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
	return copied;
}

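/*
 * check_direct_IO() rejects a direct I/O request unless both the file offset
 * and the user buffers are aligned to the filesystem block size.
 */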
static int check_direct_IO(struct inode *inode, struct iov_iter *iter,
			   loff_t offset)
{
	unsigned blocksize_mask = inode->i_sb->s_blocksize - 1;

	if (offset & blocksize_mask)
		return -EINVAL;

	if (iov_iter_alignment(iter) & blocksize_mask)
		return -EINVAL;

	return 0;
}

static ssize_t f2fs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
{
	struct address_space *mapping = iocb->ki_filp->f_mapping;
	struct inode *inode = mapping->host;
	size_t count = iov_iter_count(iter);
	loff_t offset = iocb->ki_pos;
	int err;

	err = check_direct_IO(inode, iter, offset);
	if (err)
		return err;

	if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode))
		return 0;

	trace_f2fs_direct_IO_enter(inode, offset, count, iov_iter_rw(iter));

	err = blockdev_direct_IO(iocb, inode, iter, get_data_block_dio);
	if (iov_iter_rw(iter) == WRITE) {
		if (err > 0)
			set_inode_flag(F2FS_I(inode), FI_UPDATE_WRITE);
		else if (err < 0)
			f2fs_write_failed(mapping, offset + count);
	}

	trace_f2fs_direct_IO_exit(inode, offset, count, iov_iter_rw(iter), err);

	return err;
}

void f2fs_invalidate_page(struct page *page, unsigned int offset,
							unsigned int length)
{
	struct inode *inode = page->mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

	if (inode->i_ino >= F2FS_ROOT_INO(sbi) &&
		(offset % PAGE_SIZE || length != PAGE_SIZE))
		return;

	if (PageDirty(page)) {
		if (inode->i_ino == F2FS_META_INO(sbi))
			dec_page_count(sbi, F2FS_DIRTY_META);
		else if (inode->i_ino == F2FS_NODE_INO(sbi))
			dec_page_count(sbi, F2FS_DIRTY_NODES);
		else
			inode_dec_dirty_pages(inode);
	}

	/* This is atomic written page, keep Private */
	if (IS_ATOMIC_WRITTEN_PAGE(page))
		return;

	set_page_private(page, 0);
	ClearPagePrivate(page);
}

int f2fs_release_page(struct page *page, gfp_t wait)
{
	/* If this is dirty page, keep PagePrivate */
	if (PageDirty(page))
		return 0;

	/* This is atomic written page, keep Private */
	if (IS_ATOMIC_WRITTEN_PAGE(page))
		return 0;

	set_page_private(page, 0);
	ClearPagePrivate(page);
	return 1;
}

static int f2fs_set_data_page_dirty(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct inode *inode = mapping->host;

	trace_f2fs_set_page_dirty(page, DATA);

	SetPageUptodate(page);

	if (f2fs_is_atomic_file(inode)) {
		if (!IS_ATOMIC_WRITTEN_PAGE(page)) {
			register_inmem_page(inode, page);
			return 1;
		}
		/*
		 * Previously, this page has been registered, we just
		 * return here.
		 */
		return 0;
	}

	if (!PageDirty(page)) {
		__set_page_dirty_nobuffers(page);
		update_dirty_page(inode, page);
		return 1;
	}
	return 0;
}

static sector_t f2fs_bmap(struct address_space *mapping, sector_t block)
{
	struct inode *inode = mapping->host;

	if (f2fs_has_inline_data(inode))
		return 0;

	/* make sure allocating whole blocks */
	if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
		filemap_write_and_wait(mapping);

	return generic_block_bmap(mapping, block, get_data_block_bmap);
}

const struct address_space_operations f2fs_dblock_aops = {
	.readpage	= f2fs_read_data_page,
	.readpages	= f2fs_read_data_pages,
	.writepage	= f2fs_write_data_page,
	.writepages	= f2fs_write_data_pages,
	.write_begin	= f2fs_write_begin,
	.write_end	= f2fs_write_end,
	.set_page_dirty	= f2fs_set_data_page_dirty,
	.invalidatepage	= f2fs_invalidate_page,
	.releasepage	= f2fs_release_page,
	.direct_IO	= f2fs_direct_IO,
	.bmap		= f2fs_bmap,
};