/*
 * fs/f2fs/data.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/buffer_head.h>
#include <linux/mpage.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/pagevec.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/prefetch.h>
#include <linux/uio.h>
#include <linux/cleancache.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "trace.h"
#include <trace/events/f2fs.h>

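/*
 * Completion handler for read bios: decrypt the pages if the bio carries
 * encrypted data, then mark each page up-to-date (or failed) and unlock it.
 */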
static void f2fs_read_end_io(struct bio *bio)
{
	struct bio_vec *bvec;
	int i;

	if (f2fs_bio_encrypted(bio)) {
		if (bio->bi_error) {
			fscrypt_release_ctx(bio->bi_private);
		} else {
			fscrypt_decrypt_bio_pages(bio->bi_private, bio);
			return;
		}
	}

	bio_for_each_segment_all(bvec, bio, i) {
		struct page *page = bvec->bv_page;

		if (!bio->bi_error) {
			SetPageUptodate(page);
		} else {
			ClearPageUptodate(page);
			SetPageError(page);
		}
		unlock_page(page);
	}
	bio_put(bio);
}

static void f2fs_write_end_io(struct bio *bio)
{
	struct f2fs_sb_info *sbi = bio->bi_private;
	struct bio_vec *bvec;
	int i;

	bio_for_each_segment_all(bvec, bio, i) {
		struct page *page = bvec->bv_page;

		fscrypt_pullback_bio_page(&page, true);

		if (unlikely(bio->bi_error)) {
			set_bit(AS_EIO, &page->mapping->flags);
			f2fs_stop_checkpoint(sbi, true);
		}
		end_page_writeback(page);
	}
	if (atomic_dec_and_test(&sbi->nr_wb_bios) &&
				wq_has_sleeper(&sbi->cp_wait))
		wake_up(&sbi->cp_wait);

	bio_put(bio);
}

/*
 * Low-level block read/write IO operations.
 */
static struct bio *__bio_alloc(struct f2fs_sb_info *sbi, block_t blk_addr,
				int npages, bool is_read)
{
	struct bio *bio;

	bio = f2fs_bio_alloc(npages);

	bio->bi_bdev = sbi->sb->s_bdev;
	bio->bi_iter.bi_sector = SECTOR_FROM_BLOCK(blk_addr);
	bio->bi_end_io = is_read ? f2fs_read_end_io : f2fs_write_end_io;
	bio->bi_private = is_read ? NULL : sbi;

	return bio;
}

static inline void __submit_bio(struct f2fs_sb_info *sbi, int rw,
						struct bio *bio)
{
	if (!is_read_io(rw))
		atomic_inc(&sbi->nr_wb_bios);
	bio->bi_rw = rw;
	submit_bio(bio);
}

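/*
 * Submit the bio currently being merged in @io, if any, and reset the
 * merge state.
 */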
static void __submit_merged_bio(struct f2fs_bio_info *io)
{
	struct f2fs_io_info *fio = &io->fio;

	if (!io->bio)
		return;

	if (is_read_io(fio->rw))
		trace_f2fs_submit_read_bio(io->sbi->sb, fio, io->bio);
	else
		trace_f2fs_submit_write_bio(io->sbi->sb, fio, io->bio);

	__submit_bio(io->sbi, fio->rw, io->bio);
	io->bio = NULL;
}

static bool __has_merged_page(struct f2fs_bio_info *io, struct inode *inode,
						struct page *page, nid_t ino)
{
	struct bio_vec *bvec;
	struct page *target;
	int i;

	if (!io->bio)
		return false;

	if (!inode && !page && !ino)
		return true;

	bio_for_each_segment_all(bvec, io->bio, i) {

		if (bvec->bv_page->mapping)
			target = bvec->bv_page;
		else
			target = fscrypt_control_page(bvec->bv_page);

		if (inode && inode == target->mapping->host)
			return true;
		if (page && page == target)
			return true;
		if (ino && ino == ino_of_node(target))
			return true;
	}

	return false;
}

static bool has_merged_page(struct f2fs_sb_info *sbi, struct inode *inode,
						struct page *page, nid_t ino,
						enum page_type type)
{
	enum page_type btype = PAGE_TYPE_OF_BIO(type);
	struct f2fs_bio_info *io = &sbi->write_io[btype];
	bool ret;

	down_read(&io->io_rwsem);
	ret = __has_merged_page(io, inode, page, ino);
	up_read(&io->io_rwsem);
	return ret;
}

static void __f2fs_submit_merged_bio(struct f2fs_sb_info *sbi,
				struct inode *inode, struct page *page,
				nid_t ino, enum page_type type, int rw)
{
	enum page_type btype = PAGE_TYPE_OF_BIO(type);
	struct f2fs_bio_info *io;

	io = is_read_io(rw) ? &sbi->read_io : &sbi->write_io[btype];

	down_write(&io->io_rwsem);

	if (!__has_merged_page(io, inode, page, ino))
		goto out;

	/* change META to META_FLUSH in the checkpoint procedure */
	if (type >= META_FLUSH) {
		io->fio.type = META_FLUSH;
		if (test_opt(sbi, NOBARRIER))
			io->fio.rw = WRITE_FLUSH | REQ_META | REQ_PRIO;
		else
			io->fio.rw = WRITE_FLUSH_FUA | REQ_META | REQ_PRIO;
	}
	__submit_merged_bio(io);
out:
	up_write(&io->io_rwsem);
}

void f2fs_submit_merged_bio(struct f2fs_sb_info *sbi, enum page_type type,
									int rw)
{
	__f2fs_submit_merged_bio(sbi, NULL, NULL, 0, type, rw);
}

void f2fs_submit_merged_bio_cond(struct f2fs_sb_info *sbi,
				struct inode *inode, struct page *page,
				nid_t ino, enum page_type type, int rw)
{
	if (has_merged_page(sbi, inode, page, ino, type))
		__f2fs_submit_merged_bio(sbi, inode, page, ino, type, rw);
}

void f2fs_flush_merged_bios(struct f2fs_sb_info *sbi)
{
	f2fs_submit_merged_bio(sbi, DATA, WRITE);
	f2fs_submit_merged_bio(sbi, NODE, WRITE);
	f2fs_submit_merged_bio(sbi, META, WRITE);
}

/*
 * Fill the locked page with data located in the block address.
 * Return unlocked page.
 */
int f2fs_submit_page_bio(struct f2fs_io_info *fio)
{
	struct bio *bio;
	struct page *page = fio->encrypted_page ?
			fio->encrypted_page : fio->page;

	trace_f2fs_submit_page_bio(page, fio);
	f2fs_trace_ios(fio, 0);

	/* Allocate a new bio */
	bio = __bio_alloc(fio->sbi, fio->new_blkaddr, 1, is_read_io(fio->rw));

	if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
		bio_put(bio);
		return -EFAULT;
	}

	__submit_bio(fio->sbi, fio->rw, bio);
	return 0;
}

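/*
 * Merge this page into the in-flight bio for its page type, submitting the
 * existing bio first when the new block is not contiguous or the request
 * type differs.
 */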
void f2fs_submit_page_mbio(struct f2fs_io_info *fio)
{
	struct f2fs_sb_info *sbi = fio->sbi;
	enum page_type btype = PAGE_TYPE_OF_BIO(fio->type);
	struct f2fs_bio_info *io;
	bool is_read = is_read_io(fio->rw);
	struct page *bio_page;

	io = is_read ? &sbi->read_io : &sbi->write_io[btype];

	if (fio->old_blkaddr != NEW_ADDR)
		verify_block_addr(sbi, fio->old_blkaddr);
	verify_block_addr(sbi, fio->new_blkaddr);

	down_write(&io->io_rwsem);

	if (io->bio && (io->last_block_in_bio != fio->new_blkaddr - 1 ||
						io->fio.rw != fio->rw))
		__submit_merged_bio(io);
alloc_new:
	if (io->bio == NULL) {
		int bio_blocks = MAX_BIO_BLOCKS(sbi);

		io->bio = __bio_alloc(sbi, fio->new_blkaddr,
						bio_blocks, is_read);
		io->fio = *fio;
	}

	bio_page = fio->encrypted_page ? fio->encrypted_page : fio->page;

	if (bio_add_page(io->bio, bio_page, PAGE_SIZE, 0) <
							PAGE_SIZE) {
		__submit_merged_bio(io);
		goto alloc_new;
	}

	io->last_block_in_bio = fio->new_blkaddr;
	f2fs_trace_ios(fio, 0);

	up_write(&io->io_rwsem);
	trace_f2fs_submit_page_mbio(fio->page, fio);
}

static void __set_data_blkaddr(struct dnode_of_data *dn)
{
	struct f2fs_node *rn = F2FS_NODE(dn->node_page);
	__le32 *addr_array;

	/* Get physical address of data block */
	addr_array = blkaddr_in_node(rn);
	addr_array[dn->ofs_in_node] = cpu_to_le32(dn->data_blkaddr);
}

/*
 * Lock ordering for the change of data block address:
 * ->data_page
 *  ->node_page
 *    update block addresses in the node page
 */
void set_data_blkaddr(struct dnode_of_data *dn)
{
	f2fs_wait_on_page_writeback(dn->node_page, NODE, true);
	__set_data_blkaddr(dn);
	if (set_page_dirty(dn->node_page))
		dn->node_changed = true;
}

void f2fs_update_data_blkaddr(struct dnode_of_data *dn, block_t blkaddr)
{
	dn->data_blkaddr = blkaddr;
	set_data_blkaddr(dn);
	f2fs_update_extent_cache(dn);
}

/* dn->ofs_in_node will be returned with up-to-date last block pointer */
int reserve_new_blocks(struct dnode_of_data *dn, blkcnt_t count)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);

	if (!count)
		return 0;

	if (unlikely(is_inode_flag_set(F2FS_I(dn->inode), FI_NO_ALLOC)))
		return -EPERM;
	if (unlikely(!inc_valid_block_count(sbi, dn->inode, &count)))
		return -ENOSPC;

	trace_f2fs_reserve_new_blocks(dn->inode, dn->nid,
						dn->ofs_in_node, count);

	f2fs_wait_on_page_writeback(dn->node_page, NODE, true);

	for (; count > 0; dn->ofs_in_node++) {
		block_t blkaddr =
			datablock_addr(dn->node_page, dn->ofs_in_node);
		if (blkaddr == NULL_ADDR) {
			dn->data_blkaddr = NEW_ADDR;
			__set_data_blkaddr(dn);
			count--;
		}
	}

	if (set_page_dirty(dn->node_page))
		dn->node_changed = true;

	mark_inode_dirty(dn->inode);
	sync_inode_page(dn);
	return 0;
}

/* Should keep dn->ofs_in_node unchanged */
int reserve_new_block(struct dnode_of_data *dn)
{
	unsigned int ofs_in_node = dn->ofs_in_node;
	int ret;

	ret = reserve_new_blocks(dn, 1);
	dn->ofs_in_node = ofs_in_node;
	return ret;
}

int f2fs_reserve_block(struct dnode_of_data *dn, pgoff_t index)
{
	bool need_put = dn->inode_page ? false : true;
	int err;

	err = get_dnode_of_data(dn, index, ALLOC_NODE);
	if (err)
		return err;

	if (dn->data_blkaddr == NULL_ADDR)
		err = reserve_new_block(dn);
	if (err || need_put)
		f2fs_put_dnode(dn);
	return err;
}

int f2fs_get_block(struct dnode_of_data *dn, pgoff_t index)
{
	struct extent_info ei;
	struct inode *inode = dn->inode;

	if (f2fs_lookup_extent_cache(inode, index, &ei)) {
		dn->data_blkaddr = ei.blk + index - ei.fofs;
		return 0;
	}

	return f2fs_reserve_block(dn, index);
}

struct page *get_read_data_page(struct inode *inode, pgoff_t index,
						int rw, bool for_write)
{
	struct address_space *mapping = inode->i_mapping;
	struct dnode_of_data dn;
	struct page *page;
	struct extent_info ei;
	int err;
	struct f2fs_io_info fio = {
		.sbi = F2FS_I_SB(inode),
		.type = DATA,
		.rw = rw,
		.encrypted_page = NULL,
	};

	if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode))
		return read_mapping_page(mapping, index, NULL);

	page = f2fs_grab_cache_page(mapping, index, for_write);
	if (!page)
		return ERR_PTR(-ENOMEM);

	if (f2fs_lookup_extent_cache(inode, index, &ei)) {
		dn.data_blkaddr = ei.blk + index - ei.fofs;
		goto got_it;
	}

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, index, LOOKUP_NODE);
	if (err)
		goto put_err;
	f2fs_put_dnode(&dn);

	if (unlikely(dn.data_blkaddr == NULL_ADDR)) {
		err = -ENOENT;
		goto put_err;
	}
got_it:
	if (PageUptodate(page)) {
		unlock_page(page);
		return page;
	}

	/*
	 * A new dentry page is allocated but not able to be written, since its
	 * new inode page couldn't be allocated due to -ENOSPC.
	 * In such a case, its blkaddr remains NEW_ADDR.
	 * see, f2fs_add_link -> get_new_data_page -> init_inode_metadata.
	 */
	if (dn.data_blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_SIZE);
		SetPageUptodate(page);
		unlock_page(page);
		return page;
	}

	fio.new_blkaddr = fio.old_blkaddr = dn.data_blkaddr;
	fio.page = page;
	err = f2fs_submit_page_bio(&fio);
	if (err)
		goto put_err;
	return page;

put_err:
	f2fs_put_page(page, 1);
	return ERR_PTR(err);
}

struct page *find_data_page(struct inode *inode, pgoff_t index)
{
	struct address_space *mapping = inode->i_mapping;
	struct page *page;

	page = find_get_page(mapping, index);
	if (page && PageUptodate(page))
		return page;
	f2fs_put_page(page, 0);

	page = get_read_data_page(inode, index, READ_SYNC, false);
	if (IS_ERR(page))
		return page;

	if (PageUptodate(page))
		return page;

	wait_on_page_locked(page);
	if (unlikely(!PageUptodate(page))) {
		f2fs_put_page(page, 0);
		return ERR_PTR(-EIO);
	}
	return page;
}

/*
 * If it tries to access a hole, return an error.
 * Because the callers, functions in dir.c and GC, should be able to know
 * whether this page exists or not.
 */
struct page *get_lock_data_page(struct inode *inode, pgoff_t index,
							bool for_write)
{
	struct address_space *mapping = inode->i_mapping;
	struct page *page;
repeat:
	page = get_read_data_page(inode, index, READ_SYNC, for_write);
	if (IS_ERR(page))
		return page;

	/* wait for read completion */
	lock_page(page);
	if (unlikely(!PageUptodate(page))) {
		f2fs_put_page(page, 1);
		return ERR_PTR(-EIO);
	}
	if (unlikely(page->mapping != mapping)) {
		f2fs_put_page(page, 1);
		goto repeat;
	}
	return page;
}

/*
 * Caller ensures that this data page is never allocated.
 * A new zero-filled data page is allocated in the page cache.
 *
 * Also, caller should grab and release a rwsem by calling f2fs_lock_op() and
 * f2fs_unlock_op().
 * Note that, ipage is set only by make_empty_dir, and if any error occurs,
 * ipage should be released by this function.
 */
struct page *get_new_data_page(struct inode *inode,
		struct page *ipage, pgoff_t index, bool new_i_size)
{
	struct address_space *mapping = inode->i_mapping;
	struct page *page;
	struct dnode_of_data dn;
	int err;

	page = f2fs_grab_cache_page(mapping, index, true);
	if (!page) {
		/*
		 * before exiting, we should make sure ipage will be released
		 * if any error occurs.
		 */
		f2fs_put_page(ipage, 1);
		return ERR_PTR(-ENOMEM);
	}

	set_new_dnode(&dn, inode, ipage, NULL, 0);
	err = f2fs_reserve_block(&dn, index);
	if (err) {
		f2fs_put_page(page, 1);
		return ERR_PTR(err);
	}
	if (!ipage)
		f2fs_put_dnode(&dn);

	if (PageUptodate(page))
		goto got_it;

	if (dn.data_blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_SIZE);
		SetPageUptodate(page);
	} else {
		f2fs_put_page(page, 1);

		/* if ipage exists, blkaddr should be NEW_ADDR */
		f2fs_bug_on(F2FS_I_SB(inode), ipage);
		page = get_lock_data_page(inode, index, true);
		if (IS_ERR(page))
			return page;
	}
got_it:
	if (new_i_size && i_size_read(inode) <
				((loff_t)(index + 1) << PAGE_SHIFT)) {
		i_size_write(inode, ((loff_t)(index + 1) << PAGE_SHIFT));
		/* Only the directory inode sets new_i_size */
		set_inode_flag(F2FS_I(inode), FI_UPDATE_DIR);
	}
	return page;
}

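/*
 * Allocate a new block address for this dnode entry and extend i_size if
 * the newly allocated block falls beyond the current end of file.
 */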
static int __allocate_data_block(struct dnode_of_data *dn)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	struct f2fs_summary sum;
	struct node_info ni;
	int seg = CURSEG_WARM_DATA;
	pgoff_t fofs;
	blkcnt_t count = 1;

	if (unlikely(is_inode_flag_set(F2FS_I(dn->inode), FI_NO_ALLOC)))
		return -EPERM;

	dn->data_blkaddr = datablock_addr(dn->node_page, dn->ofs_in_node);
	if (dn->data_blkaddr == NEW_ADDR)
		goto alloc;

	if (unlikely(!inc_valid_block_count(sbi, dn->inode, &count)))
		return -ENOSPC;

alloc:
	get_node_info(sbi, dn->nid, &ni);
	set_summary(&sum, dn->nid, dn->ofs_in_node, ni.version);

	if (dn->ofs_in_node == 0 && dn->inode_page == dn->node_page)
		seg = CURSEG_DIRECT_IO;

	allocate_data_block(sbi, NULL, dn->data_blkaddr, &dn->data_blkaddr,
								&sum, seg);
	set_data_blkaddr(dn);

	/* update i_size */
	fofs = start_bidx_of_node(ofs_of_node(dn->node_page), dn->inode) +
							dn->ofs_in_node;
	if (i_size_read(dn->inode) < ((loff_t)(fofs + 1) << PAGE_SHIFT))
		i_size_write(dn->inode,
				((loff_t)(fofs + 1) << PAGE_SHIFT));
	return 0;
}

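/*
 * Preallocate blocks for a buffered or direct write described by @iocb and
 * @from, converting inline data first when the write cannot stay inline.
 */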
ssize_t f2fs_preallocate_blocks(struct kiocb *iocb, struct iov_iter *from)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	struct f2fs_map_blocks map;
	ssize_t ret = 0;

	map.m_lblk = F2FS_BLK_ALIGN(iocb->ki_pos);
	map.m_len = F2FS_BYTES_TO_BLK(iov_iter_count(from));
	map.m_next_pgofs = NULL;

	if (f2fs_encrypted_inode(inode))
		return 0;

	if (iocb->ki_flags & IOCB_DIRECT) {
		ret = f2fs_convert_inline_inode(inode);
		if (ret)
			return ret;
		return f2fs_map_blocks(inode, &map, 1, F2FS_GET_BLOCK_PRE_DIO);
	}
	if (iocb->ki_pos + iov_iter_count(from) > MAX_INLINE_DATA) {
		ret = f2fs_convert_inline_inode(inode);
		if (ret)
			return ret;
	}
	if (!f2fs_has_inline_data(inode))
		return f2fs_map_blocks(inode, &map, 1, F2FS_GET_BLOCK_PRE_AIO);
	return ret;
}

/*
 * f2fs_map_blocks() now supports readahead/bmap/rw direct_IO with the
 * f2fs_map_blocks structure.
 * If original data blocks are allocated, then give them to blockdev.
 * Otherwise,
 *     a. preallocate requested block addresses
 *     b. do not use extent cache for better performance
 *     c. give the block addresses to blockdev
 */
int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map,
						int create, int flag)
{
	unsigned int maxblocks = map->m_len;
	struct dnode_of_data dn;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	int mode = create ? ALLOC_NODE : LOOKUP_NODE_RA;
	pgoff_t pgofs, end_offset, end;
	int err = 0, ofs = 1;
	unsigned int ofs_in_node, last_ofs_in_node;
	blkcnt_t prealloc;
	struct extent_info ei;
	bool allocated = false;
	block_t blkaddr;

	map->m_len = 0;
	map->m_flags = 0;

	/* it only supports block size == page size */
	pgofs = (pgoff_t)map->m_lblk;
	end = pgofs + maxblocks;

	if (!create && f2fs_lookup_extent_cache(inode, pgofs, &ei)) {
		map->m_pblk = ei.blk + pgofs - ei.fofs;
		map->m_len = min((pgoff_t)maxblocks, ei.fofs + ei.len - pgofs);
		map->m_flags = F2FS_MAP_MAPPED;
		goto out;
	}

next_dnode:
	if (create)
		f2fs_lock_op(sbi);

	/* When reading holes, we need its node page */
	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, pgofs, mode);
	if (err) {
		if (flag == F2FS_GET_BLOCK_BMAP)
			map->m_pblk = 0;
		if (err == -ENOENT) {
			err = 0;
			if (map->m_next_pgofs)
				*map->m_next_pgofs =
					get_next_page_offset(&dn, pgofs);
		}
		goto unlock_out;
	}

	prealloc = 0;
	ofs_in_node = dn.ofs_in_node;
	end_offset = ADDRS_PER_PAGE(dn.node_page, inode);

next_block:
	blkaddr = datablock_addr(dn.node_page, dn.ofs_in_node);

	if (blkaddr == NEW_ADDR || blkaddr == NULL_ADDR) {
		if (create) {
			if (unlikely(f2fs_cp_error(sbi))) {
				err = -EIO;
				goto sync_out;
			}
			if (flag == F2FS_GET_BLOCK_PRE_AIO) {
				if (blkaddr == NULL_ADDR) {
					prealloc++;
					last_ofs_in_node = dn.ofs_in_node;
				}
			} else {
				err = __allocate_data_block(&dn);
				if (!err) {
					set_inode_flag(F2FS_I(inode),
							FI_APPEND_WRITE);
					allocated = true;
				}
			}
			if (err)
				goto sync_out;
			map->m_flags = F2FS_MAP_NEW;
			blkaddr = dn.data_blkaddr;
		} else {
			if (flag == F2FS_GET_BLOCK_BMAP) {
				map->m_pblk = 0;
				goto sync_out;
			}
			if (flag == F2FS_GET_BLOCK_FIEMAP &&
						blkaddr == NULL_ADDR) {
				if (map->m_next_pgofs)
					*map->m_next_pgofs = pgofs + 1;
			}
			if (flag != F2FS_GET_BLOCK_FIEMAP ||
						blkaddr != NEW_ADDR)
				goto sync_out;
		}
	}

	if (flag == F2FS_GET_BLOCK_PRE_AIO)
		goto skip;

	if (map->m_len == 0) {
		/* preallocated unwritten block should be mapped for fiemap. */
		if (blkaddr == NEW_ADDR)
			map->m_flags |= F2FS_MAP_UNWRITTEN;
		map->m_flags |= F2FS_MAP_MAPPED;

		map->m_pblk = blkaddr;
		map->m_len = 1;
	} else if ((map->m_pblk != NEW_ADDR &&
			blkaddr == (map->m_pblk + ofs)) ||
			(map->m_pblk == NEW_ADDR && blkaddr == NEW_ADDR) ||
			flag == F2FS_GET_BLOCK_PRE_DIO) {
		ofs++;
		map->m_len++;
	} else {
		goto sync_out;
	}

skip:
	dn.ofs_in_node++;
	pgofs++;

	/* preallocate blocks in batch for one dnode page */
	if (flag == F2FS_GET_BLOCK_PRE_AIO &&
			(pgofs == end || dn.ofs_in_node == end_offset)) {

		dn.ofs_in_node = ofs_in_node;
		err = reserve_new_blocks(&dn, prealloc);
		if (err)
			goto sync_out;

		map->m_len += dn.ofs_in_node - ofs_in_node;
		if (prealloc && dn.ofs_in_node != last_ofs_in_node + 1) {
			err = -ENOSPC;
			goto sync_out;
		}
		dn.ofs_in_node = end_offset;
	}

	if (pgofs >= end)
		goto sync_out;
	else if (dn.ofs_in_node < end_offset)
		goto next_block;

	if (allocated)
		sync_inode_page(&dn);
	f2fs_put_dnode(&dn);

	if (create) {
		f2fs_unlock_op(sbi);
		f2fs_balance_fs(sbi, allocated);
	}
	allocated = false;
	goto next_dnode;

sync_out:
	if (allocated)
		sync_inode_page(&dn);
	f2fs_put_dnode(&dn);
unlock_out:
	if (create) {
		f2fs_unlock_op(sbi);
		f2fs_balance_fs(sbi, allocated);
	}
out:
	trace_f2fs_map_blocks(inode, map, err);
	return err;
}

static int __get_data_block(struct inode *inode, sector_t iblock,
			struct buffer_head *bh, int create, int flag,
			pgoff_t *next_pgofs)
{
	struct f2fs_map_blocks map;
	int ret;

	map.m_lblk = iblock;
	map.m_len = bh->b_size >> inode->i_blkbits;
	map.m_next_pgofs = next_pgofs;

	ret = f2fs_map_blocks(inode, &map, create, flag);
	if (!ret) {
		map_bh(bh, inode->i_sb, map.m_pblk);
		bh->b_state = (bh->b_state & ~F2FS_MAP_FLAGS) | map.m_flags;
		bh->b_size = map.m_len << inode->i_blkbits;
	}
	return ret;
}

static int get_data_block(struct inode *inode, sector_t iblock,
			struct buffer_head *bh_result, int create, int flag,
			pgoff_t *next_pgofs)
{
	return __get_data_block(inode, iblock, bh_result, create,
							flag, next_pgofs);
}

static int get_data_block_dio(struct inode *inode, sector_t iblock,
			struct buffer_head *bh_result, int create)
{
	return __get_data_block(inode, iblock, bh_result, create,
						F2FS_GET_BLOCK_DIO, NULL);
}

static int get_data_block_bmap(struct inode *inode, sector_t iblock,
			struct buffer_head *bh_result, int create)
{
	/* Block number less than F2FS MAX BLOCKS */
	if (unlikely(iblock >= F2FS_I_SB(inode)->max_file_blocks))
		return -EFBIG;

	return __get_data_block(inode, iblock, bh_result, create,
						F2FS_GET_BLOCK_BMAP, NULL);
}

static inline sector_t logical_to_blk(struct inode *inode, loff_t offset)
{
	return (offset >> inode->i_blkbits);
}

static inline loff_t blk_to_logical(struct inode *inode, sector_t blk)
{
	return (blk << inode->i_blkbits);
}

int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
		u64 start, u64 len)
{
	struct buffer_head map_bh;
	sector_t start_blk, last_blk;
	pgoff_t next_pgofs;
	loff_t isize;
	u64 logical = 0, phys = 0, size = 0;
	u32 flags = 0;
	int ret = 0;

	ret = fiemap_check_flags(fieinfo, FIEMAP_FLAG_SYNC);
	if (ret)
		return ret;

	if (f2fs_has_inline_data(inode)) {
		ret = f2fs_inline_data_fiemap(inode, fieinfo, start, len);
		if (ret != -EAGAIN)
			return ret;
	}

	inode_lock(inode);

	isize = i_size_read(inode);
	if (start >= isize)
		goto out;

	if (start + len > isize)
		len = isize - start;

	if (logical_to_blk(inode, len) == 0)
		len = blk_to_logical(inode, 1);

	start_blk = logical_to_blk(inode, start);
	last_blk = logical_to_blk(inode, start + len - 1);

next:
	memset(&map_bh, 0, sizeof(struct buffer_head));
	map_bh.b_size = len;

	ret = get_data_block(inode, start_blk, &map_bh, 0,
					F2FS_GET_BLOCK_FIEMAP, &next_pgofs);
	if (ret)
		goto out;

	/* HOLE */
	if (!buffer_mapped(&map_bh)) {
		start_blk = next_pgofs;
		/* Go through holes until passing the EOF */
		if (blk_to_logical(inode, start_blk) < isize)
			goto prep_next;
		/* A hole beyond isize means there are no more extents.
		 * Note that the premise is that filesystems don't
		 * punch holes beyond isize and keep size unchanged.
		 */
		flags |= FIEMAP_EXTENT_LAST;
	}

	if (size) {
		if (f2fs_encrypted_inode(inode))
			flags |= FIEMAP_EXTENT_DATA_ENCRYPTED;

		ret = fiemap_fill_next_extent(fieinfo, logical,
				phys, size, flags);
	}

	if (start_blk > last_blk || ret)
		goto out;

	logical = blk_to_logical(inode, start_blk);
	phys = blk_to_logical(inode, map_bh.b_blocknr);
	size = map_bh.b_size;
	flags = 0;
	if (buffer_unwritten(&map_bh))
		flags = FIEMAP_EXTENT_UNWRITTEN;

	start_blk += logical_to_blk(inode, size);

prep_next:
	cond_resched();
	if (fatal_signal_pending(current))
		ret = -EINTR;
	else
		goto next;
out:
	if (ret == 1)
		ret = 0;

	inode_unlock(inode);
	return ret;
}

/*
 * This function was originally taken from fs/mpage.c, and customized for f2fs.
 * Major change was from block_size == page_size in f2fs by default.
 */
static int f2fs_mpage_readpages(struct address_space *mapping,
			struct list_head *pages, struct page *page,
			unsigned nr_pages)
{
	struct bio *bio = NULL;
	unsigned page_idx;
	sector_t last_block_in_bio = 0;
	struct inode *inode = mapping->host;
	const unsigned blkbits = inode->i_blkbits;
	const unsigned blocksize = 1 << blkbits;
	sector_t block_in_file;
	sector_t last_block;
	sector_t last_block_in_file;
	sector_t block_nr;
	struct block_device *bdev = inode->i_sb->s_bdev;
	struct f2fs_map_blocks map;

	map.m_pblk = 0;
	map.m_lblk = 0;
	map.m_len = 0;
	map.m_flags = 0;
	map.m_next_pgofs = NULL;

	for (page_idx = 0; nr_pages; page_idx++, nr_pages--) {

		prefetchw(&page->flags);
		if (pages) {
			page = list_entry(pages->prev, struct page, lru);
			list_del(&page->lru);
			if (add_to_page_cache_lru(page, mapping,
						  page->index, GFP_KERNEL))
				goto next_page;
		}

		block_in_file = (sector_t)page->index;
		last_block = block_in_file + nr_pages;
		last_block_in_file = (i_size_read(inode) + blocksize - 1) >>
								blkbits;
		if (last_block > last_block_in_file)
			last_block = last_block_in_file;

		/*
		 * Map blocks using the previous result first.
		 */
		if ((map.m_flags & F2FS_MAP_MAPPED) &&
				block_in_file > map.m_lblk &&
				block_in_file < (map.m_lblk + map.m_len))
			goto got_it;

		/*
		 * Then do more f2fs_map_blocks() calls until we are
		 * done with this page.
		 */
		map.m_flags = 0;

		if (block_in_file < last_block) {
			map.m_lblk = block_in_file;
			map.m_len = last_block - block_in_file;

			if (f2fs_map_blocks(inode, &map, 0,
						F2FS_GET_BLOCK_READ))
				goto set_error_page;
		}
got_it:
		if ((map.m_flags & F2FS_MAP_MAPPED)) {
			block_nr = map.m_pblk + block_in_file - map.m_lblk;
			SetPageMappedToDisk(page);

			if (!PageUptodate(page) && !cleancache_get_page(page)) {
				SetPageUptodate(page);
				goto confused;
			}
		} else {
			zero_user_segment(page, 0, PAGE_SIZE);
			SetPageUptodate(page);
			unlock_page(page);
			goto next_page;
		}

		/*
		 * This page will go to BIO.  Do we need to send this
		 * BIO off first?
		 */
		if (bio && (last_block_in_bio != block_nr - 1)) {
submit_and_realloc:
			__submit_bio(F2FS_I_SB(inode), READ, bio);
			bio = NULL;
		}
		if (bio == NULL) {
			struct fscrypt_ctx *ctx = NULL;

			if (f2fs_encrypted_inode(inode) &&
					S_ISREG(inode->i_mode)) {

				ctx = fscrypt_get_ctx(inode, GFP_NOFS);
				if (IS_ERR(ctx))
					goto set_error_page;

				/* wait the page to be moved by cleaning */
				f2fs_wait_on_encrypted_page_writeback(
						F2FS_I_SB(inode), block_nr);
			}

			bio = bio_alloc(GFP_KERNEL,
				min_t(int, nr_pages, BIO_MAX_PAGES));
			if (!bio) {
				if (ctx)
					fscrypt_release_ctx(ctx);
				goto set_error_page;
			}
			bio->bi_bdev = bdev;
			bio->bi_iter.bi_sector = SECTOR_FROM_BLOCK(block_nr);
			bio->bi_end_io = f2fs_read_end_io;
			bio->bi_private = ctx;
			bio->bi_rw = READ;
		}

		if (bio_add_page(bio, page, blocksize, 0) < blocksize)
			goto submit_and_realloc;

		last_block_in_bio = block_nr;
		goto next_page;
set_error_page:
		SetPageError(page);
		zero_user_segment(page, 0, PAGE_SIZE);
		unlock_page(page);
		goto next_page;
confused:
		if (bio) {
			__submit_bio(F2FS_I_SB(inode), READ, bio);
			bio = NULL;
		}
		unlock_page(page);
next_page:
		if (pages)
			put_page(page);
	}
	BUG_ON(pages && !list_empty(pages));
	if (bio)
		__submit_bio(F2FS_I_SB(inode), READ, bio);
	return 0;
}

static int f2fs_read_data_page(struct file *file, struct page *page)
{
	struct inode *inode = page->mapping->host;
	int ret = -EAGAIN;

	trace_f2fs_readpage(page, DATA);

	/* If the file has inline data, try to read it directly */
	if (f2fs_has_inline_data(inode))
		ret = f2fs_read_inline_data(inode, page);
	if (ret == -EAGAIN)
		ret = f2fs_mpage_readpages(page->mapping, NULL, page, 1);
	return ret;
}

static int f2fs_read_data_pages(struct file *file,
			struct address_space *mapping,
			struct list_head *pages, unsigned nr_pages)
{
	struct inode *inode = file->f_mapping->host;
	struct page *page = list_entry(pages->prev, struct page, lru);

	trace_f2fs_readpages(inode, page, nr_pages);

	/* If the file has inline data, skip readpages */
	if (f2fs_has_inline_data(inode))
		return 0;

	return f2fs_mpage_readpages(mapping, pages, NULL, nr_pages);
}

int do_write_data_page(struct f2fs_io_info *fio)
{
	struct page *page = fio->page;
	struct inode *inode = page->mapping->host;
	struct dnode_of_data dn;
	int err = 0;

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, page->index, LOOKUP_NODE);
	if (err)
		return err;

	fio->old_blkaddr = dn.data_blkaddr;

	/* This page is already truncated */
	if (fio->old_blkaddr == NULL_ADDR) {
		ClearPageUptodate(page);
		goto out_writepage;
	}

	if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode)) {
		gfp_t gfp_flags = GFP_NOFS;

		/* wait for GCed encrypted page writeback */
		f2fs_wait_on_encrypted_page_writeback(F2FS_I_SB(inode),
							fio->old_blkaddr);
retry_encrypt:
		fio->encrypted_page = fscrypt_encrypt_page(inode, fio->page,
								gfp_flags);
		if (IS_ERR(fio->encrypted_page)) {
			err = PTR_ERR(fio->encrypted_page);
			if (err == -ENOMEM) {
				/* flush pending ios and wait for a while */
				f2fs_flush_merged_bios(F2FS_I_SB(inode));
				congestion_wait(BLK_RW_ASYNC, HZ/50);
				gfp_flags |= __GFP_NOFAIL;
				err = 0;
				goto retry_encrypt;
			}
			goto out_writepage;
		}
	}

	set_page_writeback(page);

	/*
	 * If current allocation needs SSR,
	 * it is better to do in-place writes for updated data.
	 */
	if (unlikely(fio->old_blkaddr != NEW_ADDR &&
			!is_cold_data(page) &&
			!IS_ATOMIC_WRITTEN_PAGE(page) &&
			need_inplace_update(inode))) {
		rewrite_data_page(fio);
		set_inode_flag(F2FS_I(inode), FI_UPDATE_WRITE);
		trace_f2fs_do_write_data_page(page, IPU);
	} else {
		write_data_page(&dn, fio);
		trace_f2fs_do_write_data_page(page, OPU);
		set_inode_flag(F2FS_I(inode), FI_APPEND_WRITE);
		if (page->index == 0)
			set_inode_flag(F2FS_I(inode), FI_FIRST_BLOCK_WRITTEN);
	}
out_writepage:
	f2fs_put_dnode(&dn);
	return err;
}

static int f2fs_write_data_page(struct page *page,
					struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	loff_t i_size = i_size_read(inode);
	const pgoff_t end_index = ((unsigned long long) i_size)
							>> PAGE_SHIFT;
	unsigned offset = 0;
	bool need_balance_fs = false;
	int err = 0;
	struct f2fs_io_info fio = {
		.sbi = sbi,
		.type = DATA,
		.rw = (wbc->sync_mode == WB_SYNC_ALL) ? WRITE_SYNC : WRITE,
		.page = page,
		.encrypted_page = NULL,
	};

	trace_f2fs_writepage(page, DATA);

	if (page->index < end_index)
		goto write;

	/*
	 * If the offset is out-of-range of file size,
	 * this page does not have to be written to disk.
	 */
	offset = i_size & (PAGE_SIZE - 1);
	if ((page->index >= end_index + 1) || !offset)
		goto out;

	zero_user_segment(page, offset, PAGE_SIZE);
write:
	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
		goto redirty_out;
	if (f2fs_is_drop_cache(inode))
		goto out;
	/* we should not write 0'th page having journal header */
	if (f2fs_is_volatile_file(inode) && (!page->index ||
			(!wbc->for_reclaim &&
			available_free_memory(sbi, BASE_CHECK))))
		goto redirty_out;

	/* Dentry blocks are controlled by checkpoint */
	if (S_ISDIR(inode->i_mode)) {
		if (unlikely(f2fs_cp_error(sbi)))
			goto redirty_out;
		err = do_write_data_page(&fio);
		goto done;
	}

	/* we should bypass data pages to let the kworker jobs proceed */
	if (unlikely(f2fs_cp_error(sbi))) {
		SetPageError(page);
		goto out;
	}

	if (!wbc->for_reclaim)
		need_balance_fs = true;
	else if (has_not_enough_free_secs(sbi, 0))
		goto redirty_out;

	err = -EAGAIN;
	f2fs_lock_op(sbi);
	if (f2fs_has_inline_data(inode))
		err = f2fs_write_inline_data(inode, page);
	if (err == -EAGAIN)
		err = do_write_data_page(&fio);
	f2fs_unlock_op(sbi);
done:
	if (err && err != -ENOENT)
		goto redirty_out;

	clear_cold_data(page);
out:
	inode_dec_dirty_pages(inode);
	if (err)
		ClearPageUptodate(page);

	if (wbc->for_reclaim) {
		f2fs_submit_merged_bio_cond(sbi, NULL, page, 0, DATA, WRITE);
		remove_dirty_inode(inode);
	}

	unlock_page(page);
	f2fs_balance_fs(sbi, need_balance_fs);

	if (unlikely(f2fs_cp_error(sbi)))
		f2fs_submit_merged_bio(sbi, DATA, WRITE);

	return 0;

redirty_out:
	redirty_page_for_writepage(wbc, page);
	return AOP_WRITEPAGE_ACTIVATE;
}

static int __f2fs_writepage(struct page *page, struct writeback_control *wbc,
			void *data)
{
	struct address_space *mapping = data;
	int ret = mapping->a_ops->writepage(page, wbc);
	mapping_set_error(mapping, ret);
	return ret;
}

/*
 * This function was copied from write_cache_pages in mm/page-writeback.c.
 * The major change is making write step of cold data page separately from
 * warm/hot data page.
 */
static int f2fs_write_cache_pages(struct address_space *mapping,
			struct writeback_control *wbc, writepage_t writepage,
			void *data)
{
	int ret = 0;
	int done = 0;
	struct pagevec pvec;
	int nr_pages;
	pgoff_t uninitialized_var(writeback_index);
	pgoff_t index;
	pgoff_t end;		/* Inclusive */
	pgoff_t done_index;
	int cycled;
	int range_whole = 0;
	int tag;
	int step = 0;

	pagevec_init(&pvec, 0);
next:
	if (wbc->range_cyclic) {
		writeback_index = mapping->writeback_index; /* prev offset */
		index = writeback_index;
		if (index == 0)
			cycled = 1;
		else
			cycled = 0;
		end = -1;
	} else {
		index = wbc->range_start >> PAGE_SHIFT;
		end = wbc->range_end >> PAGE_SHIFT;
		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
			range_whole = 1;
		cycled = 1; /* ignore range_cyclic tests */
	}
	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
		tag = PAGECACHE_TAG_TOWRITE;
	else
		tag = PAGECACHE_TAG_DIRTY;
retry:
	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
		tag_pages_for_writeback(mapping, index, end);
	done_index = index;
	while (!done && (index <= end)) {
		int i;

		nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag,
			      min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1);
		if (nr_pages == 0)
			break;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];

			if (page->index > end) {
				done = 1;
				break;
			}

			done_index = page->index;

			lock_page(page);

			if (unlikely(page->mapping != mapping)) {
continue_unlock:
				unlock_page(page);
				continue;
			}

			if (!PageDirty(page)) {
				/* someone wrote it for us */
				goto continue_unlock;
			}

			if (step == is_cold_data(page))
				goto continue_unlock;

			if (PageWriteback(page)) {
				if (wbc->sync_mode != WB_SYNC_NONE)
					f2fs_wait_on_page_writeback(page,
								DATA, true);
				else
					goto continue_unlock;
			}

			BUG_ON(PageWriteback(page));
			if (!clear_page_dirty_for_io(page))
				goto continue_unlock;

			ret = (*writepage)(page, wbc, data);
			if (unlikely(ret)) {
				if (ret == AOP_WRITEPAGE_ACTIVATE) {
					unlock_page(page);
					ret = 0;
				} else {
					done_index = page->index + 1;
					done = 1;
					break;
				}
			}

			if (--wbc->nr_to_write <= 0 &&
			    wbc->sync_mode == WB_SYNC_NONE) {
				done = 1;
				break;
			}
		}
		pagevec_release(&pvec);
		cond_resched();
	}

	if (step < 1) {
		step++;
		goto next;
	}

	if (!cycled && !done) {
		cycled = 1;
		index = 0;
		end = writeback_index - 1;
		goto retry;
	}
	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
		mapping->writeback_index = done_index;

	return ret;
}

static int f2fs_write_data_pages(struct address_space *mapping,
			    struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	bool locked = false;
	int ret;
	long diff;

	/* deal with chardevs and other special files */
	if (!mapping->a_ops->writepage)
		return 0;

	/* skip writing if there is no dirty page in this inode */
	if (!get_dirty_pages(inode) && wbc->sync_mode == WB_SYNC_NONE)
		return 0;

	if (S_ISDIR(inode->i_mode) && wbc->sync_mode == WB_SYNC_NONE &&
			get_dirty_pages(inode) < nr_pages_to_skip(sbi, DATA) &&
			available_free_memory(sbi, DIRTY_DENTS))
		goto skip_write;

	/* skip writing during file defragment */
	if (is_inode_flag_set(F2FS_I(inode), FI_DO_DEFRAG))
		goto skip_write;

	/* during POR, we don't need to trigger writepage at all. */
	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
		goto skip_write;

	trace_f2fs_writepages(mapping->host, wbc, DATA);

	diff = nr_pages_to_write(sbi, DATA, wbc);

	if (!S_ISDIR(inode->i_mode) && wbc->sync_mode == WB_SYNC_ALL) {
		mutex_lock(&sbi->writepages);
		locked = true;
	}
	ret = f2fs_write_cache_pages(mapping, wbc, __f2fs_writepage, mapping);
	f2fs_submit_merged_bio_cond(sbi, inode, NULL, 0, DATA, WRITE);
	if (locked)
		mutex_unlock(&sbi->writepages);

	remove_dirty_inode(inode);

	wbc->nr_to_write = max((long)0, wbc->nr_to_write - diff);
	return ret;

skip_write:
	wbc->pages_skipped += get_dirty_pages(inode);
	trace_f2fs_writepages(mapping->host, wbc, DATA);
	return 0;
}

static void f2fs_write_failed(struct address_space *mapping, loff_t to)
{
	struct inode *inode = mapping->host;
	loff_t i_size = i_size_read(inode);

	if (to > i_size) {
		truncate_pagecache(inode, i_size);
		truncate_blocks(inode, i_size, true);
	}
}

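/*
 * Find the block address backing the page being written: read or convert
 * inline data, look up the extent cache, or reserve a new block for a hole.
 */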
static int prepare_write_begin(struct f2fs_sb_info *sbi,
			struct page *page, loff_t pos, unsigned len,
			block_t *blk_addr, bool *node_changed)
{
	struct inode *inode = page->mapping->host;
	pgoff_t index = page->index;
	struct dnode_of_data dn;
	struct page *ipage;
	bool locked = false;
	struct extent_info ei;
	int err = 0;

	/*
	 * we already allocated all the blocks, so we don't need to get
	 * the block addresses when there is no need to fill the page.
	 */
	if (!f2fs_has_inline_data(inode) && !f2fs_encrypted_inode(inode) &&
					len == PAGE_SIZE)
		return 0;

	if (f2fs_has_inline_data(inode) ||
			(pos & PAGE_MASK) >= i_size_read(inode)) {
		f2fs_lock_op(sbi);
		locked = true;
	}
restart:
	/* check inline_data */
	ipage = get_node_page(sbi, inode->i_ino);
	if (IS_ERR(ipage)) {
		err = PTR_ERR(ipage);
		goto unlock_out;
	}

	set_new_dnode(&dn, inode, ipage, ipage, 0);

	if (f2fs_has_inline_data(inode)) {
		if (pos + len <= MAX_INLINE_DATA) {
			read_inline_data(page, ipage);
			set_inode_flag(F2FS_I(inode), FI_DATA_EXIST);
			if (inode->i_nlink)
				set_inline_node(ipage);
		} else {
			err = f2fs_convert_inline_page(&dn, page);
			if (err)
				goto out;
			if (dn.data_blkaddr == NULL_ADDR)
				err = f2fs_get_block(&dn, index);
		}
	} else if (locked) {
		err = f2fs_get_block(&dn, index);
	} else {
		if (f2fs_lookup_extent_cache(inode, index, &ei)) {
			dn.data_blkaddr = ei.blk + index - ei.fofs;
		} else {
			/* hole case */
			err = get_dnode_of_data(&dn, index, LOOKUP_NODE);
			if (err || dn.data_blkaddr == NULL_ADDR) {
				f2fs_put_dnode(&dn);
				f2fs_lock_op(sbi);
				locked = true;
				goto restart;
			}
		}
	}

	/* convert_inline_page can make node_changed */
	*blk_addr = dn.data_blkaddr;
	*node_changed = dn.node_changed;
out:
	f2fs_put_dnode(&dn);
unlock_out:
	if (locked)
		f2fs_unlock_op(sbi);
	return err;
}

static int f2fs_write_begin(struct file *file, struct address_space *mapping,
		loff_t pos, unsigned len, unsigned flags,
		struct page **pagep, void **fsdata)
{
	struct inode *inode = mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct page *page = NULL;
	pgoff_t index = ((unsigned long long) pos) >> PAGE_SHIFT;
	bool need_balance = false;
	block_t blkaddr = NULL_ADDR;
	int err = 0;

	trace_f2fs_write_begin(inode, pos, len, flags);

	/*
	 * We should check this at this moment to avoid deadlock on inode page
	 * and #0 page. The locking rule for inline_data conversion should be:
	 * lock_page(page #0) -> lock_page(inode_page)
	 */
	if (index != 0) {
		err = f2fs_convert_inline_inode(inode);
		if (err)
			goto fail;
	}
repeat:
	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page) {
		err = -ENOMEM;
		goto fail;
	}

	*pagep = page;

	err = prepare_write_begin(sbi, page, pos, len,
					&blkaddr, &need_balance);
	if (err)
		goto fail;

	if (need_balance && has_not_enough_free_secs(sbi, 0)) {
		unlock_page(page);
		f2fs_balance_fs(sbi, true);
		lock_page(page);
		if (page->mapping != mapping) {
			/* The page got truncated from under us */
			f2fs_put_page(page, 1);
			goto repeat;
		}
	}

	f2fs_wait_on_page_writeback(page, DATA, false);

	/* wait for GCed encrypted page writeback */
	if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode))
		f2fs_wait_on_encrypted_page_writeback(sbi, blkaddr);

	if (len == PAGE_SIZE)
		goto out_update;
	if (PageUptodate(page))
		goto out_clear;

	if ((pos & PAGE_MASK) >= i_size_read(inode)) {
		unsigned start = pos & (PAGE_SIZE - 1);
		unsigned end = start + len;

		/* Reading beyond i_size is simple: memset to zero */
		zero_user_segments(page, 0, start, end, PAGE_SIZE);
		goto out_update;
	}

	if (blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_SIZE);
	} else {
		struct f2fs_io_info fio = {
			.sbi = sbi,
			.type = DATA,
			.rw = READ_SYNC,
			.old_blkaddr = blkaddr,
			.new_blkaddr = blkaddr,
			.page = page,
			.encrypted_page = NULL,
		};
		err = f2fs_submit_page_bio(&fio);
		if (err)
			goto fail;

		lock_page(page);
		if (unlikely(!PageUptodate(page))) {
			err = -EIO;
			goto fail;
		}
		if (unlikely(page->mapping != mapping)) {
			f2fs_put_page(page, 1);
			goto repeat;
		}

		/* avoid symlink page */
		if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode)) {
			err = fscrypt_decrypt_page(page);
			if (err)
				goto fail;
		}
	}
out_update:
	SetPageUptodate(page);
out_clear:
	clear_cold_data(page);
	return 0;

fail:
	f2fs_put_page(page, 1);
	f2fs_write_failed(mapping, pos + len);
	return err;
}

static int f2fs_write_end(struct file *file,
			struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	struct inode *inode = page->mapping->host;

	trace_f2fs_write_end(inode, pos, len, copied);

	set_page_dirty(page);

	if (pos + copied > i_size_read(inode)) {
		i_size_write(inode, pos + copied);
		mark_inode_dirty(inode);
	}

	f2fs_put_page(page, 1);
	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
	return copied;
}

static int check_direct_IO(struct inode *inode, struct iov_iter *iter,
			   loff_t offset)
{
	unsigned blocksize_mask = inode->i_sb->s_blocksize - 1;

	if (offset & blocksize_mask)
		return -EINVAL;

	if (iov_iter_alignment(iter) & blocksize_mask)
		return -EINVAL;

	return 0;
}

static ssize_t f2fs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
{
	struct address_space *mapping = iocb->ki_filp->f_mapping;
	struct inode *inode = mapping->host;
	size_t count = iov_iter_count(iter);
	loff_t offset = iocb->ki_pos;
	int err;

	err = check_direct_IO(inode, iter, offset);
	if (err)
		return err;

	if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode))
		return 0;

	trace_f2fs_direct_IO_enter(inode, offset, count, iov_iter_rw(iter));

	err = blockdev_direct_IO(iocb, inode, iter, get_data_block_dio);
	if (iov_iter_rw(iter) == WRITE) {
		if (err > 0)
			set_inode_flag(F2FS_I(inode), FI_UPDATE_WRITE);
		else if (err < 0)
			f2fs_write_failed(mapping, offset + count);
	}

	trace_f2fs_direct_IO_exit(inode, offset, count, iov_iter_rw(iter), err);

	return err;
}

void f2fs_invalidate_page(struct page *page, unsigned int offset,
							unsigned int length)
{
	struct inode *inode = page->mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

	if (inode->i_ino >= F2FS_ROOT_INO(sbi) &&
		(offset % PAGE_SIZE || length != PAGE_SIZE))
		return;

	if (PageDirty(page)) {
		if (inode->i_ino == F2FS_META_INO(sbi))
			dec_page_count(sbi, F2FS_DIRTY_META);
		else if (inode->i_ino == F2FS_NODE_INO(sbi))
			dec_page_count(sbi, F2FS_DIRTY_NODES);
		else
			inode_dec_dirty_pages(inode);
	}

	/* This is atomic written page, keep Private */
	if (IS_ATOMIC_WRITTEN_PAGE(page))
		return;

	set_page_private(page, 0);
	ClearPagePrivate(page);
}

int f2fs_release_page(struct page *page, gfp_t wait)
{
	/* If this is dirty page, keep PagePrivate */
	if (PageDirty(page))
		return 0;

	/* This is atomic written page, keep Private */
	if (IS_ATOMIC_WRITTEN_PAGE(page))
		return 0;

	set_page_private(page, 0);
	ClearPagePrivate(page);
	return 1;
}

static int f2fs_set_data_page_dirty(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct inode *inode = mapping->host;

	trace_f2fs_set_page_dirty(page, DATA);

	SetPageUptodate(page);

	if (f2fs_is_atomic_file(inode)) {
		if (!IS_ATOMIC_WRITTEN_PAGE(page)) {
			register_inmem_page(inode, page);
			return 1;
		}
		/*
		 * Previously, this page has been registered, we just
		 * return here.
		 */
		return 0;
	}

	if (!PageDirty(page)) {
		__set_page_dirty_nobuffers(page);
		update_dirty_page(inode, page);
		return 1;
	}
	return 0;
}

static sector_t f2fs_bmap(struct address_space *mapping, sector_t block)
{
	struct inode *inode = mapping->host;

	if (f2fs_has_inline_data(inode))
		return 0;

	/* make sure allocating whole blocks */
	if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
		filemap_write_and_wait(mapping);

	return generic_block_bmap(mapping, block, get_data_block_bmap);
}

const struct address_space_operations f2fs_dblock_aops = {
	.readpage	= f2fs_read_data_page,
	.readpages	= f2fs_read_data_pages,
	.writepage	= f2fs_write_data_page,
	.writepages	= f2fs_write_data_pages,
	.write_begin	= f2fs_write_begin,
	.write_end	= f2fs_write_end,
	.set_page_dirty	= f2fs_set_data_page_dirty,
	.invalidatepage	= f2fs_invalidate_page,
	.releasepage	= f2fs_release_page,
	.direct_IO	= f2fs_direct_IO,
	.bmap		= f2fs_bmap,
};