/*
 * fs/f2fs/data.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/buffer_head.h>
#include <linux/mpage.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/pagevec.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/prefetch.h>
#include <linux/uio.h>
#include <linux/mm.h>
#include <linux/memcontrol.h>
#include <linux/cleancache.h>
#include <linux/sched/signal.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "trace.h"
#include <trace/events/f2fs.h>

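/*
 * Return true if writeback of this page is guaranteed by a checkpoint:
 * meta/node pages, directory data and cold data.
 */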
static bool __is_cp_guaranteed(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct inode *inode;
	struct f2fs_sb_info *sbi;

	if (!mapping)
		return false;

	inode = mapping->host;
	sbi = F2FS_I_SB(inode);

	if (inode->i_ino == F2FS_META_INO(sbi) ||
			inode->i_ino ==  F2FS_NODE_INO(sbi) ||
			S_ISDIR(inode->i_mode) ||
			is_cold_data(page))
		return true;
	return false;
}

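/* read completion: decrypt if needed, then set uptodate and unlock each page */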
static void f2fs_read_end_io(struct bio *bio)
{
	struct bio_vec *bvec;
	int i;

#ifdef CONFIG_F2FS_FAULT_INJECTION
	if (time_to_inject(F2FS_P_SB(bio->bi_io_vec->bv_page), FAULT_IO)) {
		f2fs_show_injection_info(FAULT_IO);
		bio->bi_status = BLK_STS_IOERR;
	}
#endif

	if (f2fs_bio_encrypted(bio)) {
		if (bio->bi_status) {
			fscrypt_release_ctx(bio->bi_private);
		} else {
			fscrypt_decrypt_bio_pages(bio->bi_private, bio);
			return;
		}
	}

	bio_for_each_segment_all(bvec, bio, i) {
		struct page *page = bvec->bv_page;

		if (!bio->bi_status) {
			if (!PageUptodate(page))
				SetPageUptodate(page);
		} else {
			ClearPageUptodate(page);
			SetPageError(page);
		}
		unlock_page(page);
	}
	bio_put(bio);
}

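/* write completion: drop dummy pages, update counters and wake checkpoint waiters */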
static void f2fs_write_end_io(struct bio *bio)
{
	struct f2fs_sb_info *sbi = bio->bi_private;
	struct bio_vec *bvec;
	int i;

	bio_for_each_segment_all(bvec, bio, i) {
		struct page *page = bvec->bv_page;
		enum count_type type = WB_DATA_TYPE(page);

		if (IS_DUMMY_WRITTEN_PAGE(page)) {
			set_page_private(page, (unsigned long)NULL);
			ClearPagePrivate(page);
			unlock_page(page);
			mempool_free(page, sbi->write_io_dummy);

			if (unlikely(bio->bi_status))
				f2fs_stop_checkpoint(sbi, true);
			continue;
		}

		fscrypt_pullback_bio_page(&page, true);

		if (unlikely(bio->bi_status)) {
			mapping_set_error(page->mapping, -EIO);
			f2fs_stop_checkpoint(sbi, true);
		}
		dec_page_count(sbi, type);
		clear_cold_data(page);
		end_page_writeback(page);
	}
	if (!get_pages(sbi, F2FS_WB_CP_DATA) &&
				wq_has_sleeper(&sbi->cp_wait))
		wake_up(&sbi->cp_wait);

	bio_put(bio);
}

/*
 * Return true if pre_bio's bdev is the same as its target device.
 */
struct block_device *f2fs_target_device(struct f2fs_sb_info *sbi,
				block_t blk_addr, struct bio *bio)
{
	struct block_device *bdev = sbi->sb->s_bdev;
	int i;

	for (i = 0; i < sbi->s_ndevs; i++) {
		if (FDEV(i).start_blk <= blk_addr &&
					FDEV(i).end_blk >= blk_addr) {
			blk_addr -= FDEV(i).start_blk;
			bdev = FDEV(i).bdev;
			break;
		}
	}
	if (bio) {
		bio->bi_bdev = bdev;
		bio->bi_iter.bi_sector = SECTOR_FROM_BLOCK(blk_addr);
	}
	return bdev;
}

int f2fs_target_device_index(struct f2fs_sb_info *sbi, block_t blkaddr)
{
	int i;

	for (i = 0; i < sbi->s_ndevs; i++)
		if (FDEV(i).start_blk <= blkaddr && FDEV(i).end_blk >= blkaddr)
			return i;
	return 0;
}

static bool __same_bdev(struct f2fs_sb_info *sbi,
				block_t blk_addr, struct bio *bio)
{
	return f2fs_target_device(sbi, blk_addr, NULL) == bio->bi_bdev;
}

/*
 * Low-level block read/write IO operations.
 */
static struct bio *__bio_alloc(struct f2fs_sb_info *sbi, block_t blk_addr,
				int npages, bool is_read)
{
	struct bio *bio;

	bio = f2fs_bio_alloc(npages);

	f2fs_target_device(sbi, blk_addr, bio);
	bio->bi_end_io = is_read ? f2fs_read_end_io : f2fs_write_end_io;
	bio->bi_private = is_read ? NULL : sbi;

	return bio;
}

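/*
 * Submit a bio; write bios for DATA/NODE are padded with dummy pages when
 * the configured write IO size requires alignment.
 */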
static inline void __submit_bio(struct f2fs_sb_info *sbi,
				struct bio *bio, enum page_type type)
{
	if (!is_read_io(bio_op(bio))) {
		unsigned int start;

		if (f2fs_sb_mounted_blkzoned(sbi->sb) &&
			current->plug && (type == DATA || type == NODE))
			blk_finish_plug(current->plug);

		if (type != DATA && type != NODE)
			goto submit_io;

		start = bio->bi_iter.bi_size >> F2FS_BLKSIZE_BITS;
		start %= F2FS_IO_SIZE(sbi);

		if (start == 0)
			goto submit_io;

		/* fill dummy pages */
		for (; start < F2FS_IO_SIZE(sbi); start++) {
			struct page *page =
				mempool_alloc(sbi->write_io_dummy,
					GFP_NOIO | __GFP_ZERO | __GFP_NOFAIL);
			f2fs_bug_on(sbi, !page);

			SetPagePrivate(page);
			set_page_private(page, (unsigned long)DUMMY_WRITTEN_PAGE);
			lock_page(page);
			if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE)
				f2fs_bug_on(sbi, 1);
		}
		/*
		 * In the NODE case, we lose the next block address chain. So,
		 * we need to do checkpoint in f2fs_sync_file.
		 */
		if (type == NODE)
			set_sbi_flag(sbi, SBI_NEED_CP);
	}
submit_io:
	if (is_read_io(bio_op(bio)))
		trace_f2fs_submit_read_bio(sbi->sb, type, bio);
	else
		trace_f2fs_submit_write_bio(sbi->sb, type, bio);
	submit_bio(bio);
}

static void __submit_merged_bio(struct f2fs_bio_info *io)
{
	struct f2fs_io_info *fio = &io->fio;

	if (!io->bio)
		return;

	bio_set_op_attrs(io->bio, fio->op, fio->op_flags);

	if (is_read_io(fio->op))
		trace_f2fs_prepare_read_bio(io->sbi->sb, fio->type, io->bio);
	else
		trace_f2fs_prepare_write_bio(io->sbi->sb, fio->type, io->bio);

	__submit_bio(io->sbi, io->bio, fio->type);
	io->bio = NULL;
}

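/*
 * Check whether the pending merged write bio already carries a page that
 * matches the given inode/ino and page index.
 */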
static bool __has_merged_page(struct f2fs_bio_info *io,
				struct inode *inode, nid_t ino, pgoff_t idx)
{
	struct bio_vec *bvec;
	struct page *target;
	int i;

	if (!io->bio)
		return false;

	if (!inode && !ino)
		return true;

	bio_for_each_segment_all(bvec, io->bio, i) {

		if (bvec->bv_page->mapping)
			target = bvec->bv_page;
		else
			target = fscrypt_control_page(bvec->bv_page);

		if (idx != target->index)
			continue;

		if (inode && inode == target->mapping->host)
			return true;
		if (ino && ino == ino_of_node(target))
			return true;
	}

	return false;
}

static bool has_merged_page(struct f2fs_sb_info *sbi, struct inode *inode,
				nid_t ino, pgoff_t idx, enum page_type type)
{
	enum page_type btype = PAGE_TYPE_OF_BIO(type);
	enum temp_type temp;
	struct f2fs_bio_info *io;
	bool ret = false;

	for (temp = HOT; temp < NR_TEMP_TYPE; temp++) {
		io = sbi->write_io[btype] + temp;

		down_read(&io->io_rwsem);
		ret = __has_merged_page(io, inode, ino, idx);
		up_read(&io->io_rwsem);

		/* TODO: use HOT temp only for meta pages now. */
		if (ret || btype == META)
			break;
	}
	return ret;
}

static void __f2fs_submit_merged_write(struct f2fs_sb_info *sbi,
				enum page_type type, enum temp_type temp)
{
	enum page_type btype = PAGE_TYPE_OF_BIO(type);
	struct f2fs_bio_info *io = sbi->write_io[btype] + temp;

	down_write(&io->io_rwsem);

	/* change META to META_FLUSH in the checkpoint procedure */
	if (type >= META_FLUSH) {
		io->fio.type = META_FLUSH;
		io->fio.op = REQ_OP_WRITE;
		io->fio.op_flags = REQ_META | REQ_PRIO | REQ_SYNC;
		if (!test_opt(sbi, NOBARRIER))
			io->fio.op_flags |= REQ_PREFLUSH | REQ_FUA;
	}
	__submit_merged_bio(io);
	up_write(&io->io_rwsem);
}

static void __submit_merged_write_cond(struct f2fs_sb_info *sbi,
				struct inode *inode, nid_t ino, pgoff_t idx,
				enum page_type type, bool force)
{
	enum temp_type temp;

	if (!force && !has_merged_page(sbi, inode, ino, idx, type))
		return;

	for (temp = HOT; temp < NR_TEMP_TYPE; temp++) {

		__f2fs_submit_merged_write(sbi, type, temp);

		/* TODO: use HOT temp only for meta pages now. */
		if (type >= META)
			break;
	}
}

void f2fs_submit_merged_write(struct f2fs_sb_info *sbi, enum page_type type)
{
	__submit_merged_write_cond(sbi, NULL, 0, 0, type, true);
}

void f2fs_submit_merged_write_cond(struct f2fs_sb_info *sbi,
				struct inode *inode, nid_t ino, pgoff_t idx,
				enum page_type type)
{
	__submit_merged_write_cond(sbi, inode, ino, idx, type, false);
}

void f2fs_flush_merged_writes(struct f2fs_sb_info *sbi)
{
	f2fs_submit_merged_write(sbi, DATA);
	f2fs_submit_merged_write(sbi, NODE);
	f2fs_submit_merged_write(sbi, META);
}

/*
 * Fill the locked page with data located in the block address.
 * A caller needs to unlock the page on failure.
 */
int f2fs_submit_page_bio(struct f2fs_io_info *fio)
{
	struct bio *bio;
	struct page *page = fio->encrypted_page ?
			fio->encrypted_page : fio->page;

	trace_f2fs_submit_page_bio(page, fio);
	f2fs_trace_ios(fio, 0);

	/* Allocate a new bio */
	bio = __bio_alloc(fio->sbi, fio->new_blkaddr, 1, is_read_io(fio->op));

	if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
		bio_put(bio);
		return -EFAULT;
	}
	bio_set_op_attrs(bio, fio->op, fio->op_flags);

	__submit_bio(fio->sbi, bio, fio->type);

	if (!is_read_io(fio->op))
		inc_page_count(fio->sbi, WB_DATA_TYPE(fio->page));
	return 0;
}

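/*
 * Merge this page into the in-flight write bio of its temperature/type,
 * submitting the previous bio first when the new block cannot be appended.
 */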
int f2fs_submit_page_write(struct f2fs_io_info *fio)
{
	struct f2fs_sb_info *sbi = fio->sbi;
	enum page_type btype = PAGE_TYPE_OF_BIO(fio->type);
	struct f2fs_bio_info *io = sbi->write_io[btype] + fio->temp;
	struct page *bio_page;
	int err = 0;

	f2fs_bug_on(sbi, is_read_io(fio->op));

	down_write(&io->io_rwsem);
next:
	if (fio->in_list) {
		spin_lock(&io->io_lock);
		if (list_empty(&io->io_list)) {
			spin_unlock(&io->io_lock);
			goto out_fail;
		}
		fio = list_first_entry(&io->io_list,
						struct f2fs_io_info, list);
		list_del(&fio->list);
		spin_unlock(&io->io_lock);
	}

	if (fio->old_blkaddr != NEW_ADDR)
		verify_block_addr(sbi, fio->old_blkaddr);
	verify_block_addr(sbi, fio->new_blkaddr);

	bio_page = fio->encrypted_page ? fio->encrypted_page : fio->page;

	/* set submitted = 1 as a return value */
	fio->submitted = 1;

	inc_page_count(sbi, WB_DATA_TYPE(bio_page));

	if (io->bio && (io->last_block_in_bio != fio->new_blkaddr - 1 ||
	    (io->fio.op != fio->op || io->fio.op_flags != fio->op_flags) ||
			!__same_bdev(sbi, fio->new_blkaddr, io->bio)))
		__submit_merged_bio(io);
alloc_new:
	if (io->bio == NULL) {
		if ((fio->type == DATA || fio->type == NODE) &&
				fio->new_blkaddr & F2FS_IO_SIZE_MASK(sbi)) {
			err = -EAGAIN;
			dec_page_count(sbi, WB_DATA_TYPE(bio_page));
			goto out_fail;
		}
		io->bio = __bio_alloc(sbi, fio->new_blkaddr,
						BIO_MAX_PAGES, false);
		io->fio = *fio;
	}

	if (bio_add_page(io->bio, bio_page, PAGE_SIZE, 0) < PAGE_SIZE) {
		__submit_merged_bio(io);
		goto alloc_new;
	}

	io->last_block_in_bio = fio->new_blkaddr;
	f2fs_trace_ios(fio, 0);

	trace_f2fs_submit_page_write(fio->page, fio);

	if (fio->in_list)
		goto next;
out_fail:
	up_write(&io->io_rwsem);
	return err;
}

static void __set_data_blkaddr(struct dnode_of_data *dn)
{
	struct f2fs_node *rn = F2FS_NODE(dn->node_page);
	__le32 *addr_array;

	/* Get physical address of data block */
	addr_array = blkaddr_in_node(rn);
	addr_array[dn->ofs_in_node] = cpu_to_le32(dn->data_blkaddr);
}

/*
470 471 472 473 474
 * Lock ordering for the change of data block address:
 * ->data_page
 *  ->node_page
 *    update block addresses in the node page
 */
475
void set_data_blkaddr(struct dnode_of_data *dn)
476
{
477 478 479
	f2fs_wait_on_page_writeback(dn->node_page, NODE, true);
	__set_data_blkaddr(dn);
	if (set_page_dirty(dn->node_page))
480
		dn->node_changed = true;
481 482
}

483 484 485 486 487 488 489
void f2fs_update_data_blkaddr(struct dnode_of_data *dn, block_t blkaddr)
{
	dn->data_blkaddr = blkaddr;
	set_data_blkaddr(dn);
	f2fs_update_extent_cache(dn);
}

490 491
/* dn->ofs_in_node will be returned with up-to-date last block pointer */
int reserve_new_blocks(struct dnode_of_data *dn, blkcnt_t count)
492
{
493
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	int err;
495

496 497 498
	if (!count)
		return 0;

499
	if (unlikely(is_inode_flag_set(dn->inode, FI_NO_ALLOC)))
500
		return -EPERM;
	if (unlikely((err = inc_valid_block_count(sbi, dn->inode, &count))))
		return err;
503

504 505 506 507 508 509 510 511 512 513 514 515 516 517 518 519 520
	trace_f2fs_reserve_new_blocks(dn->inode, dn->nid,
						dn->ofs_in_node, count);

	f2fs_wait_on_page_writeback(dn->node_page, NODE, true);

	for (; count > 0; dn->ofs_in_node++) {
		block_t blkaddr =
			datablock_addr(dn->node_page, dn->ofs_in_node);
		if (blkaddr == NULL_ADDR) {
			dn->data_blkaddr = NEW_ADDR;
			__set_data_blkaddr(dn);
			count--;
		}
	}

	if (set_page_dirty(dn->node_page))
		dn->node_changed = true;
521 522 523
	return 0;
}

524 525 526 527 528 529 530 531 532 533 534
/* Should keep dn->ofs_in_node unchanged */
int reserve_new_block(struct dnode_of_data *dn)
{
	unsigned int ofs_in_node = dn->ofs_in_node;
	int ret;

	ret = reserve_new_blocks(dn, 1);
	dn->ofs_in_node = ofs_in_node;
	return ret;
}

535 536 537 538 539 540 541 542
int f2fs_reserve_block(struct dnode_of_data *dn, pgoff_t index)
{
	bool need_put = dn->inode_page ? false : true;
	int err;

	err = get_dnode_of_data(dn, index, ALLOC_NODE);
	if (err)
		return err;
543

544 545
	if (dn->data_blkaddr == NULL_ADDR)
		err = reserve_new_block(dn);
546
	if (err || need_put)
547 548 549 550
		f2fs_put_dnode(dn);
	return err;
}

551
int f2fs_get_block(struct dnode_of_data *dn, pgoff_t index)
552
{
553
	struct extent_info ei  = {0,0,0};
554
	struct inode *inode = dn->inode;
555

556 557 558
	if (f2fs_lookup_extent_cache(inode, index, &ei)) {
		dn->data_blkaddr = ei.blk + index - ei.fofs;
		return 0;
559
	}
560

561
	return f2fs_reserve_block(dn, index);
562 563
}

564
struct page *get_read_data_page(struct inode *inode, pgoff_t index,
						int op_flags, bool for_write)
566 567 568 569
{
	struct address_space *mapping = inode->i_mapping;
	struct dnode_of_data dn;
	struct page *page;
570
	struct extent_info ei = {0,0,0};
571
	int err;
572
	struct f2fs_io_info fio = {
573
		.sbi = F2FS_I_SB(inode),
574
		.type = DATA,
		.op = REQ_OP_READ,
		.op_flags = op_flags,
577
		.encrypted_page = NULL,
578
	};
579

580 581 582
	if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode))
		return read_mapping_page(mapping, index, NULL);

583
	page = f2fs_grab_cache_page(mapping, index, for_write);
584 585 586
	if (!page)
		return ERR_PTR(-ENOMEM);

	if (f2fs_lookup_extent_cache(inode, index, &ei)) {
		dn.data_blkaddr = ei.blk + index - ei.fofs;
		goto got_it;
	}

592
	set_new_dnode(&dn, inode, NULL, NULL, 0);
593
	err = get_dnode_of_data(&dn, index, LOOKUP_NODE);
594 595
	if (err)
		goto put_err;
596 597
	f2fs_put_dnode(&dn);

598
	if (unlikely(dn.data_blkaddr == NULL_ADDR)) {
599 600
		err = -ENOENT;
		goto put_err;
601
	}
got_it:
603 604
	if (PageUptodate(page)) {
		unlock_page(page);
605
		return page;
606
	}
607

	/*
	 * A new dentry page is allocated but not able to be written, since its
	 * new inode page couldn't be allocated due to -ENOSPC.
	 * In such a case, its blkaddr can remain NEW_ADDR.
	 * see, f2fs_add_link -> get_new_data_page -> init_inode_metadata.
	 */
	if (dn.data_blkaddr == NEW_ADDR) {
615
		zero_user_segment(page, 0, PAGE_SIZE);
616 617
		if (!PageUptodate(page))
			SetPageUptodate(page);
618
		unlock_page(page);
		return page;
	}
621

622
	fio.new_blkaddr = fio.old_blkaddr = dn.data_blkaddr;
623 624
	fio.page = page;
	err = f2fs_submit_page_bio(&fio);
625
	if (err)
626
		goto put_err;
627
	return page;
628 629 630 631

put_err:
	f2fs_put_page(page, 1);
	return ERR_PTR(err);
632 633 634 635 636 637 638 639 640 641 642 643
}

struct page *find_data_page(struct inode *inode, pgoff_t index)
{
	struct address_space *mapping = inode->i_mapping;
	struct page *page;

	page = find_get_page(mapping, index);
	if (page && PageUptodate(page))
		return page;
	f2fs_put_page(page, 0);

644
	page = get_read_data_page(inode, index, 0, false);
645 646 647 648 649 650 651 652 653 654 655 656 657 658 659 660 661 662 663
	if (IS_ERR(page))
		return page;

	if (PageUptodate(page))
		return page;

	wait_on_page_locked(page);
	if (unlikely(!PageUptodate(page))) {
		f2fs_put_page(page, 0);
		return ERR_PTR(-EIO);
	}
	return page;
}

/*
 * If it tries to access a hole, return an error, because the callers
 * (functions in dir.c and GC) should be able to know whether this page
 * exists or not.
 */
664 665
struct page *get_lock_data_page(struct inode *inode, pgoff_t index,
							bool for_write)
666 667 668 669
{
	struct address_space *mapping = inode->i_mapping;
	struct page *page;
repeat:
670
	page = get_read_data_page(inode, index, 0, for_write);
671 672
	if (IS_ERR(page))
		return page;
673

674
	/* wait for read completion */
675
	lock_page(page);
676
	if (unlikely(page->mapping != mapping)) {
677 678
		f2fs_put_page(page, 1);
		goto repeat;
679
	}
680 681 682 683
	if (unlikely(!PageUptodate(page))) {
		f2fs_put_page(page, 1);
		return ERR_PTR(-EIO);
	}
684 685 686
	return page;
}

/*
 * Caller ensures that this data page is never allocated.
 * A new zero-filled data page is allocated in the page cache.
 *
 * Also, caller should grab and release a rwsem by calling f2fs_lock_op() and
 * f2fs_unlock_op().
 * Note that ipage is set only by make_empty_dir, and if any error occurs,
 * ipage should be released by this function.
 */
struct page *get_new_data_page(struct inode *inode,
697
		struct page *ipage, pgoff_t index, bool new_i_size)
698 699 700 701 702
{
	struct address_space *mapping = inode->i_mapping;
	struct page *page;
	struct dnode_of_data dn;
	int err;
703

704
	page = f2fs_grab_cache_page(mapping, index, true);
705 706 707 708 709 710
	if (!page) {
		/*
		 * before exiting, we should make sure ipage will be released
		 * if any error occur.
		 */
		f2fs_put_page(ipage, 1);
711
		return ERR_PTR(-ENOMEM);
712
	}
713

714
	set_new_dnode(&dn, inode, ipage, NULL, 0);
715
	err = f2fs_reserve_block(&dn, index);
716 717
	if (err) {
		f2fs_put_page(page, 1);
718
		return ERR_PTR(err);
719
	}
720 721
	if (!ipage)
		f2fs_put_dnode(&dn);
722 723

	if (PageUptodate(page))
724
		goto got_it;
725 726

	if (dn.data_blkaddr == NEW_ADDR) {
727
		zero_user_segment(page, 0, PAGE_SIZE);
728 729
		if (!PageUptodate(page))
			SetPageUptodate(page);
730
	} else {
731
		f2fs_put_page(page, 1);
732

733 734 735
		/* if ipage exists, blkaddr should be NEW_ADDR */
		f2fs_bug_on(F2FS_I_SB(inode), ipage);
		page = get_lock_data_page(inode, index, true);
736
		if (IS_ERR(page))
737
			return page;
738
	}
739
got_it:
	if (new_i_size && i_size_read(inode) <
741
				((loff_t)(index + 1) << PAGE_SHIFT))
742
		f2fs_i_size_write(inode, ((loff_t)(index + 1) << PAGE_SHIFT));
743 744 745
	return page;
}

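/*
 * Allocate a new data block for this dnode entry and, if needed, extend
 * i_size to cover it.
 */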
static int __allocate_data_block(struct dnode_of_data *dn)
{
748
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
749 750
	struct f2fs_summary sum;
	struct node_info ni;
751
	pgoff_t fofs;
752
	blkcnt_t count = 1;
	int err;
754

755
	if (unlikely(is_inode_flag_set(dn->inode, FI_NO_ALLOC)))
756
		return -EPERM;
757 758 759 760 761

	dn->data_blkaddr = datablock_addr(dn->node_page, dn->ofs_in_node);
	if (dn->data_blkaddr == NEW_ADDR)
		goto alloc;

	if (unlikely((err = inc_valid_block_count(sbi, dn->inode, &count))))
		return err;
764

765
alloc:
766 767 768
	get_node_info(sbi, dn->nid, &ni);
	set_summary(&sum, dn->nid, dn->ofs_in_node, ni.version);

769
	allocate_data_block(sbi, NULL, dn->data_blkaddr, &dn->data_blkaddr,
770
					&sum, CURSEG_WARM_DATA, NULL, false);
771
	set_data_blkaddr(dn);
772

773
	/* update i_size */
774
	fofs = start_bidx_of_node(ofs_of_node(dn->node_page), dn->inode) +
775
							dn->ofs_in_node;
776
	if (i_size_read(dn->inode) < ((loff_t)(fofs + 1) << PAGE_SHIFT))
777
		f2fs_i_size_write(dn->inode,
778
				((loff_t)(fofs + 1) << PAGE_SHIFT));
779 780 781
	return 0;
}

static inline bool __force_buffered_io(struct inode *inode, int rw)
{
	return ((f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode)) ||
			(rw == WRITE && test_opt(F2FS_I_SB(inode), LFS)) ||
			F2FS_I_SB(inode)->s_ndevs);
}

789
int f2fs_preallocate_blocks(struct kiocb *iocb, struct iov_iter *from)
790
{
791
	struct inode *inode = file_inode(iocb->ki_filp);
	struct f2fs_map_blocks map;
793
	int err = 0;
794

795 796 797
	if (is_inode_flag_set(inode, FI_NO_PREALLOC))
		return 0;

798
	map.m_lblk = F2FS_BLK_ALIGN(iocb->ki_pos);
799 800 801 802 803 804
	map.m_len = F2FS_BYTES_TO_BLK(iocb->ki_pos + iov_iter_count(from));
	if (map.m_len > map.m_lblk)
		map.m_len -= map.m_lblk;
	else
		map.m_len = 0;

805
	map.m_next_pgofs = NULL;
806

807
	if (iocb->ki_flags & IOCB_DIRECT) {
808 809 810
		err = f2fs_convert_inline_inode(inode);
		if (err)
			return err;
		return f2fs_map_blocks(inode, &map, 1,
			__force_buffered_io(inode, WRITE) ?
				F2FS_GET_BLOCK_PRE_AIO :
				F2FS_GET_BLOCK_PRE_DIO);
815 816
	}
	if (iocb->ki_pos + iov_iter_count(from) > MAX_INLINE_DATA) {
817 818 819
		err = f2fs_convert_inline_inode(inode);
		if (err)
			return err;
820
	}
821 822
	if (!f2fs_has_inline_data(inode))
		return f2fs_map_blocks(inode, &map, 1, F2FS_GET_BLOCK_PRE_AIO);
823
	return err;
824 825
}

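/*
 * F2FS_GET_BLOCK_PRE_AIO preallocation only takes the node_change lock;
 * every other flag serializes against checkpoint via f2fs_lock_op().
 */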
static inline void __do_map_lock(struct f2fs_sb_info *sbi, int flag, bool lock)
{
	if (flag == F2FS_GET_BLOCK_PRE_AIO) {
		if (lock)
			down_read(&sbi->node_change);
		else
			up_read(&sbi->node_change);
	} else {
		if (lock)
			f2fs_lock_op(sbi);
		else
			f2fs_unlock_op(sbi);
	}
}

/*
 * f2fs_map_blocks() now supports readahead/bmap/rw direct_IO with
 * the f2fs_map_blocks structure.
 * If original data blocks are allocated, then give them to blockdev.
 * Otherwise,
 *     a. preallocate requested block addresses
 *     b. do not use extent cache for better performance
 *     c. give the block addresses to blockdev
 */
int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map,
						int create, int flag)
{
	unsigned int maxblocks = map->m_len;
854
	struct dnode_of_data dn;
855
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
856
	int mode = create ? ALLOC_NODE : LOOKUP_NODE;
857
	pgoff_t pgofs, end_offset, end;
858
	int err = 0, ofs = 1;
859 860
	unsigned int ofs_in_node, last_ofs_in_node;
	blkcnt_t prealloc;
861
	struct extent_info ei = {0,0,0};
862
	block_t blkaddr;
863

864 865 866
	if (!maxblocks)
		return 0;

	map->m_len = 0;
	map->m_flags = 0;

	/* it only supports block size == page size */
	pgofs =	(pgoff_t)map->m_lblk;
872
	end = pgofs + maxblocks;
873

874
	if (!create && f2fs_lookup_extent_cache(inode, pgofs, &ei)) {
		map->m_pblk = ei.blk + pgofs - ei.fofs;
		map->m_len = min((pgoff_t)maxblocks, ei.fofs + ei.len - pgofs);
		map->m_flags = F2FS_MAP_MAPPED;
878
		goto out;
879
	}
880

next_dnode:
882
	if (create)
883
		__do_map_lock(sbi, flag, true);
884 885 886

	/* When reading holes, we need its node page */
	set_new_dnode(&dn, inode, NULL, NULL, 0);
887
	err = get_dnode_of_data(&dn, pgofs, mode);
888
	if (err) {
		if (flag == F2FS_GET_BLOCK_BMAP)
			map->m_pblk = 0;
891
		if (err == -ENOENT) {
892
			err = 0;
893 894 895 896
			if (map->m_next_pgofs)
				*map->m_next_pgofs =
					get_next_page_offset(&dn, pgofs);
		}
897
		goto unlock_out;
898
	}

900
	prealloc = 0;
901
	last_ofs_in_node = ofs_in_node = dn.ofs_in_node;
902
	end_offset = ADDRS_PER_PAGE(dn.node_page, inode);

next_block:
	blkaddr = datablock_addr(dn.node_page, dn.ofs_in_node);

	if (blkaddr == NEW_ADDR || blkaddr == NULL_ADDR) {
		if (create) {
909 910
			if (unlikely(f2fs_cp_error(sbi))) {
				err = -EIO;
				goto sync_out;
912
			}
913
			if (flag == F2FS_GET_BLOCK_PRE_AIO) {
914 915 916 917
				if (blkaddr == NULL_ADDR) {
					prealloc++;
					last_ofs_in_node = dn.ofs_in_node;
				}
918 919
			} else {
				err = __allocate_data_block(&dn);
920
				if (!err)
921
					set_inode_flag(inode, FI_APPEND_WRITE);
922
			}
			if (err)
				goto sync_out;
925
			map->m_flags |= F2FS_MAP_NEW;
			blkaddr = dn.data_blkaddr;
		} else {
			if (flag == F2FS_GET_BLOCK_BMAP) {
				map->m_pblk = 0;
				goto sync_out;
			}
932 933 934 935 936
			if (flag == F2FS_GET_BLOCK_FIEMAP &&
						blkaddr == NULL_ADDR) {
				if (map->m_next_pgofs)
					*map->m_next_pgofs = pgofs + 1;
			}
			if (flag != F2FS_GET_BLOCK_FIEMAP ||
						blkaddr != NEW_ADDR)
				goto sync_out;
		}
	}
942

943 944 945
	if (flag == F2FS_GET_BLOCK_PRE_AIO)
		goto skip;

	if (map->m_len == 0) {
		/* preallocated unwritten block should be mapped for fiemap. */
		if (blkaddr == NEW_ADDR)
			map->m_flags |= F2FS_MAP_UNWRITTEN;
		map->m_flags |= F2FS_MAP_MAPPED;

		map->m_pblk = blkaddr;
		map->m_len = 1;
	} else if ((map->m_pblk != NEW_ADDR &&
			blkaddr == (map->m_pblk + ofs)) ||
956
			(map->m_pblk == NEW_ADDR && blkaddr == NEW_ADDR) ||
957
			flag == F2FS_GET_BLOCK_PRE_DIO) {
		ofs++;
		map->m_len++;
	} else {
		goto sync_out;
	}
963

964
skip:
965 966 967
	dn.ofs_in_node++;
	pgofs++;

968 969 970
	/* preallocate blocks in batch for one dnode page */
	if (flag == F2FS_GET_BLOCK_PRE_AIO &&
			(pgofs == end || dn.ofs_in_node == end_offset)) {
971

972 973 974 975
		dn.ofs_in_node = ofs_in_node;
		err = reserve_new_blocks(&dn, prealloc);
		if (err)
			goto sync_out;
976

977 978 979 980
		map->m_len += dn.ofs_in_node - ofs_in_node;
		if (prealloc && dn.ofs_in_node != last_ofs_in_node + 1) {
			err = -ENOSPC;
			goto sync_out;
981
		}
982 983 984 985 986 987 988 989 990 991 992
		dn.ofs_in_node = end_offset;
	}

	if (pgofs >= end)
		goto sync_out;
	else if (dn.ofs_in_node < end_offset)
		goto next_block;

	f2fs_put_dnode(&dn);

	if (create) {
993
		__do_map_lock(sbi, flag, false);
994
		f2fs_balance_fs(sbi, dn.node_changed);
995
	}
996
	goto next_dnode;
997

998
sync_out:
999
	f2fs_put_dnode(&dn);
1000
unlock_out:
1001
	if (create) {
1002
		__do_map_lock(sbi, flag, false);
1003
		f2fs_balance_fs(sbi, dn.node_changed);
1004
	}
1005
out:
	trace_f2fs_map_blocks(inode, map, err);
1007
	return err;
1008 1009
}

static int __get_data_block(struct inode *inode, sector_t iblock,
1011 1012
			struct buffer_head *bh, int create, int flag,
			pgoff_t *next_pgofs)
{
	struct f2fs_map_blocks map;
1015
	int err;

	map.m_lblk = iblock;
	map.m_len = bh->b_size >> inode->i_blkbits;
1019
	map.m_next_pgofs = next_pgofs;

1021 1022
	err = f2fs_map_blocks(inode, &map, create, flag);
	if (!err) {
		map_bh(bh, inode->i_sb, map.m_pblk);
		bh->b_state = (bh->b_state & ~F2FS_MAP_FLAGS) | map.m_flags;
1025
		bh->b_size = (u64)map.m_len << inode->i_blkbits;
	}
1027
	return err;
}

1030
static int get_data_block(struct inode *inode, sector_t iblock,
1031 1032
			struct buffer_head *bh_result, int create, int flag,
			pgoff_t *next_pgofs)
{
1034 1035
	return __get_data_block(inode, iblock, bh_result, create,
							flag, next_pgofs);
}

static int get_data_block_dio(struct inode *inode, sector_t iblock,
1039 1040
			struct buffer_head *bh_result, int create)
{
	return __get_data_block(inode, iblock, bh_result, create,
1042
						F2FS_GET_BLOCK_DIO, NULL);
1043 1044
}

static int get_data_block_bmap(struct inode *inode, sector_t iblock,
1046 1047
			struct buffer_head *bh_result, int create)
{
1048
	/* Block number less than F2FS MAX BLOCKS */
	if (unlikely(iblock >= F2FS_I_SB(inode)->max_file_blocks))
1050 1051
		return -EFBIG;

	return __get_data_block(inode, iblock, bh_result, create,
1053
						F2FS_GET_BLOCK_BMAP, NULL);
1054 1055
}

1056 1057 1058 1059 1060 1061 1062 1063 1064 1065
static inline sector_t logical_to_blk(struct inode *inode, loff_t offset)
{
	return (offset >> inode->i_blkbits);
}

static inline loff_t blk_to_logical(struct inode *inode, sector_t blk)
{
	return (blk << inode->i_blkbits);
}

int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
		u64 start, u64 len)
{
1069 1070
	struct buffer_head map_bh;
	sector_t start_blk, last_blk;
1071
	pgoff_t next_pgofs;
1072 1073 1074 1075 1076 1077 1078 1079
	u64 logical = 0, phys = 0, size = 0;
	u32 flags = 0;
	int ret = 0;

	ret = fiemap_check_flags(fieinfo, FIEMAP_FLAG_SYNC);
	if (ret)
		return ret;

	if (f2fs_has_inline_data(inode)) {
		ret = f2fs_inline_data_fiemap(inode, fieinfo, start, len);
		if (ret != -EAGAIN)
			return ret;
	}

	inode_lock(inode);
1087

1088 1089 1090 1091 1092
	if (logical_to_blk(inode, len) == 0)
		len = blk_to_logical(inode, 1);

	start_blk = logical_to_blk(inode, start);
	last_blk = logical_to_blk(inode, start + len - 1);
1093

1094 1095 1096 1097
next:
	memset(&map_bh, 0, sizeof(struct buffer_head));
	map_bh.b_size = len;

	ret = get_data_block(inode, start_blk, &map_bh, 0,
1099
					F2FS_GET_BLOCK_FIEMAP, &next_pgofs);
1100 1101 1102 1103 1104
	if (ret)
		goto out;

	/* HOLE */
	if (!buffer_mapped(&map_bh)) {
1105
		start_blk = next_pgofs;
1106 1107 1108

		if (blk_to_logical(inode, start_blk) < blk_to_logical(inode,
					F2FS_I_SB(inode)->max_file_blocks))
1109
			goto prep_next;
1110

1111 1112
		flags |= FIEMAP_EXTENT_LAST;
	}
1113

1114 1115 1116 1117
	if (size) {
		if (f2fs_encrypted_inode(inode))
			flags |= FIEMAP_EXTENT_DATA_ENCRYPTED;

1118 1119
		ret = fiemap_fill_next_extent(fieinfo, logical,
				phys, size, flags);
1120
	}
1121

1122 1123
	if (start_blk > last_blk || ret)
		goto out;
1124

1125 1126 1127 1128 1129 1130
	logical = blk_to_logical(inode, start_blk);
	phys = blk_to_logical(inode, map_bh.b_blocknr);
	size = map_bh.b_size;
	flags = 0;
	if (buffer_unwritten(&map_bh))
		flags = FIEMAP_EXTENT_UNWRITTEN;
1131

1132
	start_blk += logical_to_blk(inode, size);
1133

1134
prep_next:
1135 1136 1137 1138 1139 1140 1141 1142 1143
	cond_resched();
	if (fatal_signal_pending(current))
		ret = -EINTR;
	else
		goto next;
out:
	if (ret == 1)
		ret = 0;

	inode_unlock(inode);
1145
	return ret;
}

1148 1149
static struct bio *f2fs_grab_bio(struct inode *inode, block_t blkaddr,
				 unsigned nr_pages)
1150 1151 1152 1153 1154 1155 1156 1157 1158 1159 1160 1161 1162 1163 1164 1165 1166 1167 1168 1169
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct fscrypt_ctx *ctx = NULL;
	struct bio *bio;

	if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode)) {
		ctx = fscrypt_get_ctx(inode, GFP_NOFS);
		if (IS_ERR(ctx))
			return ERR_CAST(ctx);

		/* wait the page to be moved by cleaning */
		f2fs_wait_on_encrypted_page_writeback(sbi, blkaddr);
	}

	bio = bio_alloc(GFP_KERNEL, min_t(int, nr_pages, BIO_MAX_PAGES));
	if (!bio) {
		if (ctx)
			fscrypt_release_ctx(ctx);
		return ERR_PTR(-ENOMEM);
	}
	f2fs_target_device(sbi, blkaddr, bio);
1171 1172 1173 1174 1175 1176
	bio->bi_end_io = f2fs_read_end_io;
	bio->bi_private = ctx;

	return bio;
}

/*
 * This function was originally taken from fs/mpage.c, and customized for f2fs.
 * The major change comes from block_size == page_size in f2fs by default.
 */
static int f2fs_mpage_readpages(struct address_space *mapping,
			struct list_head *pages, struct page *page,
			unsigned nr_pages)
{
	struct bio *bio = NULL;
	unsigned page_idx;
	sector_t last_block_in_bio = 0;
	struct inode *inode = mapping->host;
	const unsigned blkbits = inode->i_blkbits;
	const unsigned blocksize = 1 << blkbits;
	sector_t block_in_file;
	sector_t last_block;
	sector_t last_block_in_file;
	sector_t block_nr;
	struct f2fs_map_blocks map;

	map.m_pblk = 0;
	map.m_lblk = 0;
	map.m_len = 0;
	map.m_flags = 0;
1201
	map.m_next_pgofs = NULL;

	for (page_idx = 0; nr_pages; page_idx++, nr_pages--) {

		if (pages) {
1206
			page = list_last_entry(pages, struct page, lru);
1207 1208

			prefetchw(&page->flags);
			list_del(&page->lru);
			if (add_to_page_cache_lru(page, mapping,
1211 1212
						  page->index,
						  readahead_gfp_mask(mapping)))
				goto next_page;
		}

		block_in_file = (sector_t)page->index;
		last_block = block_in_file + nr_pages;
		last_block_in_file = (i_size_read(inode) + blocksize - 1) >>
								blkbits;
		if (last_block > last_block_in_file)
			last_block = last_block_in_file;

		/*
		 * Map blocks using the previous result first.
		 */
		if ((map.m_flags & F2FS_MAP_MAPPED) &&
				block_in_file > map.m_lblk &&
				block_in_file < (map.m_lblk + map.m_len))
			goto got_it;

		/*
		 * Then do more f2fs_map_blocks() calls until we are
		 * done with this page.
		 */
		map.m_flags = 0;

		if (block_in_file < last_block) {
			map.m_lblk = block_in_file;
			map.m_len = last_block - block_in_file;

1241
			if (f2fs_map_blocks(inode, &map, 0,
1242
						F2FS_GET_BLOCK_READ))
				goto set_error_page;
		}
got_it:
		if ((map.m_flags & F2FS_MAP_MAPPED)) {
			block_nr = map.m_pblk + block_in_file - map.m_lblk;
			SetPageMappedToDisk(page);

			if (!PageUptodate(page) && !cleancache_get_page(page)) {
				SetPageUptodate(page);
				goto confused;
			}
		} else {
1255
			zero_user_segment(page, 0, PAGE_SIZE);
1256 1257
			if (!PageUptodate(page))
				SetPageUptodate(page);
			unlock_page(page);
			goto next_page;
		}

		/*
		 * This page will go to BIO.  Do we need to send this
		 * BIO off first?
		 */
		if (bio && (last_block_in_bio != block_nr - 1 ||
			!__same_bdev(F2FS_I_SB(inode), block_nr, bio))) {
submit_and_realloc:
1269
			__submit_bio(F2FS_I_SB(inode), bio, DATA);
			bio = NULL;
		}
		if (bio == NULL) {
1273
			bio = f2fs_grab_bio(inode, block_nr, nr_pages);
			if (IS_ERR(bio)) {
				bio = NULL;
				goto set_error_page;
1277
			}
			bio_set_op_attrs(bio, REQ_OP_READ, 0);
		}

		if (bio_add_page(bio, page, blocksize, 0) < blocksize)
			goto submit_and_realloc;

		last_block_in_bio = block_nr;
		goto next_page;
set_error_page:
		SetPageError(page);
1288
		zero_user_segment(page, 0, PAGE_SIZE);
		unlock_page(page);
		goto next_page;
confused:
		if (bio) {
1293
			__submit_bio(F2FS_I_SB(inode), bio, DATA);
			bio = NULL;
		}
		unlock_page(page);
next_page:
		if (pages)
1299
			put_page(page);
	}
	BUG_ON(pages && !list_empty(pages));
	if (bio)
1303
		__submit_bio(F2FS_I_SB(inode), bio, DATA);
	return 0;
}

1307 1308
static int f2fs_read_data_page(struct file *file, struct page *page)
{
	struct inode *inode = page->mapping->host;
1310
	int ret = -EAGAIN;

1312 1313
	trace_f2fs_readpage(page, DATA);

	/* If the file has inline data, try to read it directly */
	if (f2fs_has_inline_data(inode))
		ret = f2fs_read_inline_data(inode, page);
1317
	if (ret == -EAGAIN)
		ret = f2fs_mpage_readpages(page->mapping, NULL, page, 1);
	return ret;
1320 1321 1322 1323 1324 1325
}

static int f2fs_read_data_pages(struct file *file,
			struct address_space *mapping,
			struct list_head *pages, unsigned nr_pages)
{
	struct inode *inode = file->f_mapping->host;
1327
	struct page *page = list_last_entry(pages, struct page, lru);
1328 1329

	trace_f2fs_readpages(inode, page, nr_pages);

	/* If the file has inline data, skip readpages */
	if (f2fs_has_inline_data(inode))
		return 0;

	return f2fs_mpage_readpages(mapping, pages, NULL, nr_pages);
1336 1337
}

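/*
 * Encrypt one page for writeback; on -ENOMEM the pending writes are flushed
 * and the allocation is retried with __GFP_NOFAIL.
 */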
static int encrypt_one_page(struct f2fs_io_info *fio)
{
	struct inode *inode = fio->page->mapping->host;
	gfp_t gfp_flags = GFP_NOFS;

	if (!f2fs_encrypted_inode(inode) || !S_ISREG(inode->i_mode))
		return 0;

	/* wait for GCed encrypted page writeback */
	f2fs_wait_on_encrypted_page_writeback(fio->sbi, fio->old_blkaddr);

retry_encrypt:
	fio->encrypted_page = fscrypt_encrypt_page(inode, fio->page,
			PAGE_SIZE, 0, fio->page->index, gfp_flags);
	if (!IS_ERR(fio->encrypted_page))
		return 0;

	/* flush pending IOs and wait for a while in the ENOMEM case */
	if (PTR_ERR(fio->encrypted_page) == -ENOMEM) {
1357
		f2fs_flush_merged_writes(fio->sbi);
1358 1359 1360 1361 1362 1363 1364 1365 1366 1367 1368 1369 1370 1371 1372 1373 1374 1375 1376 1377 1378
		congestion_wait(BLK_RW_ASYNC, HZ/50);
		gfp_flags |= __GFP_NOFAIL;
		goto retry_encrypt;
	}
	return PTR_ERR(fio->encrypted_page);
}

static inline bool need_inplace_update(struct f2fs_io_info *fio)
{
	struct inode *inode = fio->page->mapping->host;

	if (S_ISDIR(inode->i_mode) || f2fs_is_atomic_file(inode))
		return false;
	if (is_cold_data(fio->page))
		return false;
	if (IS_ATOMIC_WRITTEN_PAGE(fio->page))
		return false;

	return need_inplace_update_policy(inode, fio);
}

1379 1380 1381 1382 1383 1384 1385 1386 1387
static inline bool valid_ipu_blkaddr(struct f2fs_io_info *fio)
{
	if (fio->old_blkaddr == NEW_ADDR)
		return false;
	if (fio->old_blkaddr == NULL_ADDR)
		return false;
	return true;
}

1388
int do_write_data_page(struct f2fs_io_info *fio)
1389
{
1390
	struct page *page = fio->page;
1391 1392
	struct inode *inode = page->mapping->host;
	struct dnode_of_data dn;
1393 1394
	struct extent_info ei = {0,0,0};
	bool ipu_force = false;
1395 1396 1397
	int err = 0;

	set_new_dnode(&dn, inode, NULL, NULL, 0);
1398 1399 1400
	if (need_inplace_update(fio) &&
			f2fs_lookup_extent_cache(inode, page->index, &ei)) {
		fio->old_blkaddr = ei.blk + page->index - ei.fofs;
1401 1402

		if (valid_ipu_blkaddr(fio)) {
1403
			ipu_force = true;
1404
			fio->need_lock = LOCK_DONE;
1405 1406 1407
			goto got_it;
		}
	}
1408

1409 1410 1411
	/* Deadlock due to between page->lock and f2fs_lock_op */
	if (fio->need_lock == LOCK_REQ && !f2fs_trylock_op(fio->sbi))
		return -EAGAIN;
1412

1413
	err = get_dnode_of_data(&dn, page->index, LOOKUP_NODE);
1414
	if (err)
1415
		goto out;
1416

1417
	fio->old_blkaddr = dn.data_blkaddr;
1418 1419

	/* This page is already truncated */
1420
	if (fio->old_blkaddr == NULL_ADDR) {
1421
		ClearPageUptodate(page);
1422
		goto out_writepage;
1423
	}
1424
got_it:
1425 1426 1427 1428
	/*
	 * If current allocation needs SSR,
	 * it had better in-place writes for updated data.
	 */
1429
	if (ipu_force || (valid_ipu_blkaddr(fio) && need_inplace_update(fio))) {
1430 1431 1432 1433 1434
		err = encrypt_one_page(fio);
		if (err)
			goto out_writepage;

		set_page_writeback(page);
1435
		f2fs_put_dnode(&dn);
1436
		if (fio->need_lock == LOCK_REQ)
1437
			f2fs_unlock_op(fio->sbi);
1438
		err = rewrite_data_page(fio);
1439
		trace_f2fs_do_write_data_page(fio->page, IPU);
1440
		set_inode_flag(inode, FI_UPDATE_WRITE);
1441
		return err;
1442
	}
1443

1444 1445 1446 1447 1448 1449 1450 1451 1452 1453 1454 1455 1456 1457
	if (fio->need_lock == LOCK_RETRY) {
		if (!f2fs_trylock_op(fio->sbi)) {
			err = -EAGAIN;
			goto out_writepage;
		}
		fio->need_lock = LOCK_REQ;
	}

	err = encrypt_one_page(fio);
	if (err)
		goto out_writepage;

	set_page_writeback(page);

1458 1459 1460 1461 1462 1463
	/* LFS mode write path */
	write_data_page(&dn, fio);
	trace_f2fs_do_write_data_page(page, OPU);
	set_inode_flag(inode, FI_APPEND_WRITE);
	if (page->index == 0)
		set_inode_flag(inode, FI_FIRST_BLOCK_WRITTEN);
1464 1465
out_writepage:
	f2fs_put_dnode(&dn);
1466
out:
1467
	if (fio->need_lock == LOCK_REQ)
1468
		f2fs_unlock_op(fio->sbi);
1469 1470 1471
	return err;
}

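/*
 * Write out one data page; *submitted reports whether a bio was actually
 * issued for it.
 */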
static int __write_data_page(struct page *page, bool *submitted,
				struct writeback_control *wbc)
1474 1475
{
	struct inode *inode = page->mapping->host;
1476
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1477 1478
	loff_t i_size = i_size_read(inode);
	const pgoff_t end_index = ((unsigned long long) i_size)
1479
							>> PAGE_SHIFT;
1480
	loff_t psize = (page->index + 1) << PAGE_SHIFT;
	unsigned offset = 0;
1482
	bool need_balance_fs = false;
1483
	int err = 0;
	struct f2fs_io_info fio = {
1485
		.sbi = sbi,
		.type = DATA,
		.op = REQ_OP_WRITE,
		.op_flags = wbc_to_write_flags(wbc),
1489
		.old_blkaddr = NULL_ADDR,
1490
		.page = page,
1491
		.encrypted_page = NULL,
1492
		.submitted = false,
1493
		.need_lock = LOCK_RETRY,
	};
1495

1496 1497
	trace_f2fs_writepage(page, DATA);

1498 1499 1500
	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
		goto redirty_out;

1501
	if (page->index < end_index)
1502
		goto write;
1503 1504 1505 1506 1507

	/*
	 * If the offset is out-of-range of file size,
	 * this page does not have to be written to disk.
	 */
1508
	offset = i_size & (PAGE_SIZE - 1);
1509
	if ((page->index >= end_index + 1) || !offset)
1510
		goto out;
1511

1512
	zero_user_segment(page, offset, PAGE_SIZE);
1513
write:
1514 1515
	if (f2fs_is_drop_cache(inode))
		goto out;
1516 1517 1518 1519
	/* we should not write 0'th page having journal header */
	if (f2fs_is_volatile_file(inode) && (!page->index ||
			(!wbc->for_reclaim &&
			available_free_memory(sbi, BASE_CHECK))))
1520
		goto redirty_out;
1521

	/* we should bypass data pages to let the kworker jobs proceed */
	if (unlikely(f2fs_cp_error(sbi))) {
		mapping_set_error(page->mapping, -EIO);
1525
		goto out;
1526 1527
	}

1528
	/* Dentry blocks are controlled by checkpoint */
1529
	if (S_ISDIR(inode->i_mode)) {
1530
		fio.need_lock = LOCK_DONE;
1531
		err = do_write_data_page(&fio);
1532 1533
		goto done;
	}

1535
	if (!wbc->for_reclaim)
1536
		need_balance_fs = true;
1537
	else if (has_not_enough_free_secs(sbi, 0, 0))
1538
		goto redirty_out;
1539 1540
	else
		set_inode_flag(inode, FI_HOT_DATA);
1541

1542
	err = -EAGAIN;
1543
	if (f2fs_has_inline_data(inode)) {
1544
		err = f2fs_write_inline_data(inode, page);
1545 1546 1547
		if (!err)
			goto out;
	}
1548

1549
	if (err == -EAGAIN) {
1550
		err = do_write_data_page(&fio);
1551 1552 1553 1554 1555
		if (err == -EAGAIN) {
			fio.need_lock = LOCK_REQ;
			err = do_write_data_page(&fio);
		}
	}
1556 1557
	if (F2FS_I(inode)->last_disk_size < psize)
		F2FS_I(inode)->last_disk_size = psize;
1558

1559 1560 1561
done:
	if (err && err != -ENOENT)
		goto redirty_out;
1562

1563
out:
1564
	inode_dec_dirty_pages(inode);
1565 1566
	if (err)
		ClearPageUptodate(page);
1567 1568

	if (wbc->for_reclaim) {
1569
		f2fs_submit_merged_write_cond(sbi, inode, 0, page->index, DATA);
1570
		clear_inode_flag(inode, FI_HOT_DATA);
1571
		remove_dirty_inode(inode);
1572
		submitted = NULL;
1573 1574
	}

1575
	unlock_page(page);
	if (!S_ISDIR(inode->i_mode))
		f2fs_balance_fs(sbi, need_balance_fs);
1578

1579
	if (unlikely(f2fs_cp_error(sbi))) {
1580
		f2fs_submit_merged_write(sbi, DATA);
1581 1582 1583 1584 1585
		submitted = NULL;
	}

	if (submitted)
		*submitted = fio.submitted;
1586

1587 1588 1589
	return 0;

redirty_out:
1590
	redirty_page_for_writepage(wbc, page);
1591 1592
	if (!err)
		return AOP_WRITEPAGE_ACTIVATE;
	unlock_page(page);
	return err;
1595 1596
}

1597 1598 1599
static int f2fs_write_data_page(struct page *page,
					struct writeback_control *wbc)
{
1600
	return __write_data_page(page, NULL, wbc);
1601 1602
}

/*
 * This function was copied from write_cache_pages in mm/page-writeback.c.
 * The major change is making the write step of cold data pages separate from
 * warm/hot data pages.
 */
static int f2fs_write_cache_pages(struct address_space *mapping,
					struct writeback_control *wbc)
{
	int ret = 0;
	int done = 0;
	struct pagevec pvec;
	int nr_pages;
	pgoff_t uninitialized_var(writeback_index);
	pgoff_t index;
	pgoff_t end;		/* Inclusive */
	pgoff_t done_index;
1619
	pgoff_t last_idx = ULONG_MAX;
	int cycled;
	int range_whole = 0;
	int tag;

	pagevec_init(&pvec, 0);
1625

1626 1627 1628 1629 1630 1631
	if (get_dirty_pages(mapping->host) <=
				SM_I(F2FS_M_SB(mapping))->min_hot_blocks)
		set_inode_flag(mapping->host, FI_HOT_DATA);
	else
		clear_inode_flag(mapping->host, FI_HOT_DATA);

	if (wbc->range_cyclic) {
		writeback_index = mapping->writeback_index; /* prev offset */
		index = writeback_index;
		if (index == 0)
			cycled = 1;
		else
			cycled = 0;
		end = -1;
	} else {
1641 1642
		index = wbc->range_start >> PAGE_SHIFT;
		end = wbc->range_end >> PAGE_SHIFT;
		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
			range_whole = 1;
		cycled = 1; /* ignore range_cyclic tests */
	}
	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
		tag = PAGECACHE_TAG_TOWRITE;
	else
		tag = PAGECACHE_TAG_DIRTY;
retry:
	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
		tag_pages_for_writeback(mapping, index, end);
	done_index = index;
	while (!done && (index <= end)) {
		int i;

		nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag,
			      min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1);
		if (nr_pages == 0)
			break;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];
1665
			bool submitted = false;

			if (page->index > end) {
				done = 1;
				break;
			}

			done_index = page->index;
1673
retry_write:
			lock_page(page);

			if (unlikely(page->mapping != mapping)) {
continue_unlock:
				unlock_page(page);
				continue;
			}

			if (!PageDirty(page)) {
				/* someone wrote it for us */
				goto continue_unlock;
			}

			if (PageWriteback(page)) {
				if (wbc->sync_mode != WB_SYNC_NONE)
1689 1690
					f2fs_wait_on_page_writeback(page,
								DATA, true);
				else
					goto continue_unlock;
			}

			BUG_ON(PageWriteback(page));
			if (!clear_page_dirty_for_io(page))
				goto continue_unlock;

1699
			ret = __write_data_page(page, &submitted, wbc);
			if (unlikely(ret)) {
1701 1702 1703 1704 1705 1706 1707 1708
				/*
				 * keep nr_to_write, since vfs uses this to
				 * get # of written pages.
				 */
				if (ret == AOP_WRITEPAGE_ACTIVATE) {
					unlock_page(page);
					ret = 0;
					continue;
1709 1710 1711 1712 1713 1714 1715 1716 1717
				} else if (ret == -EAGAIN) {
					ret = 0;
					if (wbc->sync_mode == WB_SYNC_ALL) {
						cond_resched();
						congestion_wait(BLK_RW_ASYNC,
									HZ/50);
						goto retry_write;
					}
					continue;
1718
				}
				done_index = page->index + 1;
				done = 1;
				break;
1722
			} else if (submitted) {
1723
				last_idx = page->index;
			}

1726 1727 1728 1729
			/* give a priority to WB_SYNC threads */
			if ((atomic_read(&F2FS_M_SB(mapping)->wb_sync_req) ||
					--wbc->nr_to_write <= 0) &&
					wbc->sync_mode == WB_SYNC_NONE) {
				done = 1;
				break;
			}
		}
		pagevec_release(&pvec);
		cond_resched();
	}

	if (!cycled && !done) {
		cycled = 1;
		index = 0;
		end = writeback_index - 1;
		goto retry;
	}
	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
		mapping->writeback_index = done_index;

1747
	if (last_idx != ULONG_MAX)
1748 1749
		f2fs_submit_merged_write_cond(F2FS_M_SB(mapping), mapping->host,
						0, last_idx, DATA);

	return ret;
}

1754
static int f2fs_write_data_pages(struct address_space *mapping,
1755 1756 1757
			    struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;
1758
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1759
	struct blk_plug plug;
1760 1761
	int ret;

	/* deal with chardevs and other special files */
	if (!mapping->a_ops->writepage)
		return 0;

1766 1767 1768 1769
	/* skip writing if there is no dirty page in this inode */
	if (!get_dirty_pages(inode) && wbc->sync_mode == WB_SYNC_NONE)
		return 0;

1770 1771 1772 1773
	/* during POR, we don't need to trigger writepage at all. */
	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
		goto skip_write;

1774 1775 1776 1777 1778
	if (S_ISDIR(inode->i_mode) && wbc->sync_mode == WB_SYNC_NONE &&
			get_dirty_pages(inode) < nr_pages_to_skip(sbi, DATA) &&
			available_free_memory(sbi, DIRTY_DENTS))
		goto skip_write;

	/* skip writing during file defragment */
1780
	if (is_inode_flag_set(inode, FI_DO_DEFRAG))
		goto skip_write;

	trace_f2fs_writepages(mapping->host, wbc, DATA);

1785 1786 1787 1788 1789 1790
	/* to avoid spliting IOs due to mixed WB_SYNC_ALL and WB_SYNC_NONE */
	if (wbc->sync_mode == WB_SYNC_ALL)
		atomic_inc(&sbi->wb_sync_req);
	else if (atomic_read(&sbi->wb_sync_req))
		goto skip_write;

1791
	blk_start_plug(&plug);
	ret = f2fs_write_cache_pages(mapping, wbc);
1793
	blk_finish_plug(&plug);
1794 1795 1796

	if (wbc->sync_mode == WB_SYNC_ALL)
		atomic_dec(&sbi->wb_sync_req);
1797 1798 1799 1800
	/*
	 * if some pages were truncated, we cannot guarantee its mapping->host
	 * to detect pending bios.
	 */

1802
	remove_dirty_inode(inode);
1803
	return ret;
1804 1805

skip_write:
1806
	wbc->pages_skipped += get_dirty_pages(inode);
	trace_f2fs_writepages(mapping->host, wbc, DATA);
1808
	return 0;
1809 1810
}

1811 1812 1813
static void f2fs_write_failed(struct address_space *mapping, loff_t to)
{
	struct inode *inode = mapping->host;
	loff_t i_size = i_size_read(inode);
1815

	if (to > i_size) {
1817
		down_write(&F2FS_I(inode)->i_mmap_sem);
		truncate_pagecache(inode, i_size);
		truncate_blocks(inode, i_size, true);
1820
		up_write(&F2FS_I(inode)->i_mmap_sem);
1821 1822 1823
	}
}

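/*
 * Find or reserve the block backing the page being written, converting
 * inline data when the write can no longer stay inline.
 */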
static int prepare_write_begin(struct f2fs_sb_info *sbi,
			struct page *page, loff_t pos, unsigned len,
			block_t *blk_addr, bool *node_changed)
{
	struct inode *inode = page->mapping->host;
	pgoff_t index = page->index;
	struct dnode_of_data dn;
	struct page *ipage;
1832
	bool locked = false;
1833
	struct extent_info ei = {0,0,0};
1834 1835
	int err = 0;

1836 1837 1838 1839
	/*
	 * we already allocated all the blocks, so we don't need to get
	 * the block addresses when there is no need to fill the page.
	 */
1840 1841
	if (!f2fs_has_inline_data(inode) && len == PAGE_SIZE &&
			!is_inode_flag_set(inode, FI_NO_PREALLOC))
1842 1843
		return 0;

1844
	if (f2fs_has_inline_data(inode) ||
1845
			(pos & PAGE_MASK) >= i_size_read(inode)) {
1846
		__do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, true);
1847 1848 1849
		locked = true;
	}
restart:
1850 1851 1852 1853 1854 1855 1856 1857 1858 1859 1860 1861
	/* check inline_data */
	ipage = get_node_page(sbi, inode->i_ino);
	if (IS_ERR(ipage)) {
		err = PTR_ERR(ipage);
		goto unlock_out;
	}

	set_new_dnode(&dn, inode, ipage, ipage, 0);

	if (f2fs_has_inline_data(inode)) {
		if (pos + len <= MAX_INLINE_DATA) {
			read_inline_data(page, ipage);
1862
			set_inode_flag(inode, FI_DATA_EXIST);
1863 1864
			if (inode->i_nlink)
				set_inline_node(ipage);
1865 1866 1867
		} else {
			err = f2fs_convert_inline_page(&dn, page);
			if (err)
1868 1869 1870 1871 1872 1873 1874 1875 1876 1877 1878 1879
				goto out;
			if (dn.data_blkaddr == NULL_ADDR)
				err = f2fs_get_block(&dn, index);
		}
	} else if (locked) {
		err = f2fs_get_block(&dn, index);
	} else {
		if (f2fs_lookup_extent_cache(inode, index, &ei)) {
			dn.data_blkaddr = ei.blk + index - ei.fofs;
		} else {
			/* hole case */
			err = get_dnode_of_data(&dn, index, LOOKUP_NODE);
1880
			if (err || dn.data_blkaddr == NULL_ADDR) {
1881
				f2fs_put_dnode(&dn);
1882 1883
				__do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO,
								true);
1884 1885 1886
				locked = true;
				goto restart;
			}
1887 1888
		}
	}
1889

1890 1891 1892
	/* convert_inline_page can make node_changed */
	*blk_addr = dn.data_blkaddr;
	*node_changed = dn.node_changed;
1893
out:
1894 1895
	f2fs_put_dnode(&dn);
unlock_out:
1896
	if (locked)
1897
		__do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, false);
1898 1899 1900
	return err;
}

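/*
 * ->write_begin() for buffered writes: grab and lock the target page,
 * resolve its block address, rebalance free segments if the write may
 * dirty node pages, and read or zero the page so a partial write starts
 * from up-to-date contents.
 */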
static int f2fs_write_begin(struct file *file, struct address_space *mapping,
		loff_t pos, unsigned len, unsigned flags,
		struct page **pagep, void **fsdata)
{
	struct inode *inode = mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct page *page = NULL;
	pgoff_t index = ((unsigned long long) pos) >> PAGE_SHIFT;
	bool need_balance = false;
	block_t blkaddr = NULL_ADDR;
	int err = 0;

	trace_f2fs_write_begin(inode, pos, len, flags);

	/*
	 * We should check this at this moment to avoid deadlock on inode page
	 * and #0 page. The locking rule for inline_data conversion should be:
	 * lock_page(page #0) -> lock_page(inode_page)
	 */
	if (index != 0) {
		err = f2fs_convert_inline_inode(inode);
		if (err)
			goto fail;
	}
repeat:
	/*
	 * Do not use grab_cache_page_write_begin() to avoid deadlock due to
	 * wait_for_stable_page. We will wait for that below with our IO control.
	 */
	page = pagecache_get_page(mapping, index,
				FGP_LOCK | FGP_WRITE | FGP_CREAT, GFP_NOFS);
	if (!page) {
		err = -ENOMEM;
		goto fail;
	}

	*pagep = page;

	err = prepare_write_begin(sbi, page, pos, len,
					&blkaddr, &need_balance);
	if (err)
		goto fail;

	if (need_balance && has_not_enough_free_secs(sbi, 0, 0)) {
		unlock_page(page);
		f2fs_balance_fs(sbi, true);
		lock_page(page);
		if (page->mapping != mapping) {
			/* The page got truncated from under us */
			f2fs_put_page(page, 1);
			goto repeat;
		}
	}

	f2fs_wait_on_page_writeback(page, DATA, false);

	/* wait for GCed encrypted page writeback */
	if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode))
		f2fs_wait_on_encrypted_page_writeback(sbi, blkaddr);

	if (len == PAGE_SIZE || PageUptodate(page))
		return 0;

	if (!(pos & (PAGE_SIZE - 1)) && (pos + len) >= i_size_read(inode)) {
		zero_user_segment(page, len, PAGE_SIZE);
		return 0;
	}

	if (blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_SIZE);
		SetPageUptodate(page);
	} else {
		struct bio *bio;

		bio = f2fs_grab_bio(inode, blkaddr, 1);
		if (IS_ERR(bio)) {
			err = PTR_ERR(bio);
			goto fail;
		}
		bio->bi_opf = REQ_OP_READ;
		if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
			bio_put(bio);
			err = -EFAULT;
			goto fail;
		}

		__submit_bio(sbi, bio, DATA);

		lock_page(page);
		if (unlikely(page->mapping != mapping)) {
			f2fs_put_page(page, 1);
			goto repeat;
		}
		if (unlikely(!PageUptodate(page))) {
			err = -EIO;
			goto fail;
		}
	}
	return 0;

fail:
	f2fs_put_page(page, 1);
	f2fs_write_failed(mapping, pos + len);
	return err;
}

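/*
 * ->write_end(): mark the page dirty and extend i_size after the caller
 * copied user data into it.  A short copy into a page that is not yet
 * uptodate is reported as zero copied so generic_perform_write() retries.
 */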
static int f2fs_write_end(struct file *file,
			struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	struct inode *inode = page->mapping->host;

	trace_f2fs_write_end(inode, pos, len, copied);

	/*
	 * This should come from len == PAGE_SIZE, so we expect copied to be
	 * PAGE_SIZE. Otherwise, we treat it as zero copied and let
	 * generic_perform_write() try to copy data again through copied=0.
	 */
	if (!PageUptodate(page)) {
		if (unlikely(copied != len))
			copied = 0;
		else
			SetPageUptodate(page);
	}
	if (!copied)
		goto unlock_out;

	set_page_dirty(page);

	if (pos + copied > i_size_read(inode))
		f2fs_i_size_write(inode, pos + copied);
unlock_out:
	f2fs_put_page(page, 1);
	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
	return copied;
}

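/*
 * Direct I/O requires both the file offset and the user buffer segments
 * to be block-size aligned; reject the request otherwise.
 */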
static int check_direct_IO(struct inode *inode, struct iov_iter *iter,
			   loff_t offset)
{
	unsigned blocksize_mask = inode->i_sb->s_blocksize - 1;

	if (offset & blocksize_mask)
		return -EINVAL;

	if (iov_iter_alignment(iter) & blocksize_mask)
		return -EINVAL;

	return 0;
}

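/*
 * ->direct_IO(): returning 0 here makes the VFS fall back to buffered
 * I/O; otherwise the request is issued through blockdev_direct_IO()
 * under dio_rwsem, and preallocated blocks beyond EOF are released on a
 * failed write.
 */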
static ssize_t f2fs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
{
	struct address_space *mapping = iocb->ki_filp->f_mapping;
	struct inode *inode = mapping->host;
	size_t count = iov_iter_count(iter);
	loff_t offset = iocb->ki_pos;
	int rw = iov_iter_rw(iter);
	int err;

	err = check_direct_IO(inode, iter, offset);
	if (err)
		return err;

	if (__force_buffered_io(inode, rw))
		return 0;

	trace_f2fs_direct_IO_enter(inode, offset, count, rw);

	down_read(&F2FS_I(inode)->dio_rwsem[rw]);
	err = blockdev_direct_IO(iocb, inode, iter, get_data_block_dio);
	up_read(&F2FS_I(inode)->dio_rwsem[rw]);

	if (rw == WRITE) {
		if (err > 0)
			set_inode_flag(inode, FI_UPDATE_WRITE);
		else if (err < 0)
			f2fs_write_failed(mapping, offset + count);
	}

	trace_f2fs_direct_IO_exit(inode, offset, count, rw, err);

	return err;
}

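/*
 * ->invalidatepage(): a page is being removed from the page cache, so
 * drop the dirty accounting and private state f2fs attached to it.
 */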
void f2fs_invalidate_page(struct page *page, unsigned int offset,
							unsigned int length)
{
	struct inode *inode = page->mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

	if (inode->i_ino >= F2FS_ROOT_INO(sbi) &&
		(offset % PAGE_SIZE || length != PAGE_SIZE))
		return;

	if (PageDirty(page)) {
		if (inode->i_ino == F2FS_META_INO(sbi)) {
			dec_page_count(sbi, F2FS_DIRTY_META);
		} else if (inode->i_ino == F2FS_NODE_INO(sbi)) {
			dec_page_count(sbi, F2FS_DIRTY_NODES);
		} else {
			inode_dec_dirty_pages(inode);
			remove_dirty_inode(inode);
		}
	}

	/* This is an atomic written page, keep Private */
	if (IS_ATOMIC_WRITTEN_PAGE(page))
		return drop_inmem_page(inode, page);

	set_page_private(page, 0);
	ClearPagePrivate(page);
}

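/*
 * ->releasepage(): report whether the page's private state can be dropped
 * so the VM may free it; dirty and atomic-write pages must be kept.
 */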
int f2fs_release_page(struct page *page, gfp_t wait)
{
	/* If this is a dirty page, keep PagePrivate */
	if (PageDirty(page))
		return 0;

	/* This is an atomic written page, keep Private */
	if (IS_ATOMIC_WRITTEN_PAGE(page))
		return 0;

	set_page_private(page, 0);
	ClearPagePrivate(page);
	return 1;
}

/*
 * This was copied from __set_page_dirty_buffers which gives higher performance
 * in very high-speed storage (e.g., pmem).
 */
void f2fs_set_page_dirty_nobuffers(struct page *page)
{
	struct address_space *mapping = page->mapping;
	unsigned long flags;

	if (unlikely(!mapping))
		return;

	spin_lock(&mapping->private_lock);
	lock_page_memcg(page);
	SetPageDirty(page);
	spin_unlock(&mapping->private_lock);

	spin_lock_irqsave(&mapping->tree_lock, flags);
	WARN_ON_ONCE(!PageUptodate(page));
	account_page_dirtied(page, mapping);
	radix_tree_tag_set(&mapping->page_tree,
			page_index(page), PAGECACHE_TAG_DIRTY);
	spin_unlock_irqrestore(&mapping->tree_lock, flags);
	unlock_page_memcg(page);

	__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
	return;
}

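/*
 * ->set_page_dirty() for data pages: pages of an in-flight atomic write
 * are tracked on the in-memory list instead of being marked dirty
 * directly.
 */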
static int f2fs_set_data_page_dirty(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct inode *inode = mapping->host;

	trace_f2fs_set_page_dirty(page, DATA);

	if (!PageUptodate(page))
		SetPageUptodate(page);

	if (f2fs_is_atomic_file(inode) && !f2fs_is_commit_atomic_write(inode)) {
		if (!IS_ATOMIC_WRITTEN_PAGE(page)) {
			register_inmem_page(inode, page);
			return 1;
		}
		/*
		 * This page has already been registered, so just return here.
		 */
		return 0;
	}

	if (!PageDirty(page)) {
		f2fs_set_page_dirty_nobuffers(page);
		update_dirty_page(inode, page);
		return 1;
	}
	return 0;
}

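/*
 * ->bmap(): map a file block to a device block.  Inline-data files have
 * no block mapping, and dirty pages are written back first so the blocks
 * are actually allocated.
 */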
static sector_t f2fs_bmap(struct address_space *mapping, sector_t block)
{
	struct inode *inode = mapping->host;

	if (f2fs_has_inline_data(inode))
		return 0;

	/* make sure allocating whole blocks */
	if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
		filemap_write_and_wait(mapping);

	return generic_block_bmap(mapping, block, get_data_block_bmap);
}

#ifdef CONFIG_MIGRATION
#include <linux/migrate.h>

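/*
 * ->migratepage(): move the contents and private state of @page to
 * @newpage for page migration/compaction, re-pointing any atomic-write
 * tracking entry at the new page.
 */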
int f2fs_migrate_page(struct address_space *mapping,
		struct page *newpage, struct page *page, enum migrate_mode mode)
{
	int rc, extra_count;
	struct f2fs_inode_info *fi = F2FS_I(mapping->host);
	bool atomic_written = IS_ATOMIC_WRITTEN_PAGE(page);

	BUG_ON(PageWriteback(page));

	/* migrating an atomic written page is safe with the inmem_lock held */
	if (atomic_written) {
		if (mode != MIGRATE_SYNC)
			return -EBUSY;
		if (!mutex_trylock(&fi->inmem_lock))
			return -EAGAIN;
	}

	/*
	 * A reference is expected if PagePrivate is set when moving the
	 * mapping; however, F2FS breaks this rule to maintain dirty page
	 * counts when truncating pages, so adjust 'extra_count' here to
	 * make it work.
	 */
	extra_count = (atomic_written ? 1 : 0) - page_has_private(page);
	rc = migrate_page_move_mapping(mapping, newpage,
				page, NULL, mode, extra_count);
	if (rc != MIGRATEPAGE_SUCCESS) {
		if (atomic_written)
			mutex_unlock(&fi->inmem_lock);
		return rc;
	}

	if (atomic_written) {
		struct inmem_pages *cur;
		list_for_each_entry(cur, &fi->inmem_pages, list)
			if (cur->page == page) {
				cur->page = newpage;
				break;
			}
		mutex_unlock(&fi->inmem_lock);
		put_page(page);
		get_page(newpage);
	}

	if (PagePrivate(page))
		SetPagePrivate(newpage);
	set_page_private(newpage, page_private(page));

	migrate_page_copy(newpage, page);

	return MIGRATEPAGE_SUCCESS;
}
#endif

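/* address_space operations for regular file data pages */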
const struct address_space_operations f2fs_dblock_aops = {
	.readpage	= f2fs_read_data_page,
	.readpages	= f2fs_read_data_pages,
	.writepage	= f2fs_write_data_page,
	.writepages	= f2fs_write_data_pages,
	.write_begin	= f2fs_write_begin,
	.write_end	= f2fs_write_end,
	.set_page_dirty	= f2fs_set_data_page_dirty,
	.invalidatepage	= f2fs_invalidate_page,
	.releasepage	= f2fs_release_page,
	.direct_IO	= f2fs_direct_IO,
	.bmap		= f2fs_bmap,
#ifdef CONFIG_MIGRATION
	.migratepage    = f2fs_migrate_page,
#endif
};