/*
 * fs/f2fs/data.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/buffer_head.h>
#include <linux/mpage.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/pagevec.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/prefetch.h>
#include <linux/uio.h>
#include <linux/mm.h>
#include <linux/memcontrol.h>
#include <linux/cleancache.h>
#include <linux/sched/signal.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "trace.h"
#include <trace/events/f2fs.h>

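/*
 * Check whether writeback of this page is guaranteed by checkpoint:
 * meta/node pages, directory data, and cold data are all written back
 * under checkpoint control.
 */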
static bool __is_cp_guaranteed(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct inode *inode;
	struct f2fs_sb_info *sbi;

	if (!mapping)
		return false;

	inode = mapping->host;
	sbi = F2FS_I_SB(inode);

	if (inode->i_ino == F2FS_META_INO(sbi) ||
			inode->i_ino ==  F2FS_NODE_INO(sbi) ||
			S_ISDIR(inode->i_mode) ||
			is_cold_data(page))
		return true;
	return false;
}

static void f2fs_read_end_io(struct bio *bio)
{
	struct bio_vec *bvec;
	int i;

#ifdef CONFIG_F2FS_FAULT_INJECTION
	if (time_to_inject(F2FS_P_SB(bio->bi_io_vec->bv_page), FAULT_IO)) {
		f2fs_show_injection_info(FAULT_IO);
		bio->bi_status = BLK_STS_IOERR;
	}
#endif

	if (f2fs_bio_encrypted(bio)) {
		if (bio->bi_status) {
			fscrypt_release_ctx(bio->bi_private);
		} else {
			fscrypt_decrypt_bio_pages(bio->bi_private, bio);
			return;
		}
	}

	bio_for_each_segment_all(bvec, bio, i) {
		struct page *page = bvec->bv_page;

		if (!bio->bi_status) {
			if (!PageUptodate(page))
				SetPageUptodate(page);
		} else {
			ClearPageUptodate(page);
			SetPageError(page);
		}
		unlock_page(page);
	}
	bio_put(bio);
}

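/*
 * Write-completion handler.  "Dummy" pages here are the zero-filled filler
 * pages added in __submit_bio() to pad a write bio up to the aligned I/O
 * unit; they carry no data and are simply returned to the write_io_dummy
 * mempool.
 */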
static void f2fs_write_end_io(struct bio *bio)
{
	struct f2fs_sb_info *sbi = bio->bi_private;
	struct bio_vec *bvec;
	int i;

	bio_for_each_segment_all(bvec, bio, i) {
		struct page *page = bvec->bv_page;
		enum count_type type = WB_DATA_TYPE(page);

		if (IS_DUMMY_WRITTEN_PAGE(page)) {
			set_page_private(page, (unsigned long)NULL);
			ClearPagePrivate(page);
			unlock_page(page);
			mempool_free(page, sbi->write_io_dummy);

			if (unlikely(bio->bi_status))
				f2fs_stop_checkpoint(sbi, true);
			continue;
		}

		fscrypt_pullback_bio_page(&page, true);

		if (unlikely(bio->bi_status)) {
			mapping_set_error(page->mapping, -EIO);
			f2fs_stop_checkpoint(sbi, true);
		}
		dec_page_count(sbi, type);
		clear_cold_data(page);
		end_page_writeback(page);
	}
	if (!get_pages(sbi, F2FS_WB_CP_DATA) &&
				wq_has_sleeper(&sbi->cp_wait))
		wake_up(&sbi->cp_wait);

	bio_put(bio);
}

/*
 * Return the block device that holds @blk_addr; if a bio is given, also
 * point it at that device and the corresponding sector.
 */
struct block_device *f2fs_target_device(struct f2fs_sb_info *sbi,
				block_t blk_addr, struct bio *bio)
{
	struct block_device *bdev = sbi->sb->s_bdev;
	int i;

	for (i = 0; i < sbi->s_ndevs; i++) {
		if (FDEV(i).start_blk <= blk_addr &&
					FDEV(i).end_blk >= blk_addr) {
			blk_addr -= FDEV(i).start_blk;
			bdev = FDEV(i).bdev;
			break;
		}
	}
	if (bio) {
		bio->bi_bdev = bdev;
		bio->bi_iter.bi_sector = SECTOR_FROM_BLOCK(blk_addr);
	}
	return bdev;
}

int f2fs_target_device_index(struct f2fs_sb_info *sbi, block_t blkaddr)
{
	int i;

	for (i = 0; i < sbi->s_ndevs; i++)
		if (FDEV(i).start_blk <= blkaddr && FDEV(i).end_blk >= blkaddr)
			return i;
	return 0;
}

static bool __same_bdev(struct f2fs_sb_info *sbi,
				block_t blk_addr, struct bio *bio)
{
	return f2fs_target_device(sbi, blk_addr, NULL) == bio->bi_bdev;
}

/*
 * Low-level block read/write IO operations.
 */
static struct bio *__bio_alloc(struct f2fs_sb_info *sbi, block_t blk_addr,
				int npages, bool is_read)
{
	struct bio *bio;

	bio = f2fs_bio_alloc(npages);

	f2fs_target_device(sbi, blk_addr, bio);
	bio->bi_end_io = is_read ? f2fs_read_end_io : f2fs_write_end_io;
	bio->bi_private = is_read ? NULL : sbi;

	return bio;
}

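/*
 * Submit a bio.  For DATA/NODE writes, pad the bio with dummy pages up to
 * the F2FS_IO_SIZE unit so the device always sees aligned I/O (presumably
 * only relevant when an aligned-I/O mount option is in effect; otherwise
 * F2FS_IO_SIZE is one block and the padding loop is skipped).
 */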
static inline void __submit_bio(struct f2fs_sb_info *sbi,
				struct bio *bio, enum page_type type)
{
	if (!is_read_io(bio_op(bio))) {
		unsigned int start;

		if (f2fs_sb_mounted_blkzoned(sbi->sb) &&
			current->plug && (type == DATA || type == NODE))
			blk_finish_plug(current->plug);

		if (type != DATA && type != NODE)
			goto submit_io;

		start = bio->bi_iter.bi_size >> F2FS_BLKSIZE_BITS;
		start %= F2FS_IO_SIZE(sbi);

		if (start == 0)
			goto submit_io;

		/* fill the rest of the I/O unit with dummy pages */
		for (; start < F2FS_IO_SIZE(sbi); start++) {
			struct page *page =
				mempool_alloc(sbi->write_io_dummy,
					GFP_NOIO | __GFP_ZERO | __GFP_NOFAIL);
			f2fs_bug_on(sbi, !page);

			SetPagePrivate(page);
			set_page_private(page, (unsigned long)DUMMY_WRITTEN_PAGE);
			lock_page(page);
			if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE)
				f2fs_bug_on(sbi, 1);
		}
		/*
		 * In the NODE case, we lose the next block address chain, so
		 * we need to do a checkpoint in f2fs_sync_file.
		 */
		if (type == NODE)
			set_sbi_flag(sbi, SBI_NEED_CP);
	}
submit_io:
	if (is_read_io(bio_op(bio)))
		trace_f2fs_submit_read_bio(sbi->sb, type, bio);
	else
		trace_f2fs_submit_write_bio(sbi->sb, type, bio);
	submit_bio(bio);
}

static void __submit_merged_bio(struct f2fs_bio_info *io)
{
	struct f2fs_io_info *fio = &io->fio;

	if (!io->bio)
		return;

	bio_set_op_attrs(io->bio, fio->op, fio->op_flags);

	if (is_read_io(fio->op))
		trace_f2fs_prepare_read_bio(io->sbi->sb, fio->type, io->bio);
	else
		trace_f2fs_prepare_write_bio(io->sbi->sb, fio->type, io->bio);

	__submit_bio(io->sbi, io->bio, fio->type);
	io->bio = NULL;
}

static bool __has_merged_page(struct f2fs_bio_info *io,
				struct inode *inode, nid_t ino, pgoff_t idx)
{
	struct bio_vec *bvec;
	struct page *target;
	int i;

	if (!io->bio)
		return false;

	if (!inode && !ino)
		return true;

	bio_for_each_segment_all(bvec, io->bio, i) {

		if (bvec->bv_page->mapping)
			target = bvec->bv_page;
		else
			target = fscrypt_control_page(bvec->bv_page);

		if (idx != target->index)
			continue;

		if (inode && inode == target->mapping->host)
			return true;
		if (ino && ino == ino_of_node(target))
			return true;
	}

	return false;
}

static bool has_merged_page(struct f2fs_sb_info *sbi, struct inode *inode,
				nid_t ino, pgoff_t idx, enum page_type type)
{
	enum page_type btype = PAGE_TYPE_OF_BIO(type);
	enum temp_type temp;
	struct f2fs_bio_info *io;
	bool ret = false;

	for (temp = HOT; temp < NR_TEMP_TYPE; temp++) {
		io = sbi->write_io[btype] + temp;

		down_read(&io->io_rwsem);
		ret = __has_merged_page(io, inode, ino, idx);
		up_read(&io->io_rwsem);

		/* TODO: use HOT temp only for meta pages now. */
		if (ret || btype == META)
			break;
	}
	return ret;
}

static void __f2fs_submit_merged_write(struct f2fs_sb_info *sbi,
				enum page_type type, enum temp_type temp)
{
	enum page_type btype = PAGE_TYPE_OF_BIO(type);
	struct f2fs_bio_info *io = sbi->write_io[btype] + temp;

	down_write(&io->io_rwsem);

	/* change META to META_FLUSH in the checkpoint procedure */
	if (type >= META_FLUSH) {
		io->fio.type = META_FLUSH;
		io->fio.op = REQ_OP_WRITE;
		io->fio.op_flags = REQ_META | REQ_PRIO | REQ_SYNC;
		if (!test_opt(sbi, NOBARRIER))
			io->fio.op_flags |= REQ_PREFLUSH | REQ_FUA;
	}
	__submit_merged_bio(io);
	up_write(&io->io_rwsem);
}

static void __submit_merged_write_cond(struct f2fs_sb_info *sbi,
				struct inode *inode, nid_t ino, pgoff_t idx,
				enum page_type type, bool force)
{
	enum temp_type temp;

	if (!force && !has_merged_page(sbi, inode, ino, idx, type))
		return;

	for (temp = HOT; temp < NR_TEMP_TYPE; temp++) {

		__f2fs_submit_merged_write(sbi, type, temp);

		/* TODO: use HOT temp only for meta pages now. */
		if (type >= META)
			break;
	}
}

void f2fs_submit_merged_write(struct f2fs_sb_info *sbi, enum page_type type)
{
	__submit_merged_write_cond(sbi, NULL, 0, 0, type, true);
}

void f2fs_submit_merged_write_cond(struct f2fs_sb_info *sbi,
				struct inode *inode, nid_t ino, pgoff_t idx,
				enum page_type type)
{
	__submit_merged_write_cond(sbi, inode, ino, idx, type, false);
}

void f2fs_flush_merged_writes(struct f2fs_sb_info *sbi)
{
	f2fs_submit_merged_write(sbi, DATA);
	f2fs_submit_merged_write(sbi, NODE);
	f2fs_submit_merged_write(sbi, META);
}

/*
 * Fill the locked page with data located at the given block address.
 * A caller needs to unlock the page on failure.
 */
int f2fs_submit_page_bio(struct f2fs_io_info *fio)
{
	struct bio *bio;
	struct page *page = fio->encrypted_page ?
			fio->encrypted_page : fio->page;

	trace_f2fs_submit_page_bio(page, fio);
	f2fs_trace_ios(fio, 0);

	/* Allocate a new bio */
	bio = __bio_alloc(fio->sbi, fio->new_blkaddr, 1, is_read_io(fio->op));

	if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
		bio_put(bio);
		return -EFAULT;
	}
	bio_set_op_attrs(bio, fio->op, fio->op_flags);

	__submit_bio(fio->sbi, bio, fio->type);

	if (!is_read_io(fio->op))
		inc_page_count(fio->sbi, WB_DATA_TYPE(fio->page));
	return 0;
}

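/*
 * Append one page to the per-log merged write bio.  The pending bio is
 * flushed first whenever the new block is not contiguous, the op flags
 * differ, or the target device changes; with fio->in_list, pages are
 * drained from io_list until it is empty.
 */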
int f2fs_submit_page_write(struct f2fs_io_info *fio)
{
	struct f2fs_sb_info *sbi = fio->sbi;
	enum page_type btype = PAGE_TYPE_OF_BIO(fio->type);
	struct f2fs_bio_info *io = sbi->write_io[btype] + fio->temp;
	struct page *bio_page;
	int err = 0;

	f2fs_bug_on(sbi, is_read_io(fio->op));

	down_write(&io->io_rwsem);
next:
	if (fio->in_list) {
		spin_lock(&io->io_lock);
		if (list_empty(&io->io_list)) {
			spin_unlock(&io->io_lock);
			goto out_fail;
		}
		fio = list_first_entry(&io->io_list,
						struct f2fs_io_info, list);
		list_del(&fio->list);
		spin_unlock(&io->io_lock);
	}

	if (fio->old_blkaddr != NEW_ADDR)
		verify_block_addr(sbi, fio->old_blkaddr);
	verify_block_addr(sbi, fio->new_blkaddr);

	bio_page = fio->encrypted_page ? fio->encrypted_page : fio->page;

	/* set submitted = 1 as a return value */
	fio->submitted = 1;

	inc_page_count(sbi, WB_DATA_TYPE(bio_page));

	if (io->bio && (io->last_block_in_bio != fio->new_blkaddr - 1 ||
	    (io->fio.op != fio->op || io->fio.op_flags != fio->op_flags) ||
			!__same_bdev(sbi, fio->new_blkaddr, io->bio)))
		__submit_merged_bio(io);
alloc_new:
	if (io->bio == NULL) {
		if ((fio->type == DATA || fio->type == NODE) &&
				fio->new_blkaddr & F2FS_IO_SIZE_MASK(sbi)) {
			err = -EAGAIN;
			dec_page_count(sbi, WB_DATA_TYPE(bio_page));
			goto out_fail;
		}
		io->bio = __bio_alloc(sbi, fio->new_blkaddr,
						BIO_MAX_PAGES, false);
		io->fio = *fio;
	}

	if (bio_add_page(io->bio, bio_page, PAGE_SIZE, 0) < PAGE_SIZE) {
		__submit_merged_bio(io);
		goto alloc_new;
	}

	io->last_block_in_bio = fio->new_blkaddr;
	f2fs_trace_ios(fio, 0);

	trace_f2fs_submit_page_write(fio->page, fio);

	if (fio->in_list)
		goto next;
out_fail:
	up_write(&io->io_rwsem);
	return err;
}

static void __set_data_blkaddr(struct dnode_of_data *dn)
{
	struct f2fs_node *rn = F2FS_NODE(dn->node_page);
	__le32 *addr_array;
	int base = 0;

	if (IS_INODE(dn->node_page) && f2fs_has_extra_attr(dn->inode))
		base = get_extra_isize(dn->inode);

	/* Get physical address of data block */
	addr_array = blkaddr_in_node(rn);
	addr_array[base + dn->ofs_in_node] = cpu_to_le32(dn->data_blkaddr);
}

/*
 * Lock ordering for the change of data block address:
 * ->data_page
 *  ->node_page
 *    update block addresses in the node page
 */
void set_data_blkaddr(struct dnode_of_data *dn)
{
	f2fs_wait_on_page_writeback(dn->node_page, NODE, true);
	__set_data_blkaddr(dn);
	if (set_page_dirty(dn->node_page))
		dn->node_changed = true;
}

void f2fs_update_data_blkaddr(struct dnode_of_data *dn, block_t blkaddr)
{
	dn->data_blkaddr = blkaddr;
	set_data_blkaddr(dn);
	f2fs_update_extent_cache(dn);
}

/* dn->ofs_in_node will be returned with up-to-date last block pointer */
int reserve_new_blocks(struct dnode_of_data *dn, blkcnt_t count)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	int err;

	if (!count)
		return 0;

	if (unlikely(is_inode_flag_set(dn->inode, FI_NO_ALLOC)))
		return -EPERM;
	if (unlikely((err = inc_valid_block_count(sbi, dn->inode, &count))))
		return err;

	trace_f2fs_reserve_new_blocks(dn->inode, dn->nid,
						dn->ofs_in_node, count);

	f2fs_wait_on_page_writeback(dn->node_page, NODE, true);

	for (; count > 0; dn->ofs_in_node++) {
		block_t blkaddr = datablock_addr(dn->inode,
					dn->node_page, dn->ofs_in_node);
		if (blkaddr == NULL_ADDR) {
			dn->data_blkaddr = NEW_ADDR;
			__set_data_blkaddr(dn);
			count--;
		}
	}

	if (set_page_dirty(dn->node_page))
		dn->node_changed = true;
	return 0;
}

/* Should keep dn->ofs_in_node unchanged */
int reserve_new_block(struct dnode_of_data *dn)
{
	unsigned int ofs_in_node = dn->ofs_in_node;
	int ret;

	ret = reserve_new_blocks(dn, 1);
	dn->ofs_in_node = ofs_in_node;
	return ret;
}

int f2fs_reserve_block(struct dnode_of_data *dn, pgoff_t index)
{
	bool need_put = dn->inode_page ? false : true;
	int err;

	err = get_dnode_of_data(dn, index, ALLOC_NODE);
	if (err)
		return err;

	if (dn->data_blkaddr == NULL_ADDR)
		err = reserve_new_block(dn);
	if (err || need_put)
		f2fs_put_dnode(dn);
	return err;
}

int f2fs_get_block(struct dnode_of_data *dn, pgoff_t index)
{
	struct extent_info ei = {0,0,0};
	struct inode *inode = dn->inode;

	if (f2fs_lookup_extent_cache(inode, index, &ei)) {
		dn->data_blkaddr = ei.blk + index - ei.fofs;
		return 0;
	}

	return f2fs_reserve_block(dn, index);
}

struct page *get_read_data_page(struct inode *inode, pgoff_t index,
						int op_flags, bool for_write)
{
	struct address_space *mapping = inode->i_mapping;
	struct dnode_of_data dn;
	struct page *page;
	struct extent_info ei = {0,0,0};
	int err;
	struct f2fs_io_info fio = {
		.sbi = F2FS_I_SB(inode),
		.type = DATA,
		.op = REQ_OP_READ,
		.op_flags = op_flags,
		.encrypted_page = NULL,
	};

	if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode))
		return read_mapping_page(mapping, index, NULL);

	page = f2fs_grab_cache_page(mapping, index, for_write);
	if (!page)
		return ERR_PTR(-ENOMEM);

	if (f2fs_lookup_extent_cache(inode, index, &ei)) {
		dn.data_blkaddr = ei.blk + index - ei.fofs;
		goto got_it;
	}

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, index, LOOKUP_NODE);
	if (err)
		goto put_err;
	f2fs_put_dnode(&dn);

	if (unlikely(dn.data_blkaddr == NULL_ADDR)) {
		err = -ENOENT;
		goto put_err;
	}
got_it:
	if (PageUptodate(page)) {
		unlock_page(page);
		return page;
	}

	/*
	 * A new dentry page was allocated but could not be written, because
	 * its new inode page couldn't be allocated due to -ENOSPC.
	 * In such a case, its blkaddr can remain NEW_ADDR;
	 * see f2fs_add_link -> get_new_data_page -> init_inode_metadata.
	 */
	if (dn.data_blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_SIZE);
		if (!PageUptodate(page))
			SetPageUptodate(page);
		unlock_page(page);
		return page;
	}

	fio.new_blkaddr = fio.old_blkaddr = dn.data_blkaddr;
	fio.page = page;
	err = f2fs_submit_page_bio(&fio);
	if (err)
		goto put_err;
	return page;

put_err:
	f2fs_put_page(page, 1);
	return ERR_PTR(err);
}

struct page *find_data_page(struct inode *inode, pgoff_t index)
{
	struct address_space *mapping = inode->i_mapping;
	struct page *page;

	page = find_get_page(mapping, index);
	if (page && PageUptodate(page))
		return page;
	f2fs_put_page(page, 0);

	page = get_read_data_page(inode, index, 0, false);
	if (IS_ERR(page))
		return page;

	if (PageUptodate(page))
		return page;

	wait_on_page_locked(page);
	if (unlikely(!PageUptodate(page))) {
		f2fs_put_page(page, 0);
		return ERR_PTR(-EIO);
	}
	return page;
}

/*
 * If it tries to access a hole, return an error, because the callers
 * (functions in dir.c and GC) should be able to know whether this page
 * exists or not.
 */
struct page *get_lock_data_page(struct inode *inode, pgoff_t index,
							bool for_write)
{
	struct address_space *mapping = inode->i_mapping;
	struct page *page;
repeat:
	page = get_read_data_page(inode, index, 0, for_write);
	if (IS_ERR(page))
		return page;

	/* wait for read completion */
	lock_page(page);
	if (unlikely(page->mapping != mapping)) {
		f2fs_put_page(page, 1);
		goto repeat;
	}
	if (unlikely(!PageUptodate(page))) {
		f2fs_put_page(page, 1);
		return ERR_PTR(-EIO);
	}
	return page;
}

/*
 * Caller ensures that this data page is never allocated.
 * A new zero-filled data page is allocated in the page cache.
 *
 * Also, caller should grab and release a rwsem by calling f2fs_lock_op() and
 * f2fs_unlock_op().
 * Note that ipage is set only by make_empty_dir, and if any error occurs,
 * ipage should be released by this function.
 */
struct page *get_new_data_page(struct inode *inode,
		struct page *ipage, pgoff_t index, bool new_i_size)
{
	struct address_space *mapping = inode->i_mapping;
	struct page *page;
	struct dnode_of_data dn;
	int err;

	page = f2fs_grab_cache_page(mapping, index, true);
	if (!page) {
		/*
		 * before exiting, we should make sure ipage will be released
		 * if any error occurs.
		 */
		f2fs_put_page(ipage, 1);
		return ERR_PTR(-ENOMEM);
	}

	set_new_dnode(&dn, inode, ipage, NULL, 0);
	err = f2fs_reserve_block(&dn, index);
	if (err) {
		f2fs_put_page(page, 1);
		return ERR_PTR(err);
	}
	if (!ipage)
		f2fs_put_dnode(&dn);

	if (PageUptodate(page))
		goto got_it;

	if (dn.data_blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_SIZE);
		if (!PageUptodate(page))
			SetPageUptodate(page);
	} else {
		f2fs_put_page(page, 1);

		/* if ipage exists, blkaddr should be NEW_ADDR */
		f2fs_bug_on(F2FS_I_SB(inode), ipage);
		page = get_lock_data_page(inode, index, true);
		if (IS_ERR(page))
			return page;
	}
got_it:
	if (new_i_size && i_size_read(inode) <
				((loff_t)(index + 1) << PAGE_SHIFT))
		f2fs_i_size_write(inode, ((loff_t)(index + 1) << PAGE_SHIFT));
	return page;
}

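/* Allocate one new data block for @dn via the LFS allocator and update i_size. */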
static int __allocate_data_block(struct dnode_of_data *dn)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	struct f2fs_summary sum;
	struct node_info ni;
	pgoff_t fofs;
	blkcnt_t count = 1;
	int err;

	if (unlikely(is_inode_flag_set(dn->inode, FI_NO_ALLOC)))
		return -EPERM;

	dn->data_blkaddr = datablock_addr(dn->inode,
				dn->node_page, dn->ofs_in_node);
	if (dn->data_blkaddr == NEW_ADDR)
		goto alloc;

	if (unlikely((err = inc_valid_block_count(sbi, dn->inode, &count))))
		return err;

alloc:
	get_node_info(sbi, dn->nid, &ni);
	set_summary(&sum, dn->nid, dn->ofs_in_node, ni.version);

	allocate_data_block(sbi, NULL, dn->data_blkaddr, &dn->data_blkaddr,
					&sum, CURSEG_WARM_DATA, NULL, false);
	set_data_blkaddr(dn);

	/* update i_size */
	fofs = start_bidx_of_node(ofs_of_node(dn->node_page), dn->inode) +
							dn->ofs_in_node;
	if (i_size_read(dn->inode) < ((loff_t)(fofs + 1) << PAGE_SHIFT))
		f2fs_i_size_write(dn->inode,
				((loff_t)(fofs + 1) << PAGE_SHIFT));
	return 0;
}

static inline bool __force_buffered_io(struct inode *inode, int rw)
{
	return ((f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode)) ||
			(rw == WRITE && test_opt(F2FS_I_SB(inode), LFS)) ||
			F2FS_I_SB(inode)->s_ndevs);
}

int f2fs_preallocate_blocks(struct kiocb *iocb, struct iov_iter *from)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	struct f2fs_map_blocks map;
	int err = 0;

	if (is_inode_flag_set(inode, FI_NO_PREALLOC))
		return 0;

	map.m_lblk = F2FS_BLK_ALIGN(iocb->ki_pos);
	map.m_len = F2FS_BYTES_TO_BLK(iocb->ki_pos + iov_iter_count(from));
	if (map.m_len > map.m_lblk)
		map.m_len -= map.m_lblk;
	else
		map.m_len = 0;

	map.m_next_pgofs = NULL;

	if (iocb->ki_flags & IOCB_DIRECT) {
		err = f2fs_convert_inline_inode(inode);
		if (err)
			return err;
		return f2fs_map_blocks(inode, &map, 1,
			__force_buffered_io(inode, WRITE) ?
				F2FS_GET_BLOCK_PRE_AIO :
				F2FS_GET_BLOCK_PRE_DIO);
	}
	if (iocb->ki_pos + iov_iter_count(from) > MAX_INLINE_DATA(inode)) {
		err = f2fs_convert_inline_inode(inode);
		if (err)
			return err;
	}
	if (!f2fs_has_inline_data(inode))
		return f2fs_map_blocks(inode, &map, 1, F2FS_GET_BLOCK_PRE_AIO);
	return err;
}

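/*
 * F2FS_GET_BLOCK_PRE_AIO only takes the node_change read lock, while every
 * other mapping flag serializes against checkpoint via f2fs_lock_op().
 */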
static inline void __do_map_lock(struct f2fs_sb_info *sbi, int flag, bool lock)
{
	if (flag == F2FS_GET_BLOCK_PRE_AIO) {
		if (lock)
			down_read(&sbi->node_change);
		else
			up_read(&sbi->node_change);
	} else {
		if (lock)
			f2fs_lock_op(sbi);
		else
			f2fs_unlock_op(sbi);
	}
}

/*
 * f2fs_map_blocks() now supports readahead/bmap/rw direct_IO with the
 * f2fs_map_blocks structure.
 * If original data blocks are allocated, then give them to blockdev.
 * Otherwise,
 *     a. preallocate requested block addresses
 *     b. do not use extent cache for better performance
 *     c. give the block addresses to blockdev
 */
int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map,
						int create, int flag)
{
	unsigned int maxblocks = map->m_len;
	struct dnode_of_data dn;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	int mode = create ? ALLOC_NODE : LOOKUP_NODE;
	pgoff_t pgofs, end_offset, end;
	int err = 0, ofs = 1;
	unsigned int ofs_in_node, last_ofs_in_node;
	blkcnt_t prealloc;
	struct extent_info ei = {0,0,0};
	block_t blkaddr;

	if (!maxblocks)
		return 0;

	map->m_len = 0;
	map->m_flags = 0;

	/* it only supports block size == page size */
	pgofs = (pgoff_t)map->m_lblk;
	end = pgofs + maxblocks;

	if (!create && f2fs_lookup_extent_cache(inode, pgofs, &ei)) {
		map->m_pblk = ei.blk + pgofs - ei.fofs;
		map->m_len = min((pgoff_t)maxblocks, ei.fofs + ei.len - pgofs);
		map->m_flags = F2FS_MAP_MAPPED;
		goto out;
	}

next_dnode:
	if (create)
		__do_map_lock(sbi, flag, true);

	/* When reading holes, we need its node page */
	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, pgofs, mode);
	if (err) {
		if (flag == F2FS_GET_BLOCK_BMAP)
			map->m_pblk = 0;
		if (err == -ENOENT) {
			err = 0;
			if (map->m_next_pgofs)
				*map->m_next_pgofs =
					get_next_page_offset(&dn, pgofs);
		}
		goto unlock_out;
	}

	prealloc = 0;
	last_ofs_in_node = ofs_in_node = dn.ofs_in_node;
	end_offset = ADDRS_PER_PAGE(dn.node_page, inode);

next_block:
	blkaddr = datablock_addr(dn.inode, dn.node_page, dn.ofs_in_node);

	if (blkaddr == NEW_ADDR || blkaddr == NULL_ADDR) {
		if (create) {
			if (unlikely(f2fs_cp_error(sbi))) {
				err = -EIO;
				goto sync_out;
			}
			if (flag == F2FS_GET_BLOCK_PRE_AIO) {
				if (blkaddr == NULL_ADDR) {
					prealloc++;
					last_ofs_in_node = dn.ofs_in_node;
				}
			} else {
				err = __allocate_data_block(&dn);
				if (!err)
					set_inode_flag(inode, FI_APPEND_WRITE);
			}
			if (err)
				goto sync_out;
			map->m_flags |= F2FS_MAP_NEW;
			blkaddr = dn.data_blkaddr;
		} else {
			if (flag == F2FS_GET_BLOCK_BMAP) {
				map->m_pblk = 0;
				goto sync_out;
			}
			if (flag == F2FS_GET_BLOCK_FIEMAP &&
						blkaddr == NULL_ADDR) {
				if (map->m_next_pgofs)
					*map->m_next_pgofs = pgofs + 1;
			}
			if (flag != F2FS_GET_BLOCK_FIEMAP ||
						blkaddr != NEW_ADDR)
				goto sync_out;
		}
	}

	if (flag == F2FS_GET_BLOCK_PRE_AIO)
		goto skip;

	if (map->m_len == 0) {
		/* preallocated unwritten block should be mapped for fiemap. */
		if (blkaddr == NEW_ADDR)
			map->m_flags |= F2FS_MAP_UNWRITTEN;
		map->m_flags |= F2FS_MAP_MAPPED;

		map->m_pblk = blkaddr;
		map->m_len = 1;
	} else if ((map->m_pblk != NEW_ADDR &&
			blkaddr == (map->m_pblk + ofs)) ||
			(map->m_pblk == NEW_ADDR && blkaddr == NEW_ADDR) ||
			flag == F2FS_GET_BLOCK_PRE_DIO) {
		ofs++;
		map->m_len++;
	} else {
		goto sync_out;
	}

skip:
	dn.ofs_in_node++;
	pgofs++;

	/* preallocate blocks in batch for one dnode page */
	if (flag == F2FS_GET_BLOCK_PRE_AIO &&
			(pgofs == end || dn.ofs_in_node == end_offset)) {

		dn.ofs_in_node = ofs_in_node;
		err = reserve_new_blocks(&dn, prealloc);
		if (err)
			goto sync_out;

		map->m_len += dn.ofs_in_node - ofs_in_node;
		if (prealloc && dn.ofs_in_node != last_ofs_in_node + 1) {
			err = -ENOSPC;
			goto sync_out;
		}
		dn.ofs_in_node = end_offset;
	}

	if (pgofs >= end)
		goto sync_out;
	else if (dn.ofs_in_node < end_offset)
		goto next_block;

	f2fs_put_dnode(&dn);

	if (create) {
		__do_map_lock(sbi, flag, false);
		f2fs_balance_fs(sbi, dn.node_changed);
	}
	goto next_dnode;

sync_out:
	f2fs_put_dnode(&dn);
unlock_out:
	if (create) {
		__do_map_lock(sbi, flag, false);
		f2fs_balance_fs(sbi, dn.node_changed);
	}
out:
	trace_f2fs_map_blocks(inode, map, err);
	return err;
}

static int __get_data_block(struct inode *inode, sector_t iblock,
			struct buffer_head *bh, int create, int flag,
			pgoff_t *next_pgofs)
{
	struct f2fs_map_blocks map;
	int err;

	map.m_lblk = iblock;
	map.m_len = bh->b_size >> inode->i_blkbits;
	map.m_next_pgofs = next_pgofs;

	err = f2fs_map_blocks(inode, &map, create, flag);
	if (!err) {
		map_bh(bh, inode->i_sb, map.m_pblk);
		bh->b_state = (bh->b_state & ~F2FS_MAP_FLAGS) | map.m_flags;
		bh->b_size = (u64)map.m_len << inode->i_blkbits;
	}
	return err;
}

static int get_data_block(struct inode *inode, sector_t iblock,
			struct buffer_head *bh_result, int create, int flag,
			pgoff_t *next_pgofs)
{
	return __get_data_block(inode, iblock, bh_result, create,
							flag, next_pgofs);
}

static int get_data_block_dio(struct inode *inode, sector_t iblock,
			struct buffer_head *bh_result, int create)
{
	return __get_data_block(inode, iblock, bh_result, create,
						F2FS_GET_BLOCK_DIO, NULL);
}

static int get_data_block_bmap(struct inode *inode, sector_t iblock,
			struct buffer_head *bh_result, int create)
{
	/* Block number less than F2FS MAX BLOCKS */
	if (unlikely(iblock >= F2FS_I_SB(inode)->max_file_blocks))
		return -EFBIG;

	return __get_data_block(inode, iblock, bh_result, create,
						F2FS_GET_BLOCK_BMAP, NULL);
}

static inline sector_t logical_to_blk(struct inode *inode, loff_t offset)
{
	return (offset >> inode->i_blkbits);
}

static inline loff_t blk_to_logical(struct inode *inode, sector_t blk)
{
	return (blk << inode->i_blkbits);
}

int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
		u64 start, u64 len)
{
	struct buffer_head map_bh;
	sector_t start_blk, last_blk;
	pgoff_t next_pgofs;
	u64 logical = 0, phys = 0, size = 0;
	u32 flags = 0;
	int ret = 0;

	ret = fiemap_check_flags(fieinfo, FIEMAP_FLAG_SYNC);
	if (ret)
		return ret;

	if (f2fs_has_inline_data(inode)) {
		ret = f2fs_inline_data_fiemap(inode, fieinfo, start, len);
		if (ret != -EAGAIN)
			return ret;
	}

	inode_lock(inode);

	if (logical_to_blk(inode, len) == 0)
		len = blk_to_logical(inode, 1);

	start_blk = logical_to_blk(inode, start);
	last_blk = logical_to_blk(inode, start + len - 1);

next:
	memset(&map_bh, 0, sizeof(struct buffer_head));
	map_bh.b_size = len;

	ret = get_data_block(inode, start_blk, &map_bh, 0,
					F2FS_GET_BLOCK_FIEMAP, &next_pgofs);
	if (ret)
		goto out;

	/* HOLE */
	if (!buffer_mapped(&map_bh)) {
		start_blk = next_pgofs;

		if (blk_to_logical(inode, start_blk) < blk_to_logical(inode,
					F2FS_I_SB(inode)->max_file_blocks))
			goto prep_next;

		flags |= FIEMAP_EXTENT_LAST;
	}

	if (size) {
		if (f2fs_encrypted_inode(inode))
			flags |= FIEMAP_EXTENT_DATA_ENCRYPTED;

		ret = fiemap_fill_next_extent(fieinfo, logical,
				phys, size, flags);
	}

	if (start_blk > last_blk || ret)
		goto out;

	logical = blk_to_logical(inode, start_blk);
	phys = blk_to_logical(inode, map_bh.b_blocknr);
	size = map_bh.b_size;
	flags = 0;
	if (buffer_unwritten(&map_bh))
		flags = FIEMAP_EXTENT_UNWRITTEN;

	start_blk += logical_to_blk(inode, size);

prep_next:
	cond_resched();
	if (fatal_signal_pending(current))
		ret = -EINTR;
	else
		goto next;
out:
	if (ret == 1)
		ret = 0;

	inode_unlock(inode);
	return ret;
}

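/*
 * Allocate a read bio targeting @blkaddr; for encrypted regular files, also
 * set up the fscrypt context that f2fs_read_end_io() uses for decryption.
 */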
static struct bio *f2fs_grab_bio(struct inode *inode, block_t blkaddr,
				 unsigned nr_pages)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct fscrypt_ctx *ctx = NULL;
	struct bio *bio;

	if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode)) {
		ctx = fscrypt_get_ctx(inode, GFP_NOFS);
		if (IS_ERR(ctx))
			return ERR_CAST(ctx);

		/* wait for the page to be moved by cleaning */
		f2fs_wait_on_encrypted_page_writeback(sbi, blkaddr);
	}

	bio = bio_alloc(GFP_KERNEL, min_t(int, nr_pages, BIO_MAX_PAGES));
	if (!bio) {
		if (ctx)
			fscrypt_release_ctx(ctx);
		return ERR_PTR(-ENOMEM);
	}
	f2fs_target_device(sbi, blkaddr, bio);
	bio->bi_end_io = f2fs_read_end_io;
	bio->bi_private = ctx;

	return bio;
}

/*
 * This function was originally taken from fs/mpage.c, and customized for f2fs.
 * The major change is that block_size == page_size in f2fs by default.
 */
static int f2fs_mpage_readpages(struct address_space *mapping,
			struct list_head *pages, struct page *page,
			unsigned nr_pages)
{
	struct bio *bio = NULL;
	unsigned page_idx;
	sector_t last_block_in_bio = 0;
	struct inode *inode = mapping->host;
	const unsigned blkbits = inode->i_blkbits;
	const unsigned blocksize = 1 << blkbits;
	sector_t block_in_file;
	sector_t last_block;
	sector_t last_block_in_file;
	sector_t block_nr;
	struct f2fs_map_blocks map;

	map.m_pblk = 0;
	map.m_lblk = 0;
	map.m_len = 0;
	map.m_flags = 0;
	map.m_next_pgofs = NULL;

	for (page_idx = 0; nr_pages; page_idx++, nr_pages--) {

		if (pages) {
			page = list_last_entry(pages, struct page, lru);

			prefetchw(&page->flags);
			list_del(&page->lru);
			if (add_to_page_cache_lru(page, mapping,
						  page->index,
						  readahead_gfp_mask(mapping)))
				goto next_page;
		}

		block_in_file = (sector_t)page->index;
		last_block = block_in_file + nr_pages;
		last_block_in_file = (i_size_read(inode) + blocksize - 1) >>
								blkbits;
		if (last_block > last_block_in_file)
			last_block = last_block_in_file;

		/*
		 * Map blocks using the previous result first.
		 */
		if ((map.m_flags & F2FS_MAP_MAPPED) &&
				block_in_file > map.m_lblk &&
				block_in_file < (map.m_lblk + map.m_len))
			goto got_it;

		/*
		 * Then do more f2fs_map_blocks() calls until we are
		 * done with this page.
		 */
		map.m_flags = 0;

		if (block_in_file < last_block) {
			map.m_lblk = block_in_file;
			map.m_len = last_block - block_in_file;

			if (f2fs_map_blocks(inode, &map, 0,
						F2FS_GET_BLOCK_READ))
				goto set_error_page;
		}
got_it:
		if ((map.m_flags & F2FS_MAP_MAPPED)) {
			block_nr = map.m_pblk + block_in_file - map.m_lblk;
			SetPageMappedToDisk(page);

			if (!PageUptodate(page) && !cleancache_get_page(page)) {
				SetPageUptodate(page);
				goto confused;
			}
		} else {
			zero_user_segment(page, 0, PAGE_SIZE);
			if (!PageUptodate(page))
				SetPageUptodate(page);
			unlock_page(page);
			goto next_page;
		}

		/*
		 * This page will go to BIO.  Do we need to send this
		 * BIO off first?
		 */
		if (bio && (last_block_in_bio != block_nr - 1 ||
			!__same_bdev(F2FS_I_SB(inode), block_nr, bio))) {
submit_and_realloc:
			__submit_bio(F2FS_I_SB(inode), bio, DATA);
			bio = NULL;
		}
		if (bio == NULL) {
			bio = f2fs_grab_bio(inode, block_nr, nr_pages);
			if (IS_ERR(bio)) {
				bio = NULL;
				goto set_error_page;
			}
			bio_set_op_attrs(bio, REQ_OP_READ, 0);
		}

		if (bio_add_page(bio, page, blocksize, 0) < blocksize)
			goto submit_and_realloc;

		last_block_in_bio = block_nr;
		goto next_page;
set_error_page:
		SetPageError(page);
		zero_user_segment(page, 0, PAGE_SIZE);
		unlock_page(page);
		goto next_page;
confused:
		if (bio) {
			__submit_bio(F2FS_I_SB(inode), bio, DATA);
			bio = NULL;
		}
		unlock_page(page);
next_page:
		if (pages)
			put_page(page);
	}
	BUG_ON(pages && !list_empty(pages));
	if (bio)
		__submit_bio(F2FS_I_SB(inode), bio, DATA);
	return 0;
}

static int f2fs_read_data_page(struct file *file, struct page *page)
{
	struct inode *inode = page->mapping->host;
	int ret = -EAGAIN;

	trace_f2fs_readpage(page, DATA);

	/* If the file has inline data, try to read it directly */
	if (f2fs_has_inline_data(inode))
		ret = f2fs_read_inline_data(inode, page);
	if (ret == -EAGAIN)
		ret = f2fs_mpage_readpages(page->mapping, NULL, page, 1);
	return ret;
}

static int f2fs_read_data_pages(struct file *file,
			struct address_space *mapping,
			struct list_head *pages, unsigned nr_pages)
{
	struct inode *inode = file->f_mapping->host;
	struct page *page = list_last_entry(pages, struct page, lru);

	trace_f2fs_readpages(inode, page, nr_pages);

	/* If the file has inline data, skip readpages */
	if (f2fs_has_inline_data(inode))
		return 0;

	return f2fs_mpage_readpages(mapping, pages, NULL, nr_pages);
}

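/*
 * Encrypt one data page into a bounce page before write-out; on -ENOMEM,
 * flush pending merged writes, wait briefly, and retry with __GFP_NOFAIL.
 */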
static int encrypt_one_page(struct f2fs_io_info *fio)
{
	struct inode *inode = fio->page->mapping->host;
	gfp_t gfp_flags = GFP_NOFS;

	if (!f2fs_encrypted_inode(inode) || !S_ISREG(inode->i_mode))
		return 0;

	/* wait for GCed encrypted page writeback */
	f2fs_wait_on_encrypted_page_writeback(fio->sbi, fio->old_blkaddr);

retry_encrypt:
	fio->encrypted_page = fscrypt_encrypt_page(inode, fio->page,
			PAGE_SIZE, 0, fio->page->index, gfp_flags);
	if (!IS_ERR(fio->encrypted_page))
		return 0;

	/* flush pending IOs and wait for a while in the ENOMEM case */
	if (PTR_ERR(fio->encrypted_page) == -ENOMEM) {
		f2fs_flush_merged_writes(fio->sbi);
		congestion_wait(BLK_RW_ASYNC, HZ/50);
		gfp_flags |= __GFP_NOFAIL;
		goto retry_encrypt;
	}
	return PTR_ERR(fio->encrypted_page);
}

static inline bool need_inplace_update(struct f2fs_io_info *fio)
{
	struct inode *inode = fio->page->mapping->host;

	if (S_ISDIR(inode->i_mode) || f2fs_is_atomic_file(inode))
		return false;
	if (is_cold_data(fio->page))
		return false;
	if (IS_ATOMIC_WRITTEN_PAGE(fio->page))
		return false;

	return need_inplace_update_policy(inode, fio);
}

static inline bool valid_ipu_blkaddr(struct f2fs_io_info *fio)
{
	if (fio->old_blkaddr == NEW_ADDR)
		return false;
	if (fio->old_blkaddr == NULL_ADDR)
		return false;
	return true;
}

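/*
 * Write one dirty data page, either in place (IPU) when a valid old block
 * address exists and the in-place-update policy allows it, or to a newly
 * allocated block (OPU) on the LFS write path.
 */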
int do_write_data_page(struct f2fs_io_info *fio)
{
	struct page *page = fio->page;
	struct inode *inode = page->mapping->host;
	struct dnode_of_data dn;
	struct extent_info ei = {0,0,0};
	bool ipu_force = false;
	int err = 0;

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	if (need_inplace_update(fio) &&
			f2fs_lookup_extent_cache(inode, page->index, &ei)) {
		fio->old_blkaddr = ei.blk + page->index - ei.fofs;

		if (valid_ipu_blkaddr(fio)) {
			ipu_force = true;
			fio->need_lock = LOCK_DONE;
			goto got_it;
		}
	}

	/* avoid deadlock between page->lock and f2fs_lock_op() */
	if (fio->need_lock == LOCK_REQ && !f2fs_trylock_op(fio->sbi))
		return -EAGAIN;

	err = get_dnode_of_data(&dn, page->index, LOOKUP_NODE);
	if (err)
		goto out;

	fio->old_blkaddr = dn.data_blkaddr;

	/* This page is already truncated */
	if (fio->old_blkaddr == NULL_ADDR) {
		ClearPageUptodate(page);
		goto out_writepage;
	}
got_it:
	/*
	 * If the current allocation needs SSR, it is better to do in-place
	 * writes for the updated data.
	 */
	if (ipu_force || (valid_ipu_blkaddr(fio) && need_inplace_update(fio))) {
		err = encrypt_one_page(fio);
		if (err)
			goto out_writepage;

		set_page_writeback(page);
		f2fs_put_dnode(&dn);
		if (fio->need_lock == LOCK_REQ)
			f2fs_unlock_op(fio->sbi);
		err = rewrite_data_page(fio);
		trace_f2fs_do_write_data_page(fio->page, IPU);
		set_inode_flag(inode, FI_UPDATE_WRITE);
		return err;
	}

	if (fio->need_lock == LOCK_RETRY) {
		if (!f2fs_trylock_op(fio->sbi)) {
			err = -EAGAIN;
			goto out_writepage;
		}
		fio->need_lock = LOCK_REQ;
	}

	err = encrypt_one_page(fio);
	if (err)
		goto out_writepage;

	set_page_writeback(page);

	/* LFS mode write path */
	write_data_page(&dn, fio);
	trace_f2fs_do_write_data_page(page, OPU);
	set_inode_flag(inode, FI_APPEND_WRITE);
	if (page->index == 0)
		set_inode_flag(inode, FI_FIRST_BLOCK_WRITTEN);
out_writepage:
	f2fs_put_dnode(&dn);
out:
	if (fio->need_lock == LOCK_REQ)
		f2fs_unlock_op(fio->sbi);
	return err;
}

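/*
 * Write back a single page.  Returns 0 and reports via *submitted whether a
 * bio was actually issued; the redirty path may instead return
 * AOP_WRITEPAGE_ACTIVATE to skip the page without an error.
 */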
static int __write_data_page(struct page *page, bool *submitted,
				struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	loff_t i_size = i_size_read(inode);
	const pgoff_t end_index = ((unsigned long long) i_size)
							>> PAGE_SHIFT;
	loff_t psize = (page->index + 1) << PAGE_SHIFT;
	unsigned offset = 0;
	bool need_balance_fs = false;
	int err = 0;
	struct f2fs_io_info fio = {
		.sbi = sbi,
		.type = DATA,
		.op = REQ_OP_WRITE,
		.op_flags = wbc_to_write_flags(wbc),
		.old_blkaddr = NULL_ADDR,
		.page = page,
		.encrypted_page = NULL,
		.submitted = false,
		.need_lock = LOCK_RETRY,
	};

	trace_f2fs_writepage(page, DATA);

	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
		goto redirty_out;

	if (page->index < end_index)
		goto write;

	/*
	 * If the offset is out of range of the file size,
	 * this page does not have to be written to disk.
	 */
	offset = i_size & (PAGE_SIZE - 1);
	if ((page->index >= end_index + 1) || !offset)
		goto out;

	zero_user_segment(page, offset, PAGE_SIZE);
write:
	if (f2fs_is_drop_cache(inode))
		goto out;
	/* we should not write the 0'th page, which holds the journal header */
	if (f2fs_is_volatile_file(inode) && (!page->index ||
			(!wbc->for_reclaim &&
			available_free_memory(sbi, BASE_CHECK))))
		goto redirty_out;

	/* we should bypass data pages to let the kworker jobs proceed */
	if (unlikely(f2fs_cp_error(sbi))) {
		mapping_set_error(page->mapping, -EIO);
		goto out;
	}

	/* Dentry blocks are controlled by checkpoint */
	if (S_ISDIR(inode->i_mode)) {
		fio.need_lock = LOCK_DONE;
		err = do_write_data_page(&fio);
		goto done;
	}

	if (!wbc->for_reclaim)
		need_balance_fs = true;
	else if (has_not_enough_free_secs(sbi, 0, 0))
		goto redirty_out;
	else
		set_inode_flag(inode, FI_HOT_DATA);

	err = -EAGAIN;
	if (f2fs_has_inline_data(inode)) {
		err = f2fs_write_inline_data(inode, page);
		if (!err)
			goto out;
	}

	if (err == -EAGAIN) {
		err = do_write_data_page(&fio);
		if (err == -EAGAIN) {
			fio.need_lock = LOCK_REQ;
			err = do_write_data_page(&fio);
		}
	}
	if (F2FS_I(inode)->last_disk_size < psize)
		F2FS_I(inode)->last_disk_size = psize;

done:
	if (err && err != -ENOENT)
		goto redirty_out;

out:
	inode_dec_dirty_pages(inode);
	if (err)
		ClearPageUptodate(page);

	if (wbc->for_reclaim) {
		f2fs_submit_merged_write_cond(sbi, inode, 0, page->index, DATA);
		clear_inode_flag(inode, FI_HOT_DATA);
		remove_dirty_inode(inode);
		submitted = NULL;
	}

	unlock_page(page);
	if (!S_ISDIR(inode->i_mode))
		f2fs_balance_fs(sbi, need_balance_fs);

	if (unlikely(f2fs_cp_error(sbi))) {
		f2fs_submit_merged_write(sbi, DATA);
		submitted = NULL;
	}

	if (submitted)
		*submitted = fio.submitted;

	return 0;

redirty_out:
	redirty_page_for_writepage(wbc, page);
	if (!err)
		return AOP_WRITEPAGE_ACTIVATE;
	unlock_page(page);
	return err;
}

static int f2fs_write_data_page(struct page *page,
					struct writeback_control *wbc)
{
	return __write_data_page(page, NULL, wbc);
}

/*
 * This function was copied from write_cache_pages in mm/page-writeback.c.
 * The major change is making the write step of cold data pages separate
 * from warm/hot data pages.
 */
static int f2fs_write_cache_pages(struct address_space *mapping,
					struct writeback_control *wbc)
{
	int ret = 0;
	int done = 0;
	struct pagevec pvec;
	int nr_pages;
	pgoff_t uninitialized_var(writeback_index);
	pgoff_t index;
	pgoff_t end;		/* Inclusive */
	pgoff_t done_index;
	pgoff_t last_idx = ULONG_MAX;
	int cycled;
	int range_whole = 0;
	int tag;

	pagevec_init(&pvec, 0);

	if (get_dirty_pages(mapping->host) <=
				SM_I(F2FS_M_SB(mapping))->min_hot_blocks)
		set_inode_flag(mapping->host, FI_HOT_DATA);
	else
		clear_inode_flag(mapping->host, FI_HOT_DATA);

	if (wbc->range_cyclic) {
		writeback_index = mapping->writeback_index; /* prev offset */
		index = writeback_index;
		if (index == 0)
			cycled = 1;
		else
			cycled = 0;
		end = -1;
	} else {
		index = wbc->range_start >> PAGE_SHIFT;
		end = wbc->range_end >> PAGE_SHIFT;
		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
			range_whole = 1;
		cycled = 1; /* ignore range_cyclic tests */
	}
	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
		tag = PAGECACHE_TAG_TOWRITE;
	else
		tag = PAGECACHE_TAG_DIRTY;
retry:
	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
		tag_pages_for_writeback(mapping, index, end);
	done_index = index;
	while (!done && (index <= end)) {
		int i;

		nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag,
			      min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1);
		if (nr_pages == 0)
			break;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];
			bool submitted = false;

			if (page->index > end) {
				done = 1;
				break;
			}

			done_index = page->index;
retry_write:
			lock_page(page);

			if (unlikely(page->mapping != mapping)) {
continue_unlock:
				unlock_page(page);
				continue;
			}

			if (!PageDirty(page)) {
				/* someone wrote it for us */
				goto continue_unlock;
			}

			if (PageWriteback(page)) {
				if (wbc->sync_mode != WB_SYNC_NONE)
					f2fs_wait_on_page_writeback(page,
								DATA, true);
				else
					goto continue_unlock;
			}

			BUG_ON(PageWriteback(page));
			if (!clear_page_dirty_for_io(page))
				goto continue_unlock;

			ret = __write_data_page(page, &submitted, wbc);
			if (unlikely(ret)) {
				/*
				 * keep nr_to_write, since vfs uses this to
				 * get # of written pages.
				 */
				if (ret == AOP_WRITEPAGE_ACTIVATE) {
					unlock_page(page);
					ret = 0;
					continue;
				} else if (ret == -EAGAIN) {
					ret = 0;
					if (wbc->sync_mode == WB_SYNC_ALL) {
						cond_resched();
						congestion_wait(BLK_RW_ASYNC,
									HZ/50);
						goto retry_write;
					}
					continue;
				}
				done_index = page->index + 1;
				done = 1;
				break;
			} else if (submitted) {
				last_idx = page->index;
			}

			/* give a priority to WB_SYNC threads */
			if ((atomic_read(&F2FS_M_SB(mapping)->wb_sync_req) ||
					--wbc->nr_to_write <= 0) &&
					wbc->sync_mode == WB_SYNC_NONE) {
				done = 1;
				break;
			}
		}
		pagevec_release(&pvec);
		cond_resched();
	}

	if (!cycled && !done) {
		cycled = 1;
		index = 0;
		end = writeback_index - 1;
		goto retry;
	}
	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
		mapping->writeback_index = done_index;

	if (last_idx != ULONG_MAX)
		f2fs_submit_merged_write_cond(F2FS_M_SB(mapping), mapping->host,
						0, last_idx, DATA);

	return ret;
}

static int f2fs_write_data_pages(struct address_space *mapping,
			    struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct blk_plug plug;
	int ret;

	/* deal with chardevs and other special files */
	if (!mapping->a_ops->writepage)
		return 0;

	/* skip writing if there is no dirty page in this inode */
	if (!get_dirty_pages(inode) && wbc->sync_mode == WB_SYNC_NONE)
		return 0;

	/* during POR, we don't need to trigger writepage at all. */
	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
		goto skip_write;

	if (S_ISDIR(inode->i_mode) && wbc->sync_mode == WB_SYNC_NONE &&
			get_dirty_pages(inode) < nr_pages_to_skip(sbi, DATA) &&
			available_free_memory(sbi, DIRTY_DENTS))
		goto skip_write;

	/* skip writing during file defragmentation */
	if (is_inode_flag_set(inode, FI_DO_DEFRAG))
		goto skip_write;

	trace_f2fs_writepages(mapping->host, wbc, DATA);

	/* to avoid splitting IOs due to mixed WB_SYNC_ALL and WB_SYNC_NONE */
	if (wbc->sync_mode == WB_SYNC_ALL)
		atomic_inc(&sbi->wb_sync_req);
	else if (atomic_read(&sbi->wb_sync_req))
		goto skip_write;

	blk_start_plug(&plug);
	ret = f2fs_write_cache_pages(mapping, wbc);
	blk_finish_plug(&plug);

	if (wbc->sync_mode == WB_SYNC_ALL)
		atomic_dec(&sbi->wb_sync_req);
	/*
	 * if some pages were truncated, we cannot guarantee that
	 * mapping->host will detect pending bios.
	 */

	remove_dirty_inode(inode);
	return ret;

skip_write:
	wbc->pages_skipped += get_dirty_pages(inode);
	trace_f2fs_writepages(mapping->host, wbc, DATA);
	return 0;
}

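/* Truncate any blocks instantiated beyond i_size by a failed or short write. */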
static void f2fs_write_failed(struct address_space *mapping, loff_t to)
{
	struct inode *inode = mapping->host;
	loff_t i_size = i_size_read(inode);

	if (to > i_size) {
		down_write(&F2FS_I(inode)->i_mmap_sem);
		truncate_pagecache(inode, i_size);
		truncate_blocks(inode, i_size, true);
		up_write(&F2FS_I(inode)->i_mmap_sem);
	}
}

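/*
 * Resolve the block address backing the page that write_begin will touch,
 * reserving a new block or converting inline data when necessary; the
 * PRE_AIO map lock is taken only when allocation may be needed.
 */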
1829 1830 1831 1832 1833 1834 1835 1836
static int prepare_write_begin(struct f2fs_sb_info *sbi,
			struct page *page, loff_t pos, unsigned len,
			block_t *blk_addr, bool *node_changed)
{
	struct inode *inode = page->mapping->host;
	pgoff_t index = page->index;
	struct dnode_of_data dn;
	struct page *ipage;
1837
	bool locked = false;
1838
	struct extent_info ei = {0,0,0};
1839 1840
	int err = 0;

1841 1842 1843 1844
	/*
	 * we already allocated all the blocks, so we don't need to get
	 * the block addresses when there is no need to fill the page.
	 */
1845 1846
	if (!f2fs_has_inline_data(inode) && len == PAGE_SIZE &&
			!is_inode_flag_set(inode, FI_NO_PREALLOC))
1847 1848
		return 0;

1849
	if (f2fs_has_inline_data(inode) ||
1850
			(pos & PAGE_MASK) >= i_size_read(inode)) {
1851
		__do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, true);
1852 1853 1854
		locked = true;
	}
restart:
1855 1856 1857 1858 1859 1860 1861 1862 1863 1864
	/* check inline_data */
	ipage = get_node_page(sbi, inode->i_ino);
	if (IS_ERR(ipage)) {
		err = PTR_ERR(ipage);
		goto unlock_out;
	}

	set_new_dnode(&dn, inode, ipage, ipage, 0);

	if (f2fs_has_inline_data(inode)) {
C
Chao Yu 已提交
1865
		if (pos + len <= MAX_INLINE_DATA(inode)) {
1866
			read_inline_data(page, ipage);
1867
			set_inode_flag(inode, FI_DATA_EXIST);
1868 1869
			if (inode->i_nlink)
				set_inline_node(ipage);
1870 1871 1872
		} else {
			err = f2fs_convert_inline_page(&dn, page);
			if (err)
1873 1874 1875 1876 1877 1878 1879 1880 1881 1882 1883 1884
				goto out;
			if (dn.data_blkaddr == NULL_ADDR)
				err = f2fs_get_block(&dn, index);
		}
	} else if (locked) {
		err = f2fs_get_block(&dn, index);
	} else {
		if (f2fs_lookup_extent_cache(inode, index, &ei)) {
			dn.data_blkaddr = ei.blk + index - ei.fofs;
		} else {
			/* hole case */
			err = get_dnode_of_data(&dn, index, LOOKUP_NODE);
			if (err || dn.data_blkaddr == NULL_ADDR) {
				f2fs_put_dnode(&dn);
				__do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO,
								true);
				locked = true;
				goto restart;
			}
		}
	}

	/* convert_inline_page can make node_changed */
	*blk_addr = dn.data_blkaddr;
	*node_changed = dn.node_changed;
out:
	f2fs_put_dnode(&dn);
unlock_out:
	if (locked)
		__do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, false);
	return err;
}

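/*
 * .write_begin: pin the target page, resolve its block address, and
 * for a partial write over existing data read the old block in first.
 */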
static int f2fs_write_begin(struct file *file, struct address_space *mapping,
		loff_t pos, unsigned len, unsigned flags,
		struct page **pagep, void **fsdata)
{
	struct inode *inode = mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct page *page = NULL;
	pgoff_t index = ((unsigned long long) pos) >> PAGE_SHIFT;
	bool need_balance = false;
	block_t blkaddr = NULL_ADDR;
	int err = 0;

	trace_f2fs_write_begin(inode, pos, len, flags);

	/*
	 * We should check this at this moment to avoid deadlock on inode page
	 * and #0 page. The locking rule for inline_data conversion should be:
	 * lock_page(page #0) -> lock_page(inode_page)
	 */
	if (index != 0) {
		err = f2fs_convert_inline_inode(inode);
		if (err)
			goto fail;
	}
repeat:
	/*
	 * Do not use grab_cache_page_write_begin() to avoid deadlock due to
	 * wait_for_stable_page. We will wait on that below with our IO control.
	 */
	page = pagecache_get_page(mapping, index,
				FGP_LOCK | FGP_WRITE | FGP_CREAT, GFP_NOFS);
	if (!page) {
		err = -ENOMEM;
		goto fail;
	}

	*pagep = page;

	err = prepare_write_begin(sbi, page, pos, len,
					&blkaddr, &need_balance);
	if (err)
		goto fail;

	if (need_balance && has_not_enough_free_secs(sbi, 0, 0)) {
		unlock_page(page);
		f2fs_balance_fs(sbi, true);
		lock_page(page);
		if (page->mapping != mapping) {
			/* The page got truncated from under us */
			f2fs_put_page(page, 1);
			goto repeat;
		}
	}

	f2fs_wait_on_page_writeback(page, DATA, false);

	/* wait for GCed encrypted page writeback */
	if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode))
		f2fs_wait_on_encrypted_page_writeback(sbi, blkaddr);

	if (len == PAGE_SIZE || PageUptodate(page))
		return 0;

	if (!(pos & (PAGE_SIZE - 1)) && (pos + len) >= i_size_read(inode)) {
		zero_user_segment(page, len, PAGE_SIZE);
		return 0;
	}

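	/* fill the page: zero a freshly allocated block, else read the old one */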
	if (blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_SIZE);
		SetPageUptodate(page);
	} else {
		struct bio *bio;

		bio = f2fs_grab_bio(inode, blkaddr, 1);
		if (IS_ERR(bio)) {
			err = PTR_ERR(bio);
			goto fail;
		}
		bio->bi_opf = REQ_OP_READ;
		if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
			bio_put(bio);
			err = -EFAULT;
			goto fail;
		}

		__submit_bio(sbi, bio, DATA);

		lock_page(page);
		if (unlikely(page->mapping != mapping)) {
			f2fs_put_page(page, 1);
			goto repeat;
		}
		if (unlikely(!PageUptodate(page))) {
			err = -EIO;
			goto fail;
		}
	}
	return 0;

fail:
	f2fs_put_page(page, 1);
	f2fs_write_failed(mapping, pos + len);
	return err;
}

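/*
 * .write_end: mark the page dirty, extend i_size if the write went past
 * end-of-file, and return the number of bytes actually accepted.
 */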
static int f2fs_write_end(struct file *file,
			struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	struct inode *inode = page->mapping->host;

	trace_f2fs_write_end(inode, pos, len, copied);

	/*
	 * This should come from len == PAGE_SIZE, so we expect copied to be
	 * PAGE_SIZE. Otherwise, treat it as zero copied and let
	 * generic_perform_write() try to copy the data again via copied = 0.
	 */
	if (!PageUptodate(page)) {
		if (unlikely(copied != len))
			copied = 0;
		else
			SetPageUptodate(page);
	}
	if (!copied)
		goto unlock_out;

	set_page_dirty(page);

	if (pos + copied > i_size_read(inode))
		f2fs_i_size_write(inode, pos + copied);
unlock_out:
	f2fs_put_page(page, 1);
	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
	return copied;
}

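/*
 * Direct I/O requires block-aligned offsets and iovecs: e.g. with 4KB
 * blocks, an offset of 4096 is accepted while 4608 gets -EINVAL.
 */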
static int check_direct_IO(struct inode *inode, struct iov_iter *iter,
			   loff_t offset)
{
	unsigned blocksize_mask = inode->i_sb->s_blocksize - 1;

	if (offset & blocksize_mask)
		return -EINVAL;

	if (iov_iter_alignment(iter) & blocksize_mask)
		return -EINVAL;

	return 0;
}

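/*
 * .direct_IO: validate alignment, fall back to buffered I/O where it is
 * forced (e.g. encrypted regular files), then hand off to
 * blockdev_direct_IO() under the per-direction dio_rwsem.
 */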
static ssize_t f2fs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
{
	struct address_space *mapping = iocb->ki_filp->f_mapping;
	struct inode *inode = mapping->host;
	size_t count = iov_iter_count(iter);
	loff_t offset = iocb->ki_pos;
	int rw = iov_iter_rw(iter);
	int err;

	err = check_direct_IO(inode, iter, offset);
	if (err)
		return err;

	if (__force_buffered_io(inode, rw))
		return 0;

	trace_f2fs_direct_IO_enter(inode, offset, count, rw);

	down_read(&F2FS_I(inode)->dio_rwsem[rw]);
	err = blockdev_direct_IO(iocb, inode, iter, get_data_block_dio);
	up_read(&F2FS_I(inode)->dio_rwsem[rw]);

	if (rw == WRITE) {
		if (err > 0)
			set_inode_flag(inode, FI_UPDATE_WRITE);
		else if (err < 0)
			f2fs_write_failed(mapping, offset + count);
	}

	trace_f2fs_direct_IO_exit(inode, offset, count, rw, err);

	return err;
}

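/*
 * .invalidatepage: fix up dirty-page accounting and strip private state
 * from a page leaving the page cache.
 */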
void f2fs_invalidate_page(struct page *page, unsigned int offset,
							unsigned int length)
{
	struct inode *inode = page->mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

	if (inode->i_ino >= F2FS_ROOT_INO(sbi) &&
		(offset % PAGE_SIZE || length != PAGE_SIZE))
		return;

	if (PageDirty(page)) {
		if (inode->i_ino == F2FS_META_INO(sbi)) {
			dec_page_count(sbi, F2FS_DIRTY_META);
		} else if (inode->i_ino == F2FS_NODE_INO(sbi)) {
			dec_page_count(sbi, F2FS_DIRTY_NODES);
		} else {
			inode_dec_dirty_pages(inode);
			remove_dirty_inode(inode);
		}
	}

	/* This is an atomically written page, so keep it Private */
	if (IS_ATOMIC_WRITTEN_PAGE(page))
		return drop_inmem_page(inode, page);

	set_page_private(page, 0);
	ClearPagePrivate(page);
}

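/*
 * .releasepage: a page may surrender its private state only when it is
 * neither dirty nor part of an atomic write; return 1 if released.
 */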
int f2fs_release_page(struct page *page, gfp_t wait)
{
	/* If this is a dirty page, keep PagePrivate */
	if (PageDirty(page))
		return 0;

	/* This is an atomically written page, so keep it Private */
	if (IS_ATOMIC_WRITTEN_PAGE(page))
		return 0;

	set_page_private(page, 0);
	ClearPagePrivate(page);
	return 1;
}

/*
 * This was copied from __set_page_dirty_buffers, which gives higher
 * performance on very high-speed storage (e.g., pmem).
 */
void f2fs_set_page_dirty_nobuffers(struct page *page)
{
	struct address_space *mapping = page->mapping;
	unsigned long flags;

	if (unlikely(!mapping))
		return;

	spin_lock(&mapping->private_lock);
	lock_page_memcg(page);
	SetPageDirty(page);
	spin_unlock(&mapping->private_lock);

	spin_lock_irqsave(&mapping->tree_lock, flags);
	WARN_ON_ONCE(!PageUptodate(page));
	account_page_dirtied(page, mapping);
	radix_tree_tag_set(&mapping->page_tree,
			page_index(page), PAGECACHE_TAG_DIRTY);
	spin_unlock_irqrestore(&mapping->tree_lock, flags);
	unlock_page_memcg(page);

	__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
}

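/*
 * .set_page_dirty: pages of atomic files are registered on the in-memory
 * list instead of being dirtied; everything else goes through
 * f2fs_set_page_dirty_nobuffers() plus dirty-inode bookkeeping.
 */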
static int f2fs_set_data_page_dirty(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct inode *inode = mapping->host;

	trace_f2fs_set_page_dirty(page, DATA);

	if (!PageUptodate(page))
		SetPageUptodate(page);

	if (f2fs_is_atomic_file(inode) && !f2fs_is_commit_atomic_write(inode)) {
		if (!IS_ATOMIC_WRITTEN_PAGE(page)) {
			register_inmem_page(inode, page);
			return 1;
		}
		/*
		 * Previously, this page has been registered, so we just
		 * return here.
		 */
		return 0;
	}

	if (!PageDirty(page)) {
		f2fs_set_page_dirty_nobuffers(page);
		update_dirty_page(inode, page);
		return 1;
	}
	return 0;
}

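/*
 * .bmap: flush dirty pages first so blocks are actually allocated, then
 * let generic_block_bmap() translate the logical block number.
 */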
static sector_t f2fs_bmap(struct address_space *mapping, sector_t block)
{
	struct inode *inode = mapping->host;

	if (f2fs_has_inline_data(inode))
		return 0;

	/* make sure allocating whole blocks */
	if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
		filemap_write_and_wait(mapping);

	return generic_block_bmap(mapping, block, get_data_block_bmap);
}

#ifdef CONFIG_MIGRATION
#include <linux/migrate.h>

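/*
 * .migratepage: relocate a page for memory compaction; an atomic-write
 * page additionally needs inmem_lock and its inmem_pages entry
 * repointed at the new page.
 */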
int f2fs_migrate_page(struct address_space *mapping,
		struct page *newpage, struct page *page, enum migrate_mode mode)
{
	int rc, extra_count;
	struct f2fs_inode_info *fi = F2FS_I(mapping->host);
	bool atomic_written = IS_ATOMIC_WRITTEN_PAGE(page);

	BUG_ON(PageWriteback(page));

	/* migrating an atomic written page is safe with the inmem_lock held */
	if (atomic_written) {
		if (mode != MIGRATE_SYNC)
			return -EBUSY;
		if (!mutex_trylock(&fi->inmem_lock))
			return -EAGAIN;
	}

	/*
	 * A reference is expected if PagePrivate is set when the mapping is
	 * moved; however, F2FS breaks this rule to maintain dirty page counts
	 * when truncating pages. Adjusting 'extra_count' here makes it work.
	 */
	extra_count = (atomic_written ? 1 : 0) - page_has_private(page);
	rc = migrate_page_move_mapping(mapping, newpage,
				page, NULL, mode, extra_count);
	if (rc != MIGRATEPAGE_SUCCESS) {
		if (atomic_written)
			mutex_unlock(&fi->inmem_lock);
		return rc;
	}

	if (atomic_written) {
		struct inmem_pages *cur;
		list_for_each_entry(cur, &fi->inmem_pages, list)
			if (cur->page == page) {
				cur->page = newpage;
				break;
			}
		mutex_unlock(&fi->inmem_lock);
		put_page(page);
		get_page(newpage);
	}

	if (PagePrivate(page))
		SetPagePrivate(newpage);
	set_page_private(newpage, page_private(page));

	migrate_page_copy(newpage, page);

	return MIGRATEPAGE_SUCCESS;
}
#endif

const struct address_space_operations f2fs_dblock_aops = {
	.readpage	= f2fs_read_data_page,
	.readpages	= f2fs_read_data_pages,
	.writepage	= f2fs_write_data_page,
	.writepages	= f2fs_write_data_pages,
	.write_begin	= f2fs_write_begin,
	.write_end	= f2fs_write_end,
	.set_page_dirty	= f2fs_set_data_page_dirty,
	.invalidatepage	= f2fs_invalidate_page,
	.releasepage	= f2fs_release_page,
	.direct_IO	= f2fs_direct_IO,
	.bmap		= f2fs_bmap,
#ifdef CONFIG_MIGRATION
	.migratepage    = f2fs_migrate_page,
#endif
};