/*
 * fs/f2fs/data.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/buffer_head.h>
#include <linux/mpage.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/pagevec.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/prefetch.h>
#include <linux/uio.h>
#include <linux/mm.h>
#include <linux/memcontrol.h>
#include <linux/cleancache.h>
#include <linux/sched/signal.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "trace.h"
#include <trace/events/f2fs.h>

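/*
 * Pages of the meta inode, the node inode, directories, and cold data are
 * guaranteed to be persisted by checkpoint, so WB_DATA_TYPE() accounts
 * their writeback as F2FS_WB_CP_DATA instead of F2FS_WB_DATA.
 */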
static bool __is_cp_guaranteed(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct inode *inode;
	struct f2fs_sb_info *sbi;

	if (!mapping)
		return false;

	inode = mapping->host;
	sbi = F2FS_I_SB(inode);

	if (inode->i_ino == F2FS_META_INO(sbi) ||
			inode->i_ino ==  F2FS_NODE_INO(sbi) ||
			S_ISDIR(inode->i_mode) ||
			is_cold_data(page))
		return true;
	return false;
}

static void f2fs_read_end_io(struct bio *bio)
{
	struct bio_vec *bvec;
	int i;

#ifdef CONFIG_F2FS_FAULT_INJECTION
	if (time_to_inject(F2FS_P_SB(bio_first_page_all(bio)), FAULT_IO)) {
		f2fs_show_injection_info(FAULT_IO);
		bio->bi_status = BLK_STS_IOERR;
	}
#endif

	if (f2fs_bio_encrypted(bio)) {
		if (bio->bi_status) {
			fscrypt_release_ctx(bio->bi_private);
		} else {
			fscrypt_decrypt_bio_pages(bio->bi_private, bio);
			return;
		}
	}

	bio_for_each_segment_all(bvec, bio, i) {
		struct page *page = bvec->bv_page;

		if (!bio->bi_status) {
			if (!PageUptodate(page))
				SetPageUptodate(page);
		} else {
			ClearPageUptodate(page);
			SetPageError(page);
		}
		unlock_page(page);
	}
	bio_put(bio);
}

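/*
 * Write completion: dummy pages used only for IO-size alignment go back to
 * the write_io_dummy mempool, and a failed write of checkpoint-guaranteed
 * data stops further checkpointing, since consistency can no longer be
 * guaranteed.
 */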
static void f2fs_write_end_io(struct bio *bio)
{
	struct f2fs_sb_info *sbi = bio->bi_private;
	struct bio_vec *bvec;
	int i;

	bio_for_each_segment_all(bvec, bio, i) {
		struct page *page = bvec->bv_page;
		enum count_type type = WB_DATA_TYPE(page);

		if (IS_DUMMY_WRITTEN_PAGE(page)) {
			set_page_private(page, (unsigned long)NULL);
			ClearPagePrivate(page);
			unlock_page(page);
			mempool_free(page, sbi->write_io_dummy);

			if (unlikely(bio->bi_status))
				f2fs_stop_checkpoint(sbi, true);
			continue;
		}

		fscrypt_pullback_bio_page(&page, true);

		if (unlikely(bio->bi_status)) {
			mapping_set_error(page->mapping, -EIO);
			if (type == F2FS_WB_CP_DATA)
				f2fs_stop_checkpoint(sbi, true);
		}

		f2fs_bug_on(sbi, page->mapping == NODE_MAPPING(sbi) &&
					page->index != nid_of_node(page));

		dec_page_count(sbi, type);
		clear_cold_data(page);
		end_page_writeback(page);
	}
	if (!get_pages(sbi, F2FS_WB_CP_DATA) &&
				wq_has_sleeper(&sbi->cp_wait))
		wake_up(&sbi->cp_wait);

	bio_put(bio);
}

/*
 * Return the block device that should serve @blk_addr and, when a bio is
 * given, redirect the bio's device and sector to it.
 */
struct block_device *f2fs_target_device(struct f2fs_sb_info *sbi,
				block_t blk_addr, struct bio *bio)
{
	struct block_device *bdev = sbi->sb->s_bdev;
	int i;

	for (i = 0; i < sbi->s_ndevs; i++) {
		if (FDEV(i).start_blk <= blk_addr &&
					FDEV(i).end_blk >= blk_addr) {
			blk_addr -= FDEV(i).start_blk;
			bdev = FDEV(i).bdev;
			break;
		}
	}
	if (bio) {
		bio_set_dev(bio, bdev);
		bio->bi_iter.bi_sector = SECTOR_FROM_BLOCK(blk_addr);
	}
	return bdev;
}

int f2fs_target_device_index(struct f2fs_sb_info *sbi, block_t blkaddr)
{
	int i;

	for (i = 0; i < sbi->s_ndevs; i++)
		if (FDEV(i).start_blk <= blkaddr && FDEV(i).end_blk >= blkaddr)
			return i;
	return 0;
}

static bool __same_bdev(struct f2fs_sb_info *sbi,
				block_t blk_addr, struct bio *bio)
{
	struct block_device *b = f2fs_target_device(sbi, blk_addr, NULL);
	return bio->bi_disk == b->bd_disk && bio->bi_partno == b->bd_partno;
}

/*
 * Low-level block read/write IO operations.
 */
static struct bio *__bio_alloc(struct f2fs_sb_info *sbi, block_t blk_addr,
				struct writeback_control *wbc,
				int npages, bool is_read,
				enum page_type type, enum temp_type temp)
{
	struct bio *bio;

	bio = f2fs_bio_alloc(sbi, npages, true);

	f2fs_target_device(sbi, blk_addr, bio);
	if (is_read) {
		bio->bi_end_io = f2fs_read_end_io;
		bio->bi_private = NULL;
	} else {
		bio->bi_end_io = f2fs_write_end_io;
		bio->bi_private = sbi;
		bio->bi_write_hint = io_type_to_rw_hint(sbi, type, temp);
	}
	if (wbc)
		wbc_init_bio(wbc, bio);

	return bio;
}

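/*
 * Before submission, pad partially filled DATA/NODE write bios up to
 * F2FS_IO_SIZE with dummy pages, so that devices expecting fixed-size IO
 * units always see full-sized writes.
 */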
static inline void __submit_bio(struct f2fs_sb_info *sbi,
				struct bio *bio, enum page_type type)
{
	if (!is_read_io(bio_op(bio))) {
		unsigned int start;

		if (type != DATA && type != NODE)
			goto submit_io;

		if (f2fs_sb_has_blkzoned(sbi->sb) && current->plug)
			blk_finish_plug(current->plug);

		start = bio->bi_iter.bi_size >> F2FS_BLKSIZE_BITS;
		start %= F2FS_IO_SIZE(sbi);

		if (start == 0)
			goto submit_io;

		/* fill dummy pages */
		for (; start < F2FS_IO_SIZE(sbi); start++) {
			struct page *page =
				mempool_alloc(sbi->write_io_dummy,
					GFP_NOIO | __GFP_ZERO | __GFP_NOFAIL);
			f2fs_bug_on(sbi, !page);

			SetPagePrivate(page);
			set_page_private(page, (unsigned long)DUMMY_WRITTEN_PAGE);
			lock_page(page);
			if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE)
				f2fs_bug_on(sbi, 1);
		}
		/*
		 * In the NODE case, we lose the next block address chain.
		 * So, we need to do a checkpoint in f2fs_sync_file.
		 */
		if (type == NODE)
			set_sbi_flag(sbi, SBI_NEED_CP);
	}
submit_io:
	if (is_read_io(bio_op(bio)))
		trace_f2fs_submit_read_bio(sbi->sb, type, bio);
	else
		trace_f2fs_submit_write_bio(sbi->sb, type, bio);
	submit_bio(bio);
}

static void __submit_merged_bio(struct f2fs_bio_info *io)
{
	struct f2fs_io_info *fio = &io->fio;

	if (!io->bio)
		return;

	bio_set_op_attrs(io->bio, fio->op, fio->op_flags);

	if (is_read_io(fio->op))
		trace_f2fs_prepare_read_bio(io->sbi->sb, fio->type, io->bio);
	else
		trace_f2fs_prepare_write_bio(io->sbi->sb, fio->type, io->bio);

	__submit_bio(io->sbi, io->bio, fio->type);
	io->bio = NULL;
}

static bool __has_merged_page(struct f2fs_bio_info *io,
				struct inode *inode, nid_t ino, pgoff_t idx)
{
	struct bio_vec *bvec;
	struct page *target;
	int i;

	if (!io->bio)
		return false;

	if (!inode && !ino)
		return true;

	bio_for_each_segment_all(bvec, io->bio, i) {

		if (bvec->bv_page->mapping)
			target = bvec->bv_page;
		else
			target = fscrypt_control_page(bvec->bv_page);

		if (idx != target->index)
			continue;

		if (inode && inode == target->mapping->host)
			return true;
		if (ino && ino == ino_of_node(target))
			return true;
	}

	return false;
}

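/*
 * Check whether any pending write bio of @type, at any temperature level,
 * already carries a page that matches @inode/@ino at index @idx.
 */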
static bool has_merged_page(struct f2fs_sb_info *sbi, struct inode *inode,
				nid_t ino, pgoff_t idx, enum page_type type)
{
	enum page_type btype = PAGE_TYPE_OF_BIO(type);
	enum temp_type temp;
	struct f2fs_bio_info *io;
	bool ret = false;

	for (temp = HOT; temp < NR_TEMP_TYPE; temp++) {
		io = sbi->write_io[btype] + temp;

		down_read(&io->io_rwsem);
		ret = __has_merged_page(io, inode, ino, idx);
		up_read(&io->io_rwsem);

		/* TODO: use HOT temp only for meta pages now. */
		if (ret || btype == META)
			break;
	}
	return ret;
}

static void __f2fs_submit_merged_write(struct f2fs_sb_info *sbi,
				enum page_type type, enum temp_type temp)
{
	enum page_type btype = PAGE_TYPE_OF_BIO(type);
	struct f2fs_bio_info *io = sbi->write_io[btype] + temp;

	down_write(&io->io_rwsem);

	/* change META to META_FLUSH in the checkpoint procedure */
	if (type >= META_FLUSH) {
		io->fio.type = META_FLUSH;
		io->fio.op = REQ_OP_WRITE;
		io->fio.op_flags = REQ_META | REQ_PRIO | REQ_SYNC;
		if (!test_opt(sbi, NOBARRIER))
			io->fio.op_flags |= REQ_PREFLUSH | REQ_FUA;
	}
	__submit_merged_bio(io);
	up_write(&io->io_rwsem);
}

static void __submit_merged_write_cond(struct f2fs_sb_info *sbi,
				struct inode *inode, nid_t ino, pgoff_t idx,
				enum page_type type, bool force)
{
	enum temp_type temp;

	if (!force && !has_merged_page(sbi, inode, ino, idx, type))
		return;

	for (temp = HOT; temp < NR_TEMP_TYPE; temp++) {

		__f2fs_submit_merged_write(sbi, type, temp);

		/* TODO: use HOT temp only for meta pages now. */
		if (type >= META)
			break;
	}
}

void f2fs_submit_merged_write(struct f2fs_sb_info *sbi, enum page_type type)
{
	__submit_merged_write_cond(sbi, NULL, 0, 0, type, true);
}

void f2fs_submit_merged_write_cond(struct f2fs_sb_info *sbi,
				struct inode *inode, nid_t ino, pgoff_t idx,
				enum page_type type)
{
	__submit_merged_write_cond(sbi, inode, ino, idx, type, false);
}

void f2fs_flush_merged_writes(struct f2fs_sb_info *sbi)
{
	f2fs_submit_merged_write(sbi, DATA);
	f2fs_submit_merged_write(sbi, NODE);
	f2fs_submit_merged_write(sbi, META);
}

/*
 * Fill the locked page with data located in the block address.
 * A caller needs to unlock the page on failure.
 */
int f2fs_submit_page_bio(struct f2fs_io_info *fio)
{
	struct bio *bio;
	struct page *page = fio->encrypted_page ?
			fio->encrypted_page : fio->page;

	trace_f2fs_submit_page_bio(page, fio);
	f2fs_trace_ios(fio, 0);

	/* Allocate a new bio */
	bio = __bio_alloc(fio->sbi, fio->new_blkaddr, fio->io_wbc,
				1, is_read_io(fio->op), fio->type, fio->temp);

	if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
		bio_put(bio);
		return -EFAULT;
	}
	bio_set_op_attrs(bio, fio->op, fio->op_flags);

	__submit_bio(fio->sbi, bio, fio->type);

	if (!is_read_io(fio->op))
		inc_page_count(fio->sbi, WB_DATA_TYPE(fio->page));
	return 0;
}

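/*
 * Append the page to the in-flight bio of its (type, temp) write stream.
 * The pending bio is flushed first whenever the new block is not physically
 * contiguous, the op flags differ, or the target block device changes.
 */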
int f2fs_submit_page_write(struct f2fs_io_info *fio)
{
	struct f2fs_sb_info *sbi = fio->sbi;
	enum page_type btype = PAGE_TYPE_OF_BIO(fio->type);
	struct f2fs_bio_info *io = sbi->write_io[btype] + fio->temp;
	struct page *bio_page;
	int err = 0;

	f2fs_bug_on(sbi, is_read_io(fio->op));

	down_write(&io->io_rwsem);
next:
	if (fio->in_list) {
		spin_lock(&io->io_lock);
		if (list_empty(&io->io_list)) {
			spin_unlock(&io->io_lock);
			goto out_fail;
		}
		fio = list_first_entry(&io->io_list,
						struct f2fs_io_info, list);
		list_del(&fio->list);
		spin_unlock(&io->io_lock);
	}

	if (fio->old_blkaddr != NEW_ADDR)
		verify_block_addr(sbi, fio->old_blkaddr);
	verify_block_addr(sbi, fio->new_blkaddr);

	bio_page = fio->encrypted_page ? fio->encrypted_page : fio->page;

	/* set submitted = true as a return value */
	fio->submitted = true;

	inc_page_count(sbi, WB_DATA_TYPE(bio_page));

	if (io->bio && (io->last_block_in_bio != fio->new_blkaddr - 1 ||
	    (io->fio.op != fio->op || io->fio.op_flags != fio->op_flags) ||
			!__same_bdev(sbi, fio->new_blkaddr, io->bio)))
		__submit_merged_bio(io);
alloc_new:
	if (io->bio == NULL) {
		if ((fio->type == DATA || fio->type == NODE) &&
				fio->new_blkaddr & F2FS_IO_SIZE_MASK(sbi)) {
			err = -EAGAIN;
			dec_page_count(sbi, WB_DATA_TYPE(bio_page));
			goto out_fail;
		}
		io->bio = __bio_alloc(sbi, fio->new_blkaddr, fio->io_wbc,
						BIO_MAX_PAGES, false,
						fio->type, fio->temp);
		io->fio = *fio;
	}

	if (bio_add_page(io->bio, bio_page, PAGE_SIZE, 0) < PAGE_SIZE) {
		__submit_merged_bio(io);
		goto alloc_new;
	}

	if (fio->io_wbc)
		wbc_account_io(fio->io_wbc, bio_page, PAGE_SIZE);

	io->last_block_in_bio = fio->new_blkaddr;
	f2fs_trace_ios(fio, 0);

	trace_f2fs_submit_page_write(fio->page, fio);

	if (fio->in_list)
		goto next;
out_fail:
	up_write(&io->io_rwsem);
	return err;
}

static struct bio *f2fs_grab_read_bio(struct inode *inode, block_t blkaddr,
							 unsigned nr_pages)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct fscrypt_ctx *ctx = NULL;
	struct bio *bio;

	if (f2fs_encrypted_file(inode)) {
		ctx = fscrypt_get_ctx(inode, GFP_NOFS);
		if (IS_ERR(ctx))
			return ERR_CAST(ctx);

		/* wait for the page to be moved by cleaning */
		f2fs_wait_on_block_writeback(sbi, blkaddr);
	}

	bio = f2fs_bio_alloc(sbi, min_t(int, nr_pages, BIO_MAX_PAGES), false);
	if (!bio) {
		if (ctx)
			fscrypt_release_ctx(ctx);
		return ERR_PTR(-ENOMEM);
	}
	f2fs_target_device(sbi, blkaddr, bio);
	bio->bi_end_io = f2fs_read_end_io;
	bio->bi_private = ctx;
	bio_set_op_attrs(bio, REQ_OP_READ, 0);

	return bio;
}

/* This can handle encryption stuff */
static int f2fs_submit_page_read(struct inode *inode, struct page *page,
							block_t blkaddr)
{
	struct bio *bio = f2fs_grab_read_bio(inode, blkaddr, 1);

	if (IS_ERR(bio))
		return PTR_ERR(bio);

	if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
		bio_put(bio);
		return -EFAULT;
	}
	__submit_bio(F2FS_I_SB(inode), bio, DATA);
	return 0;
}

static void __set_data_blkaddr(struct dnode_of_data *dn)
{
	struct f2fs_node *rn = F2FS_NODE(dn->node_page);
	__le32 *addr_array;
	int base = 0;

	if (IS_INODE(dn->node_page) && f2fs_has_extra_attr(dn->inode))
		base = get_extra_isize(dn->inode);

	/* Get physical address of data block */
	addr_array = blkaddr_in_node(rn);
	addr_array[base + dn->ofs_in_node] = cpu_to_le32(dn->data_blkaddr);
}

/*
 * Lock ordering for the change of data block address:
 * ->data_page
 *  ->node_page
 *    update block addresses in the node page
 */
void set_data_blkaddr(struct dnode_of_data *dn)
{
	f2fs_wait_on_page_writeback(dn->node_page, NODE, true);
	__set_data_blkaddr(dn);
	if (set_page_dirty(dn->node_page))
		dn->node_changed = true;
}

void f2fs_update_data_blkaddr(struct dnode_of_data *dn, block_t blkaddr)
{
	dn->data_blkaddr = blkaddr;
	set_data_blkaddr(dn);
	f2fs_update_extent_cache(dn);
}

/* dn->ofs_in_node will be returned with up-to-date last block pointer */
int reserve_new_blocks(struct dnode_of_data *dn, blkcnt_t count)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	int err;

	if (!count)
		return 0;

	if (unlikely(is_inode_flag_set(dn->inode, FI_NO_ALLOC)))
		return -EPERM;
	if (unlikely((err = inc_valid_block_count(sbi, dn->inode, &count))))
		return err;

	trace_f2fs_reserve_new_blocks(dn->inode, dn->nid,
						dn->ofs_in_node, count);

	f2fs_wait_on_page_writeback(dn->node_page, NODE, true);

	for (; count > 0; dn->ofs_in_node++) {
		block_t blkaddr = datablock_addr(dn->inode,
					dn->node_page, dn->ofs_in_node);
		if (blkaddr == NULL_ADDR) {
			dn->data_blkaddr = NEW_ADDR;
			__set_data_blkaddr(dn);
			count--;
		}
	}

	if (set_page_dirty(dn->node_page))
		dn->node_changed = true;
	return 0;
}

/* Should keep dn->ofs_in_node unchanged */
int reserve_new_block(struct dnode_of_data *dn)
{
	unsigned int ofs_in_node = dn->ofs_in_node;
	int ret;

	ret = reserve_new_blocks(dn, 1);
	dn->ofs_in_node = ofs_in_node;
	return ret;
}

int f2fs_reserve_block(struct dnode_of_data *dn, pgoff_t index)
{
	bool need_put = dn->inode_page ? false : true;
	int err;

	err = get_dnode_of_data(dn, index, ALLOC_NODE);
	if (err)
		return err;

	if (dn->data_blkaddr == NULL_ADDR)
		err = reserve_new_block(dn);
	if (err || need_put)
		f2fs_put_dnode(dn);
	return err;
}

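/* Resolve the block address from the extent cache, or reserve a new block. */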
int f2fs_get_block(struct dnode_of_data *dn, pgoff_t index)
{
	struct extent_info ei = {0, 0, 0};
	struct inode *inode = dn->inode;

	if (f2fs_lookup_extent_cache(inode, index, &ei)) {
		dn->data_blkaddr = ei.blk + index - ei.fofs;
		return 0;
	}

	return f2fs_reserve_block(dn, index);
}

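/*
 * Look up the block address (extent cache first, then the dnode) and issue
 * a read for the page, unless it is already uptodate or backed by a
 * preallocated NEW_ADDR block, which is simply zero-filled.
 */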
struct page *get_read_data_page(struct inode *inode, pgoff_t index,
						int op_flags, bool for_write)
{
	struct address_space *mapping = inode->i_mapping;
	struct dnode_of_data dn;
	struct page *page;
	struct extent_info ei = {0, 0, 0};
	int err;

	page = f2fs_grab_cache_page(mapping, index, for_write);
	if (!page)
		return ERR_PTR(-ENOMEM);

	if (f2fs_lookup_extent_cache(inode, index, &ei)) {
		dn.data_blkaddr = ei.blk + index - ei.fofs;
		goto got_it;
	}

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, index, LOOKUP_NODE);
	if (err)
		goto put_err;
	f2fs_put_dnode(&dn);

	if (unlikely(dn.data_blkaddr == NULL_ADDR)) {
		err = -ENOENT;
		goto put_err;
	}
got_it:
	if (PageUptodate(page)) {
		unlock_page(page);
		return page;
	}

	/*
	 * A new dentry page is allocated but not able to be written, since its
	 * new inode page couldn't be allocated due to -ENOSPC.
	 * In that case, its blkaddr can remain NEW_ADDR.
	 * see, f2fs_add_link -> get_new_data_page -> init_inode_metadata.
	 */
	if (dn.data_blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_SIZE);
		if (!PageUptodate(page))
			SetPageUptodate(page);
		unlock_page(page);
		return page;
	}

	err = f2fs_submit_page_read(inode, page, dn.data_blkaddr);
	if (err)
		goto put_err;
	return page;

put_err:
	f2fs_put_page(page, 1);
	return ERR_PTR(err);
}

struct page *find_data_page(struct inode *inode, pgoff_t index)
{
	struct address_space *mapping = inode->i_mapping;
	struct page *page;

	page = find_get_page(mapping, index);
	if (page && PageUptodate(page))
		return page;
	f2fs_put_page(page, 0);

	page = get_read_data_page(inode, index, 0, false);
	if (IS_ERR(page))
		return page;

	if (PageUptodate(page))
		return page;

	wait_on_page_locked(page);
	if (unlikely(!PageUptodate(page))) {
		f2fs_put_page(page, 0);
		return ERR_PTR(-EIO);
	}
	return page;
}

/*
 * If it tries to access a hole, return an error, because the callers in
 * dir.c and GC need to know whether this page exists or not.
 */
struct page *get_lock_data_page(struct inode *inode, pgoff_t index,
							bool for_write)
{
	struct address_space *mapping = inode->i_mapping;
	struct page *page;
repeat:
	page = get_read_data_page(inode, index, 0, for_write);
	if (IS_ERR(page))
		return page;

	/* wait for read completion */
	lock_page(page);
	if (unlikely(page->mapping != mapping)) {
		f2fs_put_page(page, 1);
		goto repeat;
	}
	if (unlikely(!PageUptodate(page))) {
		f2fs_put_page(page, 1);
		return ERR_PTR(-EIO);
	}
	return page;
}

/*
 * Caller ensures that this data page is never allocated.
 * A new zero-filled data page is allocated in the page cache.
 *
 * Also, caller should grab and release a rwsem by calling f2fs_lock_op() and
 * f2fs_unlock_op().
 * Note that, ipage is set only by make_empty_dir, and if any error occurs,
 * ipage should be released by this function.
 */
struct page *get_new_data_page(struct inode *inode,
		struct page *ipage, pgoff_t index, bool new_i_size)
{
	struct address_space *mapping = inode->i_mapping;
	struct page *page;
	struct dnode_of_data dn;
	int err;

	page = f2fs_grab_cache_page(mapping, index, true);
	if (!page) {
		/*
		 * before exiting, we should make sure ipage will be released
		 * if any error occurs.
		 */
		f2fs_put_page(ipage, 1);
		return ERR_PTR(-ENOMEM);
	}

	set_new_dnode(&dn, inode, ipage, NULL, 0);
	err = f2fs_reserve_block(&dn, index);
	if (err) {
		f2fs_put_page(page, 1);
		return ERR_PTR(err);
	}
	if (!ipage)
		f2fs_put_dnode(&dn);

	if (PageUptodate(page))
		goto got_it;

	if (dn.data_blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_SIZE);
		if (!PageUptodate(page))
			SetPageUptodate(page);
	} else {
		f2fs_put_page(page, 1);

		/* if ipage exists, blkaddr should be NEW_ADDR */
		f2fs_bug_on(F2FS_I_SB(inode), ipage);
		page = get_lock_data_page(inode, index, true);
		if (IS_ERR(page))
			return page;
	}
got_it:
	if (new_i_size && i_size_read(inode) <
				((loff_t)(index + 1) << PAGE_SHIFT))
		f2fs_i_size_write(inode, ((loff_t)(index + 1) << PAGE_SHIFT));
	return page;
}

static int __allocate_data_block(struct dnode_of_data *dn, int seg_type)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	struct f2fs_summary sum;
	struct node_info ni;
	pgoff_t fofs;
	blkcnt_t count = 1;
	int err;

	if (unlikely(is_inode_flag_set(dn->inode, FI_NO_ALLOC)))
		return -EPERM;

	dn->data_blkaddr = datablock_addr(dn->inode,
				dn->node_page, dn->ofs_in_node);
	if (dn->data_blkaddr == NEW_ADDR)
		goto alloc;

	if (unlikely((err = inc_valid_block_count(sbi, dn->inode, &count))))
		return err;

alloc:
	get_node_info(sbi, dn->nid, &ni);
	set_summary(&sum, dn->nid, dn->ofs_in_node, ni.version);

	allocate_data_block(sbi, NULL, dn->data_blkaddr, &dn->data_blkaddr,
					&sum, seg_type, NULL, false);
	set_data_blkaddr(dn);

	/* update i_size */
	fofs = start_bidx_of_node(ofs_of_node(dn->node_page), dn->inode) +
							dn->ofs_in_node;
	if (i_size_read(dn->inode) < ((loff_t)(fofs + 1) << PAGE_SHIFT))
		f2fs_i_size_write(dn->inode,
				((loff_t)(fofs + 1) << PAGE_SHIFT));
	return 0;
}

int f2fs_preallocate_blocks(struct kiocb *iocb, struct iov_iter *from)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	struct f2fs_map_blocks map;
	int flag;
	int err = 0;
	bool direct_io = iocb->ki_flags & IOCB_DIRECT;

	/* convert inline data for Direct I/O */
	if (direct_io) {
		err = f2fs_convert_inline_inode(inode);
		if (err)
			return err;
	}

	if (is_inode_flag_set(inode, FI_NO_PREALLOC))
		return 0;

	map.m_lblk = F2FS_BLK_ALIGN(iocb->ki_pos);
	map.m_len = F2FS_BYTES_TO_BLK(iocb->ki_pos + iov_iter_count(from));
	if (map.m_len > map.m_lblk)
		map.m_len -= map.m_lblk;
	else
		map.m_len = 0;

	map.m_next_pgofs = NULL;
	map.m_next_extent = NULL;
	map.m_seg_type = NO_CHECK_TYPE;

	if (direct_io) {
		map.m_seg_type = rw_hint_to_seg_type(iocb->ki_hint);
		flag = f2fs_force_buffered_io(inode, WRITE) ?
					F2FS_GET_BLOCK_PRE_AIO :
					F2FS_GET_BLOCK_PRE_DIO;
		goto map_blocks;
	}
	if (iocb->ki_pos + iov_iter_count(from) > MAX_INLINE_DATA(inode)) {
		err = f2fs_convert_inline_inode(inode);
		if (err)
			return err;
	}
	if (f2fs_has_inline_data(inode))
		return err;

	flag = F2FS_GET_BLOCK_PRE_AIO;

map_blocks:
	err = f2fs_map_blocks(inode, &map, 1, flag);
	if (map.m_len > 0 && err == -ENOSPC) {
		if (!direct_io)
			set_inode_flag(inode, FI_NO_PREALLOC);
		err = 0;
	}
	return err;
}

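/*
 * F2FS_GET_BLOCK_PRE_AIO preallocation only touches node pages, so taking
 * the lighter node_change lock is enough; every other flag has to hold
 * f2fs_lock_op() to exclude checkpoint.
 */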
static inline void __do_map_lock(struct f2fs_sb_info *sbi, int flag, bool lock)
{
	if (flag == F2FS_GET_BLOCK_PRE_AIO) {
		if (lock)
			down_read(&sbi->node_change);
		else
			up_read(&sbi->node_change);
	} else {
		if (lock)
			f2fs_lock_op(sbi);
		else
			f2fs_unlock_op(sbi);
	}
}

/*
 * f2fs_map_blocks() now supports readahead/bmap/rw direct_IO with the
 * f2fs_map_blocks structure.
 * If original data blocks are allocated, then give them to blockdev.
 * Otherwise,
 *     a. preallocate requested block addresses
 *     b. do not use extent cache for better performance
 *     c. give the block addresses to blockdev
 */
int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map,
						int create, int flag)
{
	unsigned int maxblocks = map->m_len;
	struct dnode_of_data dn;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	int mode = create ? ALLOC_NODE : LOOKUP_NODE;
	pgoff_t pgofs, end_offset, end;
	int err = 0, ofs = 1;
	unsigned int ofs_in_node, last_ofs_in_node;
	blkcnt_t prealloc;
	struct extent_info ei = {0, 0, 0};
	block_t blkaddr;
	unsigned int start_pgofs;

	if (!maxblocks)
		return 0;

	map->m_len = 0;
	map->m_flags = 0;

	/* it only supports block size == page size */
	pgofs = (pgoff_t)map->m_lblk;
	end = pgofs + maxblocks;

	if (!create && f2fs_lookup_extent_cache(inode, pgofs, &ei)) {
		map->m_pblk = ei.blk + pgofs - ei.fofs;
		map->m_len = min((pgoff_t)maxblocks, ei.fofs + ei.len - pgofs);
		map->m_flags = F2FS_MAP_MAPPED;
		if (map->m_next_extent)
			*map->m_next_extent = pgofs + map->m_len;
		goto out;
	}

next_dnode:
	if (create)
		__do_map_lock(sbi, flag, true);

	/* When reading holes, we need its node page */
	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, pgofs, mode);
	if (err) {
		if (flag == F2FS_GET_BLOCK_BMAP)
			map->m_pblk = 0;
		if (err == -ENOENT) {
			err = 0;
			if (map->m_next_pgofs)
				*map->m_next_pgofs =
					get_next_page_offset(&dn, pgofs);
			if (map->m_next_extent)
				*map->m_next_extent =
					get_next_page_offset(&dn, pgofs);
		}
		goto unlock_out;
	}

	start_pgofs = pgofs;
	prealloc = 0;
	last_ofs_in_node = ofs_in_node = dn.ofs_in_node;
	end_offset = ADDRS_PER_PAGE(dn.node_page, inode);

next_block:
	blkaddr = datablock_addr(dn.inode, dn.node_page, dn.ofs_in_node);

	if (blkaddr == NEW_ADDR || blkaddr == NULL_ADDR) {
		if (create) {
			if (unlikely(f2fs_cp_error(sbi))) {
				err = -EIO;
				goto sync_out;
			}
			if (flag == F2FS_GET_BLOCK_PRE_AIO) {
				if (blkaddr == NULL_ADDR) {
					prealloc++;
					last_ofs_in_node = dn.ofs_in_node;
				}
			} else {
				err = __allocate_data_block(&dn,
							map->m_seg_type);
				if (!err)
					set_inode_flag(inode, FI_APPEND_WRITE);
			}
			if (err)
				goto sync_out;
			map->m_flags |= F2FS_MAP_NEW;
			blkaddr = dn.data_blkaddr;
		} else {
			if (flag == F2FS_GET_BLOCK_BMAP) {
				map->m_pblk = 0;
				goto sync_out;
			}
			if (flag == F2FS_GET_BLOCK_PRECACHE)
				goto sync_out;
			if (flag == F2FS_GET_BLOCK_FIEMAP &&
						blkaddr == NULL_ADDR) {
				if (map->m_next_pgofs)
					*map->m_next_pgofs = pgofs + 1;
				goto sync_out;
			}
			if (flag != F2FS_GET_BLOCK_FIEMAP) {
				/* for defragment case */
				if (map->m_next_pgofs)
					*map->m_next_pgofs = pgofs + 1;
				goto sync_out;
			}
		}
	}

	if (flag == F2FS_GET_BLOCK_PRE_AIO)
		goto skip;

	if (map->m_len == 0) {
		/* preallocated unwritten block should be mapped for fiemap. */
		if (blkaddr == NEW_ADDR)
			map->m_flags |= F2FS_MAP_UNWRITTEN;
		map->m_flags |= F2FS_MAP_MAPPED;

		map->m_pblk = blkaddr;
		map->m_len = 1;
	} else if ((map->m_pblk != NEW_ADDR &&
			blkaddr == (map->m_pblk + ofs)) ||
			(map->m_pblk == NEW_ADDR && blkaddr == NEW_ADDR) ||
			flag == F2FS_GET_BLOCK_PRE_DIO) {
		ofs++;
		map->m_len++;
	} else {
		goto sync_out;
	}

skip:
	dn.ofs_in_node++;
	pgofs++;

	/* preallocate blocks in batch for one dnode page */
	if (flag == F2FS_GET_BLOCK_PRE_AIO &&
			(pgofs == end || dn.ofs_in_node == end_offset)) {

		dn.ofs_in_node = ofs_in_node;
		err = reserve_new_blocks(&dn, prealloc);
		if (err)
			goto sync_out;

		map->m_len += dn.ofs_in_node - ofs_in_node;
		if (prealloc && dn.ofs_in_node != last_ofs_in_node + 1) {
			err = -ENOSPC;
			goto sync_out;
		}
		dn.ofs_in_node = end_offset;
	}

	if (pgofs >= end)
		goto sync_out;
	else if (dn.ofs_in_node < end_offset)
		goto next_block;

	if (flag == F2FS_GET_BLOCK_PRECACHE) {
		if (map->m_flags & F2FS_MAP_MAPPED) {
			unsigned int ofs = start_pgofs - map->m_lblk;

			f2fs_update_extent_cache_range(&dn,
				start_pgofs, map->m_pblk + ofs,
				map->m_len - ofs);
		}
	}

	f2fs_put_dnode(&dn);

	if (create) {
		__do_map_lock(sbi, flag, false);
		f2fs_balance_fs(sbi, dn.node_changed);
	}
	goto next_dnode;

sync_out:
	if (flag == F2FS_GET_BLOCK_PRECACHE) {
		if (map->m_flags & F2FS_MAP_MAPPED) {
			unsigned int ofs = start_pgofs - map->m_lblk;

			f2fs_update_extent_cache_range(&dn,
				start_pgofs, map->m_pblk + ofs,
				map->m_len - ofs);
		}
		if (map->m_next_extent)
			*map->m_next_extent = pgofs + 1;
	}
	f2fs_put_dnode(&dn);
unlock_out:
	if (create) {
		__do_map_lock(sbi, flag, false);
		f2fs_balance_fs(sbi, dn.node_changed);
	}
out:
	trace_f2fs_map_blocks(inode, map, err);
	return err;
}

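/*
 * Return true only when the whole byte range [pos, pos + len) is backed by
 * already-mapped blocks, i.e. an overwrite that needs no new allocation.
 */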
bool f2fs_overwrite_io(struct inode *inode, loff_t pos, size_t len)
{
	struct f2fs_map_blocks map;
	block_t last_lblk;
	int err;

	if (pos + len > i_size_read(inode))
		return false;

	map.m_lblk = F2FS_BYTES_TO_BLK(pos);
	map.m_next_pgofs = NULL;
	map.m_next_extent = NULL;
	map.m_seg_type = NO_CHECK_TYPE;
	last_lblk = F2FS_BLK_ALIGN(pos + len);

	while (map.m_lblk < last_lblk) {
		map.m_len = last_lblk - map.m_lblk;
		err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_DEFAULT);
		if (err || map.m_len == 0)
			return false;
		map.m_lblk += map.m_len;
	}
	return true;
}

static int __get_data_block(struct inode *inode, sector_t iblock,
			struct buffer_head *bh, int create, int flag,
			pgoff_t *next_pgofs, int seg_type)
{
	struct f2fs_map_blocks map;
	int err;

	map.m_lblk = iblock;
	map.m_len = bh->b_size >> inode->i_blkbits;
	map.m_next_pgofs = next_pgofs;
	map.m_next_extent = NULL;
	map.m_seg_type = seg_type;

	err = f2fs_map_blocks(inode, &map, create, flag);
	if (!err) {
		map_bh(bh, inode->i_sb, map.m_pblk);
		bh->b_state = (bh->b_state & ~F2FS_MAP_FLAGS) | map.m_flags;
		bh->b_size = (u64)map.m_len << inode->i_blkbits;
	}
	return err;
}

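/*
 * Thin get_block_t-style wrappers around __get_data_block() for the
 * fiemap, direct IO, and bmap paths.
 */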
static int get_data_block(struct inode *inode, sector_t iblock,
			struct buffer_head *bh_result, int create, int flag,
			pgoff_t *next_pgofs)
{
	return __get_data_block(inode, iblock, bh_result, create,
							flag, next_pgofs,
							NO_CHECK_TYPE);
}

static int get_data_block_dio(struct inode *inode, sector_t iblock,
			struct buffer_head *bh_result, int create)
{
	return __get_data_block(inode, iblock, bh_result, create,
						F2FS_GET_BLOCK_DEFAULT, NULL,
						rw_hint_to_seg_type(
							inode->i_write_hint));
}

static int get_data_block_bmap(struct inode *inode, sector_t iblock,
			struct buffer_head *bh_result, int create)
{
	/* Block number less than F2FS MAX BLOCKS */
	if (unlikely(iblock >= F2FS_I_SB(inode)->max_file_blocks))
		return -EFBIG;

	return __get_data_block(inode, iblock, bh_result, create,
						F2FS_GET_BLOCK_BMAP, NULL,
						NO_CHECK_TYPE);
}

static inline sector_t logical_to_blk(struct inode *inode, loff_t offset)
{
	return (offset >> inode->i_blkbits);
}

static inline loff_t blk_to_logical(struct inode *inode, sector_t blk)
{
	return (blk << inode->i_blkbits);
}

static int f2fs_xattr_fiemap(struct inode *inode,
				struct fiemap_extent_info *fieinfo)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct page *page;
	struct node_info ni;
	__u64 phys = 0, len;
	__u32 flags;
	nid_t xnid = F2FS_I(inode)->i_xattr_nid;
	int err = 0;

	if (f2fs_has_inline_xattr(inode)) {
		int offset;

		page = f2fs_grab_cache_page(NODE_MAPPING(sbi),
						inode->i_ino, false);
		if (!page)
			return -ENOMEM;

		get_node_info(sbi, inode->i_ino, &ni);

		phys = (__u64)blk_to_logical(inode, ni.blk_addr);
		offset = offsetof(struct f2fs_inode, i_addr) +
					sizeof(__le32) * (DEF_ADDRS_PER_INODE -
					get_inline_xattr_addrs(inode));

		phys += offset;
		len = inline_xattr_size(inode);

		f2fs_put_page(page, 1);

		flags = FIEMAP_EXTENT_DATA_INLINE | FIEMAP_EXTENT_NOT_ALIGNED;

		if (!xnid)
			flags |= FIEMAP_EXTENT_LAST;

		err = fiemap_fill_next_extent(fieinfo, 0, phys, len, flags);
		if (err || err == 1)
			return err;
	}

	if (xnid) {
		page = f2fs_grab_cache_page(NODE_MAPPING(sbi), xnid, false);
		if (!page)
			return -ENOMEM;

		get_node_info(sbi, xnid, &ni);

		phys = (__u64)blk_to_logical(inode, ni.blk_addr);
		len = inode->i_sb->s_blocksize;

		f2fs_put_page(page, 1);

		flags = FIEMAP_EXTENT_LAST;
	}

	if (phys)
		err = fiemap_fill_next_extent(fieinfo, 0, phys, len, flags);

	return (err < 0 ? err : 0);
}

int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
		u64 start, u64 len)
{
	struct buffer_head map_bh;
	sector_t start_blk, last_blk;
	pgoff_t next_pgofs;
	u64 logical = 0, phys = 0, size = 0;
	u32 flags = 0;
	int ret = 0;

	if (fieinfo->fi_flags & FIEMAP_FLAG_CACHE) {
		ret = f2fs_precache_extents(inode);
		if (ret)
			return ret;
	}

	ret = fiemap_check_flags(fieinfo, FIEMAP_FLAG_SYNC | FIEMAP_FLAG_XATTR);
	if (ret)
		return ret;

	inode_lock(inode);

	if (fieinfo->fi_flags & FIEMAP_FLAG_XATTR) {
		ret = f2fs_xattr_fiemap(inode, fieinfo);
		goto out;
	}

	if (f2fs_has_inline_data(inode)) {
		ret = f2fs_inline_data_fiemap(inode, fieinfo, start, len);
		if (ret != -EAGAIN)
			goto out;
	}

	if (logical_to_blk(inode, len) == 0)
		len = blk_to_logical(inode, 1);

	start_blk = logical_to_blk(inode, start);
	last_blk = logical_to_blk(inode, start + len - 1);

next:
	memset(&map_bh, 0, sizeof(struct buffer_head));
	map_bh.b_size = len;

	ret = get_data_block(inode, start_blk, &map_bh, 0,
					F2FS_GET_BLOCK_FIEMAP, &next_pgofs);
	if (ret)
		goto out;

	/* HOLE */
	if (!buffer_mapped(&map_bh)) {
		start_blk = next_pgofs;

		if (blk_to_logical(inode, start_blk) < blk_to_logical(inode,
					F2FS_I_SB(inode)->max_file_blocks))
			goto prep_next;

		flags |= FIEMAP_EXTENT_LAST;
	}

	if (size) {
		if (f2fs_encrypted_inode(inode))
			flags |= FIEMAP_EXTENT_DATA_ENCRYPTED;

		ret = fiemap_fill_next_extent(fieinfo, logical,
				phys, size, flags);
	}

	if (start_blk > last_blk || ret)
		goto out;

	logical = blk_to_logical(inode, start_blk);
	phys = blk_to_logical(inode, map_bh.b_blocknr);
	size = map_bh.b_size;
	flags = 0;
	if (buffer_unwritten(&map_bh))
		flags = FIEMAP_EXTENT_UNWRITTEN;

	start_blk += logical_to_blk(inode, size);

prep_next:
	cond_resched();
	if (fatal_signal_pending(current))
		ret = -EINTR;
	else
		goto next;
out:
	if (ret == 1)
		ret = 0;

	inode_unlock(inode);
	return ret;
}

/*
 * This function was originally taken from fs/mpage.c, and customized for f2fs.
 * Major change was from block_size == page_size in f2fs by default.
 */
static int f2fs_mpage_readpages(struct address_space *mapping,
			struct list_head *pages, struct page *page,
			unsigned nr_pages)
{
	struct bio *bio = NULL;
	sector_t last_block_in_bio = 0;
	struct inode *inode = mapping->host;
	const unsigned blkbits = inode->i_blkbits;
	const unsigned blocksize = 1 << blkbits;
	sector_t block_in_file;
	sector_t last_block;
	sector_t last_block_in_file;
	sector_t block_nr;
	struct f2fs_map_blocks map;

	map.m_pblk = 0;
	map.m_lblk = 0;
	map.m_len = 0;
	map.m_flags = 0;
	map.m_next_pgofs = NULL;
	map.m_next_extent = NULL;
	map.m_seg_type = NO_CHECK_TYPE;

	for (; nr_pages; nr_pages--) {
		if (pages) {
			page = list_last_entry(pages, struct page, lru);

			prefetchw(&page->flags);
			list_del(&page->lru);
			if (add_to_page_cache_lru(page, mapping,
						  page->index,
						  readahead_gfp_mask(mapping)))
				goto next_page;
		}

		block_in_file = (sector_t)page->index;
		last_block = block_in_file + nr_pages;
		last_block_in_file = (i_size_read(inode) + blocksize - 1) >>
								blkbits;
		if (last_block > last_block_in_file)
			last_block = last_block_in_file;

		/*
		 * Map blocks using the previous result first.
		 */
		if ((map.m_flags & F2FS_MAP_MAPPED) &&
				block_in_file > map.m_lblk &&
				block_in_file < (map.m_lblk + map.m_len))
			goto got_it;

		/*
		 * Then do more f2fs_map_blocks() calls until we are
		 * done with this page.
		 */
		map.m_flags = 0;

		if (block_in_file < last_block) {
			map.m_lblk = block_in_file;
			map.m_len = last_block - block_in_file;

			if (f2fs_map_blocks(inode, &map, 0,
						F2FS_GET_BLOCK_DEFAULT))
				goto set_error_page;
		}
got_it:
		if ((map.m_flags & F2FS_MAP_MAPPED)) {
			block_nr = map.m_pblk + block_in_file - map.m_lblk;
			SetPageMappedToDisk(page);

			if (!PageUptodate(page) && !cleancache_get_page(page)) {
				SetPageUptodate(page);
				goto confused;
			}
		} else {
			zero_user_segment(page, 0, PAGE_SIZE);
			if (!PageUptodate(page))
				SetPageUptodate(page);
			unlock_page(page);
			goto next_page;
		}

		/*
		 * This page will go to BIO.  Do we need to send this
		 * BIO off first?
		 */
		if (bio && (last_block_in_bio != block_nr - 1 ||
			!__same_bdev(F2FS_I_SB(inode), block_nr, bio))) {
submit_and_realloc:
			__submit_bio(F2FS_I_SB(inode), bio, DATA);
			bio = NULL;
		}
		if (bio == NULL) {
			bio = f2fs_grab_read_bio(inode, block_nr, nr_pages);
			if (IS_ERR(bio)) {
				bio = NULL;
				goto set_error_page;
			}
		}

		if (bio_add_page(bio, page, blocksize, 0) < blocksize)
			goto submit_and_realloc;

		last_block_in_bio = block_nr;
		goto next_page;
set_error_page:
		SetPageError(page);
		zero_user_segment(page, 0, PAGE_SIZE);
		unlock_page(page);
		goto next_page;
confused:
		if (bio) {
			__submit_bio(F2FS_I_SB(inode), bio, DATA);
			bio = NULL;
		}
		unlock_page(page);
next_page:
		if (pages)
			put_page(page);
	}
	BUG_ON(pages && !list_empty(pages));
	if (bio)
		__submit_bio(F2FS_I_SB(inode), bio, DATA);
	return 0;
}

static int f2fs_read_data_page(struct file *file, struct page *page)
{
	struct inode *inode = page->mapping->host;
	int ret = -EAGAIN;

	trace_f2fs_readpage(page, DATA);

	/* If the file has inline data, try to read it directly */
	if (f2fs_has_inline_data(inode))
		ret = f2fs_read_inline_data(inode, page);
	if (ret == -EAGAIN)
		ret = f2fs_mpage_readpages(page->mapping, NULL, page, 1);
	return ret;
}

static int f2fs_read_data_pages(struct file *file,
			struct address_space *mapping,
			struct list_head *pages, unsigned nr_pages)
{
	struct inode *inode = mapping->host;
	struct page *page = list_last_entry(pages, struct page, lru);

	trace_f2fs_readpages(inode, page, nr_pages);

	/* If the file has inline data, skip readpages */
	if (f2fs_has_inline_data(inode))
		return 0;

	return f2fs_mpage_readpages(mapping, pages, NULL, nr_pages);
}

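/*
 * Encrypt one data page into a bounce page. On ENOMEM, flush the merged
 * write bios to release in-flight bounce pages and retry with __GFP_NOFAIL.
 */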
static int encrypt_one_page(struct f2fs_io_info *fio)
{
	struct inode *inode = fio->page->mapping->host;
	gfp_t gfp_flags = GFP_NOFS;

	if (!f2fs_encrypted_file(inode))
		return 0;

	/* wait for GCed encrypted page writeback */
	f2fs_wait_on_block_writeback(fio->sbi, fio->old_blkaddr);

retry_encrypt:
	fio->encrypted_page = fscrypt_encrypt_page(inode, fio->page,
			PAGE_SIZE, 0, fio->page->index, gfp_flags);
	if (!IS_ERR(fio->encrypted_page))
		return 0;

	/* flush pending IOs and wait for a while in the ENOMEM case */
	if (PTR_ERR(fio->encrypted_page) == -ENOMEM) {
		f2fs_flush_merged_writes(fio->sbi);
		congestion_wait(BLK_RW_ASYNC, HZ/50);
		gfp_flags |= __GFP_NOFAIL;
		goto retry_encrypt;
	}
	return PTR_ERR(fio->encrypted_page);
}

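/*
 * ipu_policy is a bitmap of F2FS_IPU_* conditions; an in-place update is
 * allowed as soon as one of the configured conditions holds.
 */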
static inline bool check_inplace_update_policy(struct inode *inode,
				struct f2fs_io_info *fio)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	unsigned int policy = SM_I(sbi)->ipu_policy;

	if (policy & (0x1 << F2FS_IPU_FORCE))
		return true;
	if (policy & (0x1 << F2FS_IPU_SSR) && need_SSR(sbi))
		return true;
	if (policy & (0x1 << F2FS_IPU_UTIL) &&
			utilization(sbi) > SM_I(sbi)->min_ipu_util)
		return true;
	if (policy & (0x1 << F2FS_IPU_SSR_UTIL) && need_SSR(sbi) &&
			utilization(sbi) > SM_I(sbi)->min_ipu_util)
		return true;

	/*
	 * IPU for rewrite async pages
	 */
	if (policy & (0x1 << F2FS_IPU_ASYNC) &&
			fio && fio->op == REQ_OP_WRITE &&
			!(fio->op_flags & REQ_SYNC) &&
			!f2fs_encrypted_inode(inode))
		return true;

	/* this is only set during fdatasync */
	if (policy & (0x1 << F2FS_IPU_FSYNC) &&
			is_inode_flag_set(inode, FI_NEED_IPU))
		return true;

	return false;
}

bool should_update_inplace(struct inode *inode, struct f2fs_io_info *fio)
{
	if (f2fs_is_pinned_file(inode))
		return true;

	/* if this is cold file, we should overwrite to avoid fragmentation */
	if (file_is_cold(inode))
		return true;

	return check_inplace_update_policy(inode, fio);
}

bool should_update_outplace(struct inode *inode, struct f2fs_io_info *fio)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

	if (test_opt(sbi, LFS))
		return true;
	if (S_ISDIR(inode->i_mode))
		return true;
	if (f2fs_is_atomic_file(inode))
		return true;
	if (fio) {
		if (is_cold_data(fio->page))
			return true;
		if (IS_ATOMIC_WRITTEN_PAGE(fio->page))
			return true;
	}
	return false;
}

static inline bool need_inplace_update(struct f2fs_io_info *fio)
{
	struct inode *inode = fio->page->mapping->host;

	if (should_update_outplace(inode, fio))
		return false;

	return should_update_inplace(inode, fio);
}

static inline bool valid_ipu_blkaddr(struct f2fs_io_info *fio)
{
	if (fio->old_blkaddr == NEW_ADDR)
		return false;
	if (fio->old_blkaddr == NULL_ADDR)
		return false;
	return true;
}

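/*
 * Write one data page either in place (IPU) over its old block address or
 * out of place (OPU) to a newly allocated block, as decided by
 * need_inplace_update().
 */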
int do_write_data_page(struct f2fs_io_info *fio)
{
	struct page *page = fio->page;
	struct inode *inode = page->mapping->host;
	struct dnode_of_data dn;
	struct extent_info ei = {0, 0, 0};
	bool ipu_force = false;
	int err = 0;

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	if (need_inplace_update(fio) &&
			f2fs_lookup_extent_cache(inode, page->index, &ei)) {
		fio->old_blkaddr = ei.blk + page->index - ei.fofs;

		if (valid_ipu_blkaddr(fio)) {
			ipu_force = true;
			fio->need_lock = LOCK_DONE;
			goto got_it;
		}
	}

	/* Avoid a deadlock between page->lock and f2fs_lock_op */
	if (fio->need_lock == LOCK_REQ && !f2fs_trylock_op(fio->sbi))
		return -EAGAIN;

	err = get_dnode_of_data(&dn, page->index, LOOKUP_NODE);
	if (err)
		goto out;

	fio->old_blkaddr = dn.data_blkaddr;

	/* This page is already truncated */
	if (fio->old_blkaddr == NULL_ADDR) {
		ClearPageUptodate(page);
		goto out_writepage;
	}
got_it:
	/*
	 * If the current allocation needs SSR, it is better to do in-place
	 * writes for updated data.
	 */
	if (ipu_force || (valid_ipu_blkaddr(fio) && need_inplace_update(fio))) {
		err = encrypt_one_page(fio);
		if (err)
			goto out_writepage;

		set_page_writeback(page);
		f2fs_put_dnode(&dn);
		if (fio->need_lock == LOCK_REQ)
			f2fs_unlock_op(fio->sbi);
		err = rewrite_data_page(fio);
		trace_f2fs_do_write_data_page(fio->page, IPU);
		set_inode_flag(inode, FI_UPDATE_WRITE);
		return err;
	}

	if (fio->need_lock == LOCK_RETRY) {
		if (!f2fs_trylock_op(fio->sbi)) {
			err = -EAGAIN;
			goto out_writepage;
		}
		fio->need_lock = LOCK_REQ;
	}

	err = encrypt_one_page(fio);
	if (err)
		goto out_writepage;

	set_page_writeback(page);

	/* LFS mode write path */
	write_data_page(&dn, fio);
	trace_f2fs_do_write_data_page(page, OPU);
	set_inode_flag(inode, FI_APPEND_WRITE);
	if (page->index == 0)
		set_inode_flag(inode, FI_FIRST_BLOCK_WRITTEN);
out_writepage:
	f2fs_put_dnode(&dn);
out:
	if (fio->need_lock == LOCK_REQ)
		f2fs_unlock_op(fio->sbi);
	return err;
}

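/*
 * Common writepage worker: zero the part beyond i_size, try the inline
 * path first, and fall back to do_write_data_page() with lock retries.
 * When @submitted is non-NULL, it reports whether a bio was issued.
 */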
static int __write_data_page(struct page *page, bool *submitted,
				struct writeback_control *wbc,
				enum iostat_type io_type)
{
	struct inode *inode = page->mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	loff_t i_size = i_size_read(inode);
	const pgoff_t end_index = ((unsigned long long) i_size)
							>> PAGE_SHIFT;
	loff_t psize = (page->index + 1) << PAGE_SHIFT;
	unsigned offset = 0;
	bool need_balance_fs = false;
	int err = 0;
	struct f2fs_io_info fio = {
		.sbi = sbi,
		.ino = inode->i_ino,
		.type = DATA,
		.op = REQ_OP_WRITE,
		.op_flags = wbc_to_write_flags(wbc),
		.old_blkaddr = NULL_ADDR,
		.page = page,
		.encrypted_page = NULL,
		.submitted = false,
		.need_lock = LOCK_RETRY,
		.io_type = io_type,
		.io_wbc = wbc,
	};

	trace_f2fs_writepage(page, DATA);

	/* we should bypass data pages to proceed the kworker jobs */
	if (unlikely(f2fs_cp_error(sbi))) {
		mapping_set_error(page->mapping, -EIO);
		goto out;
	}

	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
		goto redirty_out;

	if (page->index < end_index)
		goto write;

	/*
	 * If the offset is out-of-range of file size,
	 * this page does not have to be written to disk.
	 */
	offset = i_size & (PAGE_SIZE - 1);
	if ((page->index >= end_index + 1) || !offset)
		goto out;

	zero_user_segment(page, offset, PAGE_SIZE);
write:
	if (f2fs_is_drop_cache(inode))
		goto out;
	/* we should not write 0'th page having journal header */
	if (f2fs_is_volatile_file(inode) && (!page->index ||
			(!wbc->for_reclaim &&
			available_free_memory(sbi, BASE_CHECK))))
		goto redirty_out;

	/* Dentry blocks are controlled by checkpoint */
	if (S_ISDIR(inode->i_mode)) {
		fio.need_lock = LOCK_DONE;
		err = do_write_data_page(&fio);
		goto done;
	}

	if (!wbc->for_reclaim)
		need_balance_fs = true;
	else if (has_not_enough_free_secs(sbi, 0, 0))
		goto redirty_out;
	else
		set_inode_flag(inode, FI_HOT_DATA);

	err = -EAGAIN;
	if (f2fs_has_inline_data(inode)) {
		err = f2fs_write_inline_data(inode, page);
		if (!err)
			goto out;
	}

	if (err == -EAGAIN) {
		err = do_write_data_page(&fio);
		if (err == -EAGAIN) {
			fio.need_lock = LOCK_REQ;
			err = do_write_data_page(&fio);
		}
	}

	if (err) {
		file_set_keep_isize(inode);
	} else {
		down_write(&F2FS_I(inode)->i_sem);
		if (F2FS_I(inode)->last_disk_size < psize)
			F2FS_I(inode)->last_disk_size = psize;
		up_write(&F2FS_I(inode)->i_sem);
	}

done:
	if (err && err != -ENOENT)
		goto redirty_out;

out:
	inode_dec_dirty_pages(inode);
	if (err)
		ClearPageUptodate(page);

	if (wbc->for_reclaim) {
		f2fs_submit_merged_write_cond(sbi, inode, 0, page->index, DATA);
		clear_inode_flag(inode, FI_HOT_DATA);
		remove_dirty_inode(inode);
		submitted = NULL;
	}

	unlock_page(page);
	if (!S_ISDIR(inode->i_mode))
		f2fs_balance_fs(sbi, need_balance_fs);

	if (unlikely(f2fs_cp_error(sbi))) {
		f2fs_submit_merged_write(sbi, DATA);
		submitted = NULL;
	}

	if (submitted)
		*submitted = fio.submitted;

	return 0;

redirty_out:
	redirty_page_for_writepage(wbc, page);
	if (!err)
		return AOP_WRITEPAGE_ACTIVATE;
	unlock_page(page);
	return err;
}

static int f2fs_write_data_page(struct page *page,
					struct writeback_control *wbc)
{
	return __write_data_page(page, NULL, wbc, FS_DATA_IO);
}

C
Chao Yu 已提交
1856 1857 1858 1859 1860 1861
/*
 * This function was copied from write_cche_pages from mm/page-writeback.c.
 * The major change is making write step of cold data page separately from
 * warm/hot data page.
 */
static int f2fs_write_cache_pages(struct address_space *mapping,
C
Chao Yu 已提交
1862 1863
					struct writeback_control *wbc,
					enum iostat_type io_type)
C
Chao Yu 已提交
1864 1865 1866 1867 1868 1869 1870 1871 1872
{
	int ret = 0;
	int done = 0;
	struct pagevec pvec;
	int nr_pages;
	pgoff_t uninitialized_var(writeback_index);
	pgoff_t index;
	pgoff_t end;		/* Inclusive */
	pgoff_t done_index;
	pgoff_t last_idx = ULONG_MAX;
	int cycled;
	int range_whole = 0;
	int tag;

	pagevec_init(&pvec);

	if (get_dirty_pages(mapping->host) <=
				SM_I(F2FS_M_SB(mapping))->min_hot_blocks)
		set_inode_flag(mapping->host, FI_HOT_DATA);
	else
		clear_inode_flag(mapping->host, FI_HOT_DATA);

	if (wbc->range_cyclic) {
		writeback_index = mapping->writeback_index; /* prev offset */
		index = writeback_index;
		if (index == 0)
			cycled = 1;
		else
			cycled = 0;
		end = -1;
	} else {
		index = wbc->range_start >> PAGE_SHIFT;
		end = wbc->range_end >> PAGE_SHIFT;
		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
			range_whole = 1;
		cycled = 1; /* ignore range_cyclic tests */
	}
	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
		tag = PAGECACHE_TAG_TOWRITE;
	else
		tag = PAGECACHE_TAG_DIRTY;
retry:
	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
		tag_pages_for_writeback(mapping, index, end);
	done_index = index;
	while (!done && (index <= end)) {
		int i;

		nr_pages = pagevec_lookup_range_tag(&pvec, mapping, &index, end,
				tag);
		if (nr_pages == 0)
			break;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];
			bool submitted = false;

			done_index = page->index;
retry_write:
			lock_page(page);

			if (unlikely(page->mapping != mapping)) {
continue_unlock:
				unlock_page(page);
				continue;
			}

			if (!PageDirty(page)) {
				/* someone wrote it for us */
				goto continue_unlock;
			}

			if (PageWriteback(page)) {
				if (wbc->sync_mode != WB_SYNC_NONE)
					f2fs_wait_on_page_writeback(page,
								DATA, true);
				else
					goto continue_unlock;
			}

			BUG_ON(PageWriteback(page));
			if (!clear_page_dirty_for_io(page))
				goto continue_unlock;

			ret = __write_data_page(page, &submitted, wbc, io_type);
			if (unlikely(ret)) {
				/*
				 * keep nr_to_write, since vfs uses this to
				 * get # of written pages.
				 */
				if (ret == AOP_WRITEPAGE_ACTIVATE) {
					unlock_page(page);
					ret = 0;
					continue;
				} else if (ret == -EAGAIN) {
					ret = 0;
					if (wbc->sync_mode == WB_SYNC_ALL) {
						cond_resched();
						congestion_wait(BLK_RW_ASYNC,
									HZ/50);
						goto retry_write;
					}
					continue;
				}
				done_index = page->index + 1;
				done = 1;
				break;
			} else if (submitted) {
				last_idx = page->index;
			}

			/* give a priority to WB_SYNC threads */
			if ((atomic_read(&F2FS_M_SB(mapping)->wb_sync_req) ||
					--wbc->nr_to_write <= 0) &&
					wbc->sync_mode == WB_SYNC_NONE) {
				done = 1;
				break;
			}
		}
		pagevec_release(&pvec);
		cond_resched();
	}

	if (!cycled && !done) {
		cycled = 1;
		index = 0;
		end = writeback_index - 1;
		goto retry;
	}
	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
		mapping->writeback_index = done_index;

	if (last_idx != ULONG_MAX)
		f2fs_submit_merged_write_cond(F2FS_M_SB(mapping), mapping->host,
						0, last_idx, DATA);

	return ret;
}
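
/*
 * Note on the retry loop above: as in write_cache_pages(), a cyclic pass
 * that started in the middle of the file (cycled == 0) wraps around once,
 * rescanning from index 0 up to the saved writeback_index, so no dirty
 * page is skipped within a single writeback pass.
 */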

int __f2fs_write_data_pages(struct address_space *mapping,
						struct writeback_control *wbc,
						enum iostat_type io_type)
{
	struct inode *inode = mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct blk_plug plug;
	int ret;

	/* deal with chardevs and other special file */
	if (!mapping->a_ops->writepage)
		return 0;

	/* skip writing if there is no dirty page in this inode */
	if (!get_dirty_pages(inode) && wbc->sync_mode == WB_SYNC_NONE)
		return 0;

	/* during POR, we don't need to trigger writepage at all. */
	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
		goto skip_write;

	if (S_ISDIR(inode->i_mode) && wbc->sync_mode == WB_SYNC_NONE &&
			get_dirty_pages(inode) < nr_pages_to_skip(sbi, DATA) &&
			available_free_memory(sbi, DIRTY_DENTS))
		goto skip_write;

	/* skip writing during file defragment */
	if (is_inode_flag_set(inode, FI_DO_DEFRAG))
		goto skip_write;

	trace_f2fs_writepages(mapping->host, wbc, DATA);

	/* to avoid splitting IOs due to mixed WB_SYNC_ALL and WB_SYNC_NONE */
	if (wbc->sync_mode == WB_SYNC_ALL)
		atomic_inc(&sbi->wb_sync_req);
	else if (atomic_read(&sbi->wb_sync_req))
		goto skip_write;

	blk_start_plug(&plug);
	ret = f2fs_write_cache_pages(mapping, wbc, io_type);
	blk_finish_plug(&plug);

	if (wbc->sync_mode == WB_SYNC_ALL)
		atomic_dec(&sbi->wb_sync_req);
	/*
	 * if some pages were truncated, we cannot guarantee that its
	 * mapping->host can detect pending bios.
	 */

	remove_dirty_inode(inode);
	return ret;

skip_write:
	wbc->pages_skipped += get_dirty_pages(inode);
	trace_f2fs_writepages(mapping->host, wbc, DATA);
	return 0;
}
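
/*
 * Note: wb_sync_req above serializes WB_SYNC_ALL against WB_SYNC_NONE
 * writeback. While a sync writer holds the counter, background
 * (WB_SYNC_NONE) writepages calls bail out early, which keeps sync IOs
 * from being split by interleaved async submissions.
 */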

static int f2fs_write_data_pages(struct address_space *mapping,
			    struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;

	return __f2fs_write_data_pages(mapping, wbc,
			F2FS_I(inode)->cp_task == current ?
			FS_CP_DATA_IO : FS_DATA_IO);
}

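/*
 * Undo the effects of a failed buffered write: any pagecache and blocks
 * instantiated beyond the current i_size are truncated away again.
 */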
static void f2fs_write_failed(struct address_space *mapping, loff_t to)
{
	struct inode *inode = mapping->host;
	loff_t i_size = i_size_read(inode);

	if (to > i_size) {
		down_write(&F2FS_I(inode)->i_mmap_sem);
		truncate_pagecache(inode, i_size);
		truncate_blocks(inode, i_size, true);
		up_write(&F2FS_I(inode)->i_mmap_sem);
	}
}

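/*
 * Resolve the on-disk block address for the page about to be written:
 * handle inline data (reading it into the page or converting it to a
 * regular block), consult the extent cache, or allocate a new block for
 * the hole case. *node_changed reports whether node pages were dirtied.
 */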
static int prepare_write_begin(struct f2fs_sb_info *sbi,
			struct page *page, loff_t pos, unsigned len,
			block_t *blk_addr, bool *node_changed)
{
	struct inode *inode = page->mapping->host;
	pgoff_t index = page->index;
	struct dnode_of_data dn;
	struct page *ipage;
	bool locked = false;
	struct extent_info ei = {0, 0, 0};
	int err = 0;

	/*
	 * we already allocated all the blocks, so we don't need to get
	 * the block addresses when there is no need to fill the page.
	 */
	if (!f2fs_has_inline_data(inode) && len == PAGE_SIZE &&
			!is_inode_flag_set(inode, FI_NO_PREALLOC))
		return 0;

	if (f2fs_has_inline_data(inode) ||
			(pos & PAGE_MASK) >= i_size_read(inode)) {
		__do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, true);
		locked = true;
	}
restart:
	/* check inline_data */
	ipage = get_node_page(sbi, inode->i_ino);
	if (IS_ERR(ipage)) {
		err = PTR_ERR(ipage);
		goto unlock_out;
	}

	set_new_dnode(&dn, inode, ipage, ipage, 0);

	if (f2fs_has_inline_data(inode)) {
		if (pos + len <= MAX_INLINE_DATA(inode)) {
			read_inline_data(page, ipage);
			set_inode_flag(inode, FI_DATA_EXIST);
			if (inode->i_nlink)
				set_inline_node(ipage);
		} else {
			err = f2fs_convert_inline_page(&dn, page);
			if (err)
				goto out;
			if (dn.data_blkaddr == NULL_ADDR)
				err = f2fs_get_block(&dn, index);
		}
	} else if (locked) {
		err = f2fs_get_block(&dn, index);
	} else {
		if (f2fs_lookup_extent_cache(inode, index, &ei)) {
			dn.data_blkaddr = ei.blk + index - ei.fofs;
		} else {
			/* hole case */
			err = get_dnode_of_data(&dn, index, LOOKUP_NODE);
			if (err || dn.data_blkaddr == NULL_ADDR) {
				f2fs_put_dnode(&dn);
				__do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO,
								true);
				locked = true;
				goto restart;
			}
		}
	}

	/* convert_inline_page can make node_changed */
	*blk_addr = dn.data_blkaddr;
	*node_changed = dn.node_changed;
out:
	f2fs_put_dnode(&dn);
unlock_out:
	if (locked)
		__do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, false);
	return err;
}

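/*
 * ->write_begin hook: grab and prepare the target page, balancing free
 * segments and zeroing/reading the page as needed so the caller can copy
 * user data into it. Note the locking rules spelled out in the comments
 * below (page #0 vs. the inode page, and avoiding wait_for_stable_page).
 */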
static int f2fs_write_begin(struct file *file, struct address_space *mapping,
		loff_t pos, unsigned len, unsigned flags,
		struct page **pagep, void **fsdata)
{
	struct inode *inode = mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct page *page = NULL;
	pgoff_t index = ((unsigned long long) pos) >> PAGE_SHIFT;
	bool need_balance = false, drop_atomic = false;
	block_t blkaddr = NULL_ADDR;
	int err = 0;

	trace_f2fs_write_begin(inode, pos, len, flags);

	if (f2fs_is_atomic_file(inode) &&
			!available_free_memory(sbi, INMEM_PAGES)) {
		err = -ENOMEM;
		drop_atomic = true;
		goto fail;
	}

	/*
	 * We should check this at this moment to avoid deadlock on inode page
	 * and #0 page. The locking rule for inline_data conversion should be:
	 * lock_page(page #0) -> lock_page(inode_page)
	 */
	if (index != 0) {
		err = f2fs_convert_inline_inode(inode);
		if (err)
			goto fail;
	}
repeat:
	/*
	 * Do not use grab_cache_page_write_begin() to avoid deadlock due to
	 * wait_for_stable_page. Will wait that below with our IO control.
	 */
	page = f2fs_pagecache_get_page(mapping, index,
				FGP_LOCK | FGP_WRITE | FGP_CREAT, GFP_NOFS);
	if (!page) {
		err = -ENOMEM;
		goto fail;
	}

	*pagep = page;

	err = prepare_write_begin(sbi, page, pos, len,
					&blkaddr, &need_balance);
	if (err)
		goto fail;

	if (need_balance && has_not_enough_free_secs(sbi, 0, 0)) {
		unlock_page(page);
		f2fs_balance_fs(sbi, true);
		lock_page(page);
		if (page->mapping != mapping) {
			/* The page got truncated from under us */
			f2fs_put_page(page, 1);
			goto repeat;
		}
	}

	f2fs_wait_on_page_writeback(page, DATA, false);

	/* wait for GCed encrypted page writeback */
	if (f2fs_encrypted_file(inode))
		f2fs_wait_on_block_writeback(sbi, blkaddr);

	if (len == PAGE_SIZE || PageUptodate(page))
		return 0;

	if (!(pos & (PAGE_SIZE - 1)) && (pos + len) >= i_size_read(inode)) {
		zero_user_segment(page, len, PAGE_SIZE);
		return 0;
	}

	if (blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_SIZE);
		SetPageUptodate(page);
	} else {
		err = f2fs_submit_page_read(inode, page, blkaddr);
		if (err)
			goto fail;

		lock_page(page);
		if (unlikely(page->mapping != mapping)) {
			f2fs_put_page(page, 1);
			goto repeat;
		}
		if (unlikely(!PageUptodate(page))) {
			err = -EIO;
			goto fail;
		}
	}
	return 0;

fail:
	f2fs_put_page(page, 1);
	f2fs_write_failed(mapping, pos + len);
	if (drop_atomic)
		drop_inmem_pages_all(sbi);
	return err;
}

static int f2fs_write_end(struct file *file,
			struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	struct inode *inode = page->mapping->host;

	trace_f2fs_write_end(inode, pos, len, copied);

	/*
	 * This should come from len == PAGE_SIZE, and we expect copied
	 * to be PAGE_SIZE. Otherwise, we treat it as zero copied and
	 * let generic_perform_write() try to copy data again through copied=0.
	 */
	if (!PageUptodate(page)) {
		if (unlikely(copied != len))
			copied = 0;
		else
			SetPageUptodate(page);
	}
	if (!copied)
		goto unlock_out;

	set_page_dirty(page);

	if (pos + copied > i_size_read(inode))
		f2fs_i_size_write(inode, pos + copied);
unlock_out:
	f2fs_put_page(page, 1);
	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
	return copied;
}

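/*
 * Reject direct I/O that is not block-aligned. For example (a sketch,
 * assuming a 4KiB block size): blocksize_mask is 0xfff, so a DIO request
 * at offset 0x1200 has (0x1200 & 0xfff) == 0x200 and is refused with
 * -EINVAL; the same mask is applied to the iov_iter's buffer alignment.
 */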
static int check_direct_IO(struct inode *inode, struct iov_iter *iter,
			   loff_t offset)
{
	unsigned blocksize_mask = inode->i_sb->s_blocksize - 1;

	if (offset & blocksize_mask)
		return -EINVAL;

	if (iov_iter_alignment(iter) & blocksize_mask)
		return -EINVAL;

	return 0;
}

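/*
 * ->direct_IO hook. Returning 0 (e.g. when f2fs_force_buffered_io() is
 * true) lets the generic code fall back to buffered I/O. The write hint
 * is saved and restored around the transfer when whint_mode is off, and
 * IOCB_NOWAIT requests back off with -EAGAIN instead of sleeping on
 * dio_rwsem.
 */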
static ssize_t f2fs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
{
	struct address_space *mapping = iocb->ki_filp->f_mapping;
	struct inode *inode = mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	size_t count = iov_iter_count(iter);
	loff_t offset = iocb->ki_pos;
	int rw = iov_iter_rw(iter);
	int err;
	enum rw_hint hint = iocb->ki_hint;
	int whint_mode = F2FS_OPTION(sbi).whint_mode;

	err = check_direct_IO(inode, iter, offset);
	if (err)
		return err;

	if (f2fs_force_buffered_io(inode, rw))
		return 0;

	trace_f2fs_direct_IO_enter(inode, offset, count, rw);

	if (rw == WRITE && whint_mode == WHINT_MODE_OFF)
		iocb->ki_hint = WRITE_LIFE_NOT_SET;

	if (!down_read_trylock(&F2FS_I(inode)->dio_rwsem[rw])) {
		if (iocb->ki_flags & IOCB_NOWAIT) {
			iocb->ki_hint = hint;
			err = -EAGAIN;
			goto out;
		}
		down_read(&F2FS_I(inode)->dio_rwsem[rw]);
	}

	err = blockdev_direct_IO(iocb, inode, iter, get_data_block_dio);
	up_read(&F2FS_I(inode)->dio_rwsem[rw]);

	if (rw == WRITE) {
		if (whint_mode == WHINT_MODE_OFF)
			iocb->ki_hint = hint;
		if (err > 0) {
			f2fs_update_iostat(F2FS_I_SB(inode), APP_DIRECT_IO,
									err);
			set_inode_flag(inode, FI_UPDATE_WRITE);
		} else if (err < 0) {
			f2fs_write_failed(mapping, offset + count);
		}
	}

out:
	trace_f2fs_direct_IO_exit(inode, offset, count, rw, err);

	return err;
}

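/*
 * ->invalidatepage hook: keep the per-type dirty page accounting in sync
 * when a page is torn out of the cache, and route atomic-written pages
 * through drop_inmem_page() so their in-memory registration is undone.
 */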
void f2fs_invalidate_page(struct page *page, unsigned int offset,
							unsigned int length)
{
	struct inode *inode = page->mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

	if (inode->i_ino >= F2FS_ROOT_INO(sbi) &&
		(offset % PAGE_SIZE || length != PAGE_SIZE))
		return;

	if (PageDirty(page)) {
		if (inode->i_ino == F2FS_META_INO(sbi)) {
			dec_page_count(sbi, F2FS_DIRTY_META);
		} else if (inode->i_ino == F2FS_NODE_INO(sbi)) {
			dec_page_count(sbi, F2FS_DIRTY_NODES);
		} else {
			inode_dec_dirty_pages(inode);
			remove_dirty_inode(inode);
		}
	}

	/* This is atomic written page, keep Private */
	if (IS_ATOMIC_WRITTEN_PAGE(page))
		return drop_inmem_page(inode, page);

	set_page_private(page, 0);
	ClearPagePrivate(page);
}

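/*
 * ->releasepage hook: only pages that are neither dirty nor registered
 * for an atomic write may give up their private state and be freed.
 */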
int f2fs_release_page(struct page *page, gfp_t wait)
{
	/* If this is dirty page, keep PagePrivate */
	if (PageDirty(page))
		return 0;

	/* This is atomic written page, keep Private */
	if (IS_ATOMIC_WRITTEN_PAGE(page))
		return 0;

	set_page_private(page, 0);
	ClearPagePrivate(page);
	return 1;
}

/*
 * This was copied from __set_page_dirty_buffers which gives higher performance
 * in very high speed storages. (e.g., pmem)
 */
void f2fs_set_page_dirty_nobuffers(struct page *page)
{
	struct address_space *mapping = page->mapping;
	unsigned long flags;

	if (unlikely(!mapping))
		return;

	spin_lock(&mapping->private_lock);
	lock_page_memcg(page);
	SetPageDirty(page);
	spin_unlock(&mapping->private_lock);

	spin_lock_irqsave(&mapping->tree_lock, flags);
	WARN_ON_ONCE(!PageUptodate(page));
	account_page_dirtied(page, mapping);
	radix_tree_tag_set(&mapping->page_tree,
			page_index(page), PAGECACHE_TAG_DIRTY);
	spin_unlock_irqrestore(&mapping->tree_lock, flags);
	unlock_page_memcg(page);

	__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
}

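/*
 * ->set_page_dirty hook: pages of a file under an atomic write are
 * registered in the inmem list instead of being made dirty, so they are
 * only written back once the atomic write is committed.
 */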
static int f2fs_set_data_page_dirty(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct inode *inode = mapping->host;

	trace_f2fs_set_page_dirty(page, DATA);

	if (!PageUptodate(page))
		SetPageUptodate(page);

	if (f2fs_is_atomic_file(inode) && !f2fs_is_commit_atomic_write(inode)) {
		if (!IS_ATOMIC_WRITTEN_PAGE(page)) {
			register_inmem_page(inode, page);
			return 1;
		}
		/*
		 * Previously, this page has been registered, we just
		 * return here.
		 */
		return 0;
	}

	if (!PageDirty(page)) {
		f2fs_set_page_dirty_nobuffers(page);
		update_dirty_page(inode, page);
		return 1;
	}
	return 0;
}

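/*
 * ->bmap hook: inline-data files have no block mapping to report, and a
 * dirty mapping is flushed first so the block address returned by
 * generic_block_bmap() reflects the allocated on-disk location.
 */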
static sector_t f2fs_bmap(struct address_space *mapping, sector_t block)
{
	struct inode *inode = mapping->host;

	if (f2fs_has_inline_data(inode))
		return 0;

	/* make sure allocating whole blocks */
	if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
		filemap_write_and_wait(mapping);

	return generic_block_bmap(mapping, block, get_data_block_bmap);
}

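/*
 * Page-migration support. Atomic-written pages are migrated only in
 * MIGRATE_SYNC mode and under inmem_lock, since the inmem list entry that
 * references the old page must be switched to the new one atomically.
 */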
#ifdef CONFIG_MIGRATION
#include <linux/migrate.h>

int f2fs_migrate_page(struct address_space *mapping,
		struct page *newpage, struct page *page, enum migrate_mode mode)
{
	int rc, extra_count;
	struct f2fs_inode_info *fi = F2FS_I(mapping->host);
	bool atomic_written = IS_ATOMIC_WRITTEN_PAGE(page);

	BUG_ON(PageWriteback(page));

	/* migrating an atomic written page is safe with the inmem_lock held */
	if (atomic_written) {
		if (mode != MIGRATE_SYNC)
			return -EBUSY;
		if (!mutex_trylock(&fi->inmem_lock))
			return -EAGAIN;
	}

	/*
	 * A reference is expected if PagePrivate set when move mapping,
	 * however F2FS breaks this for maintaining dirty page counts when
	 * truncating pages. So here adjusting the 'extra_count' make it work.
	 */
	extra_count = (atomic_written ? 1 : 0) - page_has_private(page);
	rc = migrate_page_move_mapping(mapping, newpage,
				page, NULL, mode, extra_count);
	if (rc != MIGRATEPAGE_SUCCESS) {
		if (atomic_written)
			mutex_unlock(&fi->inmem_lock);
		return rc;
	}

	if (atomic_written) {
		struct inmem_pages *cur;
		list_for_each_entry(cur, &fi->inmem_pages, list)
			if (cur->page == page) {
				cur->page = newpage;
				break;
			}
		mutex_unlock(&fi->inmem_lock);
		put_page(page);
		get_page(newpage);
	}

	if (PagePrivate(page))
		SetPagePrivate(newpage);
	set_page_private(newpage, page_private(page));

	if (mode != MIGRATE_SYNC_NO_COPY)
		migrate_page_copy(newpage, page);
	else
		migrate_page_states(newpage, page);

	return MIGRATEPAGE_SUCCESS;
}
#endif

const struct address_space_operations f2fs_dblock_aops = {
	.readpage	= f2fs_read_data_page,
	.readpages	= f2fs_read_data_pages,
	.writepage	= f2fs_write_data_page,
	.writepages	= f2fs_write_data_pages,
	.write_begin	= f2fs_write_begin,
	.write_end	= f2fs_write_end,
	.set_page_dirty	= f2fs_set_data_page_dirty,
	.invalidatepage	= f2fs_invalidate_page,
	.releasepage	= f2fs_release_page,
	.direct_IO	= f2fs_direct_IO,
	.bmap		= f2fs_bmap,
#ifdef CONFIG_MIGRATION
	.migratepage	= f2fs_migrate_page,
#endif
};