data.c 57.7 KB
Newer Older
J
Jaegeuk Kim 已提交
1
/*
2 3 4 5 6 7 8 9 10 11 12 13 14 15 16
 * fs/f2fs/data.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/buffer_head.h>
#include <linux/mpage.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
C
Chao Yu 已提交
17
#include <linux/pagevec.h>
18 19
#include <linux/blkdev.h>
#include <linux/bio.h>
20
#include <linux/prefetch.h>
21
#include <linux/uio.h>
22 23
#include <linux/mm.h>
#include <linux/memcontrol.h>
J
Jaegeuk Kim 已提交
24
#include <linux/cleancache.h>
25
#include <linux/sched/signal.h>
26 27 28 29

#include "f2fs.h"
#include "node.h"
#include "segment.h"
J
Jaegeuk Kim 已提交
30
#include "trace.h"
31
#include <trace/events/f2fs.h>
32

33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52
static bool __is_cp_guaranteed(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct inode *inode;
	struct f2fs_sb_info *sbi;

	if (!mapping)
		return false;

	inode = mapping->host;
	sbi = F2FS_I_SB(inode);

	if (inode->i_ino == F2FS_META_INO(sbi) ||
			inode->i_ino ==  F2FS_NODE_INO(sbi) ||
			S_ISDIR(inode->i_mode) ||
			is_cold_data(page))
		return true;
	return false;
}

53
static void f2fs_read_end_io(struct bio *bio)
54
{
55 56
	struct bio_vec *bvec;
	int i;
57

C
Chao Yu 已提交
58
#ifdef CONFIG_F2FS_FAULT_INJECTION
59 60
	if (time_to_inject(F2FS_P_SB(bio->bi_io_vec->bv_page), FAULT_IO)) {
		f2fs_show_injection_info(FAULT_IO);
61
		bio->bi_status = BLK_STS_IOERR;
62
	}
C
Chao Yu 已提交
63 64
#endif

65
	if (f2fs_bio_encrypted(bio)) {
66
		if (bio->bi_status) {
67
			fscrypt_release_ctx(bio->bi_private);
68
		} else {
69
			fscrypt_decrypt_bio_pages(bio->bi_private, bio);
70 71 72 73
			return;
		}
	}

74 75
	bio_for_each_segment_all(bvec, bio, i) {
		struct page *page = bvec->bv_page;
J
Jaegeuk Kim 已提交
76

77
		if (!bio->bi_status) {
78 79
			if (!PageUptodate(page))
				SetPageUptodate(page);
J
Jaegeuk Kim 已提交
80 81 82 83 84 85 86 87 88
		} else {
			ClearPageUptodate(page);
			SetPageError(page);
		}
		unlock_page(page);
	}
	bio_put(bio);
}

89
static void f2fs_write_end_io(struct bio *bio)
90
{
91
	struct f2fs_sb_info *sbi = bio->bi_private;
92 93
	struct bio_vec *bvec;
	int i;
94

95
	bio_for_each_segment_all(bvec, bio, i) {
96
		struct page *page = bvec->bv_page;
97
		enum count_type type = WB_DATA_TYPE(page);
98

99 100 101 102 103 104
		if (IS_DUMMY_WRITTEN_PAGE(page)) {
			set_page_private(page, (unsigned long)NULL);
			ClearPagePrivate(page);
			unlock_page(page);
			mempool_free(page, sbi->write_io_dummy);

105
			if (unlikely(bio->bi_status))
106 107 108 109
				f2fs_stop_checkpoint(sbi, true);
			continue;
		}

110
		fscrypt_pullback_bio_page(&page, true);
111

112
		if (unlikely(bio->bi_status)) {
113
			mapping_set_error(page->mapping, -EIO);
114 115
			if (type == F2FS_WB_CP_DATA)
				f2fs_stop_checkpoint(sbi, true);
116
		}
117 118 119 120

		f2fs_bug_on(sbi, page->mapping == NODE_MAPPING(sbi) &&
					page->index != nid_of_node(page));

121 122
		dec_page_count(sbi, type);
		clear_cold_data(page);
123
		end_page_writeback(page);
124
	}
125
	if (!get_pages(sbi, F2FS_WB_CP_DATA) &&
126
				wq_has_sleeper(&sbi->cp_wait))
127 128 129 130 131
		wake_up(&sbi->cp_wait);

	bio_put(bio);
}

J
Jaegeuk Kim 已提交
132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149
/*
 * Return true, if pre_bio's bdev is same as its target device.
 */
struct block_device *f2fs_target_device(struct f2fs_sb_info *sbi,
				block_t blk_addr, struct bio *bio)
{
	struct block_device *bdev = sbi->sb->s_bdev;
	int i;

	for (i = 0; i < sbi->s_ndevs; i++) {
		if (FDEV(i).start_blk <= blk_addr &&
					FDEV(i).end_blk >= blk_addr) {
			blk_addr -= FDEV(i).start_blk;
			bdev = FDEV(i).bdev;
			break;
		}
	}
	if (bio) {
150
		bio_set_dev(bio, bdev);
J
Jaegeuk Kim 已提交
151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168
		bio->bi_iter.bi_sector = SECTOR_FROM_BLOCK(blk_addr);
	}
	return bdev;
}

int f2fs_target_device_index(struct f2fs_sb_info *sbi, block_t blkaddr)
{
	int i;

	for (i = 0; i < sbi->s_ndevs; i++)
		if (FDEV(i).start_blk <= blkaddr && FDEV(i).end_blk >= blkaddr)
			return i;
	return 0;
}

static bool __same_bdev(struct f2fs_sb_info *sbi,
				block_t blk_addr, struct bio *bio)
{
169 170
	struct block_device *b = f2fs_target_device(sbi, blk_addr, NULL);
	return bio->bi_disk == b->bd_disk && bio->bi_partno == b->bd_partno;
J
Jaegeuk Kim 已提交
171 172
}

173 174 175 176
/*
 * Low-level block read/write IO operations.
 */
static struct bio *__bio_alloc(struct f2fs_sb_info *sbi, block_t blk_addr,
177
				struct writeback_control *wbc,
178 179 180 181
				int npages, bool is_read)
{
	struct bio *bio;

182
	bio = f2fs_bio_alloc(sbi, npages, true);
183

J
Jaegeuk Kim 已提交
184
	f2fs_target_device(sbi, blk_addr, bio);
185
	bio->bi_end_io = is_read ? f2fs_read_end_io : f2fs_write_end_io;
186
	bio->bi_private = is_read ? NULL : sbi;
187 188
	if (wbc)
		wbc_init_bio(wbc, bio);
189 190 191 192

	return bio;
}

193 194
static inline void __submit_bio(struct f2fs_sb_info *sbi,
				struct bio *bio, enum page_type type)
195
{
196
	if (!is_read_io(bio_op(bio))) {
197 198
		unsigned int start;

199
		if (f2fs_sb_mounted_blkzoned(sbi->sb) &&
200
			current->plug && (type == DATA || type == NODE))
J
Jaegeuk Kim 已提交
201
			blk_finish_plug(current->plug);
202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230

		if (type != DATA && type != NODE)
			goto submit_io;

		start = bio->bi_iter.bi_size >> F2FS_BLKSIZE_BITS;
		start %= F2FS_IO_SIZE(sbi);

		if (start == 0)
			goto submit_io;

		/* fill dummy pages */
		for (; start < F2FS_IO_SIZE(sbi); start++) {
			struct page *page =
				mempool_alloc(sbi->write_io_dummy,
					GFP_NOIO | __GFP_ZERO | __GFP_NOFAIL);
			f2fs_bug_on(sbi, !page);

			SetPagePrivate(page);
			set_page_private(page, (unsigned long)DUMMY_WRITTEN_PAGE);
			lock_page(page);
			if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE)
				f2fs_bug_on(sbi, 1);
		}
		/*
		 * In the NODE case, we lose next block address chain. So, we
		 * need to do checkpoint in f2fs_sync_file.
		 */
		if (type == NODE)
			set_sbi_flag(sbi, SBI_NEED_CP);
J
Jaegeuk Kim 已提交
231
	}
232
submit_io:
J
Jaegeuk Kim 已提交
233 234 235 236
	if (is_read_io(bio_op(bio)))
		trace_f2fs_submit_read_bio(sbi->sb, type, bio);
	else
		trace_f2fs_submit_write_bio(sbi->sb, type, bio);
237
	submit_bio(bio);
238 239
}

J
Jaegeuk Kim 已提交
240
static void __submit_merged_bio(struct f2fs_bio_info *io)
241
{
J
Jaegeuk Kim 已提交
242
	struct f2fs_io_info *fio = &io->fio;
243 244 245 246

	if (!io->bio)
		return;

J
Jaegeuk Kim 已提交
247 248
	bio_set_op_attrs(io->bio, fio->op, fio->op_flags);

M
Mike Christie 已提交
249
	if (is_read_io(fio->op))
J
Jaegeuk Kim 已提交
250
		trace_f2fs_prepare_read_bio(io->sbi->sb, fio->type, io->bio);
251
	else
J
Jaegeuk Kim 已提交
252
		trace_f2fs_prepare_write_bio(io->sbi->sb, fio->type, io->bio);
M
Mike Christie 已提交
253

254
	__submit_bio(io->sbi, io->bio, fio->type);
255 256 257
	io->bio = NULL;
}

258 259
static bool __has_merged_page(struct f2fs_bio_info *io,
				struct inode *inode, nid_t ino, pgoff_t idx)
C
Chao Yu 已提交
260 261 262 263 264
{
	struct bio_vec *bvec;
	struct page *target;
	int i;

265
	if (!io->bio)
C
Chao Yu 已提交
266
		return false;
267

268
	if (!inode && !ino)
269
		return true;
C
Chao Yu 已提交
270 271 272

	bio_for_each_segment_all(bvec, io->bio, i) {

273
		if (bvec->bv_page->mapping)
C
Chao Yu 已提交
274
			target = bvec->bv_page;
275 276
		else
			target = fscrypt_control_page(bvec->bv_page);
C
Chao Yu 已提交
277

278 279 280
		if (idx != target->index)
			continue;

281 282 283
		if (inode && inode == target->mapping->host)
			return true;
		if (ino && ino == ino_of_node(target))
C
Chao Yu 已提交
284 285 286 287 288 289
			return true;
	}

	return false;
}

290
static bool has_merged_page(struct f2fs_sb_info *sbi, struct inode *inode,
291
				nid_t ino, pgoff_t idx, enum page_type type)
292 293
{
	enum page_type btype = PAGE_TYPE_OF_BIO(type);
J
Jaegeuk Kim 已提交
294 295 296
	enum temp_type temp;
	struct f2fs_bio_info *io;
	bool ret = false;
297

J
Jaegeuk Kim 已提交
298 299 300 301 302 303
	for (temp = HOT; temp < NR_TEMP_TYPE; temp++) {
		io = sbi->write_io[btype] + temp;

		down_read(&io->io_rwsem);
		ret = __has_merged_page(io, inode, ino, idx);
		up_read(&io->io_rwsem);
304

J
Jaegeuk Kim 已提交
305 306 307 308
		/* TODO: use HOT temp only for meta pages now. */
		if (ret || btype == META)
			break;
	}
309 310 311
	return ret;
}

312
static void __f2fs_submit_merged_write(struct f2fs_sb_info *sbi,
J
Jaegeuk Kim 已提交
313
				enum page_type type, enum temp_type temp)
314 315
{
	enum page_type btype = PAGE_TYPE_OF_BIO(type);
J
Jaegeuk Kim 已提交
316
	struct f2fs_bio_info *io = sbi->write_io[btype] + temp;
317

318
	down_write(&io->io_rwsem);
J
Jaegeuk Kim 已提交
319 320 321 322

	/* change META to META_FLUSH in the checkpoint procedure */
	if (type >= META_FLUSH) {
		io->fio.type = META_FLUSH;
M
Mike Christie 已提交
323
		io->fio.op = REQ_OP_WRITE;
324
		io->fio.op_flags = REQ_META | REQ_PRIO | REQ_SYNC;
325
		if (!test_opt(sbi, NOBARRIER))
326
			io->fio.op_flags |= REQ_PREFLUSH | REQ_FUA;
J
Jaegeuk Kim 已提交
327 328
	}
	__submit_merged_bio(io);
329
	up_write(&io->io_rwsem);
330 331
}

J
Jaegeuk Kim 已提交
332 333 334
static void __submit_merged_write_cond(struct f2fs_sb_info *sbi,
				struct inode *inode, nid_t ino, pgoff_t idx,
				enum page_type type, bool force)
335
{
J
Jaegeuk Kim 已提交
336 337 338 339 340 341 342 343 344 345 346 347 348
	enum temp_type temp;

	if (!force && !has_merged_page(sbi, inode, ino, idx, type))
		return;

	for (temp = HOT; temp < NR_TEMP_TYPE; temp++) {

		__f2fs_submit_merged_write(sbi, type, temp);

		/* TODO: use HOT temp only for meta pages now. */
		if (type >= META)
			break;
	}
349 350
}

351
void f2fs_submit_merged_write(struct f2fs_sb_info *sbi, enum page_type type)
352
{
J
Jaegeuk Kim 已提交
353
	__submit_merged_write_cond(sbi, NULL, 0, 0, type, true);
354 355
}

356
void f2fs_submit_merged_write_cond(struct f2fs_sb_info *sbi,
357
				struct inode *inode, nid_t ino, pgoff_t idx,
358
				enum page_type type)
359
{
J
Jaegeuk Kim 已提交
360
	__submit_merged_write_cond(sbi, inode, ino, idx, type, false);
361 362
}

363
void f2fs_flush_merged_writes(struct f2fs_sb_info *sbi)
364
{
365 366 367
	f2fs_submit_merged_write(sbi, DATA);
	f2fs_submit_merged_write(sbi, NODE);
	f2fs_submit_merged_write(sbi, META);
368 369
}

370 371
/*
 * Fill the locked page with data located in the block address.
372
 * A caller needs to unlock the page on failure.
373
 */
374
int f2fs_submit_page_bio(struct f2fs_io_info *fio)
375 376
{
	struct bio *bio;
377 378
	struct page *page = fio->encrypted_page ?
			fio->encrypted_page : fio->page;
379

380
	trace_f2fs_submit_page_bio(page, fio);
381
	f2fs_trace_ios(fio, 0);
382 383

	/* Allocate a new bio */
384 385
	bio = __bio_alloc(fio->sbi, fio->new_blkaddr, fio->io_wbc,
				1, is_read_io(fio->op));
386

387
	if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
388 389 390
		bio_put(bio);
		return -EFAULT;
	}
M
Mike Christie 已提交
391
	bio_set_op_attrs(bio, fio->op, fio->op_flags);
392

393
	__submit_bio(fio->sbi, bio, fio->type);
394 395 396

	if (!is_read_io(fio->op))
		inc_page_count(fio->sbi, WB_DATA_TYPE(fio->page));
397 398 399
	return 0;
}

400
int f2fs_submit_page_write(struct f2fs_io_info *fio)
401
{
402
	struct f2fs_sb_info *sbi = fio->sbi;
J
Jaegeuk Kim 已提交
403
	enum page_type btype = PAGE_TYPE_OF_BIO(fio->type);
J
Jaegeuk Kim 已提交
404
	struct f2fs_bio_info *io = sbi->write_io[btype] + fio->temp;
405
	struct page *bio_page;
406
	int err = 0;
407

408
	f2fs_bug_on(sbi, is_read_io(fio->op));
409

410 411 412 413 414 415 416 417 418 419 420 421 422
	down_write(&io->io_rwsem);
next:
	if (fio->in_list) {
		spin_lock(&io->io_lock);
		if (list_empty(&io->io_list)) {
			spin_unlock(&io->io_lock);
			goto out_fail;
		}
		fio = list_first_entry(&io->io_list,
						struct f2fs_io_info, list);
		list_del(&fio->list);
		spin_unlock(&io->io_lock);
	}
423

424 425 426
	if (fio->old_blkaddr != NEW_ADDR)
		verify_block_addr(sbi, fio->old_blkaddr);
	verify_block_addr(sbi, fio->new_blkaddr);
427

428 429
	bio_page = fio->encrypted_page ? fio->encrypted_page : fio->page;

430 431
	/* set submitted = true as a return value */
	fio->submitted = true;
432

433
	inc_page_count(sbi, WB_DATA_TYPE(bio_page));
434

435
	if (io->bio && (io->last_block_in_bio != fio->new_blkaddr - 1 ||
J
Jaegeuk Kim 已提交
436 437
	    (io->fio.op != fio->op || io->fio.op_flags != fio->op_flags) ||
			!__same_bdev(sbi, fio->new_blkaddr, io->bio)))
J
Jaegeuk Kim 已提交
438
		__submit_merged_bio(io);
439 440
alloc_new:
	if (io->bio == NULL) {
441 442 443
		if ((fio->type == DATA || fio->type == NODE) &&
				fio->new_blkaddr & F2FS_IO_SIZE_MASK(sbi)) {
			err = -EAGAIN;
444
			dec_page_count(sbi, WB_DATA_TYPE(bio_page));
445 446
			goto out_fail;
		}
447
		io->bio = __bio_alloc(sbi, fio->new_blkaddr, fio->io_wbc,
448
						BIO_MAX_PAGES, false);
J
Jaegeuk Kim 已提交
449
		io->fio = *fio;
450 451
	}

J
Jaegeuk Kim 已提交
452
	if (bio_add_page(io->bio, bio_page, PAGE_SIZE, 0) < PAGE_SIZE) {
J
Jaegeuk Kim 已提交
453
		__submit_merged_bio(io);
454 455 456
		goto alloc_new;
	}

457 458 459
	if (fio->io_wbc)
		wbc_account_io(fio->io_wbc, bio_page, PAGE_SIZE);

460
	io->last_block_in_bio = fio->new_blkaddr;
461
	f2fs_trace_ios(fio, 0);
462 463 464 465 466

	trace_f2fs_submit_page_write(fio->page, fio);

	if (fio->in_list)
		goto next;
467
out_fail:
468
	up_write(&io->io_rwsem);
469
	return err;
470 471
}

472 473 474 475 476 477 478 479 480 481 482 483 484 485 486 487
static struct bio *f2fs_grab_read_bio(struct inode *inode, block_t blkaddr,
							 unsigned nr_pages)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct fscrypt_ctx *ctx = NULL;
	struct bio *bio;

	if (f2fs_encrypted_file(inode)) {
		ctx = fscrypt_get_ctx(inode, GFP_NOFS);
		if (IS_ERR(ctx))
			return ERR_CAST(ctx);

		/* wait the page to be moved by cleaning */
		f2fs_wait_on_block_writeback(sbi, blkaddr);
	}

488
	bio = f2fs_bio_alloc(sbi, min_t(int, nr_pages, BIO_MAX_PAGES), false);
489 490 491 492 493 494 495 496 497 498 499 500 501 502 503 504 505 506 507 508 509 510 511 512 513 514 515 516 517 518
	if (!bio) {
		if (ctx)
			fscrypt_release_ctx(ctx);
		return ERR_PTR(-ENOMEM);
	}
	f2fs_target_device(sbi, blkaddr, bio);
	bio->bi_end_io = f2fs_read_end_io;
	bio->bi_private = ctx;
	bio_set_op_attrs(bio, REQ_OP_READ, 0);

	return bio;
}

/* This can handle encryption stuffs */
static int f2fs_submit_page_read(struct inode *inode, struct page *page,
							block_t blkaddr)
{
	struct bio *bio = f2fs_grab_read_bio(inode, blkaddr, 1);

	if (IS_ERR(bio))
		return PTR_ERR(bio);

	if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
		bio_put(bio);
		return -EFAULT;
	}
	__submit_bio(F2FS_I_SB(inode), bio, DATA);
	return 0;
}

519 520 521 522
static void __set_data_blkaddr(struct dnode_of_data *dn)
{
	struct f2fs_node *rn = F2FS_NODE(dn->node_page);
	__le32 *addr_array;
523 524 525 526
	int base = 0;

	if (IS_INODE(dn->node_page) && f2fs_has_extra_attr(dn->inode))
		base = get_extra_isize(dn->inode);
527 528 529

	/* Get physical address of data block */
	addr_array = blkaddr_in_node(rn);
530
	addr_array[base + dn->ofs_in_node] = cpu_to_le32(dn->data_blkaddr);
531 532
}

J
Jaegeuk Kim 已提交
533
/*
534 535 536 537 538
 * Lock ordering for the change of data block address:
 * ->data_page
 *  ->node_page
 *    update block addresses in the node page
 */
539
void set_data_blkaddr(struct dnode_of_data *dn)
540
{
541 542 543
	f2fs_wait_on_page_writeback(dn->node_page, NODE, true);
	__set_data_blkaddr(dn);
	if (set_page_dirty(dn->node_page))
544
		dn->node_changed = true;
545 546
}

547 548 549 550 551 552 553
void f2fs_update_data_blkaddr(struct dnode_of_data *dn, block_t blkaddr)
{
	dn->data_blkaddr = blkaddr;
	set_data_blkaddr(dn);
	f2fs_update_extent_cache(dn);
}

554 555
/* dn->ofs_in_node will be returned with up-to-date last block pointer */
int reserve_new_blocks(struct dnode_of_data *dn, blkcnt_t count)
556
{
557
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
C
Chao Yu 已提交
558
	int err;
559

560 561 562
	if (!count)
		return 0;

563
	if (unlikely(is_inode_flag_set(dn->inode, FI_NO_ALLOC)))
564
		return -EPERM;
C
Chao Yu 已提交
565 566
	if (unlikely((err = inc_valid_block_count(sbi, dn->inode, &count))))
		return err;
567

568 569 570 571 572 573
	trace_f2fs_reserve_new_blocks(dn->inode, dn->nid,
						dn->ofs_in_node, count);

	f2fs_wait_on_page_writeback(dn->node_page, NODE, true);

	for (; count > 0; dn->ofs_in_node++) {
574 575
		block_t blkaddr = datablock_addr(dn->inode,
					dn->node_page, dn->ofs_in_node);
576 577 578 579 580 581 582 583 584
		if (blkaddr == NULL_ADDR) {
			dn->data_blkaddr = NEW_ADDR;
			__set_data_blkaddr(dn);
			count--;
		}
	}

	if (set_page_dirty(dn->node_page))
		dn->node_changed = true;
585 586 587
	return 0;
}

588 589 590 591 592 593 594 595 596 597 598
/* Should keep dn->ofs_in_node unchanged */
int reserve_new_block(struct dnode_of_data *dn)
{
	unsigned int ofs_in_node = dn->ofs_in_node;
	int ret;

	ret = reserve_new_blocks(dn, 1);
	dn->ofs_in_node = ofs_in_node;
	return ret;
}

599 600 601 602 603 604 605 606
int f2fs_reserve_block(struct dnode_of_data *dn, pgoff_t index)
{
	bool need_put = dn->inode_page ? false : true;
	int err;

	err = get_dnode_of_data(dn, index, ALLOC_NODE);
	if (err)
		return err;
607

608 609
	if (dn->data_blkaddr == NULL_ADDR)
		err = reserve_new_block(dn);
610
	if (err || need_put)
611 612 613 614
		f2fs_put_dnode(dn);
	return err;
}

615
int f2fs_get_block(struct dnode_of_data *dn, pgoff_t index)
616
{
617
	struct extent_info ei  = {0,0,0};
618
	struct inode *inode = dn->inode;
619

620 621 622
	if (f2fs_lookup_extent_cache(inode, index, &ei)) {
		dn->data_blkaddr = ei.blk + index - ei.fofs;
		return 0;
623
	}
624

625
	return f2fs_reserve_block(dn, index);
626 627
}

628
struct page *get_read_data_page(struct inode *inode, pgoff_t index,
M
Mike Christie 已提交
629
						int op_flags, bool for_write)
630 631 632 633
{
	struct address_space *mapping = inode->i_mapping;
	struct dnode_of_data dn;
	struct page *page;
634
	struct extent_info ei = {0,0,0};
635
	int err;
636

637
	page = f2fs_grab_cache_page(mapping, index, for_write);
638 639 640
	if (!page)
		return ERR_PTR(-ENOMEM);

C
Chao Yu 已提交
641 642 643 644 645
	if (f2fs_lookup_extent_cache(inode, index, &ei)) {
		dn.data_blkaddr = ei.blk + index - ei.fofs;
		goto got_it;
	}

646
	set_new_dnode(&dn, inode, NULL, NULL, 0);
647
	err = get_dnode_of_data(&dn, index, LOOKUP_NODE);
648 649
	if (err)
		goto put_err;
650 651
	f2fs_put_dnode(&dn);

652
	if (unlikely(dn.data_blkaddr == NULL_ADDR)) {
653 654
		err = -ENOENT;
		goto put_err;
655
	}
C
Chao Yu 已提交
656
got_it:
657 658
	if (PageUptodate(page)) {
		unlock_page(page);
659
		return page;
660
	}
661

J
Jaegeuk Kim 已提交
662 663 664 665 666 667 668
	/*
	 * A new dentry page is allocated but not able to be written, since its
	 * new inode page couldn't be allocated due to -ENOSPC.
	 * In such the case, its blkaddr can be remained as NEW_ADDR.
	 * see, f2fs_add_link -> get_new_data_page -> init_inode_metadata.
	 */
	if (dn.data_blkaddr == NEW_ADDR) {
669
		zero_user_segment(page, 0, PAGE_SIZE);
670 671
		if (!PageUptodate(page))
			SetPageUptodate(page);
672
		unlock_page(page);
J
Jaegeuk Kim 已提交
673 674
		return page;
	}
675

676
	err = f2fs_submit_page_read(inode, page, dn.data_blkaddr);
677
	if (err)
678
		goto put_err;
679
	return page;
680 681 682 683

put_err:
	f2fs_put_page(page, 1);
	return ERR_PTR(err);
684 685 686 687 688 689 690 691 692 693 694 695
}

struct page *find_data_page(struct inode *inode, pgoff_t index)
{
	struct address_space *mapping = inode->i_mapping;
	struct page *page;

	page = find_get_page(mapping, index);
	if (page && PageUptodate(page))
		return page;
	f2fs_put_page(page, 0);

696
	page = get_read_data_page(inode, index, 0, false);
697 698 699 700 701 702 703 704 705 706 707 708 709 710 711 712 713 714 715
	if (IS_ERR(page))
		return page;

	if (PageUptodate(page))
		return page;

	wait_on_page_locked(page);
	if (unlikely(!PageUptodate(page))) {
		f2fs_put_page(page, 0);
		return ERR_PTR(-EIO);
	}
	return page;
}

/*
 * If it tries to access a hole, return an error.
 * Because, the callers, functions in dir.c and GC, should be able to know
 * whether this page exists or not.
 */
716 717
struct page *get_lock_data_page(struct inode *inode, pgoff_t index,
							bool for_write)
718 719 720 721
{
	struct address_space *mapping = inode->i_mapping;
	struct page *page;
repeat:
722
	page = get_read_data_page(inode, index, 0, for_write);
723 724
	if (IS_ERR(page))
		return page;
725

726
	/* wait for read completion */
727
	lock_page(page);
728
	if (unlikely(page->mapping != mapping)) {
729 730
		f2fs_put_page(page, 1);
		goto repeat;
731
	}
732 733 734 735
	if (unlikely(!PageUptodate(page))) {
		f2fs_put_page(page, 1);
		return ERR_PTR(-EIO);
	}
736 737 738
	return page;
}

J
Jaegeuk Kim 已提交
739
/*
740 741
 * Caller ensures that this data page is never allocated.
 * A new zero-filled data page is allocated in the page cache.
742
 *
C
Chao Yu 已提交
743 744
 * Also, caller should grab and release a rwsem by calling f2fs_lock_op() and
 * f2fs_unlock_op().
745 746
 * Note that, ipage is set only by make_empty_dir, and if any error occur,
 * ipage should be released by this function.
747
 */
748
struct page *get_new_data_page(struct inode *inode,
749
		struct page *ipage, pgoff_t index, bool new_i_size)
750 751 752 753 754
{
	struct address_space *mapping = inode->i_mapping;
	struct page *page;
	struct dnode_of_data dn;
	int err;
755

756
	page = f2fs_grab_cache_page(mapping, index, true);
757 758 759 760 761 762
	if (!page) {
		/*
		 * before exiting, we should make sure ipage will be released
		 * if any error occur.
		 */
		f2fs_put_page(ipage, 1);
763
		return ERR_PTR(-ENOMEM);
764
	}
765

766
	set_new_dnode(&dn, inode, ipage, NULL, 0);
767
	err = f2fs_reserve_block(&dn, index);
768 769
	if (err) {
		f2fs_put_page(page, 1);
770
		return ERR_PTR(err);
771
	}
772 773
	if (!ipage)
		f2fs_put_dnode(&dn);
774 775

	if (PageUptodate(page))
776
		goto got_it;
777 778

	if (dn.data_blkaddr == NEW_ADDR) {
779
		zero_user_segment(page, 0, PAGE_SIZE);
780 781
		if (!PageUptodate(page))
			SetPageUptodate(page);
782
	} else {
783
		f2fs_put_page(page, 1);
784

785 786 787
		/* if ipage exists, blkaddr should be NEW_ADDR */
		f2fs_bug_on(F2FS_I_SB(inode), ipage);
		page = get_lock_data_page(inode, index, true);
788
		if (IS_ERR(page))
789
			return page;
790
	}
791
got_it:
C
Chao Yu 已提交
792
	if (new_i_size && i_size_read(inode) <
793
				((loff_t)(index + 1) << PAGE_SHIFT))
794
		f2fs_i_size_write(inode, ((loff_t)(index + 1) << PAGE_SHIFT));
795 796 797
	return page;
}

798
static int __allocate_data_block(struct dnode_of_data *dn, int seg_type)
799
{
800
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
801 802
	struct f2fs_summary sum;
	struct node_info ni;
803
	pgoff_t fofs;
804
	blkcnt_t count = 1;
C
Chao Yu 已提交
805
	int err;
806

807
	if (unlikely(is_inode_flag_set(dn->inode, FI_NO_ALLOC)))
808
		return -EPERM;
809

810 811
	dn->data_blkaddr = datablock_addr(dn->inode,
				dn->node_page, dn->ofs_in_node);
812 813 814
	if (dn->data_blkaddr == NEW_ADDR)
		goto alloc;

C
Chao Yu 已提交
815 816
	if (unlikely((err = inc_valid_block_count(sbi, dn->inode, &count))))
		return err;
817

818
alloc:
819 820 821
	get_node_info(sbi, dn->nid, &ni);
	set_summary(&sum, dn->nid, dn->ofs_in_node, ni.version);

822
	allocate_data_block(sbi, NULL, dn->data_blkaddr, &dn->data_blkaddr,
823
					&sum, seg_type, NULL, false);
824
	set_data_blkaddr(dn);
825

826
	/* update i_size */
827
	fofs = start_bidx_of_node(ofs_of_node(dn->node_page), dn->inode) +
828
							dn->ofs_in_node;
829
	if (i_size_read(dn->inode) < ((loff_t)(fofs + 1) << PAGE_SHIFT))
830
		f2fs_i_size_write(dn->inode,
831
				((loff_t)(fofs + 1) << PAGE_SHIFT));
832 833 834
	return 0;
}

J
Jaegeuk Kim 已提交
835 836
static inline bool __force_buffered_io(struct inode *inode, int rw)
{
837
	return (f2fs_encrypted_file(inode) ||
J
Jaegeuk Kim 已提交
838 839 840 841
			(rw == WRITE && test_opt(F2FS_I_SB(inode), LFS)) ||
			F2FS_I_SB(inode)->s_ndevs);
}

842
int f2fs_preallocate_blocks(struct kiocb *iocb, struct iov_iter *from)
843
{
844
	struct inode *inode = file_inode(iocb->ki_filp);
C
Chao Yu 已提交
845
	struct f2fs_map_blocks map;
846
	int flag;
847
	int err = 0;
848
	bool direct_io = iocb->ki_flags & IOCB_DIRECT;
849

850
	/* convert inline data for Direct I/O*/
851
	if (direct_io) {
852 853 854 855 856
		err = f2fs_convert_inline_inode(inode);
		if (err)
			return err;
	}

857 858 859
	if (is_inode_flag_set(inode, FI_NO_PREALLOC))
		return 0;

860
	map.m_lblk = F2FS_BLK_ALIGN(iocb->ki_pos);
861 862 863 864 865 866
	map.m_len = F2FS_BYTES_TO_BLK(iocb->ki_pos + iov_iter_count(from));
	if (map.m_len > map.m_lblk)
		map.m_len -= map.m_lblk;
	else
		map.m_len = 0;

867
	map.m_next_pgofs = NULL;
868
	map.m_next_extent = NULL;
869
	map.m_seg_type = NO_CHECK_TYPE;
870

871
	if (direct_io) {
872
		map.m_seg_type = rw_hint_to_seg_type(iocb->ki_hint);
873 874 875 876
		flag = __force_buffered_io(inode, WRITE) ?
					F2FS_GET_BLOCK_PRE_AIO :
					F2FS_GET_BLOCK_PRE_DIO;
		goto map_blocks;
877
	}
C
Chao Yu 已提交
878
	if (iocb->ki_pos + iov_iter_count(from) > MAX_INLINE_DATA(inode)) {
879 880 881
		err = f2fs_convert_inline_inode(inode);
		if (err)
			return err;
882
	}
883
	if (f2fs_has_inline_data(inode))
884
		return err;
885 886 887 888 889 890 891 892 893

	flag = F2FS_GET_BLOCK_PRE_AIO;

map_blocks:
	err = f2fs_map_blocks(inode, &map, 1, flag);
	if (map.m_len > 0 && err == -ENOSPC) {
		if (!direct_io)
			set_inode_flag(inode, FI_NO_PREALLOC);
		err = 0;
894
	}
895
	return err;
896 897
}

898 899 900 901 902 903 904 905 906 907 908 909 910 911 912
static inline void __do_map_lock(struct f2fs_sb_info *sbi, int flag, bool lock)
{
	if (flag == F2FS_GET_BLOCK_PRE_AIO) {
		if (lock)
			down_read(&sbi->node_change);
		else
			up_read(&sbi->node_change);
	} else {
		if (lock)
			f2fs_lock_op(sbi);
		else
			f2fs_unlock_op(sbi);
	}
}

J
Jaegeuk Kim 已提交
913
/*
J
Jaegeuk Kim 已提交
914 915
 * f2fs_map_blocks() now supported readahead/bmap/rw direct_IO with
 * f2fs_map_blocks structure.
C
Chao Yu 已提交
916 917 918 919 920
 * If original data blocks are allocated, then give them to blockdev.
 * Otherwise,
 *     a. preallocate requested block addresses
 *     b. do not use extent cache for better performance
 *     c. give the block addresses to blockdev
921
 */
C
Chao Yu 已提交
922
int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map,
C
Chao Yu 已提交
923
						int create, int flag)
924
{
J
Jaegeuk Kim 已提交
925
	unsigned int maxblocks = map->m_len;
926
	struct dnode_of_data dn;
927
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
928
	int mode = create ? ALLOC_NODE : LOOKUP_NODE;
929
	pgoff_t pgofs, end_offset, end;
930
	int err = 0, ofs = 1;
931 932
	unsigned int ofs_in_node, last_ofs_in_node;
	blkcnt_t prealloc;
933
	struct extent_info ei = {0,0,0};
934
	block_t blkaddr;
935
	unsigned int start_pgofs;
936

937 938 939
	if (!maxblocks)
		return 0;

J
Jaegeuk Kim 已提交
940 941 942 943 944
	map->m_len = 0;
	map->m_flags = 0;

	/* it only supports block size == page size */
	pgofs =	(pgoff_t)map->m_lblk;
945
	end = pgofs + maxblocks;
946

947
	if (!create && f2fs_lookup_extent_cache(inode, pgofs, &ei)) {
J
Jaegeuk Kim 已提交
948 949 950
		map->m_pblk = ei.blk + pgofs - ei.fofs;
		map->m_len = min((pgoff_t)maxblocks, ei.fofs + ei.len - pgofs);
		map->m_flags = F2FS_MAP_MAPPED;
951 952
		if (map->m_next_extent)
			*map->m_next_extent = pgofs + map->m_len;
953
		goto out;
954
	}
955

C
Chao Yu 已提交
956
next_dnode:
957
	if (create)
958
		__do_map_lock(sbi, flag, true);
959 960 961

	/* When reading holes, we need its node page */
	set_new_dnode(&dn, inode, NULL, NULL, 0);
962
	err = get_dnode_of_data(&dn, pgofs, mode);
963
	if (err) {
C
Chao Yu 已提交
964 965
		if (flag == F2FS_GET_BLOCK_BMAP)
			map->m_pblk = 0;
966
		if (err == -ENOENT) {
967
			err = 0;
968 969 970
			if (map->m_next_pgofs)
				*map->m_next_pgofs =
					get_next_page_offset(&dn, pgofs);
971 972 973
			if (map->m_next_extent)
				*map->m_next_extent =
					get_next_page_offset(&dn, pgofs);
974
		}
975
		goto unlock_out;
976
	}
C
Chao Yu 已提交
977

978
	start_pgofs = pgofs;
979
	prealloc = 0;
980
	last_ofs_in_node = ofs_in_node = dn.ofs_in_node;
981
	end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
C
Chao Yu 已提交
982 983

next_block:
984
	blkaddr = datablock_addr(dn.inode, dn.node_page, dn.ofs_in_node);
C
Chao Yu 已提交
985 986

	if (blkaddr == NEW_ADDR || blkaddr == NULL_ADDR) {
C
Chao Yu 已提交
987
		if (create) {
988 989
			if (unlikely(f2fs_cp_error(sbi))) {
				err = -EIO;
C
Chao Yu 已提交
990
				goto sync_out;
991
			}
992
			if (flag == F2FS_GET_BLOCK_PRE_AIO) {
993 994 995 996
				if (blkaddr == NULL_ADDR) {
					prealloc++;
					last_ofs_in_node = dn.ofs_in_node;
				}
997
			} else {
998 999
				err = __allocate_data_block(&dn,
							map->m_seg_type);
1000
				if (!err)
1001
					set_inode_flag(inode, FI_APPEND_WRITE);
1002
			}
C
Chao Yu 已提交
1003
			if (err)
C
Chao Yu 已提交
1004
				goto sync_out;
1005
			map->m_flags |= F2FS_MAP_NEW;
C
Chao Yu 已提交
1006
			blkaddr = dn.data_blkaddr;
C
Chao Yu 已提交
1007
		} else {
C
Chao Yu 已提交
1008 1009 1010 1011
			if (flag == F2FS_GET_BLOCK_BMAP) {
				map->m_pblk = 0;
				goto sync_out;
			}
1012 1013
			if (flag == F2FS_GET_BLOCK_PRECACHE)
				goto sync_out;
1014 1015 1016 1017
			if (flag == F2FS_GET_BLOCK_FIEMAP &&
						blkaddr == NULL_ADDR) {
				if (map->m_next_pgofs)
					*map->m_next_pgofs = pgofs + 1;
C
Chao Yu 已提交
1018
				goto sync_out;
1019
			}
1020 1021 1022 1023
			if (flag != F2FS_GET_BLOCK_FIEMAP) {
				/* for defragment case */
				if (map->m_next_pgofs)
					*map->m_next_pgofs = pgofs + 1;
C
Chao Yu 已提交
1024
				goto sync_out;
1025
			}
C
Chao Yu 已提交
1026 1027
		}
	}
1028

1029 1030 1031
	if (flag == F2FS_GET_BLOCK_PRE_AIO)
		goto skip;

C
Chao Yu 已提交
1032 1033 1034 1035 1036 1037 1038 1039 1040 1041
	if (map->m_len == 0) {
		/* preallocated unwritten block should be mapped for fiemap. */
		if (blkaddr == NEW_ADDR)
			map->m_flags |= F2FS_MAP_UNWRITTEN;
		map->m_flags |= F2FS_MAP_MAPPED;

		map->m_pblk = blkaddr;
		map->m_len = 1;
	} else if ((map->m_pblk != NEW_ADDR &&
			blkaddr == (map->m_pblk + ofs)) ||
1042
			(map->m_pblk == NEW_ADDR && blkaddr == NEW_ADDR) ||
1043
			flag == F2FS_GET_BLOCK_PRE_DIO) {
C
Chao Yu 已提交
1044 1045 1046 1047 1048
		ofs++;
		map->m_len++;
	} else {
		goto sync_out;
	}
1049

1050
skip:
1051 1052 1053
	dn.ofs_in_node++;
	pgofs++;

1054 1055 1056
	/* preallocate blocks in batch for one dnode page */
	if (flag == F2FS_GET_BLOCK_PRE_AIO &&
			(pgofs == end || dn.ofs_in_node == end_offset)) {
1057

1058 1059 1060 1061
		dn.ofs_in_node = ofs_in_node;
		err = reserve_new_blocks(&dn, prealloc);
		if (err)
			goto sync_out;
1062

1063 1064 1065 1066
		map->m_len += dn.ofs_in_node - ofs_in_node;
		if (prealloc && dn.ofs_in_node != last_ofs_in_node + 1) {
			err = -ENOSPC;
			goto sync_out;
1067
		}
1068 1069 1070 1071 1072 1073 1074 1075
		dn.ofs_in_node = end_offset;
	}

	if (pgofs >= end)
		goto sync_out;
	else if (dn.ofs_in_node < end_offset)
		goto next_block;

1076 1077 1078 1079 1080 1081 1082 1083 1084 1085
	if (flag == F2FS_GET_BLOCK_PRECACHE) {
		if (map->m_flags & F2FS_MAP_MAPPED) {
			unsigned int ofs = start_pgofs - map->m_lblk;

			f2fs_update_extent_cache_range(&dn,
				start_pgofs, map->m_pblk + ofs,
				map->m_len - ofs);
		}
	}

1086 1087 1088
	f2fs_put_dnode(&dn);

	if (create) {
1089
		__do_map_lock(sbi, flag, false);
1090
		f2fs_balance_fs(sbi, dn.node_changed);
1091
	}
1092
	goto next_dnode;
1093

1094
sync_out:
1095 1096 1097 1098 1099 1100 1101 1102 1103 1104 1105
	if (flag == F2FS_GET_BLOCK_PRECACHE) {
		if (map->m_flags & F2FS_MAP_MAPPED) {
			unsigned int ofs = start_pgofs - map->m_lblk;

			f2fs_update_extent_cache_range(&dn,
				start_pgofs, map->m_pblk + ofs,
				map->m_len - ofs);
		}
		if (map->m_next_extent)
			*map->m_next_extent = pgofs + 1;
	}
1106
	f2fs_put_dnode(&dn);
1107
unlock_out:
1108
	if (create) {
1109
		__do_map_lock(sbi, flag, false);
1110
		f2fs_balance_fs(sbi, dn.node_changed);
1111
	}
1112
out:
J
Jaegeuk Kim 已提交
1113
	trace_f2fs_map_blocks(inode, map, err);
1114
	return err;
1115 1116
}

J
Jaegeuk Kim 已提交
1117
static int __get_data_block(struct inode *inode, sector_t iblock,
1118
			struct buffer_head *bh, int create, int flag,
1119
			pgoff_t *next_pgofs, int seg_type)
J
Jaegeuk Kim 已提交
1120 1121
{
	struct f2fs_map_blocks map;
1122
	int err;
J
Jaegeuk Kim 已提交
1123 1124 1125

	map.m_lblk = iblock;
	map.m_len = bh->b_size >> inode->i_blkbits;
1126
	map.m_next_pgofs = next_pgofs;
1127
	map.m_next_extent = NULL;
1128
	map.m_seg_type = seg_type;
J
Jaegeuk Kim 已提交
1129

1130 1131
	err = f2fs_map_blocks(inode, &map, create, flag);
	if (!err) {
J
Jaegeuk Kim 已提交
1132 1133
		map_bh(bh, inode->i_sb, map.m_pblk);
		bh->b_state = (bh->b_state & ~F2FS_MAP_FLAGS) | map.m_flags;
1134
		bh->b_size = (u64)map.m_len << inode->i_blkbits;
J
Jaegeuk Kim 已提交
1135
	}
1136
	return err;
J
Jaegeuk Kim 已提交
1137 1138
}

1139
static int get_data_block(struct inode *inode, sector_t iblock,
1140 1141
			struct buffer_head *bh_result, int create, int flag,
			pgoff_t *next_pgofs)
C
Chao Yu 已提交
1142
{
1143
	return __get_data_block(inode, iblock, bh_result, create,
1144 1145
							flag, next_pgofs,
							NO_CHECK_TYPE);
C
Chao Yu 已提交
1146 1147 1148
}

static int get_data_block_dio(struct inode *inode, sector_t iblock,
1149 1150
			struct buffer_head *bh_result, int create)
{
C
Chao Yu 已提交
1151
	return __get_data_block(inode, iblock, bh_result, create,
1152 1153 1154
						F2FS_GET_BLOCK_DEFAULT, NULL,
						rw_hint_to_seg_type(
							inode->i_write_hint));
1155 1156
}

C
Chao Yu 已提交
1157
static int get_data_block_bmap(struct inode *inode, sector_t iblock,
1158 1159
			struct buffer_head *bh_result, int create)
{
1160
	/* Block number less than F2FS MAX BLOCKS */
C
Chao Yu 已提交
1161
	if (unlikely(iblock >= F2FS_I_SB(inode)->max_file_blocks))
1162 1163
		return -EFBIG;

C
Chao Yu 已提交
1164
	return __get_data_block(inode, iblock, bh_result, create,
1165 1166
						F2FS_GET_BLOCK_BMAP, NULL,
						NO_CHECK_TYPE);
1167 1168
}

1169 1170 1171 1172 1173 1174 1175 1176 1177 1178
static inline sector_t logical_to_blk(struct inode *inode, loff_t offset)
{
	return (offset >> inode->i_blkbits);
}

static inline loff_t blk_to_logical(struct inode *inode, sector_t blk)
{
	return (blk << inode->i_blkbits);
}

C
Chao Yu 已提交
1179 1180 1181 1182 1183 1184 1185 1186 1187 1188 1189 1190 1191 1192 1193 1194 1195 1196 1197 1198 1199 1200 1201 1202 1203 1204 1205 1206 1207 1208 1209 1210 1211 1212 1213 1214 1215 1216 1217 1218 1219 1220 1221 1222 1223 1224 1225 1226 1227 1228 1229 1230 1231 1232 1233 1234 1235 1236 1237 1238 1239 1240
static int f2fs_xattr_fiemap(struct inode *inode,
				struct fiemap_extent_info *fieinfo)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct page *page;
	struct node_info ni;
	__u64 phys = 0, len;
	__u32 flags;
	nid_t xnid = F2FS_I(inode)->i_xattr_nid;
	int err = 0;

	if (f2fs_has_inline_xattr(inode)) {
		int offset;

		page = f2fs_grab_cache_page(NODE_MAPPING(sbi),
						inode->i_ino, false);
		if (!page)
			return -ENOMEM;

		get_node_info(sbi, inode->i_ino, &ni);

		phys = (__u64)blk_to_logical(inode, ni.blk_addr);
		offset = offsetof(struct f2fs_inode, i_addr) +
					sizeof(__le32) * (DEF_ADDRS_PER_INODE -
					F2FS_INLINE_XATTR_ADDRS(inode));

		phys += offset;
		len = inline_xattr_size(inode);

		f2fs_put_page(page, 1);

		flags = FIEMAP_EXTENT_DATA_INLINE | FIEMAP_EXTENT_NOT_ALIGNED;

		if (!xnid)
			flags |= FIEMAP_EXTENT_LAST;

		err = fiemap_fill_next_extent(fieinfo, 0, phys, len, flags);
		if (err || err == 1)
			return err;
	}

	if (xnid) {
		page = f2fs_grab_cache_page(NODE_MAPPING(sbi), xnid, false);
		if (!page)
			return -ENOMEM;

		get_node_info(sbi, xnid, &ni);

		phys = (__u64)blk_to_logical(inode, ni.blk_addr);
		len = inode->i_sb->s_blocksize;

		f2fs_put_page(page, 1);

		flags = FIEMAP_EXTENT_LAST;
	}

	if (phys)
		err = fiemap_fill_next_extent(fieinfo, 0, phys, len, flags);

	return (err < 0 ? err : 0);
}

J
Jaegeuk Kim 已提交
1241 1242 1243
int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
		u64 start, u64 len)
{
1244 1245
	struct buffer_head map_bh;
	sector_t start_blk, last_blk;
1246
	pgoff_t next_pgofs;
1247 1248 1249 1250
	u64 logical = 0, phys = 0, size = 0;
	u32 flags = 0;
	int ret = 0;

1251 1252 1253 1254 1255 1256
	if (fieinfo->fi_flags & FIEMAP_FLAG_CACHE) {
		ret = f2fs_precache_extents(inode);
		if (ret)
			return ret;
	}

C
Chao Yu 已提交
1257
	ret = fiemap_check_flags(fieinfo, FIEMAP_FLAG_SYNC | FIEMAP_FLAG_XATTR);
1258 1259 1260
	if (ret)
		return ret;

1261 1262
	inode_lock(inode);

C
Chao Yu 已提交
1263 1264 1265 1266 1267
	if (fieinfo->fi_flags & FIEMAP_FLAG_XATTR) {
		ret = f2fs_xattr_fiemap(inode, fieinfo);
		goto out;
	}

J
Jaegeuk Kim 已提交
1268 1269 1270
	if (f2fs_has_inline_data(inode)) {
		ret = f2fs_inline_data_fiemap(inode, fieinfo, start, len);
		if (ret != -EAGAIN)
1271
			goto out;
J
Jaegeuk Kim 已提交
1272 1273
	}

1274 1275 1276 1277 1278
	if (logical_to_blk(inode, len) == 0)
		len = blk_to_logical(inode, 1);

	start_blk = logical_to_blk(inode, start);
	last_blk = logical_to_blk(inode, start + len - 1);
1279

1280 1281 1282 1283
next:
	memset(&map_bh, 0, sizeof(struct buffer_head));
	map_bh.b_size = len;

C
Chao Yu 已提交
1284
	ret = get_data_block(inode, start_blk, &map_bh, 0,
1285
					F2FS_GET_BLOCK_FIEMAP, &next_pgofs);
1286 1287 1288 1289 1290
	if (ret)
		goto out;

	/* HOLE */
	if (!buffer_mapped(&map_bh)) {
1291
		start_blk = next_pgofs;
1292 1293 1294

		if (blk_to_logical(inode, start_blk) < blk_to_logical(inode,
					F2FS_I_SB(inode)->max_file_blocks))
1295
			goto prep_next;
1296

1297 1298
		flags |= FIEMAP_EXTENT_LAST;
	}
1299

1300 1301 1302 1303
	if (size) {
		if (f2fs_encrypted_inode(inode))
			flags |= FIEMAP_EXTENT_DATA_ENCRYPTED;

1304 1305
		ret = fiemap_fill_next_extent(fieinfo, logical,
				phys, size, flags);
1306
	}
1307

1308 1309
	if (start_blk > last_blk || ret)
		goto out;
1310

1311 1312 1313 1314 1315 1316
	logical = blk_to_logical(inode, start_blk);
	phys = blk_to_logical(inode, map_bh.b_blocknr);
	size = map_bh.b_size;
	flags = 0;
	if (buffer_unwritten(&map_bh))
		flags = FIEMAP_EXTENT_UNWRITTEN;
1317

1318
	start_blk += logical_to_blk(inode, size);
1319

1320
prep_next:
1321 1322 1323 1324 1325 1326 1327 1328 1329
	cond_resched();
	if (fatal_signal_pending(current))
		ret = -EINTR;
	else
		goto next;
out:
	if (ret == 1)
		ret = 0;

A
Al Viro 已提交
1330
	inode_unlock(inode);
1331
	return ret;
J
Jaegeuk Kim 已提交
1332 1333
}

J
Jaegeuk Kim 已提交
1334 1335 1336 1337 1338 1339 1340 1341 1342 1343 1344 1345 1346 1347 1348 1349 1350 1351 1352 1353 1354 1355 1356
/*
 * This function was originally taken from fs/mpage.c, and customized for f2fs.
 * Major change was from block_size == page_size in f2fs by default.
 */
static int f2fs_mpage_readpages(struct address_space *mapping,
			struct list_head *pages, struct page *page,
			unsigned nr_pages)
{
	struct bio *bio = NULL;
	sector_t last_block_in_bio = 0;
	struct inode *inode = mapping->host;
	const unsigned blkbits = inode->i_blkbits;
	const unsigned blocksize = 1 << blkbits;
	sector_t block_in_file;
	sector_t last_block;
	sector_t last_block_in_file;
	sector_t block_nr;
	struct f2fs_map_blocks map;

	map.m_pblk = 0;
	map.m_lblk = 0;
	map.m_len = 0;
	map.m_flags = 0;
1357
	map.m_next_pgofs = NULL;
1358
	map.m_next_extent = NULL;
1359
	map.m_seg_type = NO_CHECK_TYPE;
J
Jaegeuk Kim 已提交
1360

L
LiFan 已提交
1361
	for (; nr_pages; nr_pages--) {
J
Jaegeuk Kim 已提交
1362
		if (pages) {
1363
			page = list_last_entry(pages, struct page, lru);
1364 1365

			prefetchw(&page->flags);
J
Jaegeuk Kim 已提交
1366 1367
			list_del(&page->lru);
			if (add_to_page_cache_lru(page, mapping,
1368 1369
						  page->index,
						  readahead_gfp_mask(mapping)))
J
Jaegeuk Kim 已提交
1370 1371 1372 1373 1374 1375 1376 1377 1378 1379 1380 1381 1382 1383 1384 1385 1386 1387 1388 1389 1390 1391 1392 1393 1394 1395 1396 1397
				goto next_page;
		}

		block_in_file = (sector_t)page->index;
		last_block = block_in_file + nr_pages;
		last_block_in_file = (i_size_read(inode) + blocksize - 1) >>
								blkbits;
		if (last_block > last_block_in_file)
			last_block = last_block_in_file;

		/*
		 * Map blocks using the previous result first.
		 */
		if ((map.m_flags & F2FS_MAP_MAPPED) &&
				block_in_file > map.m_lblk &&
				block_in_file < (map.m_lblk + map.m_len))
			goto got_it;

		/*
		 * Then do more f2fs_map_blocks() calls until we are
		 * done with this page.
		 */
		map.m_flags = 0;

		if (block_in_file < last_block) {
			map.m_lblk = block_in_file;
			map.m_len = last_block - block_in_file;

1398
			if (f2fs_map_blocks(inode, &map, 0,
1399
						F2FS_GET_BLOCK_DEFAULT))
J
Jaegeuk Kim 已提交
1400 1401 1402 1403 1404 1405 1406 1407 1408 1409 1410 1411
				goto set_error_page;
		}
got_it:
		if ((map.m_flags & F2FS_MAP_MAPPED)) {
			block_nr = map.m_pblk + block_in_file - map.m_lblk;
			SetPageMappedToDisk(page);

			if (!PageUptodate(page) && !cleancache_get_page(page)) {
				SetPageUptodate(page);
				goto confused;
			}
		} else {
1412
			zero_user_segment(page, 0, PAGE_SIZE);
1413 1414
			if (!PageUptodate(page))
				SetPageUptodate(page);
J
Jaegeuk Kim 已提交
1415 1416 1417 1418 1419 1420 1421 1422
			unlock_page(page);
			goto next_page;
		}

		/*
		 * This page will go to BIO.  Do we need to send this
		 * BIO off first?
		 */
J
Jaegeuk Kim 已提交
1423 1424
		if (bio && (last_block_in_bio != block_nr - 1 ||
			!__same_bdev(F2FS_I_SB(inode), block_nr, bio))) {
J
Jaegeuk Kim 已提交
1425
submit_and_realloc:
1426
			__submit_bio(F2FS_I_SB(inode), bio, DATA);
J
Jaegeuk Kim 已提交
1427 1428 1429
			bio = NULL;
		}
		if (bio == NULL) {
1430
			bio = f2fs_grab_read_bio(inode, block_nr, nr_pages);
J
Jaegeuk Kim 已提交
1431 1432
			if (IS_ERR(bio)) {
				bio = NULL;
J
Jaegeuk Kim 已提交
1433
				goto set_error_page;
1434
			}
J
Jaegeuk Kim 已提交
1435 1436 1437 1438 1439 1440 1441 1442 1443
		}

		if (bio_add_page(bio, page, blocksize, 0) < blocksize)
			goto submit_and_realloc;

		last_block_in_bio = block_nr;
		goto next_page;
set_error_page:
		SetPageError(page);
1444
		zero_user_segment(page, 0, PAGE_SIZE);
J
Jaegeuk Kim 已提交
1445 1446 1447 1448
		unlock_page(page);
		goto next_page;
confused:
		if (bio) {
1449
			__submit_bio(F2FS_I_SB(inode), bio, DATA);
J
Jaegeuk Kim 已提交
1450 1451 1452 1453 1454
			bio = NULL;
		}
		unlock_page(page);
next_page:
		if (pages)
1455
			put_page(page);
J
Jaegeuk Kim 已提交
1456 1457 1458
	}
	BUG_ON(pages && !list_empty(pages));
	if (bio)
1459
		__submit_bio(F2FS_I_SB(inode), bio, DATA);
J
Jaegeuk Kim 已提交
1460 1461 1462
	return 0;
}

1463 1464
static int f2fs_read_data_page(struct file *file, struct page *page)
{
H
Huajun Li 已提交
1465
	struct inode *inode = page->mapping->host;
1466
	int ret = -EAGAIN;
H
Huajun Li 已提交
1467

1468 1469
	trace_f2fs_readpage(page, DATA);

A
arter97 已提交
1470
	/* If the file has inline data, try to read it directly */
H
Huajun Li 已提交
1471 1472
	if (f2fs_has_inline_data(inode))
		ret = f2fs_read_inline_data(inode, page);
1473
	if (ret == -EAGAIN)
J
Jaegeuk Kim 已提交
1474
		ret = f2fs_mpage_readpages(page->mapping, NULL, page, 1);
H
Huajun Li 已提交
1475
	return ret;
1476 1477 1478 1479 1480 1481
}

static int f2fs_read_data_pages(struct file *file,
			struct address_space *mapping,
			struct list_head *pages, unsigned nr_pages)
{
1482
	struct inode *inode = mapping->host;
1483
	struct page *page = list_last_entry(pages, struct page, lru);
1484 1485

	trace_f2fs_readpages(inode, page, nr_pages);
H
Huajun Li 已提交
1486 1487 1488 1489 1490

	/* If the file has inline data, skip readpages */
	if (f2fs_has_inline_data(inode))
		return 0;

J
Jaegeuk Kim 已提交
1491
	return f2fs_mpage_readpages(mapping, pages, NULL, nr_pages);
1492 1493
}

1494 1495 1496 1497 1498
static int encrypt_one_page(struct f2fs_io_info *fio)
{
	struct inode *inode = fio->page->mapping->host;
	gfp_t gfp_flags = GFP_NOFS;

1499
	if (!f2fs_encrypted_file(inode))
1500 1501 1502
		return 0;

	/* wait for GCed encrypted page writeback */
1503
	f2fs_wait_on_block_writeback(fio->sbi, fio->old_blkaddr);
1504 1505 1506 1507 1508 1509 1510 1511 1512

retry_encrypt:
	fio->encrypted_page = fscrypt_encrypt_page(inode, fio->page,
			PAGE_SIZE, 0, fio->page->index, gfp_flags);
	if (!IS_ERR(fio->encrypted_page))
		return 0;

	/* flush pending IOs and wait for a while in the ENOMEM case */
	if (PTR_ERR(fio->encrypted_page) == -ENOMEM) {
1513
		f2fs_flush_merged_writes(fio->sbi);
1514 1515 1516 1517 1518 1519 1520 1521 1522 1523 1524
		congestion_wait(BLK_RW_ASYNC, HZ/50);
		gfp_flags |= __GFP_NOFAIL;
		goto retry_encrypt;
	}
	return PTR_ERR(fio->encrypted_page);
}

static inline bool need_inplace_update(struct f2fs_io_info *fio)
{
	struct inode *inode = fio->page->mapping->host;

1525 1526
	if (f2fs_is_pinned_file(inode))
		return true;
1527 1528 1529 1530 1531 1532 1533 1534 1535 1536
	if (S_ISDIR(inode->i_mode) || f2fs_is_atomic_file(inode))
		return false;
	if (is_cold_data(fio->page))
		return false;
	if (IS_ATOMIC_WRITTEN_PAGE(fio->page))
		return false;

	return need_inplace_update_policy(inode, fio);
}

1537 1538 1539 1540 1541 1542 1543 1544 1545
static inline bool valid_ipu_blkaddr(struct f2fs_io_info *fio)
{
	if (fio->old_blkaddr == NEW_ADDR)
		return false;
	if (fio->old_blkaddr == NULL_ADDR)
		return false;
	return true;
}

1546
int do_write_data_page(struct f2fs_io_info *fio)
1547
{
1548
	struct page *page = fio->page;
1549 1550
	struct inode *inode = page->mapping->host;
	struct dnode_of_data dn;
1551 1552
	struct extent_info ei = {0,0,0};
	bool ipu_force = false;
1553 1554 1555
	int err = 0;

	set_new_dnode(&dn, inode, NULL, NULL, 0);
1556 1557 1558
	if (need_inplace_update(fio) &&
			f2fs_lookup_extent_cache(inode, page->index, &ei)) {
		fio->old_blkaddr = ei.blk + page->index - ei.fofs;
1559 1560

		if (valid_ipu_blkaddr(fio)) {
1561
			ipu_force = true;
1562
			fio->need_lock = LOCK_DONE;
1563 1564 1565
			goto got_it;
		}
	}
1566

1567 1568 1569
	/* Deadlock due to between page->lock and f2fs_lock_op */
	if (fio->need_lock == LOCK_REQ && !f2fs_trylock_op(fio->sbi))
		return -EAGAIN;
1570

1571
	err = get_dnode_of_data(&dn, page->index, LOOKUP_NODE);
1572
	if (err)
1573
		goto out;
1574

1575
	fio->old_blkaddr = dn.data_blkaddr;
1576 1577

	/* This page is already truncated */
1578
	if (fio->old_blkaddr == NULL_ADDR) {
1579
		ClearPageUptodate(page);
1580
		goto out_writepage;
1581
	}
1582
got_it:
1583 1584 1585 1586
	/*
	 * If current allocation needs SSR,
	 * it had better in-place writes for updated data.
	 */
1587
	if (ipu_force || (valid_ipu_blkaddr(fio) && need_inplace_update(fio))) {
1588 1589 1590 1591 1592
		err = encrypt_one_page(fio);
		if (err)
			goto out_writepage;

		set_page_writeback(page);
1593
		f2fs_put_dnode(&dn);
1594
		if (fio->need_lock == LOCK_REQ)
1595
			f2fs_unlock_op(fio->sbi);
1596
		err = rewrite_data_page(fio);
1597
		trace_f2fs_do_write_data_page(fio->page, IPU);
1598
		set_inode_flag(inode, FI_UPDATE_WRITE);
1599
		return err;
1600
	}
1601

1602 1603 1604 1605 1606 1607 1608 1609 1610 1611 1612 1613 1614 1615
	if (fio->need_lock == LOCK_RETRY) {
		if (!f2fs_trylock_op(fio->sbi)) {
			err = -EAGAIN;
			goto out_writepage;
		}
		fio->need_lock = LOCK_REQ;
	}

	err = encrypt_one_page(fio);
	if (err)
		goto out_writepage;

	set_page_writeback(page);

1616 1617 1618 1619 1620 1621
	/* LFS mode write path */
	write_data_page(&dn, fio);
	trace_f2fs_do_write_data_page(page, OPU);
	set_inode_flag(inode, FI_APPEND_WRITE);
	if (page->index == 0)
		set_inode_flag(inode, FI_FIRST_BLOCK_WRITTEN);
1622 1623
out_writepage:
	f2fs_put_dnode(&dn);
1624
out:
1625
	if (fio->need_lock == LOCK_REQ)
1626
		f2fs_unlock_op(fio->sbi);
1627 1628 1629
	return err;
}

1630
static int __write_data_page(struct page *page, bool *submitted,
C
Chao Yu 已提交
1631 1632
				struct writeback_control *wbc,
				enum iostat_type io_type)
1633 1634
{
	struct inode *inode = page->mapping->host;
1635
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1636 1637
	loff_t i_size = i_size_read(inode);
	const pgoff_t end_index = ((unsigned long long) i_size)
1638
							>> PAGE_SHIFT;
1639
	loff_t psize = (page->index + 1) << PAGE_SHIFT;
H
Huajun Li 已提交
1640
	unsigned offset = 0;
1641
	bool need_balance_fs = false;
1642
	int err = 0;
J
Jaegeuk Kim 已提交
1643
	struct f2fs_io_info fio = {
1644
		.sbi = sbi,
C
Chao Yu 已提交
1645
		.ino = inode->i_ino,
J
Jaegeuk Kim 已提交
1646
		.type = DATA,
M
Mike Christie 已提交
1647
		.op = REQ_OP_WRITE,
J
Jens Axboe 已提交
1648
		.op_flags = wbc_to_write_flags(wbc),
1649
		.old_blkaddr = NULL_ADDR,
1650
		.page = page,
1651
		.encrypted_page = NULL,
1652
		.submitted = false,
1653
		.need_lock = LOCK_RETRY,
C
Chao Yu 已提交
1654
		.io_type = io_type,
1655
		.io_wbc = wbc,
J
Jaegeuk Kim 已提交
1656
	};
1657

1658 1659
	trace_f2fs_writepage(page, DATA);

1660 1661 1662
	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
		goto redirty_out;

1663
	if (page->index < end_index)
1664
		goto write;
1665 1666 1667 1668 1669

	/*
	 * If the offset is out-of-range of file size,
	 * this page does not have to be written to disk.
	 */
1670
	offset = i_size & (PAGE_SIZE - 1);
1671
	if ((page->index >= end_index + 1) || !offset)
1672
		goto out;
1673

1674
	zero_user_segment(page, offset, PAGE_SIZE);
1675
write:
1676 1677
	if (f2fs_is_drop_cache(inode))
		goto out;
1678 1679 1680 1681
	/* we should not write 0'th page having journal header */
	if (f2fs_is_volatile_file(inode) && (!page->index ||
			(!wbc->for_reclaim &&
			available_free_memory(sbi, BASE_CHECK))))
1682
		goto redirty_out;
1683

1684 1685
	/* we should bypass data pages to proceed the kworkder jobs */
	if (unlikely(f2fs_cp_error(sbi))) {
J
Jaegeuk Kim 已提交
1686
		mapping_set_error(page->mapping, -EIO);
1687
		goto out;
1688 1689
	}

1690
	/* Dentry blocks are controlled by checkpoint */
1691
	if (S_ISDIR(inode->i_mode)) {
1692
		fio.need_lock = LOCK_DONE;
1693
		err = do_write_data_page(&fio);
1694 1695
		goto done;
	}
H
Huajun Li 已提交
1696

1697
	if (!wbc->for_reclaim)
1698
		need_balance_fs = true;
1699
	else if (has_not_enough_free_secs(sbi, 0, 0))
1700
		goto redirty_out;
1701 1702
	else
		set_inode_flag(inode, FI_HOT_DATA);
1703

1704
	err = -EAGAIN;
1705
	if (f2fs_has_inline_data(inode)) {
1706
		err = f2fs_write_inline_data(inode, page);
1707 1708 1709
		if (!err)
			goto out;
	}
1710

1711
	if (err == -EAGAIN) {
1712
		err = do_write_data_page(&fio);
1713 1714 1715 1716 1717
		if (err == -EAGAIN) {
			fio.need_lock = LOCK_REQ;
			err = do_write_data_page(&fio);
		}
	}
1718 1719

	down_write(&F2FS_I(inode)->i_sem);
1720 1721
	if (F2FS_I(inode)->last_disk_size < psize)
		F2FS_I(inode)->last_disk_size = psize;
1722
	up_write(&F2FS_I(inode)->i_sem);
1723

1724 1725 1726
done:
	if (err && err != -ENOENT)
		goto redirty_out;
1727

1728
out:
1729
	inode_dec_dirty_pages(inode);
1730 1731
	if (err)
		ClearPageUptodate(page);
1732 1733

	if (wbc->for_reclaim) {
1734
		f2fs_submit_merged_write_cond(sbi, inode, 0, page->index, DATA);
1735
		clear_inode_flag(inode, FI_HOT_DATA);
1736
		remove_dirty_inode(inode);
1737
		submitted = NULL;
1738 1739
	}

1740
	unlock_page(page);
J
Jaegeuk Kim 已提交
1741 1742
	if (!S_ISDIR(inode->i_mode))
		f2fs_balance_fs(sbi, need_balance_fs);
1743

1744
	if (unlikely(f2fs_cp_error(sbi))) {
1745
		f2fs_submit_merged_write(sbi, DATA);
1746 1747 1748 1749 1750
		submitted = NULL;
	}

	if (submitted)
		*submitted = fio.submitted;
1751

1752 1753 1754
	return 0;

redirty_out:
1755
	redirty_page_for_writepage(wbc, page);
1756 1757
	if (!err)
		return AOP_WRITEPAGE_ACTIVATE;
J
Jaegeuk Kim 已提交
1758 1759
	unlock_page(page);
	return err;
1760 1761
}

1762 1763 1764
static int f2fs_write_data_page(struct page *page,
					struct writeback_control *wbc)
{
C
Chao Yu 已提交
1765
	return __write_data_page(page, NULL, wbc, FS_DATA_IO);
1766 1767
}

C
Chao Yu 已提交
1768 1769 1770 1771 1772 1773
/*
 * This function was copied from write_cche_pages from mm/page-writeback.c.
 * The major change is making write step of cold data page separately from
 * warm/hot data page.
 */
static int f2fs_write_cache_pages(struct address_space *mapping,
C
Chao Yu 已提交
1774 1775
					struct writeback_control *wbc,
					enum iostat_type io_type)
C
Chao Yu 已提交
1776 1777 1778 1779 1780 1781 1782 1783 1784
{
	int ret = 0;
	int done = 0;
	struct pagevec pvec;
	int nr_pages;
	pgoff_t uninitialized_var(writeback_index);
	pgoff_t index;
	pgoff_t end;		/* Inclusive */
	pgoff_t done_index;
1785
	pgoff_t last_idx = ULONG_MAX;
C
Chao Yu 已提交
1786 1787 1788 1789
	int cycled;
	int range_whole = 0;
	int tag;

1790
	pagevec_init(&pvec);
1791

1792 1793 1794 1795 1796 1797
	if (get_dirty_pages(mapping->host) <=
				SM_I(F2FS_M_SB(mapping))->min_hot_blocks)
		set_inode_flag(mapping->host, FI_HOT_DATA);
	else
		clear_inode_flag(mapping->host, FI_HOT_DATA);

	if (wbc->range_cyclic) {
		writeback_index = mapping->writeback_index; /* prev offset */
		index = writeback_index;
		if (index == 0)
			cycled = 1;
		else
			cycled = 0;
		end = -1;
	} else {
		index = wbc->range_start >> PAGE_SHIFT;
		end = wbc->range_end >> PAGE_SHIFT;
		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
			range_whole = 1;
		cycled = 1; /* ignore range_cyclic tests */
	}
	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
		tag = PAGECACHE_TAG_TOWRITE;
	else
		tag = PAGECACHE_TAG_DIRTY;
retry:
	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
		tag_pages_for_writeback(mapping, index, end);
	done_index = index;
	while (!done && (index <= end)) {
		int i;

		nr_pages = pagevec_lookup_range_tag(&pvec, mapping, &index, end,
				tag);
		if (nr_pages == 0)
			break;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];
			bool submitted = false;

			done_index = page->index;
retry_write:
			lock_page(page);

			if (unlikely(page->mapping != mapping)) {
continue_unlock:
				unlock_page(page);
				continue;
			}

			if (!PageDirty(page)) {
				/* someone wrote it for us */
				goto continue_unlock;
			}

			if (PageWriteback(page)) {
				if (wbc->sync_mode != WB_SYNC_NONE)
					f2fs_wait_on_page_writeback(page,
								DATA, true);
				else
					goto continue_unlock;
			}

			BUG_ON(PageWriteback(page));
			if (!clear_page_dirty_for_io(page))
				goto continue_unlock;

			ret = __write_data_page(page, &submitted, wbc, io_type);
			if (unlikely(ret)) {
				/*
				 * keep nr_to_write, since vfs uses this to
				 * get # of written pages.
				 */
				if (ret == AOP_WRITEPAGE_ACTIVATE) {
					unlock_page(page);
					ret = 0;
					continue;
				} else if (ret == -EAGAIN) {
					ret = 0;
					if (wbc->sync_mode == WB_SYNC_ALL) {
						cond_resched();
						congestion_wait(BLK_RW_ASYNC,
									HZ/50);
						goto retry_write;
					}
					continue;
				}
				done_index = page->index + 1;
				done = 1;
				break;
			} else if (submitted) {
				last_idx = page->index;
			}

			/* give priority to WB_SYNC threads */
			if ((atomic_read(&F2FS_M_SB(mapping)->wb_sync_req) ||
					--wbc->nr_to_write <= 0) &&
					wbc->sync_mode == WB_SYNC_NONE) {
				done = 1;
				break;
			}
		}
		pagevec_release(&pvec);
		cond_resched();
	}

	if (!cycled && !done) {
		cycled = 1;
		index = 0;
		end = writeback_index - 1;
		goto retry;
	}
	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
		mapping->writeback_index = done_index;

	if (last_idx != ULONG_MAX)
		f2fs_submit_merged_write_cond(F2FS_M_SB(mapping), mapping->host,
						0, last_idx, DATA);

	return ret;
}

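/*
 * Write back the mapping's dirty data pages, skipping the walk entirely for
 * special files, clean inodes, recovery-in-progress (POR), directories with
 * only a few dirty pages under background writeback, and inodes that are
 * being defragmented.
 */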
int __f2fs_write_data_pages(struct address_space *mapping,
						struct writeback_control *wbc,
						enum iostat_type io_type)
{
	struct inode *inode = mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct blk_plug plug;
	int ret;

	/* deal with chardevs and other special files */
	if (!mapping->a_ops->writepage)
		return 0;

	/* skip writing if there is no dirty page in this inode */
	if (!get_dirty_pages(inode) && wbc->sync_mode == WB_SYNC_NONE)
		return 0;

	/* during POR, we don't need to trigger writepage at all. */
	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
		goto skip_write;

	if (S_ISDIR(inode->i_mode) && wbc->sync_mode == WB_SYNC_NONE &&
			get_dirty_pages(inode) < nr_pages_to_skip(sbi, DATA) &&
			available_free_memory(sbi, DIRTY_DENTS))
		goto skip_write;

	/* skip writing during file defragment */
	if (is_inode_flag_set(inode, FI_DO_DEFRAG))
		goto skip_write;

	trace_f2fs_writepages(mapping->host, wbc, DATA);

	/* to avoid splitting IOs due to mixed WB_SYNC_ALL and WB_SYNC_NONE */
	if (wbc->sync_mode == WB_SYNC_ALL)
		atomic_inc(&sbi->wb_sync_req);
	else if (atomic_read(&sbi->wb_sync_req))
		goto skip_write;

	blk_start_plug(&plug);
	ret = f2fs_write_cache_pages(mapping, wbc, io_type);
	blk_finish_plug(&plug);

	if (wbc->sync_mode == WB_SYNC_ALL)
		atomic_dec(&sbi->wb_sync_req);
	/*
	 * if some pages were truncated, we cannot rely on mapping->host
	 * to detect pending bios.
	 */

	remove_dirty_inode(inode);
	return ret;

skip_write:
	wbc->pages_skipped += get_dirty_pages(inode);
	trace_f2fs_writepages(mapping->host, wbc, DATA);
	return 0;
}

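/*
 * ->writepages entry point: account the I/O as checkpoint or regular data
 * writeback depending on whether the caller is the checkpoint task.
 */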
static int f2fs_write_data_pages(struct address_space *mapping,
			    struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;

	return __f2fs_write_data_pages(mapping, wbc,
			F2FS_I(inode)->cp_task == current ?
			FS_CP_DATA_IO : FS_DATA_IO);
}

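/* Truncate blocks allocated beyond i_size by a failed or partial write. */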
static void f2fs_write_failed(struct address_space *mapping, loff_t to)
{
	struct inode *inode = mapping->host;
	loff_t i_size = i_size_read(inode);

	if (to > i_size) {
		down_write(&F2FS_I(inode)->i_mmap_sem);
		truncate_pagecache(inode, i_size);
		truncate_blocks(inode, i_size, true);
		up_write(&F2FS_I(inode)->i_mmap_sem);
	}
}

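/*
 * Find or allocate the block backing @page ahead of a buffered write,
 * converting inline data when the write can no longer stay inline.
 * *blk_addr returns the target block address and *node_changed reports
 * whether a node page was dirtied in the process.
 */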
static int prepare_write_begin(struct f2fs_sb_info *sbi,
			struct page *page, loff_t pos, unsigned len,
			block_t *blk_addr, bool *node_changed)
{
	struct inode *inode = page->mapping->host;
	pgoff_t index = page->index;
	struct dnode_of_data dn;
	struct page *ipage;
	bool locked = false;
	struct extent_info ei = {0,0,0};
	int err = 0;

	/*
	 * we already allocated all the blocks, so we don't need to get
	 * the block addresses when there is no need to fill the page.
	 */
	if (!f2fs_has_inline_data(inode) && len == PAGE_SIZE &&
			!is_inode_flag_set(inode, FI_NO_PREALLOC))
		return 0;

	if (f2fs_has_inline_data(inode) ||
			(pos & PAGE_MASK) >= i_size_read(inode)) {
		__do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, true);
		locked = true;
	}
restart:
	/* check inline_data */
	ipage = get_node_page(sbi, inode->i_ino);
	if (IS_ERR(ipage)) {
		err = PTR_ERR(ipage);
		goto unlock_out;
	}

	set_new_dnode(&dn, inode, ipage, ipage, 0);

	if (f2fs_has_inline_data(inode)) {
		if (pos + len <= MAX_INLINE_DATA(inode)) {
			read_inline_data(page, ipage);
			set_inode_flag(inode, FI_DATA_EXIST);
			if (inode->i_nlink)
				set_inline_node(ipage);
		} else {
			err = f2fs_convert_inline_page(&dn, page);
			if (err)
				goto out;
			if (dn.data_blkaddr == NULL_ADDR)
				err = f2fs_get_block(&dn, index);
		}
	} else if (locked) {
		err = f2fs_get_block(&dn, index);
	} else {
		if (f2fs_lookup_extent_cache(inode, index, &ei)) {
			dn.data_blkaddr = ei.blk + index - ei.fofs;
		} else {
			/* hole case */
			err = get_dnode_of_data(&dn, index, LOOKUP_NODE);
			if (err || dn.data_blkaddr == NULL_ADDR) {
				f2fs_put_dnode(&dn);
				__do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO,
								true);
				locked = true;
				goto restart;
			}
		}
	}

	/* convert_inline_page can make node_changed */
	*blk_addr = dn.data_blkaddr;
	*node_changed = dn.node_changed;
out:
	f2fs_put_dnode(&dn);
unlock_out:
	if (locked)
		__do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, false);
	return err;
}

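/*
 * ->write_begin: grab and prepare the target page, balancing free segments
 * if needed and zeroing or reading in the old block contents before the
 * caller copies in the user data.
 */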
static int f2fs_write_begin(struct file *file, struct address_space *mapping,
		loff_t pos, unsigned len, unsigned flags,
		struct page **pagep, void **fsdata)
{
	struct inode *inode = mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct page *page = NULL;
	pgoff_t index = ((unsigned long long) pos) >> PAGE_SHIFT;
	bool need_balance = false;
	block_t blkaddr = NULL_ADDR;
	int err = 0;

	trace_f2fs_write_begin(inode, pos, len, flags);

	if (f2fs_is_atomic_file(inode) &&
			!available_free_memory(sbi, INMEM_PAGES)) {
		err = -ENOMEM;
		goto fail;
	}

	/*
	 * We should check this at this moment to avoid deadlock on inode page
	 * and #0 page. The locking rule for inline_data conversion should be:
	 * lock_page(page #0) -> lock_page(inode_page)
	 */
	if (index != 0) {
		err = f2fs_convert_inline_inode(inode);
		if (err)
			goto fail;
	}
repeat:
	/*
	 * Do not use grab_cache_page_write_begin() to avoid deadlock due to
	 * wait_for_stable_page. We will wait on the page below with our IO
	 * control.
	 */
	page = f2fs_pagecache_get_page(mapping, index,
				FGP_LOCK | FGP_WRITE | FGP_CREAT, GFP_NOFS);
	if (!page) {
		err = -ENOMEM;
		goto fail;
	}

	*pagep = page;

	err = prepare_write_begin(sbi, page, pos, len,
					&blkaddr, &need_balance);
	if (err)
		goto fail;

	if (need_balance && has_not_enough_free_secs(sbi, 0, 0)) {
		unlock_page(page);
		f2fs_balance_fs(sbi, true);
		lock_page(page);
		if (page->mapping != mapping) {
			/* The page got truncated from under us */
			f2fs_put_page(page, 1);
			goto repeat;
		}
	}

	f2fs_wait_on_page_writeback(page, DATA, false);

	/* wait for GCed encrypted page writeback */
	if (f2fs_encrypted_file(inode))
		f2fs_wait_on_block_writeback(sbi, blkaddr);

	if (len == PAGE_SIZE || PageUptodate(page))
		return 0;

	if (!(pos & (PAGE_SIZE - 1)) && (pos + len) >= i_size_read(inode)) {
		zero_user_segment(page, len, PAGE_SIZE);
		return 0;
	}

	if (blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_SIZE);
		SetPageUptodate(page);
	} else {
		err = f2fs_submit_page_read(inode, page, blkaddr);
		if (err)
			goto fail;

		lock_page(page);
		if (unlikely(page->mapping != mapping)) {
			f2fs_put_page(page, 1);
			goto repeat;
		}
		if (unlikely(!PageUptodate(page))) {
			err = -EIO;
			goto fail;
		}
	}
	return 0;

fail:
	f2fs_put_page(page, 1);
	f2fs_write_failed(mapping, pos + len);
	if (f2fs_is_atomic_file(inode))
		drop_inmem_pages_all(sbi);
	return err;
}

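/*
 * ->write_end: mark the page dirty, extend i_size if the write went past
 * it, and unlock/release the page taken in f2fs_write_begin().
 */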
static int f2fs_write_end(struct file *file,
			struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	struct inode *inode = page->mapping->host;

	trace_f2fs_write_end(inode, pos, len, copied);

	/*
	 * This should come from len == PAGE_SIZE, so copied is expected to be
	 * PAGE_SIZE as well. Otherwise, treat it as zero copied and let
	 * generic_perform_write() try to copy the data again with copied=0.
	 */
	if (!PageUptodate(page)) {
		if (unlikely(copied != len))
			copied = 0;
		else
			SetPageUptodate(page);
	}
	if (!copied)
		goto unlock_out;

	set_page_dirty(page);

	if (pos + copied > i_size_read(inode))
		f2fs_i_size_write(inode, pos + copied);
unlock_out:
	f2fs_put_page(page, 1);
	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
	return copied;
}

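/*
 * Direct I/O requires both the file offset and the user buffer to be
 * block-size aligned.
 */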
static int check_direct_IO(struct inode *inode, struct iov_iter *iter,
			   loff_t offset)
{
	unsigned blocksize_mask = inode->i_sb->s_blocksize - 1;

	if (offset & blocksize_mask)
		return -EINVAL;

	if (iov_iter_alignment(iter) & blocksize_mask)
		return -EINVAL;

	return 0;
}

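/*
 * Issue direct I/O through blockdev_direct_IO() under dio_rwsem, falling
 * back to buffered I/O (by returning 0) when __force_buffered_io() says so.
 */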
static ssize_t f2fs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
{
	struct address_space *mapping = iocb->ki_filp->f_mapping;
	struct inode *inode = mapping->host;
	size_t count = iov_iter_count(iter);
	loff_t offset = iocb->ki_pos;
	int rw = iov_iter_rw(iter);
	int err;

	err = check_direct_IO(inode, iter, offset);
	if (err)
		return err;

	if (__force_buffered_io(inode, rw))
		return 0;

	trace_f2fs_direct_IO_enter(inode, offset, count, rw);

	down_read(&F2FS_I(inode)->dio_rwsem[rw]);
	err = blockdev_direct_IO(iocb, inode, iter, get_data_block_dio);
	up_read(&F2FS_I(inode)->dio_rwsem[rw]);

	if (rw == WRITE) {
		if (err > 0) {
			f2fs_update_iostat(F2FS_I_SB(inode), APP_DIRECT_IO,
									err);
			set_inode_flag(inode, FI_UPDATE_WRITE);
		} else if (err < 0) {
			f2fs_write_failed(mapping, offset + count);
		}
	}

	trace_f2fs_direct_IO_exit(inode, offset, count, rw, err);

	return err;
}

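/*
 * ->invalidatepage: drop the dirty page accounting for the page being
 * invalidated and clear its private state; atomic written pages are handed
 * to drop_inmem_page() instead.
 */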
void f2fs_invalidate_page(struct page *page, unsigned int offset,
							unsigned int length)
{
	struct inode *inode = page->mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

	if (inode->i_ino >= F2FS_ROOT_INO(sbi) &&
		(offset % PAGE_SIZE || length != PAGE_SIZE))
		return;

	if (PageDirty(page)) {
		if (inode->i_ino == F2FS_META_INO(sbi)) {
			dec_page_count(sbi, F2FS_DIRTY_META);
		} else if (inode->i_ino == F2FS_NODE_INO(sbi)) {
			dec_page_count(sbi, F2FS_DIRTY_NODES);
		} else {
			inode_dec_dirty_pages(inode);
			remove_dirty_inode(inode);
		}
	}

	/* This is an atomic written page, keep it Private */
	if (IS_ATOMIC_WRITTEN_PAGE(page))
		return drop_inmem_page(inode, page);

	set_page_private(page, 0);
	ClearPagePrivate(page);
}

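/*
 * ->releasepage: only clean, non-atomic pages may give up their private
 * data.
 */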
int f2fs_release_page(struct page *page, gfp_t wait)
{
	/* If this is a dirty page, keep PagePrivate */
	if (PageDirty(page))
		return 0;

	/* This is an atomic written page, keep it Private */
	if (IS_ATOMIC_WRITTEN_PAGE(page))
		return 0;

	set_page_private(page, 0);
	ClearPagePrivate(page);
	return 1;
}

/*
 * This was copied from __set_page_dirty_buffers, which gives higher
 * performance on very high-speed storage (e.g., pmem).
 */
void f2fs_set_page_dirty_nobuffers(struct page *page)
{
	struct address_space *mapping = page->mapping;
	unsigned long flags;

	if (unlikely(!mapping))
		return;

	spin_lock(&mapping->private_lock);
	lock_page_memcg(page);
	SetPageDirty(page);
	spin_unlock(&mapping->private_lock);

	spin_lock_irqsave(&mapping->tree_lock, flags);
	WARN_ON_ONCE(!PageUptodate(page));
	account_page_dirtied(page, mapping);
	radix_tree_tag_set(&mapping->page_tree,
			page_index(page), PAGECACHE_TAG_DIRTY);
	spin_unlock_irqrestore(&mapping->tree_lock, flags);
	unlock_page_memcg(page);

	__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
	return;
}

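/*
 * ->set_page_dirty: pages of an in-flight atomic write are registered in
 * the inmem list instead of being made dirty; everything else goes through
 * f2fs_set_page_dirty_nobuffers() plus per-inode dirty page accounting.
 */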
static int f2fs_set_data_page_dirty(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct inode *inode = mapping->host;

	trace_f2fs_set_page_dirty(page, DATA);

	if (!PageUptodate(page))
		SetPageUptodate(page);

	if (f2fs_is_atomic_file(inode) && !f2fs_is_commit_atomic_write(inode)) {
		if (!IS_ATOMIC_WRITTEN_PAGE(page)) {
			register_inmem_page(inode, page);
			return 1;
		}
		/*
		 * This page has already been registered, so just
		 * return here.
		 */
		return 0;
	}

	if (!PageDirty(page)) {
		f2fs_set_page_dirty_nobuffers(page);
		update_dirty_page(inode, page);
		return 1;
	}
	return 0;
}

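/*
 * ->bmap: inline inodes have no block mapping; otherwise flush dirty pages
 * first so that every block is allocated before the lookup.
 */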
static sector_t f2fs_bmap(struct address_space *mapping, sector_t block)
{
	struct inode *inode = mapping->host;

	if (f2fs_has_inline_data(inode))
		return 0;

	/* make sure allocating whole blocks */
	if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
		filemap_write_and_wait(mapping);

	return generic_block_bmap(mapping, block, get_data_block_bmap);
}

#ifdef CONFIG_MIGRATION
#include <linux/migrate.h>

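/*
 * Page migration support: an atomic written page must also be repointed in
 * the owning inode's inmem list, which requires holding inmem_lock.
 */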
int f2fs_migrate_page(struct address_space *mapping,
		struct page *newpage, struct page *page, enum migrate_mode mode)
{
	int rc, extra_count;
	struct f2fs_inode_info *fi = F2FS_I(mapping->host);
	bool atomic_written = IS_ATOMIC_WRITTEN_PAGE(page);

	BUG_ON(PageWriteback(page));

	/* migrating an atomic written page is safe with the inmem_lock held */
	if (atomic_written) {
		if (mode != MIGRATE_SYNC)
			return -EBUSY;
		if (!mutex_trylock(&fi->inmem_lock))
			return -EAGAIN;
	}

	/*
	 * A reference is expected if PagePrivate is set when moving a mapping;
	 * however, f2fs breaks this rule to maintain dirty page counts when
	 * truncating pages. So adjusting 'extra_count' here makes it work.
	 */
	extra_count = (atomic_written ? 1 : 0) - page_has_private(page);
	rc = migrate_page_move_mapping(mapping, newpage,
				page, NULL, mode, extra_count);
	if (rc != MIGRATEPAGE_SUCCESS) {
		if (atomic_written)
			mutex_unlock(&fi->inmem_lock);
		return rc;
	}

	if (atomic_written) {
		struct inmem_pages *cur;
		list_for_each_entry(cur, &fi->inmem_pages, list)
			if (cur->page == page) {
				cur->page = newpage;
				break;
			}
		mutex_unlock(&fi->inmem_lock);
		put_page(page);
		get_page(newpage);
	}

	if (PagePrivate(page))
		SetPagePrivate(newpage);
	set_page_private(newpage, page_private(page));

	if (mode != MIGRATE_SYNC_NO_COPY)
		migrate_page_copy(newpage, page);
	else
		migrate_page_states(newpage, page);

	return MIGRATEPAGE_SUCCESS;
}
#endif

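/* address_space operations for f2fs regular data pages */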
const struct address_space_operations f2fs_dblock_aops = {
	.readpage	= f2fs_read_data_page,
	.readpages	= f2fs_read_data_pages,
	.writepage	= f2fs_write_data_page,
	.writepages	= f2fs_write_data_pages,
	.write_begin	= f2fs_write_begin,
	.write_end	= f2fs_write_end,
	.set_page_dirty	= f2fs_set_data_page_dirty,
	.invalidatepage	= f2fs_invalidate_page,
	.releasepage	= f2fs_release_page,
	.direct_IO	= f2fs_direct_IO,
	.bmap		= f2fs_bmap,
#ifdef CONFIG_MIGRATION
	.migratepage    = f2fs_migrate_page,
#endif
};