/*
 * fs/f2fs/data.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/buffer_head.h>
#include <linux/mpage.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/pagevec.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/prefetch.h>
#include <linux/uio.h>
#include <linux/mm.h>
#include <linux/memcontrol.h>
#include <linux/cleancache.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "trace.h"
#include <trace/events/f2fs.h>

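/*
 * Read completion callback: decrypt the pages first if the bio was
 * encrypted, then mark each page uptodate (or with an error) and unlock it.
 */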
static void f2fs_read_end_io(struct bio *bio)
{
	struct bio_vec *bvec;
	int i;

#ifdef CONFIG_F2FS_FAULT_INJECTION
	if (time_to_inject(F2FS_P_SB(bio->bi_io_vec->bv_page), FAULT_IO))
		bio->bi_error = -EIO;
#endif

	if (f2fs_bio_encrypted(bio)) {
		if (bio->bi_error) {
			fscrypt_release_ctx(bio->bi_private);
		} else {
			fscrypt_decrypt_bio_pages(bio->bi_private, bio);
			return;
		}
	}

	bio_for_each_segment_all(bvec, bio, i) {
		struct page *page = bvec->bv_page;

		if (!bio->bi_error) {
			if (!PageUptodate(page))
				SetPageUptodate(page);
		} else {
			ClearPageUptodate(page);
			SetPageError(page);
		}
		unlock_page(page);
	}
	bio_put(bio);
}

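/*
 * Write completion callback: on error, flag the mapping and stop
 * checkpointing; end writeback on every page and wake up a waiting
 * checkpoint once the last outstanding write bio has completed.
 */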
static void f2fs_write_end_io(struct bio *bio)
{
	struct f2fs_sb_info *sbi = bio->bi_private;
	struct bio_vec *bvec;
	int i;

	bio_for_each_segment_all(bvec, bio, i) {
		struct page *page = bvec->bv_page;

		fscrypt_pullback_bio_page(&page, true);

		if (unlikely(bio->bi_error)) {
			mapping_set_error(page->mapping, -EIO);
			f2fs_stop_checkpoint(sbi, true);
		}
		end_page_writeback(page);
	}
	if (atomic_dec_and_test(&sbi->nr_wb_bios) &&
				wq_has_sleeper(&sbi->cp_wait))
		wake_up(&sbi->cp_wait);

	bio_put(bio);
}

/*
 * Low-level block read/write IO operations.
 */
static struct bio *__bio_alloc(struct f2fs_sb_info *sbi, block_t blk_addr,
				int npages, bool is_read)
{
	struct bio *bio;

	bio = f2fs_bio_alloc(npages);

	bio->bi_bdev = sbi->sb->s_bdev;
	bio->bi_iter.bi_sector = SECTOR_FROM_BLOCK(blk_addr);
	bio->bi_end_io = is_read ? f2fs_read_end_io : f2fs_write_end_io;
	bio->bi_private = is_read ? NULL : sbi;

	return bio;
}

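/*
 * Hand a bio to the block layer, accounting in-flight write bios and
 * finishing the current plug for DATA/NODE writes on host-managed SMR
 * mounts.
 */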
static inline void __submit_bio(struct f2fs_sb_info *sbi,
				struct bio *bio, enum page_type type)
{
	if (!is_read_io(bio_op(bio))) {
		atomic_inc(&sbi->nr_wb_bios);
		if (f2fs_sb_mounted_hmsmr(sbi->sb) &&
			current->plug && (type == DATA || type == NODE))
			blk_finish_plug(current->plug);
	}
	submit_bio(bio);
}

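/* Submit the bio currently cached in this f2fs_bio_info, if any. */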
static void __submit_merged_bio(struct f2fs_bio_info *io)
{
	struct f2fs_io_info *fio = &io->fio;

	if (!io->bio)
		return;

	if (is_read_io(fio->op))
		trace_f2fs_submit_read_bio(io->sbi->sb, fio, io->bio);
	else
		trace_f2fs_submit_write_bio(io->sbi->sb, fio, io->bio);

	bio_set_op_attrs(io->bio, fio->op, fio->op_flags);

	__submit_bio(io->sbi, io->bio, fio->type);
	io->bio = NULL;
}

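/*
 * Check whether the cached bio already contains a page of the given inode,
 * the given page itself, or a node page belonging to the given ino.
 */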
static bool __has_merged_page(struct f2fs_bio_info *io, struct inode *inode,
						struct page *page, nid_t ino)
{
	struct bio_vec *bvec;
	struct page *target;
	int i;

	if (!io->bio)
		return false;

	if (!inode && !page && !ino)
		return true;

	bio_for_each_segment_all(bvec, io->bio, i) {

		if (bvec->bv_page->mapping)
			target = bvec->bv_page;
		else
			target = fscrypt_control_page(bvec->bv_page);

		if (inode && inode == target->mapping->host)
			return true;
		if (page && page == target)
			return true;
		if (ino && ino == ino_of_node(target))
			return true;
	}

	return false;
}

static bool has_merged_page(struct f2fs_sb_info *sbi, struct inode *inode,
						struct page *page, nid_t ino,
						enum page_type type)
{
	enum page_type btype = PAGE_TYPE_OF_BIO(type);
	struct f2fs_bio_info *io = &sbi->write_io[btype];
	bool ret;

	down_read(&io->io_rwsem);
	ret = __has_merged_page(io, inode, page, ino);
	up_read(&io->io_rwsem);
	return ret;
}

static void __f2fs_submit_merged_bio(struct f2fs_sb_info *sbi,
				struct inode *inode, struct page *page,
				nid_t ino, enum page_type type, int rw)
{
	enum page_type btype = PAGE_TYPE_OF_BIO(type);
	struct f2fs_bio_info *io;

	io = is_read_io(rw) ? &sbi->read_io : &sbi->write_io[btype];

	down_write(&io->io_rwsem);

	if (!__has_merged_page(io, inode, page, ino))
		goto out;

	/* change META to META_FLUSH in the checkpoint procedure */
	if (type >= META_FLUSH) {
		io->fio.type = META_FLUSH;
		io->fio.op = REQ_OP_WRITE;
		if (test_opt(sbi, NOBARRIER))
			io->fio.op_flags = WRITE_FLUSH | REQ_META | REQ_PRIO;
		else
			io->fio.op_flags = WRITE_FLUSH_FUA | REQ_META |
								REQ_PRIO;
	}
	__submit_merged_bio(io);
out:
	up_write(&io->io_rwsem);
}

void f2fs_submit_merged_bio(struct f2fs_sb_info *sbi, enum page_type type,
									int rw)
{
	__f2fs_submit_merged_bio(sbi, NULL, NULL, 0, type, rw);
}

void f2fs_submit_merged_bio_cond(struct f2fs_sb_info *sbi,
				struct inode *inode, struct page *page,
				nid_t ino, enum page_type type, int rw)
{
	if (has_merged_page(sbi, inode, page, ino, type))
		__f2fs_submit_merged_bio(sbi, inode, page, ino, type, rw);
}

void f2fs_flush_merged_bios(struct f2fs_sb_info *sbi)
{
	f2fs_submit_merged_bio(sbi, DATA, WRITE);
	f2fs_submit_merged_bio(sbi, NODE, WRITE);
	f2fs_submit_merged_bio(sbi, META, WRITE);
}

/*
 * Fill the locked page with data located in the block address.
 * Return unlocked page.
 */
int f2fs_submit_page_bio(struct f2fs_io_info *fio)
{
	struct bio *bio;
	struct page *page = fio->encrypted_page ?
			fio->encrypted_page : fio->page;

	trace_f2fs_submit_page_bio(page, fio);
	f2fs_trace_ios(fio, 0);

	/* Allocate a new bio */
	bio = __bio_alloc(fio->sbi, fio->new_blkaddr, 1, is_read_io(fio->op));

	if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
		bio_put(bio);
		return -EFAULT;
	}
	bio_set_op_attrs(bio, fio->op, fio->op_flags);

	__submit_bio(fio->sbi, bio, fio->type);
	return 0;
}

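/*
 * Add a page to the per-type cached bio so that adjacent blocks are merged
 * into one large request; the cached bio is submitted first whenever the
 * new block is not contiguous with it or uses different op flags.
 */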
void f2fs_submit_page_mbio(struct f2fs_io_info *fio)
{
	struct f2fs_sb_info *sbi = fio->sbi;
	enum page_type btype = PAGE_TYPE_OF_BIO(fio->type);
	struct f2fs_bio_info *io;
	bool is_read = is_read_io(fio->op);
	struct page *bio_page;

	io = is_read ? &sbi->read_io : &sbi->write_io[btype];

	if (fio->old_blkaddr != NEW_ADDR)
		verify_block_addr(sbi, fio->old_blkaddr);
	verify_block_addr(sbi, fio->new_blkaddr);

	down_write(&io->io_rwsem);

	if (io->bio && (io->last_block_in_bio != fio->new_blkaddr - 1 ||
	    (io->fio.op != fio->op || io->fio.op_flags != fio->op_flags)))
		__submit_merged_bio(io);
alloc_new:
	if (io->bio == NULL) {
		io->bio = __bio_alloc(sbi, fio->new_blkaddr,
						BIO_MAX_PAGES, is_read);
		io->fio = *fio;
	}

	bio_page = fio->encrypted_page ? fio->encrypted_page : fio->page;

	if (bio_add_page(io->bio, bio_page, PAGE_SIZE, 0) <
							PAGE_SIZE) {
		__submit_merged_bio(io);
		goto alloc_new;
	}

	io->last_block_in_bio = fio->new_blkaddr;
	f2fs_trace_ios(fio, 0);

	up_write(&io->io_rwsem);
	trace_f2fs_submit_page_mbio(fio->page, fio);
}

static void __set_data_blkaddr(struct dnode_of_data *dn)
{
	struct f2fs_node *rn = F2FS_NODE(dn->node_page);
	__le32 *addr_array;

	/* Get physical address of data block */
	addr_array = blkaddr_in_node(rn);
	addr_array[dn->ofs_in_node] = cpu_to_le32(dn->data_blkaddr);
}

/*
 * Lock ordering for the change of data block address:
 * ->data_page
 *  ->node_page
 *    update block addresses in the node page
 */
void set_data_blkaddr(struct dnode_of_data *dn)
{
	f2fs_wait_on_page_writeback(dn->node_page, NODE, true);
	__set_data_blkaddr(dn);
	if (set_page_dirty(dn->node_page))
		dn->node_changed = true;
}

void f2fs_update_data_blkaddr(struct dnode_of_data *dn, block_t blkaddr)
{
	dn->data_blkaddr = blkaddr;
	set_data_blkaddr(dn);
	f2fs_update_extent_cache(dn);
}

/* dn->ofs_in_node will be returned with up-to-date last block pointer */
int reserve_new_blocks(struct dnode_of_data *dn, blkcnt_t count)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);

	if (!count)
		return 0;

	if (unlikely(is_inode_flag_set(dn->inode, FI_NO_ALLOC)))
		return -EPERM;
	if (unlikely(!inc_valid_block_count(sbi, dn->inode, &count)))
		return -ENOSPC;

	trace_f2fs_reserve_new_blocks(dn->inode, dn->nid,
						dn->ofs_in_node, count);

	f2fs_wait_on_page_writeback(dn->node_page, NODE, true);

	for (; count > 0; dn->ofs_in_node++) {
		block_t blkaddr =
			datablock_addr(dn->node_page, dn->ofs_in_node);
		if (blkaddr == NULL_ADDR) {
			dn->data_blkaddr = NEW_ADDR;
			__set_data_blkaddr(dn);
			count--;
		}
	}

	if (set_page_dirty(dn->node_page))
		dn->node_changed = true;
	return 0;
}

/* Should keep dn->ofs_in_node unchanged */
int reserve_new_block(struct dnode_of_data *dn)
{
	unsigned int ofs_in_node = dn->ofs_in_node;
	int ret;

	ret = reserve_new_blocks(dn, 1);
	dn->ofs_in_node = ofs_in_node;
	return ret;
}

int f2fs_reserve_block(struct dnode_of_data *dn, pgoff_t index)
{
	bool need_put = dn->inode_page ? false : true;
	int err;

	err = get_dnode_of_data(dn, index, ALLOC_NODE);
	if (err)
		return err;

	if (dn->data_blkaddr == NULL_ADDR)
		err = reserve_new_block(dn);
	if (err || need_put)
		f2fs_put_dnode(dn);
	return err;
}

int f2fs_get_block(struct dnode_of_data *dn, pgoff_t index)
{
	struct extent_info ei;
	struct inode *inode = dn->inode;

	if (f2fs_lookup_extent_cache(inode, index, &ei)) {
		dn->data_blkaddr = ei.blk + index - ei.fofs;
		return 0;
	}

	return f2fs_reserve_block(dn, index);
}

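/*
 * Grab a data page from the page cache and, unless it is already uptodate,
 * read its block in with a single read bio (or zero it for NEW_ADDR).
 */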
struct page *get_read_data_page(struct inode *inode, pgoff_t index,
						int op_flags, bool for_write)
{
	struct address_space *mapping = inode->i_mapping;
	struct dnode_of_data dn;
	struct page *page;
	struct extent_info ei;
	int err;
	struct f2fs_io_info fio = {
		.sbi = F2FS_I_SB(inode),
		.type = DATA,
		.op = REQ_OP_READ,
		.op_flags = op_flags,
		.encrypted_page = NULL,
	};

	if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode))
		return read_mapping_page(mapping, index, NULL);

	page = f2fs_grab_cache_page(mapping, index, for_write);
	if (!page)
		return ERR_PTR(-ENOMEM);

	if (f2fs_lookup_extent_cache(inode, index, &ei)) {
		dn.data_blkaddr = ei.blk + index - ei.fofs;
		goto got_it;
	}

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, index, LOOKUP_NODE);
	if (err)
		goto put_err;
	f2fs_put_dnode(&dn);

	if (unlikely(dn.data_blkaddr == NULL_ADDR)) {
		err = -ENOENT;
		goto put_err;
	}
got_it:
	if (PageUptodate(page)) {
		unlock_page(page);
		return page;
	}

	/*
	 * A new dentry page is allocated but not able to be written, since its
	 * new inode page couldn't be allocated due to -ENOSPC.
	 * In such a case, its blkaddr can remain NEW_ADDR.
	 * See f2fs_add_link -> get_new_data_page -> init_inode_metadata.
	 */
	if (dn.data_blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_SIZE);
		if (!PageUptodate(page))
			SetPageUptodate(page);
		unlock_page(page);
		return page;
	}

	fio.new_blkaddr = fio.old_blkaddr = dn.data_blkaddr;
	fio.page = page;
	err = f2fs_submit_page_bio(&fio);
	if (err)
		goto put_err;
	return page;

put_err:
	f2fs_put_page(page, 1);
	return ERR_PTR(err);
}

struct page *find_data_page(struct inode *inode, pgoff_t index)
{
	struct address_space *mapping = inode->i_mapping;
	struct page *page;

	page = find_get_page(mapping, index);
	if (page && PageUptodate(page))
		return page;
	f2fs_put_page(page, 0);

	page = get_read_data_page(inode, index, READ_SYNC, false);
	if (IS_ERR(page))
		return page;

	if (PageUptodate(page))
		return page;

	wait_on_page_locked(page);
	if (unlikely(!PageUptodate(page))) {
		f2fs_put_page(page, 0);
		return ERR_PTR(-EIO);
	}
	return page;
}

/*
 * If it tries to access a hole, return an error.
 * The callers, i.e. functions in dir.c and GC, should be able to know
 * whether this page exists or not.
 */
struct page *get_lock_data_page(struct inode *inode, pgoff_t index,
							bool for_write)
{
	struct address_space *mapping = inode->i_mapping;
	struct page *page;
repeat:
	page = get_read_data_page(inode, index, READ_SYNC, for_write);
	if (IS_ERR(page))
		return page;

	/* wait for read completion */
	lock_page(page);
	if (unlikely(page->mapping != mapping)) {
		f2fs_put_page(page, 1);
		goto repeat;
	}
	if (unlikely(!PageUptodate(page))) {
		f2fs_put_page(page, 1);
		return ERR_PTR(-EIO);
	}
	return page;
}

/*
 * Caller ensures that this data page is never allocated.
 * A new zero-filled data page is allocated in the page cache.
 *
 * Also, caller should grab and release a rwsem by calling f2fs_lock_op() and
 * f2fs_unlock_op().
 * Note that ipage is set only by make_empty_dir, and if any error occurs,
 * ipage should be released by this function.
 */
struct page *get_new_data_page(struct inode *inode,
		struct page *ipage, pgoff_t index, bool new_i_size)
{
	struct address_space *mapping = inode->i_mapping;
	struct page *page;
	struct dnode_of_data dn;
	int err;

	page = f2fs_grab_cache_page(mapping, index, true);
	if (!page) {
		/*
		 * before exiting, we should make sure ipage will be released
		 * if any error occurs.
		 */
		f2fs_put_page(ipage, 1);
		return ERR_PTR(-ENOMEM);
	}

	set_new_dnode(&dn, inode, ipage, NULL, 0);
	err = f2fs_reserve_block(&dn, index);
	if (err) {
		f2fs_put_page(page, 1);
		return ERR_PTR(err);
	}
	if (!ipage)
		f2fs_put_dnode(&dn);

	if (PageUptodate(page))
		goto got_it;

	if (dn.data_blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_SIZE);
		if (!PageUptodate(page))
			SetPageUptodate(page);
	} else {
		f2fs_put_page(page, 1);

		/* if ipage exists, blkaddr should be NEW_ADDR */
		f2fs_bug_on(F2FS_I_SB(inode), ipage);
		page = get_lock_data_page(inode, index, true);
		if (IS_ERR(page))
			return page;
	}
got_it:
	if (new_i_size && i_size_read(inode) <
				((loff_t)(index + 1) << PAGE_SHIFT))
		f2fs_i_size_write(inode, ((loff_t)(index + 1) << PAGE_SHIFT));
	return page;
}

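/*
 * Allocate a new on-disk block for the dnode entry and extend i_size to
 * cover the newly allocated block.
 */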
static int __allocate_data_block(struct dnode_of_data *dn)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	struct f2fs_summary sum;
	struct node_info ni;
	int seg = CURSEG_WARM_DATA;
	pgoff_t fofs;
	blkcnt_t count = 1;

	if (unlikely(is_inode_flag_set(dn->inode, FI_NO_ALLOC)))
		return -EPERM;

	dn->data_blkaddr = datablock_addr(dn->node_page, dn->ofs_in_node);
	if (dn->data_blkaddr == NEW_ADDR)
		goto alloc;

	if (unlikely(!inc_valid_block_count(sbi, dn->inode, &count)))
		return -ENOSPC;

alloc:
	get_node_info(sbi, dn->nid, &ni);
	set_summary(&sum, dn->nid, dn->ofs_in_node, ni.version);

	if (dn->ofs_in_node == 0 && dn->inode_page == dn->node_page)
		seg = CURSEG_DIRECT_IO;

	allocate_data_block(sbi, NULL, dn->data_blkaddr, &dn->data_blkaddr,
								&sum, seg);
	set_data_blkaddr(dn);

	/* update i_size */
	fofs = start_bidx_of_node(ofs_of_node(dn->node_page), dn->inode) +
							dn->ofs_in_node;
	if (i_size_read(dn->inode) < ((loff_t)(fofs + 1) << PAGE_SHIFT))
		f2fs_i_size_write(dn->inode,
				((loff_t)(fofs + 1) << PAGE_SHIFT));
	return 0;
}

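/*
 * Preallocate blocks for an upcoming write so that the write path itself
 * does not need to allocate: PRE_DIO mapping for direct I/O, PRE_AIO
 * batching for buffered writes.
 */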
ssize_t f2fs_preallocate_blocks(struct kiocb *iocb, struct iov_iter *from)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	struct f2fs_map_blocks map;
	ssize_t ret = 0;

	map.m_lblk = F2FS_BLK_ALIGN(iocb->ki_pos);
	map.m_len = F2FS_BYTES_TO_BLK(iocb->ki_pos + iov_iter_count(from));
	if (map.m_len > map.m_lblk)
		map.m_len -= map.m_lblk;
	else
		map.m_len = 0;

	map.m_next_pgofs = NULL;

	if (iocb->ki_flags & IOCB_DIRECT) {
		ret = f2fs_convert_inline_inode(inode);
		if (ret)
			return ret;
		return f2fs_map_blocks(inode, &map, 1, F2FS_GET_BLOCK_PRE_DIO);
	}
	if (iocb->ki_pos + iov_iter_count(from) > MAX_INLINE_DATA) {
		ret = f2fs_convert_inline_inode(inode);
		if (ret)
			return ret;
	}
	if (!f2fs_has_inline_data(inode))
		return f2fs_map_blocks(inode, &map, 1, F2FS_GET_BLOCK_PRE_AIO);
	return ret;
}

/*
 * f2fs_map_blocks() now supports readahead/bmap/rw direct_IO with
 * the f2fs_map_blocks structure.
 * If original data blocks are allocated, then give them to blockdev.
 * Otherwise,
 *     a. preallocate requested block addresses
 *     b. do not use extent cache for better performance
 *     c. give the block addresses to blockdev
 */
int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map,
						int create, int flag)
{
	unsigned int maxblocks = map->m_len;
	struct dnode_of_data dn;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	int mode = create ? ALLOC_NODE : LOOKUP_NODE;
	pgoff_t pgofs, end_offset, end;
	int err = 0, ofs = 1;
	unsigned int ofs_in_node, last_ofs_in_node;
	blkcnt_t prealloc;
	struct extent_info ei;
	block_t blkaddr;

	if (!maxblocks)
		return 0;

	map->m_len = 0;
	map->m_flags = 0;

	/* it only supports block size == page size */
	pgofs =	(pgoff_t)map->m_lblk;
	end = pgofs + maxblocks;

	if (!create && f2fs_lookup_extent_cache(inode, pgofs, &ei)) {
		map->m_pblk = ei.blk + pgofs - ei.fofs;
		map->m_len = min((pgoff_t)maxblocks, ei.fofs + ei.len - pgofs);
		map->m_flags = F2FS_MAP_MAPPED;
		goto out;
	}

next_dnode:
	if (create)
		f2fs_lock_op(sbi);

	/* When reading holes, we need its node page */
	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, pgofs, mode);
	if (err) {
		if (flag == F2FS_GET_BLOCK_BMAP)
			map->m_pblk = 0;
		if (err == -ENOENT) {
			err = 0;
			if (map->m_next_pgofs)
				*map->m_next_pgofs =
					get_next_page_offset(&dn, pgofs);
		}
		goto unlock_out;
	}

	prealloc = 0;
	ofs_in_node = dn.ofs_in_node;
	end_offset = ADDRS_PER_PAGE(dn.node_page, inode);

next_block:
	blkaddr = datablock_addr(dn.node_page, dn.ofs_in_node);

	if (blkaddr == NEW_ADDR || blkaddr == NULL_ADDR) {
		if (create) {
			if (unlikely(f2fs_cp_error(sbi))) {
				err = -EIO;
				goto sync_out;
			}
			if (flag == F2FS_GET_BLOCK_PRE_AIO) {
				if (blkaddr == NULL_ADDR) {
					prealloc++;
					last_ofs_in_node = dn.ofs_in_node;
				}
			} else {
				err = __allocate_data_block(&dn);
				if (!err)
					set_inode_flag(inode, FI_APPEND_WRITE);
			}
			if (err)
				goto sync_out;
			map->m_flags = F2FS_MAP_NEW;
			blkaddr = dn.data_blkaddr;
		} else {
			if (flag == F2FS_GET_BLOCK_BMAP) {
				map->m_pblk = 0;
				goto sync_out;
			}
			if (flag == F2FS_GET_BLOCK_FIEMAP &&
						blkaddr == NULL_ADDR) {
				if (map->m_next_pgofs)
					*map->m_next_pgofs = pgofs + 1;
			}
			if (flag != F2FS_GET_BLOCK_FIEMAP ||
						blkaddr != NEW_ADDR)
				goto sync_out;
		}
	}

	if (flag == F2FS_GET_BLOCK_PRE_AIO)
		goto skip;

	if (map->m_len == 0) {
		/* preallocated unwritten block should be mapped for fiemap. */
		if (blkaddr == NEW_ADDR)
			map->m_flags |= F2FS_MAP_UNWRITTEN;
		map->m_flags |= F2FS_MAP_MAPPED;

		map->m_pblk = blkaddr;
		map->m_len = 1;
	} else if ((map->m_pblk != NEW_ADDR &&
			blkaddr == (map->m_pblk + ofs)) ||
			(map->m_pblk == NEW_ADDR && blkaddr == NEW_ADDR) ||
			flag == F2FS_GET_BLOCK_PRE_DIO) {
		ofs++;
		map->m_len++;
	} else {
		goto sync_out;
	}

skip:
	dn.ofs_in_node++;
	pgofs++;

	/* preallocate blocks in batch for one dnode page */
	if (flag == F2FS_GET_BLOCK_PRE_AIO &&
			(pgofs == end || dn.ofs_in_node == end_offset)) {

		dn.ofs_in_node = ofs_in_node;
		err = reserve_new_blocks(&dn, prealloc);
		if (err)
			goto sync_out;

		map->m_len += dn.ofs_in_node - ofs_in_node;
		if (prealloc && dn.ofs_in_node != last_ofs_in_node + 1) {
			err = -ENOSPC;
			goto sync_out;
		}
		dn.ofs_in_node = end_offset;
	}

	if (pgofs >= end)
		goto sync_out;
	else if (dn.ofs_in_node < end_offset)
		goto next_block;

	f2fs_put_dnode(&dn);

	if (create) {
		f2fs_unlock_op(sbi);
		f2fs_balance_fs(sbi, dn.node_changed);
	}
	goto next_dnode;

sync_out:
	f2fs_put_dnode(&dn);
unlock_out:
	if (create) {
		f2fs_unlock_op(sbi);
		f2fs_balance_fs(sbi, dn.node_changed);
	}
out:
	trace_f2fs_map_blocks(inode, map, err);
	return err;
}

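/*
 * Wrappers that adapt f2fs_map_blocks() to the get_block_t interface used
 * by fiemap, direct I/O and bmap.
 */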
static int __get_data_block(struct inode *inode, sector_t iblock,
			struct buffer_head *bh, int create, int flag,
			pgoff_t *next_pgofs)
{
	struct f2fs_map_blocks map;
	int ret;

	map.m_lblk = iblock;
	map.m_len = bh->b_size >> inode->i_blkbits;
	map.m_next_pgofs = next_pgofs;

	ret = f2fs_map_blocks(inode, &map, create, flag);
	if (!ret) {
		map_bh(bh, inode->i_sb, map.m_pblk);
		bh->b_state = (bh->b_state & ~F2FS_MAP_FLAGS) | map.m_flags;
		bh->b_size = map.m_len << inode->i_blkbits;
	}
	return ret;
}

static int get_data_block(struct inode *inode, sector_t iblock,
			struct buffer_head *bh_result, int create, int flag,
			pgoff_t *next_pgofs)
{
	return __get_data_block(inode, iblock, bh_result, create,
							flag, next_pgofs);
}

static int get_data_block_dio(struct inode *inode, sector_t iblock,
			struct buffer_head *bh_result, int create)
{
	return __get_data_block(inode, iblock, bh_result, create,
						F2FS_GET_BLOCK_DIO, NULL);
}

static int get_data_block_bmap(struct inode *inode, sector_t iblock,
			struct buffer_head *bh_result, int create)
{
	/* Block number less than F2FS MAX BLOCKS */
	if (unlikely(iblock >= F2FS_I_SB(inode)->max_file_blocks))
		return -EFBIG;

	return __get_data_block(inode, iblock, bh_result, create,
						F2FS_GET_BLOCK_BMAP, NULL);
}

static inline sector_t logical_to_blk(struct inode *inode, loff_t offset)
{
	return (offset >> inode->i_blkbits);
}

static inline loff_t blk_to_logical(struct inode *inode, sector_t blk)
{
	return (blk << inode->i_blkbits);
}

int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
		u64 start, u64 len)
{
	struct buffer_head map_bh;
	sector_t start_blk, last_blk;
	pgoff_t next_pgofs;
	u64 logical = 0, phys = 0, size = 0;
	u32 flags = 0;
	int ret = 0;

	ret = fiemap_check_flags(fieinfo, FIEMAP_FLAG_SYNC);
	if (ret)
		return ret;

	if (f2fs_has_inline_data(inode)) {
		ret = f2fs_inline_data_fiemap(inode, fieinfo, start, len);
		if (ret != -EAGAIN)
			return ret;
	}

	inode_lock(inode);

	if (logical_to_blk(inode, len) == 0)
		len = blk_to_logical(inode, 1);

	start_blk = logical_to_blk(inode, start);
	last_blk = logical_to_blk(inode, start + len - 1);

next:
	memset(&map_bh, 0, sizeof(struct buffer_head));
	map_bh.b_size = len;

	ret = get_data_block(inode, start_blk, &map_bh, 0,
					F2FS_GET_BLOCK_FIEMAP, &next_pgofs);
	if (ret)
		goto out;

	/* HOLE */
	if (!buffer_mapped(&map_bh)) {
		start_blk = next_pgofs;

		if (blk_to_logical(inode, start_blk) < blk_to_logical(inode,
					F2FS_I_SB(inode)->max_file_blocks))
			goto prep_next;

		flags |= FIEMAP_EXTENT_LAST;
	}

	if (size) {
		if (f2fs_encrypted_inode(inode))
			flags |= FIEMAP_EXTENT_DATA_ENCRYPTED;

		ret = fiemap_fill_next_extent(fieinfo, logical,
				phys, size, flags);
	}

	if (start_blk > last_blk || ret)
		goto out;

	logical = blk_to_logical(inode, start_blk);
	phys = blk_to_logical(inode, map_bh.b_blocknr);
	size = map_bh.b_size;
	flags = 0;
	if (buffer_unwritten(&map_bh))
		flags = FIEMAP_EXTENT_UNWRITTEN;

	start_blk += logical_to_blk(inode, size);

prep_next:
	cond_resched();
	if (fatal_signal_pending(current))
		ret = -EINTR;
	else
		goto next;
out:
	if (ret == 1)
		ret = 0;

	inode_unlock(inode);
	return ret;
}

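/*
 * Allocate a read bio for the given block address, attaching an fscrypt
 * context when the inode is encrypted so pages can be decrypted at
 * completion time.
 */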
static struct bio *f2fs_grab_bio(struct inode *inode, block_t blkaddr,
				 unsigned nr_pages)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct fscrypt_ctx *ctx = NULL;
	struct block_device *bdev = sbi->sb->s_bdev;
	struct bio *bio;

	if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode)) {
		ctx = fscrypt_get_ctx(inode, GFP_NOFS);
		if (IS_ERR(ctx))
			return ERR_CAST(ctx);

		/* wait for the page to be moved by cleaning */
		f2fs_wait_on_encrypted_page_writeback(sbi, blkaddr);
	}

	bio = bio_alloc(GFP_KERNEL, min_t(int, nr_pages, BIO_MAX_PAGES));
	if (!bio) {
		if (ctx)
			fscrypt_release_ctx(ctx);
		return ERR_PTR(-ENOMEM);
	}
	bio->bi_bdev = bdev;
	bio->bi_iter.bi_sector = SECTOR_FROM_BLOCK(blkaddr);
	bio->bi_end_io = f2fs_read_end_io;
	bio->bi_private = ctx;

	return bio;
}

/*
 * This function was originally taken from fs/mpage.c, and customized for f2fs.
 * The major change is that block_size == page_size in f2fs by default.
 */
static int f2fs_mpage_readpages(struct address_space *mapping,
			struct list_head *pages, struct page *page,
			unsigned nr_pages)
{
	struct bio *bio = NULL;
	unsigned page_idx;
	sector_t last_block_in_bio = 0;
	struct inode *inode = mapping->host;
	const unsigned blkbits = inode->i_blkbits;
	const unsigned blocksize = 1 << blkbits;
	sector_t block_in_file;
	sector_t last_block;
	sector_t last_block_in_file;
	sector_t block_nr;
	struct f2fs_map_blocks map;

	map.m_pblk = 0;
	map.m_lblk = 0;
	map.m_len = 0;
	map.m_flags = 0;
	map.m_next_pgofs = NULL;

	for (page_idx = 0; nr_pages; page_idx++, nr_pages--) {

		prefetchw(&page->flags);
		if (pages) {
			page = list_entry(pages->prev, struct page, lru);
			list_del(&page->lru);
			if (add_to_page_cache_lru(page, mapping,
						  page->index,
						  readahead_gfp_mask(mapping)))
				goto next_page;
		}

		block_in_file = (sector_t)page->index;
		last_block = block_in_file + nr_pages;
		last_block_in_file = (i_size_read(inode) + blocksize - 1) >>
								blkbits;
		if (last_block > last_block_in_file)
			last_block = last_block_in_file;

		/*
		 * Map blocks using the previous result first.
		 */
		if ((map.m_flags & F2FS_MAP_MAPPED) &&
				block_in_file > map.m_lblk &&
				block_in_file < (map.m_lblk + map.m_len))
			goto got_it;

		/*
		 * Then do more f2fs_map_blocks() calls until we are
		 * done with this page.
		 */
		map.m_flags = 0;

		if (block_in_file < last_block) {
			map.m_lblk = block_in_file;
			map.m_len = last_block - block_in_file;

			if (f2fs_map_blocks(inode, &map, 0,
						F2FS_GET_BLOCK_READ))
				goto set_error_page;
		}
got_it:
		if ((map.m_flags & F2FS_MAP_MAPPED)) {
			block_nr = map.m_pblk + block_in_file - map.m_lblk;
			SetPageMappedToDisk(page);

			if (!PageUptodate(page) && !cleancache_get_page(page)) {
				SetPageUptodate(page);
				goto confused;
			}
		} else {
			zero_user_segment(page, 0, PAGE_SIZE);
			if (!PageUptodate(page))
				SetPageUptodate(page);
			unlock_page(page);
			goto next_page;
		}

		/*
		 * This page will go to BIO.  Do we need to send this
		 * BIO off first?
		 */
		if (bio && (last_block_in_bio != block_nr - 1)) {
submit_and_realloc:
			__submit_bio(F2FS_I_SB(inode), bio, DATA);
			bio = NULL;
		}
		if (bio == NULL) {
			bio = f2fs_grab_bio(inode, block_nr, nr_pages);
			if (IS_ERR(bio)) {
				bio = NULL;
				goto set_error_page;
			}
			bio_set_op_attrs(bio, REQ_OP_READ, 0);
		}

		if (bio_add_page(bio, page, blocksize, 0) < blocksize)
			goto submit_and_realloc;

		last_block_in_bio = block_nr;
		goto next_page;
set_error_page:
		SetPageError(page);
		zero_user_segment(page, 0, PAGE_SIZE);
		unlock_page(page);
		goto next_page;
confused:
		if (bio) {
			__submit_bio(F2FS_I_SB(inode), bio, DATA);
			bio = NULL;
		}
		unlock_page(page);
next_page:
		if (pages)
			put_page(page);
	}
	BUG_ON(pages && !list_empty(pages));
	if (bio)
		__submit_bio(F2FS_I_SB(inode), bio, DATA);
	return 0;
}

static int f2fs_read_data_page(struct file *file, struct page *page)
{
	struct inode *inode = page->mapping->host;
	int ret = -EAGAIN;

	trace_f2fs_readpage(page, DATA);

	/* If the file has inline data, try to read it directly */
	if (f2fs_has_inline_data(inode))
		ret = f2fs_read_inline_data(inode, page);
	if (ret == -EAGAIN)
		ret = f2fs_mpage_readpages(page->mapping, NULL, page, 1);
	return ret;
}

static int f2fs_read_data_pages(struct file *file,
			struct address_space *mapping,
			struct list_head *pages, unsigned nr_pages)
{
	struct inode *inode = file->f_mapping->host;
	struct page *page = list_entry(pages->prev, struct page, lru);

	trace_f2fs_readpages(inode, page, nr_pages);

	/* If the file has inline data, skip readpages */
	if (f2fs_has_inline_data(inode))
		return 0;

	return f2fs_mpage_readpages(mapping, pages, NULL, nr_pages);
}

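/*
 * Write one data page to disk: in place (IPU) when an in-place update is
 * preferred, otherwise out of place (OPU) to a newly allocated block.
 */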
int do_write_data_page(struct f2fs_io_info *fio)
{
	struct page *page = fio->page;
	struct inode *inode = page->mapping->host;
	struct dnode_of_data dn;
	int err = 0;

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, page->index, LOOKUP_NODE);
	if (err)
		return err;

	fio->old_blkaddr = dn.data_blkaddr;

	/* This page is already truncated */
	if (fio->old_blkaddr == NULL_ADDR) {
		ClearPageUptodate(page);
		goto out_writepage;
	}

	if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode)) {
		gfp_t gfp_flags = GFP_NOFS;

		/* wait for GCed encrypted page writeback */
		f2fs_wait_on_encrypted_page_writeback(F2FS_I_SB(inode),
							fio->old_blkaddr);
retry_encrypt:
		fio->encrypted_page = fscrypt_encrypt_page(inode, fio->page,
								gfp_flags);
		if (IS_ERR(fio->encrypted_page)) {
			err = PTR_ERR(fio->encrypted_page);
			if (err == -ENOMEM) {
				/* flush pending ios and wait for a while */
				f2fs_flush_merged_bios(F2FS_I_SB(inode));
				congestion_wait(BLK_RW_ASYNC, HZ/50);
				gfp_flags |= __GFP_NOFAIL;
				err = 0;
				goto retry_encrypt;
			}
			goto out_writepage;
		}
	}

	set_page_writeback(page);

	/*
	 * If the current allocation needs SSR,
	 * it is better to do in-place writes for the updated data.
	 */
	if (unlikely(fio->old_blkaddr != NEW_ADDR &&
			!is_cold_data(page) &&
			!IS_ATOMIC_WRITTEN_PAGE(page) &&
			need_inplace_update(inode))) {
		rewrite_data_page(fio);
		set_inode_flag(inode, FI_UPDATE_WRITE);
		trace_f2fs_do_write_data_page(page, IPU);
	} else {
		write_data_page(&dn, fio);
		trace_f2fs_do_write_data_page(page, OPU);
		set_inode_flag(inode, FI_APPEND_WRITE);
		if (page->index == 0)
			set_inode_flag(inode, FI_FIRST_BLOCK_WRITTEN);
	}
out_writepage:
	f2fs_put_dnode(&dn);
	return err;
}

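/*
 * ->writepage callback for data pages: zero the part beyond i_size,
 * redirty the page when writeback has to be postponed, and otherwise
 * write it out through do_write_data_page().
 */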
static int f2fs_write_data_page(struct page *page,
					struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	loff_t i_size = i_size_read(inode);
	const pgoff_t end_index = ((unsigned long long) i_size)
							>> PAGE_SHIFT;
	loff_t psize = (page->index + 1) << PAGE_SHIFT;
	unsigned offset = 0;
	bool need_balance_fs = false;
	int err = 0;
	struct f2fs_io_info fio = {
		.sbi = sbi,
		.type = DATA,
		.op = REQ_OP_WRITE,
		.op_flags = (wbc->sync_mode == WB_SYNC_ALL) ? WRITE_SYNC : 0,
		.page = page,
		.encrypted_page = NULL,
	};

	trace_f2fs_writepage(page, DATA);

	if (page->index < end_index)
		goto write;

	/*
	 * If the offset is out-of-range of file size,
	 * this page does not have to be written to disk.
	 */
	offset = i_size & (PAGE_SIZE - 1);
	if ((page->index >= end_index + 1) || !offset)
		goto out;

	zero_user_segment(page, offset, PAGE_SIZE);
write:
	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
		goto redirty_out;
	if (f2fs_is_drop_cache(inode))
		goto out;
	/* we should not write 0'th page having journal header */
	if (f2fs_is_volatile_file(inode) && (!page->index ||
			(!wbc->for_reclaim &&
			available_free_memory(sbi, BASE_CHECK))))
		goto redirty_out;

	/* we should bypass data pages to proceed the kworker jobs */
	if (unlikely(f2fs_cp_error(sbi))) {
		mapping_set_error(page->mapping, -EIO);
		goto out;
	}

	/* Dentry blocks are controlled by checkpoint */
	if (S_ISDIR(inode->i_mode)) {
		err = do_write_data_page(&fio);
		goto done;
	}

	if (!wbc->for_reclaim)
		need_balance_fs = true;
	else if (has_not_enough_free_secs(sbi, 0, 0))
		goto redirty_out;

	err = -EAGAIN;
	f2fs_lock_op(sbi);
	if (f2fs_has_inline_data(inode))
		err = f2fs_write_inline_data(inode, page);
	if (err == -EAGAIN)
		err = do_write_data_page(&fio);
	if (F2FS_I(inode)->last_disk_size < psize)
		F2FS_I(inode)->last_disk_size = psize;
	f2fs_unlock_op(sbi);
done:
	if (err && err != -ENOENT)
		goto redirty_out;

	clear_cold_data(page);
out:
	inode_dec_dirty_pages(inode);
	if (err)
		ClearPageUptodate(page);

	if (wbc->for_reclaim) {
		f2fs_submit_merged_bio_cond(sbi, NULL, page, 0, DATA, WRITE);
		remove_dirty_inode(inode);
	}

	unlock_page(page);
	f2fs_balance_fs(sbi, need_balance_fs);

	if (unlikely(f2fs_cp_error(sbi)))
		f2fs_submit_merged_bio(sbi, DATA, WRITE);

	return 0;

redirty_out:
	redirty_page_for_writepage(wbc, page);
	unlock_page(page);
	return err;
}

/*
 * This function was copied from write_cache_pages from mm/page-writeback.c.
 * The major change is that it writes cold data pages separately from
 * warm/hot data pages.
 */
static int f2fs_write_cache_pages(struct address_space *mapping,
					struct writeback_control *wbc)
{
	int ret = 0;
	int done = 0;
	struct pagevec pvec;
	int nr_pages;
	pgoff_t uninitialized_var(writeback_index);
	pgoff_t index;
	pgoff_t end;		/* Inclusive */
	pgoff_t done_index;
	int cycled;
	int range_whole = 0;
	int tag;
	int nwritten = 0;

	pagevec_init(&pvec, 0);

	if (wbc->range_cyclic) {
		writeback_index = mapping->writeback_index; /* prev offset */
		index = writeback_index;
		if (index == 0)
			cycled = 1;
		else
			cycled = 0;
		end = -1;
	} else {
		index = wbc->range_start >> PAGE_SHIFT;
		end = wbc->range_end >> PAGE_SHIFT;
		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
			range_whole = 1;
		cycled = 1; /* ignore range_cyclic tests */
	}
	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
		tag = PAGECACHE_TAG_TOWRITE;
	else
		tag = PAGECACHE_TAG_DIRTY;
retry:
	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
		tag_pages_for_writeback(mapping, index, end);
	done_index = index;
	while (!done && (index <= end)) {
		int i;

		nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag,
			      min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1);
		if (nr_pages == 0)
			break;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];

			if (page->index > end) {
				done = 1;
				break;
			}

			done_index = page->index;

			lock_page(page);

			if (unlikely(page->mapping != mapping)) {
continue_unlock:
				unlock_page(page);
				continue;
			}

			if (!PageDirty(page)) {
				/* someone wrote it for us */
				goto continue_unlock;
			}

			if (PageWriteback(page)) {
				if (wbc->sync_mode != WB_SYNC_NONE)
					f2fs_wait_on_page_writeback(page,
								DATA, true);
				else
					goto continue_unlock;
			}

			BUG_ON(PageWriteback(page));
			if (!clear_page_dirty_for_io(page))
				goto continue_unlock;

			ret = mapping->a_ops->writepage(page, wbc);
			if (unlikely(ret)) {
				done_index = page->index + 1;
				done = 1;
				break;
			} else {
				nwritten++;
			}

			if (--wbc->nr_to_write <= 0 &&
			    wbc->sync_mode == WB_SYNC_NONE) {
				done = 1;
				break;
			}
		}
		pagevec_release(&pvec);
		cond_resched();
	}

	if (!cycled && !done) {
		cycled = 1;
		index = 0;
		end = writeback_index - 1;
		goto retry;
	}
	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
		mapping->writeback_index = done_index;

	if (nwritten)
		f2fs_submit_merged_bio_cond(F2FS_M_SB(mapping), mapping->host,
							NULL, 0, DATA, WRITE);

	return ret;
}

static int f2fs_write_data_pages(struct address_space *mapping,
			    struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct blk_plug plug;
	int ret;

	/* deal with chardevs and other special files */
	if (!mapping->a_ops->writepage)
		return 0;

	/* skip writing if there is no dirty page in this inode */
	if (!get_dirty_pages(inode) && wbc->sync_mode == WB_SYNC_NONE)
		return 0;

	if (S_ISDIR(inode->i_mode) && wbc->sync_mode == WB_SYNC_NONE &&
			get_dirty_pages(inode) < nr_pages_to_skip(sbi, DATA) &&
			available_free_memory(sbi, DIRTY_DENTS))
		goto skip_write;

	/* skip writing during file defragment */
	if (is_inode_flag_set(inode, FI_DO_DEFRAG))
		goto skip_write;

	/* during POR, we don't need to trigger writepage at all. */
	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
		goto skip_write;

	trace_f2fs_writepages(mapping->host, wbc, DATA);

	blk_start_plug(&plug);
	ret = f2fs_write_cache_pages(mapping, wbc);
	blk_finish_plug(&plug);
	/*
	 * if some pages were truncated, we cannot guarantee its mapping->host
	 * to detect pending bios.
	 */

	remove_dirty_inode(inode);
	return ret;

skip_write:
	wbc->pages_skipped += get_dirty_pages(inode);
	trace_f2fs_writepages(mapping->host, wbc, DATA);
	return 0;
}

static void f2fs_write_failed(struct address_space *mapping, loff_t to)
{
	struct inode *inode = mapping->host;
	loff_t i_size = i_size_read(inode);

	if (to > i_size) {
		truncate_pagecache(inode, i_size);
		truncate_blocks(inode, i_size, true);
	}
}

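/*
 * Resolve the block address backing the page being written, converting
 * inline data or reserving a new block under f2fs_lock_op() when needed.
 */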
static int prepare_write_begin(struct f2fs_sb_info *sbi,
			struct page *page, loff_t pos, unsigned len,
			block_t *blk_addr, bool *node_changed)
{
	struct inode *inode = page->mapping->host;
	pgoff_t index = page->index;
	struct dnode_of_data dn;
	struct page *ipage;
	bool locked = false;
	struct extent_info ei;
	int err = 0;

	/*
	 * we already allocated all the blocks, so we don't need to get
	 * the block addresses when there is no need to fill the page.
	 */
	if (!f2fs_has_inline_data(inode) && len == PAGE_SIZE)
		return 0;

	if (f2fs_has_inline_data(inode) ||
			(pos & PAGE_MASK) >= i_size_read(inode)) {
		f2fs_lock_op(sbi);
		locked = true;
	}
restart:
	/* check inline_data */
	ipage = get_node_page(sbi, inode->i_ino);
	if (IS_ERR(ipage)) {
		err = PTR_ERR(ipage);
		goto unlock_out;
	}

	set_new_dnode(&dn, inode, ipage, ipage, 0);

	if (f2fs_has_inline_data(inode)) {
		if (pos + len <= MAX_INLINE_DATA) {
			read_inline_data(page, ipage);
			set_inode_flag(inode, FI_DATA_EXIST);
			if (inode->i_nlink)
				set_inline_node(ipage);
		} else {
			err = f2fs_convert_inline_page(&dn, page);
			if (err)
				goto out;
			if (dn.data_blkaddr == NULL_ADDR)
				err = f2fs_get_block(&dn, index);
		}
	} else if (locked) {
		err = f2fs_get_block(&dn, index);
	} else {
		if (f2fs_lookup_extent_cache(inode, index, &ei)) {
			dn.data_blkaddr = ei.blk + index - ei.fofs;
		} else {
			/* hole case */
			err = get_dnode_of_data(&dn, index, LOOKUP_NODE);
			if (err || dn.data_blkaddr == NULL_ADDR) {
				f2fs_put_dnode(&dn);
				f2fs_lock_op(sbi);
				locked = true;
				goto restart;
			}
		}
	}

	/* convert_inline_page can make node_changed */
	*blk_addr = dn.data_blkaddr;
	*node_changed = dn.node_changed;
out:
	f2fs_put_dnode(&dn);
unlock_out:
	if (locked)
		f2fs_unlock_op(sbi);
	return err;
}

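/*
 * ->write_begin callback: grab and prepare the page cache page, reading
 * in the old block when only part of the page will be overwritten.
 */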
static int f2fs_write_begin(struct file *file, struct address_space *mapping,
		loff_t pos, unsigned len, unsigned flags,
		struct page **pagep, void **fsdata)
{
	struct inode *inode = mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct page *page = NULL;
	pgoff_t index = ((unsigned long long) pos) >> PAGE_SHIFT;
	bool need_balance = false;
	block_t blkaddr = NULL_ADDR;
	int err = 0;

	trace_f2fs_write_begin(inode, pos, len, flags);

	/*
	 * We should check this at this moment to avoid deadlock on inode page
	 * and #0 page. The locking rule for inline_data conversion should be:
	 * lock_page(page #0) -> lock_page(inode_page)
	 */
	if (index != 0) {
		err = f2fs_convert_inline_inode(inode);
		if (err)
			goto fail;
	}
repeat:
	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page) {
		err = -ENOMEM;
		goto fail;
	}

	*pagep = page;

	err = prepare_write_begin(sbi, page, pos, len,
					&blkaddr, &need_balance);
	if (err)
		goto fail;

	if (need_balance && has_not_enough_free_secs(sbi, 0, 0)) {
		unlock_page(page);
		f2fs_balance_fs(sbi, true);
		lock_page(page);
		if (page->mapping != mapping) {
			/* The page got truncated from under us */
			f2fs_put_page(page, 1);
			goto repeat;
		}
	}

	f2fs_wait_on_page_writeback(page, DATA, false);

	/* wait for GCed encrypted page writeback */
	if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode))
		f2fs_wait_on_encrypted_page_writeback(sbi, blkaddr);

	if (len == PAGE_SIZE || PageUptodate(page))
		return 0;

	if (blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_SIZE);
		SetPageUptodate(page);
	} else {
		struct bio *bio;

		bio = f2fs_grab_bio(inode, blkaddr, 1);
		if (IS_ERR(bio)) {
			err = PTR_ERR(bio);
			goto fail;
		}
		bio_set_op_attrs(bio, REQ_OP_READ, READ_SYNC);
		if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
			bio_put(bio);
			err = -EFAULT;
			goto fail;
		}

		__submit_bio(sbi, bio, DATA);

		lock_page(page);
		if (unlikely(page->mapping != mapping)) {
			f2fs_put_page(page, 1);
			goto repeat;
		}
		if (unlikely(!PageUptodate(page))) {
			err = -EIO;
			goto fail;
		}
	}
	return 0;

fail:
	f2fs_put_page(page, 1);
	f2fs_write_failed(mapping, pos + len);
	return err;
}

static int f2fs_write_end(struct file *file,
			struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	struct inode *inode = page->mapping->host;

	trace_f2fs_write_end(inode, pos, len, copied);

	/*
	 * This should come from len == PAGE_SIZE, and we expect copied to
	 * be PAGE_SIZE. Otherwise, we treat it with zero copied and
	 * let generic_perform_write() try to copy data again through copied=0.
	 */
	if (!PageUptodate(page)) {
		if (unlikely(copied != PAGE_SIZE))
			copied = 0;
		else
			SetPageUptodate(page);
	}
	if (!copied)
		goto unlock_out;

	set_page_dirty(page);
	clear_cold_data(page);

	if (pos + copied > i_size_read(inode))
		f2fs_i_size_write(inode, pos + copied);
unlock_out:
	f2fs_put_page(page, 1);
	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
	return copied;
}

static int check_direct_IO(struct inode *inode, struct iov_iter *iter,
			   loff_t offset)
{
	unsigned blocksize_mask = inode->i_sb->s_blocksize - 1;

	if (offset & blocksize_mask)
		return -EINVAL;

	if (iov_iter_alignment(iter) & blocksize_mask)
		return -EINVAL;

	return 0;
}

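/*
 * Direct I/O entry point: returns 0 to fall back to buffered I/O for
 * encrypted files and LFS mode, otherwise hands the request to
 * blockdev_direct_IO() with get_data_block_dio().
 */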
static ssize_t f2fs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
{
	struct address_space *mapping = iocb->ki_filp->f_mapping;
	struct inode *inode = mapping->host;
	size_t count = iov_iter_count(iter);
	loff_t offset = iocb->ki_pos;
	int rw = iov_iter_rw(iter);
	int err;

	err = check_direct_IO(inode, iter, offset);
	if (err)
		return err;

	if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode))
		return 0;
	if (test_opt(F2FS_I_SB(inode), LFS))
		return 0;

	trace_f2fs_direct_IO_enter(inode, offset, count, rw);

	down_read(&F2FS_I(inode)->dio_rwsem[rw]);
	err = blockdev_direct_IO(iocb, inode, iter, get_data_block_dio);
	up_read(&F2FS_I(inode)->dio_rwsem[rw]);

	if (rw == WRITE) {
		if (err > 0)
			set_inode_flag(inode, FI_UPDATE_WRITE);
		else if (err < 0)
			f2fs_write_failed(mapping, offset + count);
	}

	trace_f2fs_direct_IO_exit(inode, offset, count, rw, err);

	return err;
}

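/*
 * Page cache invalidate/release hooks: drop dirty-page accounting and
 * private state, except for atomic written pages which must keep theirs.
 */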
void f2fs_invalidate_page(struct page *page, unsigned int offset,
							unsigned int length)
{
	struct inode *inode = page->mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

	if (inode->i_ino >= F2FS_ROOT_INO(sbi) &&
		(offset % PAGE_SIZE || length != PAGE_SIZE))
		return;

	if (PageDirty(page)) {
		if (inode->i_ino == F2FS_META_INO(sbi)) {
			dec_page_count(sbi, F2FS_DIRTY_META);
		} else if (inode->i_ino == F2FS_NODE_INO(sbi)) {
			dec_page_count(sbi, F2FS_DIRTY_NODES);
		} else {
			inode_dec_dirty_pages(inode);
			remove_dirty_inode(inode);
		}
	}

	/* This is an atomic written page, keep Private */
	if (IS_ATOMIC_WRITTEN_PAGE(page))
		return;

	set_page_private(page, 0);
	ClearPagePrivate(page);
}

int f2fs_release_page(struct page *page, gfp_t wait)
{
	/* If this is a dirty page, keep PagePrivate */
	if (PageDirty(page))
		return 0;

	/* This is an atomic written page, keep Private */
	if (IS_ATOMIC_WRITTEN_PAGE(page))
		return 0;

	set_page_private(page, 0);
	ClearPagePrivate(page);
	return 1;
}

/*
 * This was copied from __set_page_dirty_buffers which gives higher performance
 * in very high speed storages. (e.g., pmem)
 */
void f2fs_set_page_dirty_nobuffers(struct page *page)
{
	struct address_space *mapping = page->mapping;
	unsigned long flags;

	if (unlikely(!mapping))
		return;

	spin_lock(&mapping->private_lock);
	lock_page_memcg(page);
	SetPageDirty(page);
	spin_unlock(&mapping->private_lock);

	spin_lock_irqsave(&mapping->tree_lock, flags);
	WARN_ON_ONCE(!PageUptodate(page));
	account_page_dirtied(page, mapping);
	radix_tree_tag_set(&mapping->page_tree,
			page_index(page), PAGECACHE_TAG_DIRTY);
	spin_unlock_irqrestore(&mapping->tree_lock, flags);
	unlock_page_memcg(page);

	__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
	return;
}

static int f2fs_set_data_page_dirty(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct inode *inode = mapping->host;

	trace_f2fs_set_page_dirty(page, DATA);

	if (!PageUptodate(page))
		SetPageUptodate(page);

	if (f2fs_is_atomic_file(inode)) {
		if (!IS_ATOMIC_WRITTEN_PAGE(page)) {
			register_inmem_page(inode, page);
			return 1;
		}
		/*
		 * This page has already been registered, so we just
		 * return here.
		 */
		return 0;
	}

	if (!PageDirty(page)) {
		f2fs_set_page_dirty_nobuffers(page);
		update_dirty_page(inode, page);
		return 1;
	}
	return 0;
}

static sector_t f2fs_bmap(struct address_space *mapping, sector_t block)
{
	struct inode *inode = mapping->host;

	if (f2fs_has_inline_data(inode))
		return 0;

	/* make sure allocating whole blocks */
	if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
		filemap_write_and_wait(mapping);

	return generic_block_bmap(mapping, block, get_data_block_bmap);
}

#ifdef CONFIG_MIGRATION
#include <linux/migrate.h>

int f2fs_migrate_page(struct address_space *mapping,
		struct page *newpage, struct page *page, enum migrate_mode mode)
{
	int rc, extra_count;
	struct f2fs_inode_info *fi = F2FS_I(mapping->host);
	bool atomic_written = IS_ATOMIC_WRITTEN_PAGE(page);

	BUG_ON(PageWriteback(page));

	/* migrating an atomic written page is safe with the inmem_lock held */
	if (atomic_written && !mutex_trylock(&fi->inmem_lock))
		return -EAGAIN;

	/*
	 * A reference is expected if PagePrivate set when move mapping,
	 * however F2FS breaks this for maintaining dirty page counts when
	 * truncating pages. So here adjusting the 'extra_count' makes it work.
	 */
	extra_count = (atomic_written ? 1 : 0) - page_has_private(page);
	rc = migrate_page_move_mapping(mapping, newpage,
				page, NULL, mode, extra_count);
	if (rc != MIGRATEPAGE_SUCCESS) {
		if (atomic_written)
			mutex_unlock(&fi->inmem_lock);
		return rc;
	}

	if (atomic_written) {
		struct inmem_pages *cur;
		list_for_each_entry(cur, &fi->inmem_pages, list)
			if (cur->page == page) {
				cur->page = newpage;
				break;
			}
		mutex_unlock(&fi->inmem_lock);
		put_page(page);
		get_page(newpage);
	}

	if (PagePrivate(page))
		SetPagePrivate(newpage);
	set_page_private(newpage, page_private(page));

	migrate_page_copy(newpage, page);

	return MIGRATEPAGE_SUCCESS;
}
#endif

const struct address_space_operations f2fs_dblock_aops = {
	.readpage	= f2fs_read_data_page,
	.readpages	= f2fs_read_data_pages,
	.writepage	= f2fs_write_data_page,
	.writepages	= f2fs_write_data_pages,
	.write_begin	= f2fs_write_begin,
	.write_end	= f2fs_write_end,
	.set_page_dirty	= f2fs_set_data_page_dirty,
	.invalidatepage	= f2fs_invalidate_page,
	.releasepage	= f2fs_release_page,
	.direct_IO	= f2fs_direct_IO,
	.bmap		= f2fs_bmap,
#ifdef CONFIG_MIGRATION
	.migratepage    = f2fs_migrate_page,
#endif
};