/*
 * fs/f2fs/data.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/buffer_head.h>
#include <linux/mpage.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/pagevec.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/prefetch.h>
#include <linux/uio.h>
#include <linux/mm.h>
#include <linux/memcontrol.h>
#include <linux/cleancache.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "trace.h"
#include <trace/events/f2fs.h>

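/*
 * Writeback of these pages is guaranteed by checkpoint: meta and node
 * inode pages, directory data and cold data.
 */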
static bool __is_cp_guaranteed(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct inode *inode;
	struct f2fs_sb_info *sbi;

	if (!mapping)
		return false;

	inode = mapping->host;
	sbi = F2FS_I_SB(inode);

	if (inode->i_ino == F2FS_META_INO(sbi) ||
			inode->i_ino ==  F2FS_NODE_INO(sbi) ||
			S_ISDIR(inode->i_mode) ||
			is_cold_data(page))
		return true;
	return false;
}

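/* Read completion: hand encrypted bios to fscrypt, then mark and unlock pages. */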
static void f2fs_read_end_io(struct bio *bio)
{
	struct bio_vec *bvec;
	int i;

#ifdef CONFIG_F2FS_FAULT_INJECTION
	if (time_to_inject(F2FS_P_SB(bio->bi_io_vec->bv_page), FAULT_IO))
		bio->bi_error = -EIO;
#endif

	if (f2fs_bio_encrypted(bio)) {
		if (bio->bi_error) {
			fscrypt_release_ctx(bio->bi_private);
		} else {
			fscrypt_decrypt_bio_pages(bio->bi_private, bio);
			return;
		}
	}

	bio_for_each_segment_all(bvec, bio, i) {
		struct page *page = bvec->bv_page;

		if (!bio->bi_error) {
			if (!PageUptodate(page))
				SetPageUptodate(page);
		} else {
			ClearPageUptodate(page);
			SetPageError(page);
		}
		unlock_page(page);
	}
	bio_put(bio);
}

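/*
 * Write completion: drop per-type writeback counts and wake up a
 * checkpoint waiting for the last in-flight data page.
 */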
static void f2fs_write_end_io(struct bio *bio)
{
	struct f2fs_sb_info *sbi = bio->bi_private;
	struct bio_vec *bvec;
	int i;

	bio_for_each_segment_all(bvec, bio, i) {
		struct page *page = bvec->bv_page;
		enum count_type type = WB_DATA_TYPE(page);

		fscrypt_pullback_bio_page(&page, true);

		if (unlikely(bio->bi_error)) {
			mapping_set_error(page->mapping, -EIO);
			f2fs_stop_checkpoint(sbi, true);
		}
		dec_page_count(sbi, type);
		clear_cold_data(page);
		end_page_writeback(page);
	}
	if (!get_pages(sbi, F2FS_WB_CP_DATA) &&
				wq_has_sleeper(&sbi->cp_wait))
		wake_up(&sbi->cp_wait);

	bio_put(bio);
}

/*
 * Return the block device that blk_addr falls in; when a bio is given,
 * also redirect it to that device.
 */
struct block_device *f2fs_target_device(struct f2fs_sb_info *sbi,
				block_t blk_addr, struct bio *bio)
{
	struct block_device *bdev = sbi->sb->s_bdev;
	int i;

	for (i = 0; i < sbi->s_ndevs; i++) {
		if (FDEV(i).start_blk <= blk_addr &&
					FDEV(i).end_blk >= blk_addr) {
			blk_addr -= FDEV(i).start_blk;
			bdev = FDEV(i).bdev;
			break;
		}
	}
	if (bio) {
		bio->bi_bdev = bdev;
		bio->bi_iter.bi_sector = SECTOR_FROM_BLOCK(blk_addr);
	}
	return bdev;
}

int f2fs_target_device_index(struct f2fs_sb_info *sbi, block_t blkaddr)
{
	int i;

	for (i = 0; i < sbi->s_ndevs; i++)
		if (FDEV(i).start_blk <= blkaddr && FDEV(i).end_blk >= blkaddr)
			return i;
	return 0;
}

static bool __same_bdev(struct f2fs_sb_info *sbi,
				block_t blk_addr, struct bio *bio)
{
	return f2fs_target_device(sbi, blk_addr, NULL) == bio->bi_bdev;
}

/*
 * Low-level block read/write IO operations.
 */
static struct bio *__bio_alloc(struct f2fs_sb_info *sbi, block_t blk_addr,
				int npages, bool is_read)
{
	struct bio *bio;

	bio = f2fs_bio_alloc(npages);

	f2fs_target_device(sbi, blk_addr, bio);
	bio->bi_end_io = is_read ? f2fs_read_end_io : f2fs_write_end_io;
	bio->bi_private = is_read ? NULL : sbi;

	return bio;
}

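/*
 * Zoned block devices require sequential writes, so finish the current
 * plug before submitting DATA/NODE write bios to keep LBAs in order.
 */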
static inline void __submit_bio(struct f2fs_sb_info *sbi,
				struct bio *bio, enum page_type type)
{
	if (!is_read_io(bio_op(bio))) {
		if (f2fs_sb_mounted_blkzoned(sbi->sb) &&
			current->plug && (type == DATA || type == NODE))
			blk_finish_plug(current->plug);
	}
	submit_bio(bio);
}

static void __submit_merged_bio(struct f2fs_bio_info *io)
{
	struct f2fs_io_info *fio = &io->fio;

	if (!io->bio)
		return;

	if (is_read_io(fio->op))
		trace_f2fs_submit_read_bio(io->sbi->sb, fio, io->bio);
	else
		trace_f2fs_submit_write_bio(io->sbi->sb, fio, io->bio);

	bio_set_op_attrs(io->bio, fio->op, fio->op_flags);

	__submit_bio(io->sbi, io->bio, fio->type);
	io->bio = NULL;
}

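/*
 * Check whether the pending merged bio already holds a page matching
 * @inode, @page or @ino; with no filter given, any pending bio matches.
 */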
static bool __has_merged_page(struct f2fs_bio_info *io, struct inode *inode,
						struct page *page, nid_t ino)
{
	struct bio_vec *bvec;
	struct page *target;
	int i;

	if (!io->bio)
		return false;

	if (!inode && !page && !ino)
		return true;

	bio_for_each_segment_all(bvec, io->bio, i) {

		if (bvec->bv_page->mapping)
			target = bvec->bv_page;
		else
			target = fscrypt_control_page(bvec->bv_page);

		if (inode && inode == target->mapping->host)
			return true;
		if (page && page == target)
			return true;
		if (ino && ino == ino_of_node(target))
			return true;
	}

	return false;
}

static bool has_merged_page(struct f2fs_sb_info *sbi, struct inode *inode,
						struct page *page, nid_t ino,
						enum page_type type)
{
	enum page_type btype = PAGE_TYPE_OF_BIO(type);
	struct f2fs_bio_info *io = &sbi->write_io[btype];
	bool ret;

	down_read(&io->io_rwsem);
	ret = __has_merged_page(io, inode, page, ino);
	up_read(&io->io_rwsem);
	return ret;
}

static void __f2fs_submit_merged_bio(struct f2fs_sb_info *sbi,
				struct inode *inode, struct page *page,
				nid_t ino, enum page_type type, int rw)
{
	enum page_type btype = PAGE_TYPE_OF_BIO(type);
	struct f2fs_bio_info *io;

	io = is_read_io(rw) ? &sbi->read_io : &sbi->write_io[btype];

	down_write(&io->io_rwsem);

	if (!__has_merged_page(io, inode, page, ino))
		goto out;

	/* change META to META_FLUSH in the checkpoint procedure */
	if (type >= META_FLUSH) {
		io->fio.type = META_FLUSH;
		io->fio.op = REQ_OP_WRITE;
		io->fio.op_flags = REQ_PREFLUSH | REQ_META | REQ_PRIO;
		if (!test_opt(sbi, NOBARRIER))
			io->fio.op_flags |= REQ_FUA;
	}
	__submit_merged_bio(io);
out:
	up_write(&io->io_rwsem);
}

void f2fs_submit_merged_bio(struct f2fs_sb_info *sbi, enum page_type type,
									int rw)
{
	__f2fs_submit_merged_bio(sbi, NULL, NULL, 0, type, rw);
}

void f2fs_submit_merged_bio_cond(struct f2fs_sb_info *sbi,
				struct inode *inode, struct page *page,
				nid_t ino, enum page_type type, int rw)
{
	if (has_merged_page(sbi, inode, page, ino, type))
		__f2fs_submit_merged_bio(sbi, inode, page, ino, type, rw);
}

void f2fs_flush_merged_bios(struct f2fs_sb_info *sbi)
{
	f2fs_submit_merged_bio(sbi, DATA, WRITE);
	f2fs_submit_merged_bio(sbi, NODE, WRITE);
	f2fs_submit_merged_bio(sbi, META, WRITE);
}

/*
 * Fill the locked page with data located at the block address.
 * Return the page unlocked.
 */
int f2fs_submit_page_bio(struct f2fs_io_info *fio)
{
	struct bio *bio;
	struct page *page = fio->encrypted_page ?
			fio->encrypted_page : fio->page;

	trace_f2fs_submit_page_bio(page, fio);
	f2fs_trace_ios(fio, 0);

	/* Allocate a new bio */
	bio = __bio_alloc(fio->sbi, fio->new_blkaddr, 1, is_read_io(fio->op));

	if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
		bio_put(bio);
		return -EFAULT;
	}
	bio_set_op_attrs(bio, fio->op, fio->op_flags);

	__submit_bio(fio->sbi, bio, fio->type);
	return 0;
}

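/*
 * Merge the page into the per-type in-flight bio when the block address
 * is contiguous and op/flags/device match; otherwise submit the old bio
 * and open a new one.
 */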
void f2fs_submit_page_mbio(struct f2fs_io_info *fio)
{
	struct f2fs_sb_info *sbi = fio->sbi;
	enum page_type btype = PAGE_TYPE_OF_BIO(fio->type);
	struct f2fs_bio_info *io;
	bool is_read = is_read_io(fio->op);
	struct page *bio_page;

	io = is_read ? &sbi->read_io : &sbi->write_io[btype];

	if (fio->old_blkaddr != NEW_ADDR)
		verify_block_addr(sbi, fio->old_blkaddr);
	verify_block_addr(sbi, fio->new_blkaddr);

	bio_page = fio->encrypted_page ? fio->encrypted_page : fio->page;

	if (!is_read)
		inc_page_count(sbi, WB_DATA_TYPE(bio_page));

	down_write(&io->io_rwsem);

	if (io->bio && (io->last_block_in_bio != fio->new_blkaddr - 1 ||
	    (io->fio.op != fio->op || io->fio.op_flags != fio->op_flags) ||
			!__same_bdev(sbi, fio->new_blkaddr, io->bio)))
		__submit_merged_bio(io);
alloc_new:
	if (io->bio == NULL) {
		io->bio = __bio_alloc(sbi, fio->new_blkaddr,
						BIO_MAX_PAGES, is_read);
		io->fio = *fio;
	}

	if (bio_add_page(io->bio, bio_page, PAGE_SIZE, 0) <
							PAGE_SIZE) {
		__submit_merged_bio(io);
		goto alloc_new;
	}

	io->last_block_in_bio = fio->new_blkaddr;
	f2fs_trace_ios(fio, 0);

	up_write(&io->io_rwsem);
	trace_f2fs_submit_page_mbio(fio->page, fio);
}

static void __set_data_blkaddr(struct dnode_of_data *dn)
{
	struct f2fs_node *rn = F2FS_NODE(dn->node_page);
	__le32 *addr_array;

	/* Get physical address of data block */
	addr_array = blkaddr_in_node(rn);
	addr_array[dn->ofs_in_node] = cpu_to_le32(dn->data_blkaddr);
}

/*
 * Lock ordering for the change of data block address:
 * ->data_page
 *  ->node_page
 *    update block addresses in the node page
 */
void set_data_blkaddr(struct dnode_of_data *dn)
{
	f2fs_wait_on_page_writeback(dn->node_page, NODE, true);
	__set_data_blkaddr(dn);
	if (set_page_dirty(dn->node_page))
		dn->node_changed = true;
}

void f2fs_update_data_blkaddr(struct dnode_of_data *dn, block_t blkaddr)
{
	dn->data_blkaddr = blkaddr;
	set_data_blkaddr(dn);
	f2fs_update_extent_cache(dn);
}

/* dn->ofs_in_node will be returned with up-to-date last block pointer */
int reserve_new_blocks(struct dnode_of_data *dn, blkcnt_t count)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);

	if (!count)
		return 0;

	if (unlikely(is_inode_flag_set(dn->inode, FI_NO_ALLOC)))
		return -EPERM;
	if (unlikely(!inc_valid_block_count(sbi, dn->inode, &count)))
		return -ENOSPC;

	trace_f2fs_reserve_new_blocks(dn->inode, dn->nid,
						dn->ofs_in_node, count);

	f2fs_wait_on_page_writeback(dn->node_page, NODE, true);

	for (; count > 0; dn->ofs_in_node++) {
		block_t blkaddr =
			datablock_addr(dn->node_page, dn->ofs_in_node);
		if (blkaddr == NULL_ADDR) {
			dn->data_blkaddr = NEW_ADDR;
			__set_data_blkaddr(dn);
			count--;
		}
	}

	if (set_page_dirty(dn->node_page))
		dn->node_changed = true;
	return 0;
}

/* Should keep dn->ofs_in_node unchanged */
int reserve_new_block(struct dnode_of_data *dn)
{
	unsigned int ofs_in_node = dn->ofs_in_node;
	int ret;

	ret = reserve_new_blocks(dn, 1);
	dn->ofs_in_node = ofs_in_node;
	return ret;
}

int f2fs_reserve_block(struct dnode_of_data *dn, pgoff_t index)
{
	bool need_put = dn->inode_page ? false : true;
	int err;

	err = get_dnode_of_data(dn, index, ALLOC_NODE);
	if (err)
		return err;

	if (dn->data_blkaddr == NULL_ADDR)
		err = reserve_new_block(dn);
	if (err || need_put)
		f2fs_put_dnode(dn);
	return err;
}

int f2fs_get_block(struct dnode_of_data *dn, pgoff_t index)
{
	struct extent_info ei;
	struct inode *inode = dn->inode;

	if (f2fs_lookup_extent_cache(inode, index, &ei)) {
		dn->data_blkaddr = ei.blk + index - ei.fofs;
		return 0;
	}

	return f2fs_reserve_block(dn, index);
}

struct page *get_read_data_page(struct inode *inode, pgoff_t index,
						int op_flags, bool for_write)
{
	struct address_space *mapping = inode->i_mapping;
	struct dnode_of_data dn;
	struct page *page;
	struct extent_info ei;
	int err;
	struct f2fs_io_info fio = {
		.sbi = F2FS_I_SB(inode),
		.type = DATA,
		.op = REQ_OP_READ,
		.op_flags = op_flags,
		.encrypted_page = NULL,
	};

	if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode))
		return read_mapping_page(mapping, index, NULL);

	page = f2fs_grab_cache_page(mapping, index, for_write);
	if (!page)
		return ERR_PTR(-ENOMEM);

	if (f2fs_lookup_extent_cache(inode, index, &ei)) {
		dn.data_blkaddr = ei.blk + index - ei.fofs;
		goto got_it;
	}

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, index, LOOKUP_NODE);
	if (err)
		goto put_err;
	f2fs_put_dnode(&dn);

	if (unlikely(dn.data_blkaddr == NULL_ADDR)) {
		err = -ENOENT;
		goto put_err;
	}
got_it:
	if (PageUptodate(page)) {
		unlock_page(page);
		return page;
	}

	/*
	 * A new dentry page is allocated but not able to be written, since its
	 * new inode page couldn't be allocated due to -ENOSPC.
	 * In such a case, its blkaddr can remain NEW_ADDR.
	 * See f2fs_add_link -> get_new_data_page -> init_inode_metadata.
	 */
	if (dn.data_blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_SIZE);
		if (!PageUptodate(page))
			SetPageUptodate(page);
		unlock_page(page);
		return page;
	}

	fio.new_blkaddr = fio.old_blkaddr = dn.data_blkaddr;
	fio.page = page;
	err = f2fs_submit_page_bio(&fio);
	if (err)
		goto put_err;
	return page;

put_err:
	f2fs_put_page(page, 1);
	return ERR_PTR(err);
}

struct page *find_data_page(struct inode *inode, pgoff_t index)
{
	struct address_space *mapping = inode->i_mapping;
	struct page *page;

	page = find_get_page(mapping, index);
	if (page && PageUptodate(page))
		return page;
	f2fs_put_page(page, 0);

	page = get_read_data_page(inode, index, 0, false);
	if (IS_ERR(page))
		return page;

	if (PageUptodate(page))
		return page;

	wait_on_page_locked(page);
	if (unlikely(!PageUptodate(page))) {
		f2fs_put_page(page, 0);
		return ERR_PTR(-EIO);
	}
	return page;
}

/*
 * If it tries to access a hole, return an error.
 * The callers in dir.c and GC need to be able to tell
 * whether this page exists or not.
 */
struct page *get_lock_data_page(struct inode *inode, pgoff_t index,
							bool for_write)
{
	struct address_space *mapping = inode->i_mapping;
	struct page *page;
repeat:
	page = get_read_data_page(inode, index, 0, for_write);
	if (IS_ERR(page))
		return page;

	/* wait for read completion */
	lock_page(page);
	if (unlikely(page->mapping != mapping)) {
		f2fs_put_page(page, 1);
		goto repeat;
	}
	if (unlikely(!PageUptodate(page))) {
		f2fs_put_page(page, 1);
		return ERR_PTR(-EIO);
	}
	return page;
}

/*
 * Caller ensures that this data page is never allocated.
 * A new zero-filled data page is allocated in the page cache.
 *
 * Also, caller should grab and release a rwsem by calling f2fs_lock_op() and
 * f2fs_unlock_op().
 * Note that, ipage is set only by make_empty_dir, and if any error occurs,
 * ipage should be released by this function.
 */
struct page *get_new_data_page(struct inode *inode,
		struct page *ipage, pgoff_t index, bool new_i_size)
{
	struct address_space *mapping = inode->i_mapping;
	struct page *page;
	struct dnode_of_data dn;
	int err;

	page = f2fs_grab_cache_page(mapping, index, true);
	if (!page) {
		/*
		 * before exiting, we should make sure ipage will be released
		 * if any error occurs.
		 */
		f2fs_put_page(ipage, 1);
		return ERR_PTR(-ENOMEM);
	}

	set_new_dnode(&dn, inode, ipage, NULL, 0);
	err = f2fs_reserve_block(&dn, index);
	if (err) {
		f2fs_put_page(page, 1);
		return ERR_PTR(err);
	}
	if (!ipage)
		f2fs_put_dnode(&dn);

	if (PageUptodate(page))
		goto got_it;

	if (dn.data_blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_SIZE);
		if (!PageUptodate(page))
			SetPageUptodate(page);
	} else {
		f2fs_put_page(page, 1);

		/* if ipage exists, blkaddr should be NEW_ADDR */
		f2fs_bug_on(F2FS_I_SB(inode), ipage);
		page = get_lock_data_page(inode, index, true);
		if (IS_ERR(page))
			return page;
	}
got_it:
	if (new_i_size && i_size_read(inode) <
				((loff_t)(index + 1) << PAGE_SHIFT))
		f2fs_i_size_write(inode, ((loff_t)(index + 1) << PAGE_SHIFT));
	return page;
}

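/*
 * Allocate one new block out of place in the warm data log and extend
 * i_size when the new block goes past it.
 */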
static int __allocate_data_block(struct dnode_of_data *dn)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	struct f2fs_summary sum;
	struct node_info ni;
	pgoff_t fofs;
	blkcnt_t count = 1;

	if (unlikely(is_inode_flag_set(dn->inode, FI_NO_ALLOC)))
		return -EPERM;

	dn->data_blkaddr = datablock_addr(dn->node_page, dn->ofs_in_node);
	if (dn->data_blkaddr == NEW_ADDR)
		goto alloc;

	if (unlikely(!inc_valid_block_count(sbi, dn->inode, &count)))
		return -ENOSPC;

alloc:
	get_node_info(sbi, dn->nid, &ni);
	set_summary(&sum, dn->nid, dn->ofs_in_node, ni.version);

	allocate_data_block(sbi, NULL, dn->data_blkaddr, &dn->data_blkaddr,
						&sum, CURSEG_WARM_DATA);
	set_data_blkaddr(dn);

	/* update i_size */
	fofs = start_bidx_of_node(ofs_of_node(dn->node_page), dn->inode) +
							dn->ofs_in_node;
	if (i_size_read(dn->inode) < ((loff_t)(fofs + 1) << PAGE_SHIFT))
		f2fs_i_size_write(dn->inode,
				((loff_t)(fofs + 1) << PAGE_SHIFT));
	return 0;
}

static inline bool __force_buffered_io(struct inode *inode, int rw)
{
	return ((f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode)) ||
			(rw == WRITE && test_opt(F2FS_I_SB(inode), LFS)) ||
			F2FS_I_SB(inode)->s_ndevs);
}

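/* Preallocate blocks covering the byte range of an in-flight write. */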
int f2fs_preallocate_blocks(struct kiocb *iocb, struct iov_iter *from)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	struct f2fs_map_blocks map;
	int err = 0;

	map.m_lblk = F2FS_BLK_ALIGN(iocb->ki_pos);
	map.m_len = F2FS_BYTES_TO_BLK(iocb->ki_pos + iov_iter_count(from));
	if (map.m_len > map.m_lblk)
		map.m_len -= map.m_lblk;
	else
		map.m_len = 0;

	map.m_next_pgofs = NULL;

	if (iocb->ki_flags & IOCB_DIRECT) {
		err = f2fs_convert_inline_inode(inode);
		if (err)
			return err;
		return f2fs_map_blocks(inode, &map, 1,
			__force_buffered_io(inode, WRITE) ?
				F2FS_GET_BLOCK_PRE_AIO :
				F2FS_GET_BLOCK_PRE_DIO);
	}
	if (iocb->ki_pos + iov_iter_count(from) > MAX_INLINE_DATA) {
		err = f2fs_convert_inline_inode(inode);
		if (err)
			return err;
	}
	if (!f2fs_has_inline_data(inode))
		return f2fs_map_blocks(inode, &map, 1, F2FS_GET_BLOCK_PRE_AIO);
	return err;
}

/*
 * f2fs_map_blocks() supports readahead/bmap/rw direct_IO with the
 * f2fs_map_blocks structure.
 * If original data blocks are allocated, then give them to blockdev.
 * Otherwise,
 *     a. preallocate requested block addresses
 *     b. do not use extent cache for better performance
 *     c. give the block addresses to blockdev
 */
int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map,
						int create, int flag)
{
	unsigned int maxblocks = map->m_len;
	struct dnode_of_data dn;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	int mode = create ? ALLOC_NODE : LOOKUP_NODE;
	pgoff_t pgofs, end_offset, end;
	int err = 0, ofs = 1;
	unsigned int ofs_in_node, last_ofs_in_node;
	blkcnt_t prealloc;
	struct extent_info ei;
	block_t blkaddr;

	if (!maxblocks)
		return 0;

	map->m_len = 0;
	map->m_flags = 0;

	/* it only supports block size == page size */
	pgofs = (pgoff_t)map->m_lblk;
	end = pgofs + maxblocks;

	if (!create && f2fs_lookup_extent_cache(inode, pgofs, &ei)) {
		map->m_pblk = ei.blk + pgofs - ei.fofs;
		map->m_len = min((pgoff_t)maxblocks, ei.fofs + ei.len - pgofs);
		map->m_flags = F2FS_MAP_MAPPED;
		goto out;
	}

next_dnode:
	if (create)
		f2fs_lock_op(sbi);

	/* When reading holes, we need its node page */
	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, pgofs, mode);
	if (err) {
		if (flag == F2FS_GET_BLOCK_BMAP)
			map->m_pblk = 0;
		if (err == -ENOENT) {
			err = 0;
			if (map->m_next_pgofs)
				*map->m_next_pgofs =
					get_next_page_offset(&dn, pgofs);
		}
		goto unlock_out;
	}

	prealloc = 0;
	last_ofs_in_node = ofs_in_node = dn.ofs_in_node;
	end_offset = ADDRS_PER_PAGE(dn.node_page, inode);

next_block:
	blkaddr = datablock_addr(dn.node_page, dn.ofs_in_node);

	if (blkaddr == NEW_ADDR || blkaddr == NULL_ADDR) {
		if (create) {
			if (unlikely(f2fs_cp_error(sbi))) {
				err = -EIO;
				goto sync_out;
			}
			if (flag == F2FS_GET_BLOCK_PRE_AIO) {
				if (blkaddr == NULL_ADDR) {
					prealloc++;
					last_ofs_in_node = dn.ofs_in_node;
				}
			} else {
				err = __allocate_data_block(&dn);
				if (!err)
					set_inode_flag(inode, FI_APPEND_WRITE);
			}
			if (err)
				goto sync_out;
			map->m_flags = F2FS_MAP_NEW;
			blkaddr = dn.data_blkaddr;
		} else {
			if (flag == F2FS_GET_BLOCK_BMAP) {
				map->m_pblk = 0;
				goto sync_out;
			}
			if (flag == F2FS_GET_BLOCK_FIEMAP &&
						blkaddr == NULL_ADDR) {
				if (map->m_next_pgofs)
					*map->m_next_pgofs = pgofs + 1;
			}
			if (flag != F2FS_GET_BLOCK_FIEMAP ||
						blkaddr != NEW_ADDR)
				goto sync_out;
		}
	}

	if (flag == F2FS_GET_BLOCK_PRE_AIO)
		goto skip;

	if (map->m_len == 0) {
		/* preallocated unwritten block should be mapped for fiemap. */
		if (blkaddr == NEW_ADDR)
			map->m_flags |= F2FS_MAP_UNWRITTEN;
		map->m_flags |= F2FS_MAP_MAPPED;

		map->m_pblk = blkaddr;
		map->m_len = 1;
	} else if ((map->m_pblk != NEW_ADDR &&
			blkaddr == (map->m_pblk + ofs)) ||
			(map->m_pblk == NEW_ADDR && blkaddr == NEW_ADDR) ||
			flag == F2FS_GET_BLOCK_PRE_DIO) {
		ofs++;
		map->m_len++;
	} else {
		goto sync_out;
	}

skip:
	dn.ofs_in_node++;
	pgofs++;

	/* preallocate blocks in batch for one dnode page */
	if (flag == F2FS_GET_BLOCK_PRE_AIO &&
			(pgofs == end || dn.ofs_in_node == end_offset)) {

		dn.ofs_in_node = ofs_in_node;
		err = reserve_new_blocks(&dn, prealloc);
		if (err)
			goto sync_out;

		map->m_len += dn.ofs_in_node - ofs_in_node;
		if (prealloc && dn.ofs_in_node != last_ofs_in_node + 1) {
			err = -ENOSPC;
			goto sync_out;
		}
		dn.ofs_in_node = end_offset;
	}

	if (pgofs >= end)
		goto sync_out;
	else if (dn.ofs_in_node < end_offset)
		goto next_block;

	f2fs_put_dnode(&dn);

	if (create) {
		f2fs_unlock_op(sbi);
		f2fs_balance_fs(sbi, dn.node_changed);
	}
	goto next_dnode;

sync_out:
	f2fs_put_dnode(&dn);
unlock_out:
	if (create) {
		f2fs_unlock_op(sbi);
		f2fs_balance_fs(sbi, dn.node_changed);
	}
out:
	trace_f2fs_map_blocks(inode, map, err);
	return err;
}

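/* Adapt f2fs_map_blocks() to the buffer_head based get_block_t interface. */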
static int __get_data_block(struct inode *inode, sector_t iblock,
			struct buffer_head *bh, int create, int flag,
			pgoff_t *next_pgofs)
{
	struct f2fs_map_blocks map;
	int err;

	map.m_lblk = iblock;
	map.m_len = bh->b_size >> inode->i_blkbits;
	map.m_next_pgofs = next_pgofs;

	err = f2fs_map_blocks(inode, &map, create, flag);
	if (!err) {
		map_bh(bh, inode->i_sb, map.m_pblk);
		bh->b_state = (bh->b_state & ~F2FS_MAP_FLAGS) | map.m_flags;
		bh->b_size = map.m_len << inode->i_blkbits;
	}
	return err;
}

static int get_data_block(struct inode *inode, sector_t iblock,
			struct buffer_head *bh_result, int create, int flag,
			pgoff_t *next_pgofs)
{
	return __get_data_block(inode, iblock, bh_result, create,
							flag, next_pgofs);
}

static int get_data_block_dio(struct inode *inode, sector_t iblock,
			struct buffer_head *bh_result, int create)
{
	return __get_data_block(inode, iblock, bh_result, create,
						F2FS_GET_BLOCK_DIO, NULL);
}

static int get_data_block_bmap(struct inode *inode, sector_t iblock,
			struct buffer_head *bh_result, int create)
{
	/* Block number less than F2FS MAX BLOCKS */
	if (unlikely(iblock >= F2FS_I_SB(inode)->max_file_blocks))
		return -EFBIG;

	return __get_data_block(inode, iblock, bh_result, create,
						F2FS_GET_BLOCK_BMAP, NULL);
}

static inline sector_t logical_to_blk(struct inode *inode, loff_t offset)
{
	return (offset >> inode->i_blkbits);
}

static inline loff_t blk_to_logical(struct inode *inode, sector_t blk)
{
	return (blk << inode->i_blkbits);
}

int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
		u64 start, u64 len)
{
	struct buffer_head map_bh;
	sector_t start_blk, last_blk;
	pgoff_t next_pgofs;
	u64 logical = 0, phys = 0, size = 0;
	u32 flags = 0;
	int ret = 0;

	ret = fiemap_check_flags(fieinfo, FIEMAP_FLAG_SYNC);
	if (ret)
		return ret;

	if (f2fs_has_inline_data(inode)) {
		ret = f2fs_inline_data_fiemap(inode, fieinfo, start, len);
		if (ret != -EAGAIN)
			return ret;
	}

	inode_lock(inode);

	if (logical_to_blk(inode, len) == 0)
		len = blk_to_logical(inode, 1);

	start_blk = logical_to_blk(inode, start);
	last_blk = logical_to_blk(inode, start + len - 1);

next:
	memset(&map_bh, 0, sizeof(struct buffer_head));
	map_bh.b_size = len;

	ret = get_data_block(inode, start_blk, &map_bh, 0,
					F2FS_GET_BLOCK_FIEMAP, &next_pgofs);
	if (ret)
		goto out;

	/* HOLE */
	if (!buffer_mapped(&map_bh)) {
		start_blk = next_pgofs;

		if (blk_to_logical(inode, start_blk) < blk_to_logical(inode,
					F2FS_I_SB(inode)->max_file_blocks))
			goto prep_next;

		flags |= FIEMAP_EXTENT_LAST;
	}

	if (size) {
		if (f2fs_encrypted_inode(inode))
			flags |= FIEMAP_EXTENT_DATA_ENCRYPTED;

		ret = fiemap_fill_next_extent(fieinfo, logical,
				phys, size, flags);
	}

	if (start_blk > last_blk || ret)
		goto out;

	logical = blk_to_logical(inode, start_blk);
	phys = blk_to_logical(inode, map_bh.b_blocknr);
	size = map_bh.b_size;
	flags = 0;
	if (buffer_unwritten(&map_bh))
		flags = FIEMAP_EXTENT_UNWRITTEN;

	start_blk += logical_to_blk(inode, size);

prep_next:
	cond_resched();
	if (fatal_signal_pending(current))
		ret = -EINTR;
	else
		goto next;
out:
	if (ret == 1)
		ret = 0;

	inode_unlock(inode);
	return ret;
}

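/*
 * Allocate a read bio for @blkaddr, attaching an fscrypt context when
 * the inode is encrypted.
 */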
static struct bio *f2fs_grab_bio(struct inode *inode, block_t blkaddr,
				 unsigned nr_pages)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct fscrypt_ctx *ctx = NULL;
	struct bio *bio;

	if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode)) {
		ctx = fscrypt_get_ctx(inode, GFP_NOFS);
		if (IS_ERR(ctx))
			return ERR_CAST(ctx);

		/* wait for the page to be moved by cleaning */
		f2fs_wait_on_encrypted_page_writeback(sbi, blkaddr);
	}

	bio = bio_alloc(GFP_KERNEL, min_t(int, nr_pages, BIO_MAX_PAGES));
	if (!bio) {
		if (ctx)
			fscrypt_release_ctx(ctx);
		return ERR_PTR(-ENOMEM);
	}
	f2fs_target_device(sbi, blkaddr, bio);
	bio->bi_end_io = f2fs_read_end_io;
	bio->bi_private = ctx;

	return bio;
}

/*
 * This function was originally taken from fs/mpage.c, and customized for f2fs.
 * The major change is that f2fs assumes block_size == page_size by default.
 */
static int f2fs_mpage_readpages(struct address_space *mapping,
			struct list_head *pages, struct page *page,
			unsigned nr_pages)
{
	struct bio *bio = NULL;
	unsigned page_idx;
	sector_t last_block_in_bio = 0;
	struct inode *inode = mapping->host;
	const unsigned blkbits = inode->i_blkbits;
	const unsigned blocksize = 1 << blkbits;
	sector_t block_in_file;
	sector_t last_block;
	sector_t last_block_in_file;
	sector_t block_nr;
	struct f2fs_map_blocks map;

	map.m_pblk = 0;
	map.m_lblk = 0;
	map.m_len = 0;
	map.m_flags = 0;
	map.m_next_pgofs = NULL;

	for (page_idx = 0; nr_pages; page_idx++, nr_pages--) {

		prefetchw(&page->flags);
		if (pages) {
			page = list_entry(pages->prev, struct page, lru);
			list_del(&page->lru);
			if (add_to_page_cache_lru(page, mapping,
						  page->index,
						  readahead_gfp_mask(mapping)))
				goto next_page;
		}

		block_in_file = (sector_t)page->index;
		last_block = block_in_file + nr_pages;
		last_block_in_file = (i_size_read(inode) + blocksize - 1) >>
								blkbits;
		if (last_block > last_block_in_file)
			last_block = last_block_in_file;

		/*
		 * Map blocks using the previous result first.
		 */
		if ((map.m_flags & F2FS_MAP_MAPPED) &&
				block_in_file > map.m_lblk &&
				block_in_file < (map.m_lblk + map.m_len))
			goto got_it;

		/*
		 * Then do more f2fs_map_blocks() calls until we are
		 * done with this page.
		 */
		map.m_flags = 0;

		if (block_in_file < last_block) {
			map.m_lblk = block_in_file;
			map.m_len = last_block - block_in_file;

			if (f2fs_map_blocks(inode, &map, 0,
						F2FS_GET_BLOCK_READ))
				goto set_error_page;
		}
got_it:
		if ((map.m_flags & F2FS_MAP_MAPPED)) {
			block_nr = map.m_pblk + block_in_file - map.m_lblk;
			SetPageMappedToDisk(page);

			if (!PageUptodate(page) && !cleancache_get_page(page)) {
				SetPageUptodate(page);
				goto confused;
			}
		} else {
			zero_user_segment(page, 0, PAGE_SIZE);
			if (!PageUptodate(page))
				SetPageUptodate(page);
			unlock_page(page);
			goto next_page;
		}

		/*
		 * This page will go to BIO.  Do we need to send this
		 * BIO off first?
		 */
		if (bio && (last_block_in_bio != block_nr - 1 ||
			!__same_bdev(F2FS_I_SB(inode), block_nr, bio))) {
submit_and_realloc:
			__submit_bio(F2FS_I_SB(inode), bio, DATA);
			bio = NULL;
		}
		if (bio == NULL) {
			bio = f2fs_grab_bio(inode, block_nr, nr_pages);
			if (IS_ERR(bio)) {
				bio = NULL;
				goto set_error_page;
			}
			bio_set_op_attrs(bio, REQ_OP_READ, 0);
		}

		if (bio_add_page(bio, page, blocksize, 0) < blocksize)
			goto submit_and_realloc;

		last_block_in_bio = block_nr;
		goto next_page;
set_error_page:
		SetPageError(page);
		zero_user_segment(page, 0, PAGE_SIZE);
		unlock_page(page);
		goto next_page;
confused:
		if (bio) {
			__submit_bio(F2FS_I_SB(inode), bio, DATA);
			bio = NULL;
		}
		unlock_page(page);
next_page:
		if (pages)
			put_page(page);
	}
	BUG_ON(pages && !list_empty(pages));
	if (bio)
		__submit_bio(F2FS_I_SB(inode), bio, DATA);
	return 0;
}

static int f2fs_read_data_page(struct file *file, struct page *page)
{
	struct inode *inode = page->mapping->host;
	int ret = -EAGAIN;

	trace_f2fs_readpage(page, DATA);

	/* If the file has inline data, try to read it directly */
	if (f2fs_has_inline_data(inode))
		ret = f2fs_read_inline_data(inode, page);
	if (ret == -EAGAIN)
		ret = f2fs_mpage_readpages(page->mapping, NULL, page, 1);
	return ret;
}

static int f2fs_read_data_pages(struct file *file,
			struct address_space *mapping,
			struct list_head *pages, unsigned nr_pages)
{
	struct inode *inode = file->f_mapping->host;
	struct page *page = list_entry(pages->prev, struct page, lru);

	trace_f2fs_readpages(inode, page, nr_pages);

	/* If the file has inline data, skip readpages */
	if (f2fs_has_inline_data(inode))
		return 0;

	return f2fs_mpage_readpages(mapping, pages, NULL, nr_pages);
}

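/*
 * Write back one data page: rewrite in place (IPU) when in-place update
 * is preferred, otherwise write out of place (OPU) to a newly allocated
 * block.
 */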
int do_write_data_page(struct f2fs_io_info *fio)
{
	struct page *page = fio->page;
	struct inode *inode = page->mapping->host;
	struct dnode_of_data dn;
	int err = 0;

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, page->index, LOOKUP_NODE);
	if (err)
		return err;

	fio->old_blkaddr = dn.data_blkaddr;

	/* This page is already truncated */
	if (fio->old_blkaddr == NULL_ADDR) {
		ClearPageUptodate(page);
		goto out_writepage;
	}

	if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode)) {
		gfp_t gfp_flags = GFP_NOFS;

		/* wait for GCed encrypted page writeback */
		f2fs_wait_on_encrypted_page_writeback(F2FS_I_SB(inode),
							fio->old_blkaddr);
retry_encrypt:
		fio->encrypted_page = fscrypt_encrypt_page(inode, fio->page,
							PAGE_SIZE, 0,
							fio->page->index,
							gfp_flags);
		if (IS_ERR(fio->encrypted_page)) {
			err = PTR_ERR(fio->encrypted_page);
			if (err == -ENOMEM) {
				/* flush pending ios and wait for a while */
				f2fs_flush_merged_bios(F2FS_I_SB(inode));
				congestion_wait(BLK_RW_ASYNC, HZ/50);
				gfp_flags |= __GFP_NOFAIL;
				err = 0;
				goto retry_encrypt;
			}
			goto out_writepage;
		}
	}

	set_page_writeback(page);

	/*
	 * If the current allocation needs SSR,
	 * it is better to do in-place writes for the updated data.
	 */
	if (unlikely(fio->old_blkaddr != NEW_ADDR &&
			!is_cold_data(page) &&
			!IS_ATOMIC_WRITTEN_PAGE(page) &&
			need_inplace_update(inode))) {
		rewrite_data_page(fio);
		set_inode_flag(inode, FI_UPDATE_WRITE);
		trace_f2fs_do_write_data_page(page, IPU);
	} else {
		write_data_page(&dn, fio);
		trace_f2fs_do_write_data_page(page, OPU);
		set_inode_flag(inode, FI_APPEND_WRITE);
		if (page->index == 0)
			set_inode_flag(inode, FI_FIRST_BLOCK_WRITTEN);
	}
out_writepage:
	f2fs_put_dnode(&dn);
	return err;
}

static int f2fs_write_data_page(struct page *page,
					struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	loff_t i_size = i_size_read(inode);
	const pgoff_t end_index = ((unsigned long long) i_size)
							>> PAGE_SHIFT;
	loff_t psize = (page->index + 1) << PAGE_SHIFT;
	unsigned offset = 0;
	bool need_balance_fs = false;
	int err = 0;
	struct f2fs_io_info fio = {
		.sbi = sbi,
		.type = DATA,
		.op = REQ_OP_WRITE,
		.op_flags = wbc_to_write_flags(wbc),
		.page = page,
		.encrypted_page = NULL,
	};

	trace_f2fs_writepage(page, DATA);

	if (page->index < end_index)
		goto write;

	/*
	 * If the offset is out-of-range of file size,
	 * this page does not have to be written to disk.
	 */
	offset = i_size & (PAGE_SIZE - 1);
	if ((page->index >= end_index + 1) || !offset)
		goto out;

	zero_user_segment(page, offset, PAGE_SIZE);
write:
	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
		goto redirty_out;
	if (f2fs_is_drop_cache(inode))
		goto out;
	/* we should not write 0'th page having journal header */
	if (f2fs_is_volatile_file(inode) && (!page->index ||
			(!wbc->for_reclaim &&
			available_free_memory(sbi, BASE_CHECK))))
		goto redirty_out;

	/* we should bypass data pages to let the kworker jobs proceed */
	if (unlikely(f2fs_cp_error(sbi))) {
		mapping_set_error(page->mapping, -EIO);
		goto out;
	}

	/* Dentry blocks are controlled by checkpoint */
	if (S_ISDIR(inode->i_mode)) {
		err = do_write_data_page(&fio);
		goto done;
	}

	if (!wbc->for_reclaim)
		need_balance_fs = true;
	else if (has_not_enough_free_secs(sbi, 0, 0))
		goto redirty_out;

	err = -EAGAIN;
	f2fs_lock_op(sbi);
	if (f2fs_has_inline_data(inode))
		err = f2fs_write_inline_data(inode, page);
	if (err == -EAGAIN)
		err = do_write_data_page(&fio);
	if (F2FS_I(inode)->last_disk_size < psize)
		F2FS_I(inode)->last_disk_size = psize;
	f2fs_unlock_op(sbi);
done:
	if (err && err != -ENOENT)
		goto redirty_out;

out:
	inode_dec_dirty_pages(inode);
	if (err)
		ClearPageUptodate(page);

	if (wbc->for_reclaim) {
		f2fs_submit_merged_bio_cond(sbi, NULL, page, 0, DATA, WRITE);
		remove_dirty_inode(inode);
	}

	unlock_page(page);
	f2fs_balance_fs(sbi, need_balance_fs);

	if (unlikely(f2fs_cp_error(sbi)))
		f2fs_submit_merged_bio(sbi, DATA, WRITE);

	return 0;

redirty_out:
	redirty_page_for_writepage(wbc, page);
	if (!err)
		return AOP_WRITEPAGE_ACTIVATE;
	unlock_page(page);
	return err;
}

/*
 * This function was copied from write_cache_pages from mm/page-writeback.c.
 * The major change is making write step of cold data page separately from
 * warm/hot data page.
 */
static int f2fs_write_cache_pages(struct address_space *mapping,
					struct writeback_control *wbc)
{
	int ret = 0;
	int done = 0;
	struct pagevec pvec;
	int nr_pages;
	pgoff_t uninitialized_var(writeback_index);
	pgoff_t index;
	pgoff_t end;		/* Inclusive */
	pgoff_t done_index;
	int cycled;
	int range_whole = 0;
	int tag;
	int nwritten = 0;

	pagevec_init(&pvec, 0);

	if (wbc->range_cyclic) {
		writeback_index = mapping->writeback_index; /* prev offset */
		index = writeback_index;
		if (index == 0)
			cycled = 1;
		else
			cycled = 0;
		end = -1;
	} else {
		index = wbc->range_start >> PAGE_SHIFT;
		end = wbc->range_end >> PAGE_SHIFT;
		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
			range_whole = 1;
		cycled = 1; /* ignore range_cyclic tests */
	}
	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
		tag = PAGECACHE_TAG_TOWRITE;
	else
		tag = PAGECACHE_TAG_DIRTY;
retry:
	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
		tag_pages_for_writeback(mapping, index, end);
	done_index = index;
	while (!done && (index <= end)) {
		int i;

		nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag,
			      min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1);
		if (nr_pages == 0)
			break;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];

			if (page->index > end) {
				done = 1;
				break;
			}

			done_index = page->index;

			lock_page(page);

			if (unlikely(page->mapping != mapping)) {
continue_unlock:
				unlock_page(page);
				continue;
			}

			if (!PageDirty(page)) {
				/* someone wrote it for us */
				goto continue_unlock;
			}

			if (PageWriteback(page)) {
				if (wbc->sync_mode != WB_SYNC_NONE)
					f2fs_wait_on_page_writeback(page,
								DATA, true);
				else
					goto continue_unlock;
			}

			BUG_ON(PageWriteback(page));
			if (!clear_page_dirty_for_io(page))
				goto continue_unlock;

			ret = mapping->a_ops->writepage(page, wbc);
			if (unlikely(ret)) {
				/*
				 * keep nr_to_write, since vfs uses this to
				 * get # of written pages.
				 */
				if (ret == AOP_WRITEPAGE_ACTIVATE) {
					unlock_page(page);
					ret = 0;
					continue;
				}
				done_index = page->index + 1;
				done = 1;
				break;
			} else {
				nwritten++;
			}

			if (--wbc->nr_to_write <= 0 &&
			    wbc->sync_mode == WB_SYNC_NONE) {
				done = 1;
				break;
			}
		}
		pagevec_release(&pvec);
		cond_resched();
	}

	if (!cycled && !done) {
		cycled = 1;
		index = 0;
		end = writeback_index - 1;
		goto retry;
	}
	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
		mapping->writeback_index = done_index;

	if (nwritten)
		f2fs_submit_merged_bio_cond(F2FS_M_SB(mapping), mapping->host,
							NULL, 0, DATA, WRITE);

	return ret;
}

static int f2fs_write_data_pages(struct address_space *mapping,
			    struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct blk_plug plug;
	int ret;

	/* deal with chardevs and other special files */
	if (!mapping->a_ops->writepage)
		return 0;

	/* skip writing if there is no dirty page in this inode */
	if (!get_dirty_pages(inode) && wbc->sync_mode == WB_SYNC_NONE)
		return 0;

	if (S_ISDIR(inode->i_mode) && wbc->sync_mode == WB_SYNC_NONE &&
			get_dirty_pages(inode) < nr_pages_to_skip(sbi, DATA) &&
			available_free_memory(sbi, DIRTY_DENTS))
		goto skip_write;

	/* skip writing during file defragment */
	if (is_inode_flag_set(inode, FI_DO_DEFRAG))
		goto skip_write;

	/* during POR, we don't need to trigger writepage at all. */
	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
		goto skip_write;

	trace_f2fs_writepages(mapping->host, wbc, DATA);

	blk_start_plug(&plug);
	ret = f2fs_write_cache_pages(mapping, wbc);
	blk_finish_plug(&plug);
	/*
	 * if some pages were truncated, we cannot guarantee that its
	 * mapping->host can detect pending bios.
	 */

	remove_dirty_inode(inode);
	return ret;

skip_write:
	wbc->pages_skipped += get_dirty_pages(inode);
	trace_f2fs_writepages(mapping->host, wbc, DATA);
	return 0;
}

static void f2fs_write_failed(struct address_space *mapping, loff_t to)
{
	struct inode *inode = mapping->host;
	loff_t i_size = i_size_read(inode);

	if (to > i_size) {
		truncate_pagecache(inode, i_size);
		truncate_blocks(inode, i_size, true);
	}
}

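/*
 * Resolve the block address to be written: handle inline data first,
 * taking f2fs_lock_op() only when block allocation may be required.
 */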
static int prepare_write_begin(struct f2fs_sb_info *sbi,
			struct page *page, loff_t pos, unsigned len,
			block_t *blk_addr, bool *node_changed)
{
	struct inode *inode = page->mapping->host;
	pgoff_t index = page->index;
	struct dnode_of_data dn;
	struct page *ipage;
	bool locked = false;
	struct extent_info ei;
	int err = 0;

	/*
	 * we already allocated all the blocks, so we don't need to get
	 * the block addresses when there is no need to fill the page.
	 */
	if (!f2fs_has_inline_data(inode) && len == PAGE_SIZE)
		return 0;

	if (f2fs_has_inline_data(inode) ||
			(pos & PAGE_MASK) >= i_size_read(inode)) {
		f2fs_lock_op(sbi);
		locked = true;
	}
restart:
	/* check inline_data */
	ipage = get_node_page(sbi, inode->i_ino);
	if (IS_ERR(ipage)) {
		err = PTR_ERR(ipage);
		goto unlock_out;
	}

	set_new_dnode(&dn, inode, ipage, ipage, 0);

	if (f2fs_has_inline_data(inode)) {
		if (pos + len <= MAX_INLINE_DATA) {
			read_inline_data(page, ipage);
			set_inode_flag(inode, FI_DATA_EXIST);
			if (inode->i_nlink)
				set_inline_node(ipage);
		} else {
			err = f2fs_convert_inline_page(&dn, page);
			if (err)
				goto out;
			if (dn.data_blkaddr == NULL_ADDR)
				err = f2fs_get_block(&dn, index);
		}
	} else if (locked) {
		err = f2fs_get_block(&dn, index);
	} else {
		if (f2fs_lookup_extent_cache(inode, index, &ei)) {
			dn.data_blkaddr = ei.blk + index - ei.fofs;
		} else {
			/* hole case */
			err = get_dnode_of_data(&dn, index, LOOKUP_NODE);
			if (err || dn.data_blkaddr == NULL_ADDR) {
				f2fs_put_dnode(&dn);
				f2fs_lock_op(sbi);
				locked = true;
				goto restart;
			}
		}
	}

	/* convert_inline_page can make node_changed */
	*blk_addr = dn.data_blkaddr;
	*node_changed = dn.node_changed;
out:
	f2fs_put_dnode(&dn);
unlock_out:
	if (locked)
		f2fs_unlock_op(sbi);
	return err;
}

static int f2fs_write_begin(struct file *file, struct address_space *mapping,
		loff_t pos, unsigned len, unsigned flags,
		struct page **pagep, void **fsdata)
{
	struct inode *inode = mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct page *page = NULL;
	pgoff_t index = ((unsigned long long) pos) >> PAGE_SHIFT;
	bool need_balance = false;
	block_t blkaddr = NULL_ADDR;
	int err = 0;

	trace_f2fs_write_begin(inode, pos, len, flags);

	/*
	 * We should check this at this moment to avoid deadlock on inode page
	 * and #0 page. The locking rule for inline_data conversion should be:
	 * lock_page(page #0) -> lock_page(inode_page)
	 */
	if (index != 0) {
		err = f2fs_convert_inline_inode(inode);
		if (err)
			goto fail;
	}
repeat:
	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page) {
		err = -ENOMEM;
		goto fail;
	}

	*pagep = page;

	err = prepare_write_begin(sbi, page, pos, len,
					&blkaddr, &need_balance);
	if (err)
		goto fail;

	if (need_balance && has_not_enough_free_secs(sbi, 0, 0)) {
		unlock_page(page);
		f2fs_balance_fs(sbi, true);
		lock_page(page);
		if (page->mapping != mapping) {
			/* The page got truncated from under us */
			f2fs_put_page(page, 1);
			goto repeat;
		}
	}

	f2fs_wait_on_page_writeback(page, DATA, false);

	/* wait for GCed encrypted page writeback */
	if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode))
		f2fs_wait_on_encrypted_page_writeback(sbi, blkaddr);

	if (len == PAGE_SIZE || PageUptodate(page))
		return 0;

	if (blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_SIZE);
		SetPageUptodate(page);
	} else {
		struct bio *bio;

		bio = f2fs_grab_bio(inode, blkaddr, 1);
		if (IS_ERR(bio)) {
			err = PTR_ERR(bio);
			goto fail;
		}
		bio->bi_opf = REQ_OP_READ;
		if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
			bio_put(bio);
			err = -EFAULT;
			goto fail;
		}

		__submit_bio(sbi, bio, DATA);

		lock_page(page);
		if (unlikely(page->mapping != mapping)) {
			f2fs_put_page(page, 1);
			goto repeat;
		}
		if (unlikely(!PageUptodate(page))) {
			err = -EIO;
			goto fail;
		}
	}
	return 0;

fail:
	f2fs_put_page(page, 1);
	f2fs_write_failed(mapping, pos + len);
	return err;
}

static int f2fs_write_end(struct file *file,
			struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	struct inode *inode = page->mapping->host;

	trace_f2fs_write_end(inode, pos, len, copied);

	/*
	 * This should come from len == PAGE_SIZE, and copied is expected to
	 * be PAGE_SIZE. Otherwise, treat it as zero copied and let
	 * generic_perform_write() try to copy the data again via copied=0.
	 */
	if (!PageUptodate(page)) {
		if (unlikely(copied != PAGE_SIZE))
			copied = 0;
		else
			SetPageUptodate(page);
	}
	if (!copied)
		goto unlock_out;

	set_page_dirty(page);

	if (pos + copied > i_size_read(inode))
		f2fs_i_size_write(inode, pos + copied);
unlock_out:
	f2fs_put_page(page, 1);
	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
	return copied;
}

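/* Direct I/O requires both the file offset and the user buffer to be block-size aligned. */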
static int check_direct_IO(struct inode *inode, struct iov_iter *iter,
			   loff_t offset)
{
	unsigned blocksize_mask = inode->i_sb->s_blocksize - 1;

	if (offset & blocksize_mask)
		return -EINVAL;

	if (iov_iter_alignment(iter) & blocksize_mask)
		return -EINVAL;

	return 0;
}

static ssize_t f2fs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
{
	struct address_space *mapping = iocb->ki_filp->f_mapping;
	struct inode *inode = mapping->host;
	size_t count = iov_iter_count(iter);
	loff_t offset = iocb->ki_pos;
	int rw = iov_iter_rw(iter);
	int err;

	err = check_direct_IO(inode, iter, offset);
	if (err)
		return err;

	if (__force_buffered_io(inode, rw))
		return 0;

	trace_f2fs_direct_IO_enter(inode, offset, count, rw);

	down_read(&F2FS_I(inode)->dio_rwsem[rw]);
	err = blockdev_direct_IO(iocb, inode, iter, get_data_block_dio);
	up_read(&F2FS_I(inode)->dio_rwsem[rw]);

	if (rw == WRITE) {
		if (err > 0)
			set_inode_flag(inode, FI_UPDATE_WRITE);
		else if (err < 0)
			f2fs_write_failed(mapping, offset + count);
	}

	trace_f2fs_direct_IO_exit(inode, offset, count, rw, err);

	return err;
}

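/*
 * Invalidation hook: fix up dirty page accounting before a page leaves
 * the cache; atomic-written pages keep their private data.
 */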
void f2fs_invalidate_page(struct page *page, unsigned int offset,
							unsigned int length)
{
	struct inode *inode = page->mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

	if (inode->i_ino >= F2FS_ROOT_INO(sbi) &&
		(offset % PAGE_SIZE || length != PAGE_SIZE))
		return;

	if (PageDirty(page)) {
		if (inode->i_ino == F2FS_META_INO(sbi)) {
			dec_page_count(sbi, F2FS_DIRTY_META);
		} else if (inode->i_ino == F2FS_NODE_INO(sbi)) {
			dec_page_count(sbi, F2FS_DIRTY_NODES);
		} else {
			inode_dec_dirty_pages(inode);
			remove_dirty_inode(inode);
		}
	}

	/* This is atomic written page, keep Private */
	if (IS_ATOMIC_WRITTEN_PAGE(page))
		return;

	set_page_private(page, 0);
	ClearPagePrivate(page);
}

int f2fs_release_page(struct page *page, gfp_t wait)
{
	/* If this is dirty page, keep PagePrivate */
	if (PageDirty(page))
		return 0;

	/* This is atomic written page, keep Private */
	if (IS_ATOMIC_WRITTEN_PAGE(page))
		return 0;

	set_page_private(page, 0);
	ClearPagePrivate(page);
	return 1;
}

/*
 * This was copied from __set_page_dirty_buffers which gives higher performance
 * in very high speed storages. (e.g., pmem)
 */
void f2fs_set_page_dirty_nobuffers(struct page *page)
{
	struct address_space *mapping = page->mapping;
	unsigned long flags;

	if (unlikely(!mapping))
		return;

	spin_lock(&mapping->private_lock);
	lock_page_memcg(page);
	SetPageDirty(page);
	spin_unlock(&mapping->private_lock);

	spin_lock_irqsave(&mapping->tree_lock, flags);
	WARN_ON_ONCE(!PageUptodate(page));
	account_page_dirtied(page, mapping);
	radix_tree_tag_set(&mapping->page_tree,
			page_index(page), PAGECACHE_TAG_DIRTY);
	spin_unlock_irqrestore(&mapping->tree_lock, flags);
	unlock_page_memcg(page);

	__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
	return;
}

static int f2fs_set_data_page_dirty(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct inode *inode = mapping->host;

	trace_f2fs_set_page_dirty(page, DATA);

	if (!PageUptodate(page))
		SetPageUptodate(page);

	if (f2fs_is_atomic_file(inode)) {
		if (!IS_ATOMIC_WRITTEN_PAGE(page)) {
			register_inmem_page(inode, page);
			return 1;
		}
		/*
		 * Previously, this page has been registered, we just
		 * return here.
		 */
		return 0;
	}

	if (!PageDirty(page)) {
		f2fs_set_page_dirty_nobuffers(page);
		update_dirty_page(inode, page);
		return 1;
	}
	return 0;
}

static sector_t f2fs_bmap(struct address_space *mapping, sector_t block)
{
	struct inode *inode = mapping->host;

	if (f2fs_has_inline_data(inode))
		return 0;

	/* make sure allocating whole blocks */
	if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
		filemap_write_and_wait(mapping);

	return generic_block_bmap(mapping, block, get_data_block_bmap);
}

#ifdef CONFIG_MIGRATION
#include <linux/migrate.h>

int f2fs_migrate_page(struct address_space *mapping,
		struct page *newpage, struct page *page, enum migrate_mode mode)
{
	int rc, extra_count;
	struct f2fs_inode_info *fi = F2FS_I(mapping->host);
	bool atomic_written = IS_ATOMIC_WRITTEN_PAGE(page);

	BUG_ON(PageWriteback(page));

	/* migrating an atomic written page is safe with the inmem_lock held */
	if (atomic_written && !mutex_trylock(&fi->inmem_lock))
		return -EAGAIN;

	/*
	 * A reference is expected if PagePrivate is set when moving the mapping;
	 * however, F2FS breaks this for maintaining dirty page counts when
	 * truncating pages. So here adjusting the 'extra_count' makes it work.
	 */
	extra_count = (atomic_written ? 1 : 0) - page_has_private(page);
	rc = migrate_page_move_mapping(mapping, newpage,
				page, NULL, mode, extra_count);
	if (rc != MIGRATEPAGE_SUCCESS) {
		if (atomic_written)
			mutex_unlock(&fi->inmem_lock);
		return rc;
	}

	if (atomic_written) {
		struct inmem_pages *cur;
		list_for_each_entry(cur, &fi->inmem_pages, list)
			if (cur->page == page) {
				cur->page = newpage;
				break;
			}
		mutex_unlock(&fi->inmem_lock);
		put_page(page);
		get_page(newpage);
	}

	if (PagePrivate(page))
		SetPagePrivate(newpage);
	set_page_private(newpage, page_private(page));

	migrate_page_copy(newpage, page);

	return MIGRATEPAGE_SUCCESS;
}
#endif

const struct address_space_operations f2fs_dblock_aops = {
	.readpage	= f2fs_read_data_page,
	.readpages	= f2fs_read_data_pages,
	.writepage	= f2fs_write_data_page,
	.writepages	= f2fs_write_data_pages,
	.write_begin	= f2fs_write_begin,
	.write_end	= f2fs_write_end,
	.set_page_dirty	= f2fs_set_data_page_dirty,
	.invalidatepage	= f2fs_invalidate_page,
	.releasepage	= f2fs_release_page,
	.direct_IO	= f2fs_direct_IO,
	.bmap		= f2fs_bmap,
#ifdef CONFIG_MIGRATION
	.migratepage    = f2fs_migrate_page,
#endif
};