/*
 * fs/f2fs/data.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/buffer_head.h>
#include <linux/mpage.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/pagevec.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/prefetch.h>
#include <linux/uio.h>
#include <linux/cleancache.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "trace.h"
#include <trace/events/f2fs.h>

static void f2fs_read_end_io(struct bio *bio)
{
	struct bio_vec *bvec;
	int i;

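	/*
	 * Encrypted reads hand the bio to the crypto layer: on success the
	 * crypto work completes the pages later, so just return; on error the
	 * crypto context is released and the pages fall through below.
	 */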
	if (f2fs_bio_encrypted(bio)) {
		if (bio->bi_error) {
			f2fs_release_crypto_ctx(bio->bi_private);
		} else {
			f2fs_end_io_crypto_work(bio->bi_private, bio);
			return;
		}
	}

	bio_for_each_segment_all(bvec, bio, i) {
		struct page *page = bvec->bv_page;

		if (!bio->bi_error) {
			SetPageUptodate(page);
		} else {
			ClearPageUptodate(page);
			SetPageError(page);
		}
		unlock_page(page);
	}
	bio_put(bio);
}

static void f2fs_write_end_io(struct bio *bio)
{
	struct f2fs_sb_info *sbi = bio->bi_private;
	struct bio_vec *bvec;
	int i;

	bio_for_each_segment_all(bvec, bio, i) {
		struct page *page = bvec->bv_page;

		f2fs_restore_and_release_control_page(&page);

		if (unlikely(bio->bi_error)) {
			set_page_dirty(page);
			set_bit(AS_EIO, &page->mapping->flags);
			f2fs_stop_checkpoint(sbi);
		}
		end_page_writeback(page);
		dec_page_count(sbi, F2FS_WRITEBACK);
	}

	if (!get_pages(sbi, F2FS_WRITEBACK) &&
			!list_empty(&sbi->cp_wait.task_list))
		wake_up(&sbi->cp_wait);

	bio_put(bio);
}

/*
 * Low-level block read/write IO operations.
 */
static struct bio *__bio_alloc(struct f2fs_sb_info *sbi, block_t blk_addr,
				int npages, bool is_read)
{
	struct bio *bio;

	bio = f2fs_bio_alloc(npages);

	bio->bi_bdev = sbi->sb->s_bdev;
	bio->bi_iter.bi_sector = SECTOR_FROM_BLOCK(blk_addr);
	bio->bi_end_io = is_read ? f2fs_read_end_io : f2fs_write_end_io;
	bio->bi_private = is_read ? NULL : sbi;

	return bio;
}

static void __submit_merged_bio(struct f2fs_bio_info *io)
{
	struct f2fs_io_info *fio = &io->fio;

	if (!io->bio)
		return;

	if (is_read_io(fio->rw))
		trace_f2fs_submit_read_bio(io->sbi->sb, fio, io->bio);
	else
		trace_f2fs_submit_write_bio(io->sbi->sb, fio, io->bio);

	submit_bio(fio->rw, io->bio);
	io->bio = NULL;
}

void f2fs_submit_merged_bio(struct f2fs_sb_info *sbi,
				enum page_type type, int rw)
{
	enum page_type btype = PAGE_TYPE_OF_BIO(type);
	struct f2fs_bio_info *io;

	io = is_read_io(rw) ? &sbi->read_io : &sbi->write_io[btype];

	down_write(&io->io_rwsem);

	/* change META to META_FLUSH in the checkpoint procedure */
	if (type >= META_FLUSH) {
		io->fio.type = META_FLUSH;
		if (test_opt(sbi, NOBARRIER))
			io->fio.rw = WRITE_FLUSH | REQ_META | REQ_PRIO;
		else
			io->fio.rw = WRITE_FLUSH_FUA | REQ_META | REQ_PRIO;
	}
	__submit_merged_bio(io);
	up_write(&io->io_rwsem);
}

/*
 * Fill the locked page with data located at the given block address.
 * The page is returned unlocked.
 */
int f2fs_submit_page_bio(struct f2fs_io_info *fio)
{
	struct bio *bio;
	struct page *page = fio->encrypted_page ? fio->encrypted_page : fio->page;

	trace_f2fs_submit_page_bio(page, fio);
	f2fs_trace_ios(fio, 0);

	/* Allocate a new bio */
	bio = __bio_alloc(fio->sbi, fio->blk_addr, 1, is_read_io(fio->rw));

	if (bio_add_page(bio, page, PAGE_CACHE_SIZE, 0) < PAGE_CACHE_SIZE) {
		bio_put(bio);
		return -EFAULT;
	}

	submit_bio(fio->rw, bio);
	return 0;
}

void f2fs_submit_page_mbio(struct f2fs_io_info *fio)
{
	struct f2fs_sb_info *sbi = fio->sbi;
	enum page_type btype = PAGE_TYPE_OF_BIO(fio->type);
	struct f2fs_bio_info *io;
	bool is_read = is_read_io(fio->rw);
	struct page *bio_page;

	io = is_read ? &sbi->read_io : &sbi->write_io[btype];

	verify_block_addr(sbi, fio->blk_addr);

	down_write(&io->io_rwsem);

	if (!is_read)
		inc_page_count(sbi, F2FS_WRITEBACK);

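	/*
	 * Merge into the current in-flight bio only if this block immediately
	 * follows the last one and the rw flags match; otherwise submit what
	 * has been merged so far and start a new bio.
	 */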
	if (io->bio && (io->last_block_in_bio != fio->blk_addr - 1 ||
						io->fio.rw != fio->rw))
		__submit_merged_bio(io);
alloc_new:
	if (io->bio == NULL) {
		int bio_blocks = MAX_BIO_BLOCKS(sbi);

		io->bio = __bio_alloc(sbi, fio->blk_addr, bio_blocks, is_read);
		io->fio = *fio;
	}

	bio_page = fio->encrypted_page ? fio->encrypted_page : fio->page;

	if (bio_add_page(io->bio, bio_page, PAGE_CACHE_SIZE, 0) <
							PAGE_CACHE_SIZE) {
		__submit_merged_bio(io);
		goto alloc_new;
	}

	io->last_block_in_bio = fio->blk_addr;
	f2fs_trace_ios(fio, 0);

	up_write(&io->io_rwsem);
	trace_f2fs_submit_page_mbio(fio->page, fio);
}

/*
 * Lock ordering for the change of data block address:
 * ->data_page
 *  ->node_page
 *    update block addresses in the node page
 */
void set_data_blkaddr(struct dnode_of_data *dn)
{
	struct f2fs_node *rn;
	__le32 *addr_array;
	struct page *node_page = dn->node_page;
	unsigned int ofs_in_node = dn->ofs_in_node;

	f2fs_wait_on_page_writeback(node_page, NODE);

	rn = F2FS_NODE(node_page);

	/* Get physical address of data block */
	addr_array = blkaddr_in_node(rn);
	addr_array[ofs_in_node] = cpu_to_le32(dn->data_blkaddr);
	set_page_dirty(node_page);
	dn->node_changed = true;
}

int reserve_new_block(struct dnode_of_data *dn)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);

	if (unlikely(is_inode_flag_set(F2FS_I(dn->inode), FI_NO_ALLOC)))
		return -EPERM;
	if (unlikely(!inc_valid_block_count(sbi, dn->inode, 1)))
		return -ENOSPC;

	trace_f2fs_reserve_new_block(dn->inode, dn->nid, dn->ofs_in_node);

	dn->data_blkaddr = NEW_ADDR;
	set_data_blkaddr(dn);
	mark_inode_dirty(dn->inode);
	sync_inode_page(dn);
	return 0;
}

int f2fs_reserve_block(struct dnode_of_data *dn, pgoff_t index)
{
	bool need_put = dn->inode_page ? false : true;
	int err;

	err = get_dnode_of_data(dn, index, ALLOC_NODE);
	if (err)
		return err;

	if (dn->data_blkaddr == NULL_ADDR)
		err = reserve_new_block(dn);
	if (err || need_put)
		f2fs_put_dnode(dn);
	return err;
}

int f2fs_get_block(struct dnode_of_data *dn, pgoff_t index)
{
	struct extent_info ei;
	struct inode *inode = dn->inode;

	if (f2fs_lookup_extent_cache(inode, index, &ei)) {
		dn->data_blkaddr = ei.blk + index - ei.fofs;
		return 0;
	}

	return f2fs_reserve_block(dn, index);
}

struct page *get_read_data_page(struct inode *inode, pgoff_t index,
						int rw, bool for_write)
{
	struct address_space *mapping = inode->i_mapping;
	struct dnode_of_data dn;
	struct page *page;
	struct extent_info ei;
	int err;
	struct f2fs_io_info fio = {
		.sbi = F2FS_I_SB(inode),
		.type = DATA,
		.rw = rw,
		.encrypted_page = NULL,
	};

	if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode))
		return read_mapping_page(mapping, index, NULL);

	page = f2fs_grab_cache_page(mapping, index, for_write);
	if (!page)
		return ERR_PTR(-ENOMEM);

	if (f2fs_lookup_extent_cache(inode, index, &ei)) {
		dn.data_blkaddr = ei.blk + index - ei.fofs;
		goto got_it;
	}

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, index, LOOKUP_NODE);
	if (err)
		goto put_err;
	f2fs_put_dnode(&dn);

	if (unlikely(dn.data_blkaddr == NULL_ADDR)) {
		err = -ENOENT;
		goto put_err;
	}
got_it:
	if (PageUptodate(page)) {
		unlock_page(page);
		return page;
	}

	/*
	 * A new dentry page is allocated but not able to be written, since its
	 * new inode page couldn't be allocated due to -ENOSPC.
	 * In such a case, its blkaddr can remain NEW_ADDR.
	 * see, f2fs_add_link -> get_new_data_page -> init_inode_metadata.
	 */
	if (dn.data_blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_CACHE_SIZE);
		SetPageUptodate(page);
		unlock_page(page);
		return page;
	}

	fio.blk_addr = dn.data_blkaddr;
	fio.page = page;
	err = f2fs_submit_page_bio(&fio);
	if (err)
		goto put_err;
	return page;

put_err:
	f2fs_put_page(page, 1);
	return ERR_PTR(err);
}

struct page *find_data_page(struct inode *inode, pgoff_t index)
{
	struct address_space *mapping = inode->i_mapping;
	struct page *page;

	page = find_get_page(mapping, index);
	if (page && PageUptodate(page))
		return page;
	f2fs_put_page(page, 0);

	page = get_read_data_page(inode, index, READ_SYNC, false);
	if (IS_ERR(page))
		return page;

	if (PageUptodate(page))
		return page;

	wait_on_page_locked(page);
	if (unlikely(!PageUptodate(page))) {
		f2fs_put_page(page, 0);
		return ERR_PTR(-EIO);
	}
	return page;
}

/*
 * If it tries to access a hole, return an error, because the callers
 * (functions in dir.c and GC) need to know whether this page exists or not.
 */
struct page *get_lock_data_page(struct inode *inode, pgoff_t index,
							bool for_write)
{
	struct address_space *mapping = inode->i_mapping;
	struct page *page;
repeat:
	page = get_read_data_page(inode, index, READ_SYNC, for_write);
	if (IS_ERR(page))
		return page;

	/* wait for read completion */
	lock_page(page);
	if (unlikely(!PageUptodate(page))) {
		f2fs_put_page(page, 1);
		return ERR_PTR(-EIO);
	}
	if (unlikely(page->mapping != mapping)) {
		f2fs_put_page(page, 1);
		goto repeat;
	}
	return page;
}

/*
 * Caller ensures that this data page is never allocated.
 * A new zero-filled data page is allocated in the page cache.
 *
 * Also, caller should grab and release a rwsem by calling f2fs_lock_op() and
 * f2fs_unlock_op().
 * Note that ipage is set only by make_empty_dir, and if any error occurs,
 * ipage should be released by this function.
 */
struct page *get_new_data_page(struct inode *inode,
		struct page *ipage, pgoff_t index, bool new_i_size)
{
	struct address_space *mapping = inode->i_mapping;
	struct page *page;
	struct dnode_of_data dn;
	int err;
repeat:
	page = f2fs_grab_cache_page(mapping, index, true);
	if (!page) {
		/*
		 * before exiting, we should make sure ipage will be released
		 * if any error occur.
		 */
		f2fs_put_page(ipage, 1);
		return ERR_PTR(-ENOMEM);
	}

	set_new_dnode(&dn, inode, ipage, NULL, 0);
	err = f2fs_reserve_block(&dn, index);
	if (err) {
		f2fs_put_page(page, 1);
		return ERR_PTR(err);
	}
	if (!ipage)
		f2fs_put_dnode(&dn);

	if (PageUptodate(page))
		goto got_it;

	if (dn.data_blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_CACHE_SIZE);
		SetPageUptodate(page);
	} else {
		f2fs_put_page(page, 1);

		page = get_read_data_page(inode, index, READ_SYNC, true);
		if (IS_ERR(page))
			goto repeat;

		/* wait for read completion */
		lock_page(page);
	}
got_it:
	if (new_i_size && i_size_read(inode) <
				((loff_t)(index + 1) << PAGE_CACHE_SHIFT)) {
		i_size_write(inode, ((loff_t)(index + 1) << PAGE_CACHE_SHIFT));
		/* Only the directory inode sets new_i_size */
		set_inode_flag(F2FS_I(inode), FI_UPDATE_DIR);
	}
	return page;
}

static int __allocate_data_block(struct dnode_of_data *dn)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	struct f2fs_inode_info *fi = F2FS_I(dn->inode);
	struct f2fs_summary sum;
	struct node_info ni;
	int seg = CURSEG_WARM_DATA;
	pgoff_t fofs;

	if (unlikely(is_inode_flag_set(F2FS_I(dn->inode), FI_NO_ALLOC)))
		return -EPERM;

	dn->data_blkaddr = datablock_addr(dn->node_page, dn->ofs_in_node);
	if (dn->data_blkaddr == NEW_ADDR)
		goto alloc;

	if (unlikely(!inc_valid_block_count(sbi, dn->inode, 1)))
		return -ENOSPC;

alloc:
	get_node_info(sbi, dn->nid, &ni);
	set_summary(&sum, dn->nid, dn->ofs_in_node, ni.version);

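	/* the first block mapped from the inode page itself is allocated from the direct-IO log */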
	if (dn->ofs_in_node == 0 && dn->inode_page == dn->node_page)
		seg = CURSEG_DIRECT_IO;

	allocate_data_block(sbi, NULL, dn->data_blkaddr, &dn->data_blkaddr,
								&sum, seg);
	set_data_blkaddr(dn);

	/* update i_size */
	fofs = start_bidx_of_node(ofs_of_node(dn->node_page), fi) +
							dn->ofs_in_node;
	if (i_size_read(dn->inode) < ((loff_t)(fofs + 1) << PAGE_CACHE_SHIFT))
		i_size_write(dn->inode,
				((loff_t)(fofs + 1) << PAGE_CACHE_SHIFT));
	return 0;
}

static void __allocate_data_blocks(struct inode *inode, loff_t offset,
							size_t count)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn;
	u64 start = F2FS_BYTES_TO_BLK(offset);
	u64 len = F2FS_BYTES_TO_BLK(count);
	bool allocated;
	u64 end_offset;

	while (len) {
		f2fs_balance_fs(sbi);
		f2fs_lock_op(sbi);

		/* When reading holes, we need its node page */
		set_new_dnode(&dn, inode, NULL, NULL, 0);
		if (get_dnode_of_data(&dn, start, ALLOC_NODE))
			goto out;

		allocated = false;
		end_offset = ADDRS_PER_PAGE(dn.node_page, F2FS_I(inode));

		while (dn.ofs_in_node < end_offset && len) {
			block_t blkaddr;

			if (unlikely(f2fs_cp_error(sbi)))
				goto sync_out;

			blkaddr = datablock_addr(dn.node_page, dn.ofs_in_node);
			if (blkaddr == NULL_ADDR || blkaddr == NEW_ADDR) {
				if (__allocate_data_block(&dn))
					goto sync_out;
				allocated = true;
			}
			len--;
			start++;
			dn.ofs_in_node++;
		}

		if (allocated)
			sync_inode_page(&dn);

		f2fs_put_dnode(&dn);
		f2fs_unlock_op(sbi);
	}
	return;

sync_out:
	if (allocated)
		sync_inode_page(&dn);
	f2fs_put_dnode(&dn);
out:
	f2fs_unlock_op(sbi);
	return;
}

/*
 * f2fs_map_blocks() now supports readahead/bmap/rw direct_IO with the
 * f2fs_map_blocks structure.
 * If original data blocks are allocated, then give them to blockdev.
 * Otherwise,
 *     a. preallocate requested block addresses
 *     b. do not use extent cache for better performance
 *     c. give the block addresses to blockdev
 */
int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map,
						int create, int flag)
{
	unsigned int maxblocks = map->m_len;
	struct dnode_of_data dn;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	int mode = create ? ALLOC_NODE : LOOKUP_NODE_RA;
	pgoff_t pgofs, end_offset;
	int err = 0, ofs = 1;
	struct extent_info ei;
	bool allocated = false;
	block_t blkaddr;

	map->m_len = 0;
	map->m_flags = 0;

	/* it only supports block size == page size */
	pgofs =	(pgoff_t)map->m_lblk;

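	/* an extent cache hit resolves the mapping without reading any node page */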
	if (f2fs_lookup_extent_cache(inode, pgofs, &ei)) {
		map->m_pblk = ei.blk + pgofs - ei.fofs;
		map->m_len = min((pgoff_t)maxblocks, ei.fofs + ei.len - pgofs);
		map->m_flags = F2FS_MAP_MAPPED;
		goto out;
	}

	if (create)
		f2fs_lock_op(sbi);

	/* When reading holes, we need its node page */
	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, pgofs, mode);
	if (err) {
		if (err == -ENOENT)
			err = 0;
		goto unlock_out;
	}

	if (dn.data_blkaddr == NEW_ADDR || dn.data_blkaddr == NULL_ADDR) {
		if (create) {
			if (unlikely(f2fs_cp_error(sbi))) {
				err = -EIO;
				goto put_out;
			}
			err = __allocate_data_block(&dn);
			if (err)
				goto put_out;
			allocated = true;
			map->m_flags = F2FS_MAP_NEW;
		} else {
			if (flag != F2FS_GET_BLOCK_FIEMAP ||
						dn.data_blkaddr != NEW_ADDR) {
				if (flag == F2FS_GET_BLOCK_BMAP)
					err = -ENOENT;
				goto put_out;
			}

			/*
			 * preallocated unwritten block should be mapped
			 * for fiemap.
			 */
			if (dn.data_blkaddr == NEW_ADDR)
				map->m_flags = F2FS_MAP_UNWRITTEN;
		}
	}

	map->m_flags |= F2FS_MAP_MAPPED;
	map->m_pblk = dn.data_blkaddr;
	map->m_len = 1;

	end_offset = ADDRS_PER_PAGE(dn.node_page, F2FS_I(inode));
	dn.ofs_in_node++;
	pgofs++;

get_next:
	if (map->m_len >= maxblocks)
		goto sync_out;

	if (dn.ofs_in_node >= end_offset) {
		if (allocated)
			sync_inode_page(&dn);
		allocated = false;
		f2fs_put_dnode(&dn);

		if (create) {
			f2fs_unlock_op(sbi);
			f2fs_lock_op(sbi);
		}

		set_new_dnode(&dn, inode, NULL, NULL, 0);
		err = get_dnode_of_data(&dn, pgofs, mode);
		if (err) {
			if (err == -ENOENT)
				err = 0;
			goto unlock_out;
		}

		end_offset = ADDRS_PER_PAGE(dn.node_page, F2FS_I(inode));
	}

	blkaddr = datablock_addr(dn.node_page, dn.ofs_in_node);

	if (blkaddr == NEW_ADDR || blkaddr == NULL_ADDR) {
		if (create) {
			if (unlikely(f2fs_cp_error(sbi))) {
				err = -EIO;
				goto sync_out;
			}
			err = __allocate_data_block(&dn);
			if (err)
				goto sync_out;
			allocated = true;
			map->m_flags |= F2FS_MAP_NEW;
			blkaddr = dn.data_blkaddr;
		} else {
			/*
			 * we only merge preallocated unwritten blocks
			 * for fiemap.
			 */
			if (flag != F2FS_GET_BLOCK_FIEMAP ||
					blkaddr != NEW_ADDR)
				goto sync_out;
		}
	}

	/* Give more consecutive addresses for the readahead */
	if ((map->m_pblk != NEW_ADDR &&
			blkaddr == (map->m_pblk + ofs)) ||
			(map->m_pblk == NEW_ADDR &&
			blkaddr == NEW_ADDR)) {
		ofs++;
		dn.ofs_in_node++;
		pgofs++;
		map->m_len++;
		goto get_next;
	}

sync_out:
	if (allocated)
		sync_inode_page(&dn);
put_out:
	f2fs_put_dnode(&dn);
unlock_out:
	if (create)
		f2fs_unlock_op(sbi);
out:
	trace_f2fs_map_blocks(inode, map, err);
	return err;
}

static int __get_data_block(struct inode *inode, sector_t iblock,
			struct buffer_head *bh, int create, int flag)
{
	struct f2fs_map_blocks map;
	int ret;

	map.m_lblk = iblock;
	map.m_len = bh->b_size >> inode->i_blkbits;

	ret = f2fs_map_blocks(inode, &map, create, flag);
	if (!ret) {
		map_bh(bh, inode->i_sb, map.m_pblk);
		bh->b_state = (bh->b_state & ~F2FS_MAP_FLAGS) | map.m_flags;
		bh->b_size = map.m_len << inode->i_blkbits;
	}
	return ret;
}

static int get_data_block(struct inode *inode, sector_t iblock,
			struct buffer_head *bh_result, int create, int flag)
{
	return __get_data_block(inode, iblock, bh_result, create, flag);
}

static int get_data_block_dio(struct inode *inode, sector_t iblock,
			struct buffer_head *bh_result, int create)
{
	return __get_data_block(inode, iblock, bh_result, create,
						F2FS_GET_BLOCK_DIO);
}

static int get_data_block_bmap(struct inode *inode, sector_t iblock,
			struct buffer_head *bh_result, int create)
{
	return __get_data_block(inode, iblock, bh_result, create,
						F2FS_GET_BLOCK_BMAP);
}

static inline sector_t logical_to_blk(struct inode *inode, loff_t offset)
{
	return (offset >> inode->i_blkbits);
}

static inline loff_t blk_to_logical(struct inode *inode, sector_t blk)
{
	return (blk << inode->i_blkbits);
}

int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
		u64 start, u64 len)
{
	struct buffer_head map_bh;
	sector_t start_blk, last_blk;
	loff_t isize = i_size_read(inode);
	u64 logical = 0, phys = 0, size = 0;
	u32 flags = 0;
	bool past_eof = false, whole_file = false;
	int ret = 0;

	ret = fiemap_check_flags(fieinfo, FIEMAP_FLAG_SYNC);
	if (ret)
		return ret;

	if (f2fs_has_inline_data(inode)) {
		ret = f2fs_inline_data_fiemap(inode, fieinfo, start, len);
		if (ret != -EAGAIN)
			return ret;
	}

	mutex_lock(&inode->i_mutex);

	if (len >= isize) {
		whole_file = true;
		len = isize;
	}

	if (logical_to_blk(inode, len) == 0)
		len = blk_to_logical(inode, 1);

	start_blk = logical_to_blk(inode, start);
	last_blk = logical_to_blk(inode, start + len - 1);
next:
	memset(&map_bh, 0, sizeof(struct buffer_head));
	map_bh.b_size = len;

	ret = get_data_block(inode, start_blk, &map_bh, 0,
					F2FS_GET_BLOCK_FIEMAP);
	if (ret)
		goto out;

	/* HOLE */
	if (!buffer_mapped(&map_bh)) {
		start_blk++;

		if (!past_eof && blk_to_logical(inode, start_blk) >= isize)
			past_eof = 1;

		if (past_eof && size) {
			flags |= FIEMAP_EXTENT_LAST;
			ret = fiemap_fill_next_extent(fieinfo, logical,
					phys, size, flags);
		} else if (size) {
			ret = fiemap_fill_next_extent(fieinfo, logical,
					phys, size, flags);
			size = 0;
		}

		/* if we have holes up to/past EOF then we're done */
		if (start_blk > last_blk || past_eof || ret)
			goto out;
	} else {
		if (start_blk > last_blk && !whole_file) {
			ret = fiemap_fill_next_extent(fieinfo, logical,
					phys, size, flags);
			goto out;
		}

		/*
		 * if size != 0 then we know we already have an extent
		 * to add, so add it.
		 */
		if (size) {
			ret = fiemap_fill_next_extent(fieinfo, logical,
					phys, size, flags);
			if (ret)
				goto out;
		}

		logical = blk_to_logical(inode, start_blk);
		phys = blk_to_logical(inode, map_bh.b_blocknr);
		size = map_bh.b_size;
		flags = 0;
		if (buffer_unwritten(&map_bh))
			flags = FIEMAP_EXTENT_UNWRITTEN;

		start_blk += logical_to_blk(inode, size);

		/*
		 * If we are past the EOF, then we need to make sure as
		 * soon as we find a hole that the last extent we found
		 * is marked with FIEMAP_EXTENT_LAST
		 */
		if (!past_eof && logical + size >= isize)
			past_eof = true;
	}
	cond_resched();
	if (fatal_signal_pending(current))
		ret = -EINTR;
	else
		goto next;
out:
	if (ret == 1)
		ret = 0;

	mutex_unlock(&inode->i_mutex);
	return ret;
}

/*
 * This function was originally taken from fs/mpage.c, and customized for f2fs.
 * The major change is that block_size == page_size in f2fs by default.
 */
static int f2fs_mpage_readpages(struct address_space *mapping,
			struct list_head *pages, struct page *page,
			unsigned nr_pages)
{
	struct bio *bio = NULL;
	unsigned page_idx;
	sector_t last_block_in_bio = 0;
	struct inode *inode = mapping->host;
	const unsigned blkbits = inode->i_blkbits;
	const unsigned blocksize = 1 << blkbits;
	sector_t block_in_file;
	sector_t last_block;
	sector_t last_block_in_file;
	sector_t block_nr;
	struct block_device *bdev = inode->i_sb->s_bdev;
	struct f2fs_map_blocks map;

	map.m_pblk = 0;
	map.m_lblk = 0;
	map.m_len = 0;
	map.m_flags = 0;

	for (page_idx = 0; nr_pages; page_idx++, nr_pages--) {

		prefetchw(&page->flags);
		if (pages) {
			page = list_entry(pages->prev, struct page, lru);
			list_del(&page->lru);
			if (add_to_page_cache_lru(page, mapping,
						  page->index, GFP_KERNEL))
				goto next_page;
		}

		block_in_file = (sector_t)page->index;
		last_block = block_in_file + nr_pages;
		last_block_in_file = (i_size_read(inode) + blocksize - 1) >>
								blkbits;
		if (last_block > last_block_in_file)
			last_block = last_block_in_file;

		/*
		 * Map blocks using the previous result first.
		 */
		if ((map.m_flags & F2FS_MAP_MAPPED) &&
				block_in_file > map.m_lblk &&
				block_in_file < (map.m_lblk + map.m_len))
			goto got_it;

		/*
		 * Then do more f2fs_map_blocks() calls until we are
		 * done with this page.
		 */
		map.m_flags = 0;

		if (block_in_file < last_block) {
			map.m_lblk = block_in_file;
			map.m_len = last_block - block_in_file;

			if (f2fs_map_blocks(inode, &map, 0,
							F2FS_GET_BLOCK_READ))
				goto set_error_page;
		}
got_it:
		if ((map.m_flags & F2FS_MAP_MAPPED)) {
			block_nr = map.m_pblk + block_in_file - map.m_lblk;
			SetPageMappedToDisk(page);

			if (!PageUptodate(page) && !cleancache_get_page(page)) {
				SetPageUptodate(page);
				goto confused;
			}
		} else {
			zero_user_segment(page, 0, PAGE_CACHE_SIZE);
			SetPageUptodate(page);
			unlock_page(page);
			goto next_page;
		}

		/*
		 * This page will go to BIO.  Do we need to send this
		 * BIO off first?
		 */
		if (bio && (last_block_in_bio != block_nr - 1)) {
submit_and_realloc:
			submit_bio(READ, bio);
			bio = NULL;
		}
		if (bio == NULL) {
			struct f2fs_crypto_ctx *ctx = NULL;

			if (f2fs_encrypted_inode(inode) &&
					S_ISREG(inode->i_mode)) {

				ctx = f2fs_get_crypto_ctx(inode);
				if (IS_ERR(ctx))
					goto set_error_page;

				/* wait for the page to be moved by cleaning */
				f2fs_wait_on_encrypted_page_writeback(
						F2FS_I_SB(inode), block_nr);
			}

			bio = bio_alloc(GFP_KERNEL,
				min_t(int, nr_pages, BIO_MAX_PAGES));
			if (!bio) {
				if (ctx)
					f2fs_release_crypto_ctx(ctx);
				goto set_error_page;
			}
			bio->bi_bdev = bdev;
			bio->bi_iter.bi_sector = SECTOR_FROM_BLOCK(block_nr);
			bio->bi_end_io = f2fs_read_end_io;
			bio->bi_private = ctx;
		}

		if (bio_add_page(bio, page, blocksize, 0) < blocksize)
			goto submit_and_realloc;

		last_block_in_bio = block_nr;
		goto next_page;
set_error_page:
		SetPageError(page);
		zero_user_segment(page, 0, PAGE_CACHE_SIZE);
		unlock_page(page);
		goto next_page;
confused:
		if (bio) {
			submit_bio(READ, bio);
			bio = NULL;
		}
		unlock_page(page);
next_page:
		if (pages)
			page_cache_release(page);
	}
	BUG_ON(pages && !list_empty(pages));
	if (bio)
		submit_bio(READ, bio);
	return 0;
}

static int f2fs_read_data_page(struct file *file, struct page *page)
{
	struct inode *inode = page->mapping->host;
	int ret = -EAGAIN;

	trace_f2fs_readpage(page, DATA);

	/* If the file has inline data, try to read it directly */
	if (f2fs_has_inline_data(inode))
		ret = f2fs_read_inline_data(inode, page);
	if (ret == -EAGAIN)
		ret = f2fs_mpage_readpages(page->mapping, NULL, page, 1);
	return ret;
}

static int f2fs_read_data_pages(struct file *file,
			struct address_space *mapping,
			struct list_head *pages, unsigned nr_pages)
{
	struct inode *inode = file->f_mapping->host;
	struct page *page = list_entry(pages->prev, struct page, lru);

	trace_f2fs_readpages(inode, page, nr_pages);

	/* If the file has inline data, skip readpages */
	if (f2fs_has_inline_data(inode))
		return 0;

	return f2fs_mpage_readpages(mapping, pages, NULL, nr_pages);
}

int do_write_data_page(struct f2fs_io_info *fio)
{
	struct page *page = fio->page;
	struct inode *inode = page->mapping->host;
	struct dnode_of_data dn;
	int err = 0;

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, page->index, LOOKUP_NODE);
	if (err)
		return err;

	fio->blk_addr = dn.data_blkaddr;

	/* This page is already truncated */
	if (fio->blk_addr == NULL_ADDR) {
		ClearPageUptodate(page);
		goto out_writepage;
	}

	if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode)) {

		/* wait for GCed encrypted page writeback */
		f2fs_wait_on_encrypted_page_writeback(F2FS_I_SB(inode),
							fio->blk_addr);

		fio->encrypted_page = f2fs_encrypt(inode, fio->page);
		if (IS_ERR(fio->encrypted_page)) {
			err = PTR_ERR(fio->encrypted_page);
			goto out_writepage;
		}
	}

	set_page_writeback(page);

	/*
	 * If current allocation needs SSR,
	 * it had better in-place writes for updated data.
	 */
	if (unlikely(fio->blk_addr != NEW_ADDR &&
			!is_cold_data(page) &&
			!IS_ATOMIC_WRITTEN_PAGE(page) &&
			need_inplace_update(inode))) {
		rewrite_data_page(fio);
		set_inode_flag(F2FS_I(inode), FI_UPDATE_WRITE);
		trace_f2fs_do_write_data_page(page, IPU);
	} else {
		write_data_page(&dn, fio);
		set_data_blkaddr(&dn);
		f2fs_update_extent_cache(&dn);
		trace_f2fs_do_write_data_page(page, OPU);
		set_inode_flag(F2FS_I(inode), FI_APPEND_WRITE);
		if (page->index == 0)
			set_inode_flag(F2FS_I(inode), FI_FIRST_BLOCK_WRITTEN);
	}
out_writepage:
	f2fs_put_dnode(&dn);
	return err;
}

static int f2fs_write_data_page(struct page *page,
					struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	loff_t i_size = i_size_read(inode);
	const pgoff_t end_index = ((unsigned long long) i_size)
							>> PAGE_CACHE_SHIFT;
	unsigned offset = 0;
	bool need_balance_fs = false;
	int err = 0;
	struct f2fs_io_info fio = {
		.sbi = sbi,
		.type = DATA,
		.rw = (wbc->sync_mode == WB_SYNC_ALL) ? WRITE_SYNC : WRITE,
		.page = page,
		.encrypted_page = NULL,
	};

	trace_f2fs_writepage(page, DATA);

	if (page->index < end_index)
		goto write;

	/*
	 * If the offset is out-of-range of file size,
	 * this page does not have to be written to disk.
	 */
	offset = i_size & (PAGE_CACHE_SIZE - 1);
	if ((page->index >= end_index + 1) || !offset)
		goto out;

	zero_user_segment(page, offset, PAGE_CACHE_SIZE);
write:
	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
		goto redirty_out;
	if (f2fs_is_drop_cache(inode))
		goto out;
	if (f2fs_is_volatile_file(inode) && !wbc->for_reclaim &&
			available_free_memory(sbi, BASE_CHECK))
		goto redirty_out;

	/* Dentry blocks are controlled by checkpoint */
	if (S_ISDIR(inode->i_mode)) {
		if (unlikely(f2fs_cp_error(sbi)))
			goto redirty_out;
		err = do_write_data_page(&fio);
		goto done;
	}

	/* we should bypass data pages to proceed with the kworker jobs */
	if (unlikely(f2fs_cp_error(sbi))) {
		SetPageError(page);
		goto out;
	}

	if (!wbc->for_reclaim)
		need_balance_fs = true;
	else if (has_not_enough_free_secs(sbi, 0))
		goto redirty_out;

	err = -EAGAIN;
	f2fs_lock_op(sbi);
	if (f2fs_has_inline_data(inode))
		err = f2fs_write_inline_data(inode, page);
	if (err == -EAGAIN)
		err = do_write_data_page(&fio);
	f2fs_unlock_op(sbi);
done:
	if (err && err != -ENOENT)
		goto redirty_out;

	clear_cold_data(page);
out:
	inode_dec_dirty_pages(inode);
	if (err)
		ClearPageUptodate(page);
	unlock_page(page);
	if (need_balance_fs)
		f2fs_balance_fs(sbi);
	if (wbc->for_reclaim) {
		f2fs_submit_merged_bio(sbi, DATA, WRITE);
		remove_dirty_inode(inode);
	}
	return 0;

redirty_out:
	redirty_page_for_writepage(wbc, page);
	return AOP_WRITEPAGE_ACTIVATE;
}

static int __f2fs_writepage(struct page *page, struct writeback_control *wbc,
			void *data)
{
	struct address_space *mapping = data;
	int ret = mapping->a_ops->writepage(page, wbc);
	mapping_set_error(mapping, ret);
	return ret;
}

/*
 * This function was copied from write_cache_pages in mm/page-writeback.c.
 * The major change is that it writes cold data pages in a separate pass
 * from warm/hot data pages.
 */
static int f2fs_write_cache_pages(struct address_space *mapping,
			struct writeback_control *wbc, writepage_t writepage,
			void *data)
{
	int ret = 0;
	int done = 0;
	struct pagevec pvec;
	int nr_pages;
	pgoff_t uninitialized_var(writeback_index);
	pgoff_t index;
	pgoff_t end;		/* Inclusive */
	pgoff_t done_index;
	int cycled;
	int range_whole = 0;
	int tag;
	int step = 0;

	pagevec_init(&pvec, 0);
next:
	if (wbc->range_cyclic) {
		writeback_index = mapping->writeback_index; /* prev offset */
		index = writeback_index;
		if (index == 0)
			cycled = 1;
		else
			cycled = 0;
		end = -1;
	} else {
		index = wbc->range_start >> PAGE_CACHE_SHIFT;
		end = wbc->range_end >> PAGE_CACHE_SHIFT;
		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
			range_whole = 1;
		cycled = 1; /* ignore range_cyclic tests */
	}
	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
		tag = PAGECACHE_TAG_TOWRITE;
	else
		tag = PAGECACHE_TAG_DIRTY;
retry:
	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
		tag_pages_for_writeback(mapping, index, end);
	done_index = index;
	while (!done && (index <= end)) {
		int i;

		nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag,
			      min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1);
		if (nr_pages == 0)
			break;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];

			if (page->index > end) {
				done = 1;
				break;
			}

			done_index = page->index;

			lock_page(page);

			if (unlikely(page->mapping != mapping)) {
continue_unlock:
				unlock_page(page);
				continue;
			}

			if (!PageDirty(page)) {
				/* someone wrote it for us */
				goto continue_unlock;
			}

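			/*
			 * cold and non-cold pages are written in separate passes:
			 * skip pages whose is_cold_data() matches the current step
			 */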
			if (step == is_cold_data(page))
				goto continue_unlock;

			if (PageWriteback(page)) {
				if (wbc->sync_mode != WB_SYNC_NONE)
					f2fs_wait_on_page_writeback(page, DATA);
				else
					goto continue_unlock;
			}

			BUG_ON(PageWriteback(page));
			if (!clear_page_dirty_for_io(page))
				goto continue_unlock;

			ret = (*writepage)(page, wbc, data);
			if (unlikely(ret)) {
				if (ret == AOP_WRITEPAGE_ACTIVATE) {
					unlock_page(page);
					ret = 0;
				} else {
					done_index = page->index + 1;
					done = 1;
					break;
				}
			}

			if (--wbc->nr_to_write <= 0 &&
			    wbc->sync_mode == WB_SYNC_NONE) {
				done = 1;
				break;
			}
		}
		pagevec_release(&pvec);
		cond_resched();
	}

	if (step < 1) {
		step++;
		goto next;
	}

	if (!cycled && !done) {
		cycled = 1;
		index = 0;
		end = writeback_index - 1;
		goto retry;
	}
	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
		mapping->writeback_index = done_index;

	return ret;
}

static int f2fs_write_data_pages(struct address_space *mapping,
			    struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	bool locked = false;
	int ret;
	long diff;

	trace_f2fs_writepages(mapping->host, wbc, DATA);

	/* deal with chardevs and other special file */
	if (!mapping->a_ops->writepage)
		return 0;

	/* skip writing if there is no dirty page in this inode */
	if (!get_dirty_pages(inode) && wbc->sync_mode == WB_SYNC_NONE)
		return 0;

	if (S_ISDIR(inode->i_mode) && wbc->sync_mode == WB_SYNC_NONE &&
			get_dirty_pages(inode) < nr_pages_to_skip(sbi, DATA) &&
			available_free_memory(sbi, DIRTY_DENTS))
		goto skip_write;

	/* skip writing during file defragment */
	if (is_inode_flag_set(F2FS_I(inode), FI_DO_DEFRAG))
		goto skip_write;

	/* during POR, we don't need to trigger writepage at all. */
	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
		goto skip_write;

	diff = nr_pages_to_write(sbi, DATA, wbc);

	if (!S_ISDIR(inode->i_mode)) {
		mutex_lock(&sbi->writepages);
		locked = true;
	}
	ret = f2fs_write_cache_pages(mapping, wbc, __f2fs_writepage, mapping);
	f2fs_submit_merged_bio(sbi, DATA, WRITE);
	if (locked)
		mutex_unlock(&sbi->writepages);

	remove_dirty_inode(inode);

	wbc->nr_to_write = max((long)0, wbc->nr_to_write - diff);
	return ret;

skip_write:
	wbc->pages_skipped += get_dirty_pages(inode);
	return 0;
}

static void f2fs_write_failed(struct address_space *mapping, loff_t to)
{
	struct inode *inode = mapping->host;

	if (to > inode->i_size) {
		truncate_pagecache(inode, inode->i_size);
		truncate_blocks(inode, inode->i_size, true);
	}
}

static int f2fs_write_begin(struct file *file, struct address_space *mapping,
		loff_t pos, unsigned len, unsigned flags,
		struct page **pagep, void **fsdata)
{
	struct inode *inode = mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct page *page = NULL;
	struct page *ipage;
	pgoff_t index = ((unsigned long long) pos) >> PAGE_CACHE_SHIFT;
	struct dnode_of_data dn;
	int err = 0;

	trace_f2fs_write_begin(inode, pos, len, flags);

	f2fs_balance_fs(sbi);

	/*
	 * We should check this at this moment to avoid deadlock on inode page
	 * and #0 page. The locking rule for inline_data conversion should be:
	 * lock_page(page #0) -> lock_page(inode_page)
	 */
	if (index != 0) {
		err = f2fs_convert_inline_inode(inode);
		if (err)
			goto fail;
	}
repeat:
	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page) {
		err = -ENOMEM;
		goto fail;
	}

	*pagep = page;

	f2fs_lock_op(sbi);

	/* check inline_data */
	ipage = get_node_page(sbi, inode->i_ino);
	if (IS_ERR(ipage)) {
		err = PTR_ERR(ipage);
		goto unlock_fail;
	}

	set_new_dnode(&dn, inode, ipage, ipage, 0);

	if (f2fs_has_inline_data(inode)) {
		if (pos + len <= MAX_INLINE_DATA) {
			read_inline_data(page, ipage);
			set_inode_flag(F2FS_I(inode), FI_DATA_EXIST);
			sync_inode_page(&dn);
			goto put_next;
		}
		err = f2fs_convert_inline_page(&dn, page);
		if (err)
			goto put_fail;
	}

	err = f2fs_get_block(&dn, index);
	if (err)
		goto put_fail;
put_next:
	f2fs_put_dnode(&dn);
	f2fs_unlock_op(sbi);

	f2fs_wait_on_page_writeback(page, DATA);

	/* wait for GCed encrypted page writeback */
	if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode))
		f2fs_wait_on_encrypted_page_writeback(sbi, dn.data_blkaddr);

	if (len == PAGE_CACHE_SIZE)
		goto out_update;
	if (PageUptodate(page))
		goto out_clear;

	if ((pos & PAGE_CACHE_MASK) >= i_size_read(inode)) {
		unsigned start = pos & (PAGE_CACHE_SIZE - 1);
		unsigned end = start + len;

		/* Reading beyond i_size is simple: memset to zero */
		zero_user_segments(page, 0, start, end, PAGE_CACHE_SIZE);
		goto out_update;
	}

	if (dn.data_blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_CACHE_SIZE);
	} else {
		struct f2fs_io_info fio = {
			.sbi = sbi,
			.type = DATA,
			.rw = READ_SYNC,
			.blk_addr = dn.data_blkaddr,
			.page = page,
			.encrypted_page = NULL,
		};
		err = f2fs_submit_page_bio(&fio);
		if (err)
			goto fail;

		lock_page(page);
		if (unlikely(!PageUptodate(page))) {
			err = -EIO;
			goto fail;
		}
		if (unlikely(page->mapping != mapping)) {
			f2fs_put_page(page, 1);
			goto repeat;
		}

		/* avoid symlink page */
		if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode)) {
			err = f2fs_decrypt_one(inode, page);
			if (err)
				goto fail;
		}
	}
out_update:
	SetPageUptodate(page);
out_clear:
	clear_cold_data(page);
	return 0;

put_fail:
	f2fs_put_dnode(&dn);
unlock_fail:
	f2fs_unlock_op(sbi);
fail:
	f2fs_put_page(page, 1);
	f2fs_write_failed(mapping, pos + len);
	return err;
}

static int f2fs_write_end(struct file *file,
			struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	struct inode *inode = page->mapping->host;

	trace_f2fs_write_end(inode, pos, len, copied);

	set_page_dirty(page);

	if (pos + copied > i_size_read(inode)) {
		i_size_write(inode, pos + copied);
		mark_inode_dirty(inode);
		update_inode_page(inode);
	}

	f2fs_put_page(page, 1);
	return copied;
}

static int check_direct_IO(struct inode *inode, struct iov_iter *iter,
			   loff_t offset)
{
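	/* direct IO requires the file offset and every iovec segment to be block aligned */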
	unsigned blocksize_mask = inode->i_sb->s_blocksize - 1;

	if (offset & blocksize_mask)
		return -EINVAL;

	if (iov_iter_alignment(iter) & blocksize_mask)
		return -EINVAL;

	return 0;
}

static ssize_t f2fs_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
			      loff_t offset)
{
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	size_t count = iov_iter_count(iter);
	int err;

	/* we don't need to use inline_data strictly */
	err = f2fs_convert_inline_inode(inode);
	if (err)
		return err;

	if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode))
		return 0;

	err = check_direct_IO(inode, iter, offset);
	if (err)
		return err;

	trace_f2fs_direct_IO_enter(inode, offset, count, iov_iter_rw(iter));

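	/* preallocate blocks for the whole range before issuing a direct write */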
	if (iov_iter_rw(iter) == WRITE) {
		__allocate_data_blocks(inode, offset, count);
		if (unlikely(f2fs_cp_error(F2FS_I_SB(inode)))) {
			err = -EIO;
			goto out;
		}
	}

	err = blockdev_direct_IO(iocb, inode, iter, offset, get_data_block_dio);
out:
	if (err < 0 && iov_iter_rw(iter) == WRITE)
		f2fs_write_failed(mapping, offset + count);

	trace_f2fs_direct_IO_exit(inode, offset, count, iov_iter_rw(iter), err);

	return err;
}

void f2fs_invalidate_page(struct page *page, unsigned int offset,
							unsigned int length)
{
	struct inode *inode = page->mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

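	/* partial-page invalidation is ignored except for meta/node pages */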
	if (inode->i_ino >= F2FS_ROOT_INO(sbi) &&
		(offset % PAGE_CACHE_SIZE || length != PAGE_CACHE_SIZE))
		return;

	if (PageDirty(page)) {
		if (inode->i_ino == F2FS_META_INO(sbi))
			dec_page_count(sbi, F2FS_DIRTY_META);
		else if (inode->i_ino == F2FS_NODE_INO(sbi))
			dec_page_count(sbi, F2FS_DIRTY_NODES);
		else
			inode_dec_dirty_pages(inode);
	}

	/* This is atomic written page, keep Private */
	if (IS_ATOMIC_WRITTEN_PAGE(page))
		return;

	ClearPagePrivate(page);
}

int f2fs_release_page(struct page *page, gfp_t wait)
{
	/* If this is dirty page, keep PagePrivate */
	if (PageDirty(page))
		return 0;

	/* This is atomic written page, keep Private */
	if (IS_ATOMIC_WRITTEN_PAGE(page))
		return 0;

	ClearPagePrivate(page);
	return 1;
}

static int f2fs_set_data_page_dirty(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct inode *inode = mapping->host;

	trace_f2fs_set_page_dirty(page, DATA);

	SetPageUptodate(page);

	if (f2fs_is_atomic_file(inode)) {
		if (!IS_ATOMIC_WRITTEN_PAGE(page)) {
			register_inmem_page(inode, page);
			return 1;
		}
		/*
		 * Previously, this page has been registered, we just
		 * return here.
		 */
		return 0;
1672 1673
	}

1674 1675
	if (!PageDirty(page)) {
		__set_page_dirty_nobuffers(page);
1676
		update_dirty_page(inode, page);
1677 1678 1679 1680 1681
		return 1;
	}
	return 0;
}

static sector_t f2fs_bmap(struct address_space *mapping, sector_t block)
{
	struct inode *inode = mapping->host;

	if (f2fs_has_inline_data(inode))
		return 0;

	/* make sure allocating whole blocks */
	if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
		filemap_write_and_wait(mapping);

	return generic_block_bmap(mapping, block, get_data_block_bmap);
}

const struct address_space_operations f2fs_dblock_aops = {
	.readpage	= f2fs_read_data_page,
	.readpages	= f2fs_read_data_pages,
	.writepage	= f2fs_write_data_page,
	.writepages	= f2fs_write_data_pages,
	.write_begin	= f2fs_write_begin,
	.write_end	= f2fs_write_end,
	.set_page_dirty	= f2fs_set_data_page_dirty,
	.invalidatepage	= f2fs_invalidate_page,
	.releasepage	= f2fs_release_page,
	.direct_IO	= f2fs_direct_IO,
	.bmap		= f2fs_bmap,
};