/*
 * fs/f2fs/data.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/buffer_head.h>
#include <linux/mpage.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/pagevec.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/prefetch.h>
#include <linux/uio.h>
#include <linux/cleancache.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "trace.h"
#include <trace/events/f2fs.h>

static void f2fs_read_end_io(struct bio *bio)
{
	struct bio_vec *bvec;
	int i;

	if (f2fs_bio_encrypted(bio)) {
		if (bio->bi_error) {
			f2fs_release_crypto_ctx(bio->bi_private);
		} else {
			f2fs_end_io_crypto_work(bio->bi_private, bio);
			return;
		}
	}

	bio_for_each_segment_all(bvec, bio, i) {
		struct page *page = bvec->bv_page;

		if (!bio->bi_error) {
			SetPageUptodate(page);
		} else {
			ClearPageUptodate(page);
			SetPageError(page);
		}
		unlock_page(page);
	}
	bio_put(bio);
}

static void f2fs_write_end_io(struct bio *bio)
{
	struct f2fs_sb_info *sbi = bio->bi_private;
	struct bio_vec *bvec;
	int i;

	bio_for_each_segment_all(bvec, bio, i) {
		struct page *page = bvec->bv_page;

		f2fs_restore_and_release_control_page(&page);

		if (unlikely(bio->bi_error)) {
			set_bit(AS_EIO, &page->mapping->flags);
			f2fs_stop_checkpoint(sbi);
		}
		end_page_writeback(page);
		dec_page_count(sbi, F2FS_WRITEBACK);
	}

	if (!get_pages(sbi, F2FS_WRITEBACK) && wq_has_sleeper(&sbi->cp_wait))
		wake_up(&sbi->cp_wait);

	bio_put(bio);
}

/*
 * Low-level block read/write IO operations.
 */
static struct bio *__bio_alloc(struct f2fs_sb_info *sbi, block_t blk_addr,
				int npages, bool is_read)
{
	struct bio *bio;

	bio = f2fs_bio_alloc(npages);

	bio->bi_bdev = sbi->sb->s_bdev;
	bio->bi_iter.bi_sector = SECTOR_FROM_BLOCK(blk_addr);
	bio->bi_end_io = is_read ? f2fs_read_end_io : f2fs_write_end_io;
	bio->bi_private = is_read ? NULL : sbi;

	return bio;
}

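/*
 * Flush the bio currently being merged for this log, if there is one, and
 * reset it so the next request starts a new bio.
 */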
static void __submit_merged_bio(struct f2fs_bio_info *io)
{
	struct f2fs_io_info *fio = &io->fio;

	if (!io->bio)
		return;

	if (is_read_io(fio->rw))
		trace_f2fs_submit_read_bio(io->sbi->sb, fio, io->bio);
	else
		trace_f2fs_submit_write_bio(io->sbi->sb, fio, io->bio);

	submit_bio(fio->rw, io->bio);
	io->bio = NULL;
}

static bool __has_merged_page(struct f2fs_bio_info *io, struct inode *inode,
						struct page *page, nid_t ino)
{
	struct bio_vec *bvec;
	struct page *target;
	int i;

	if (!io->bio)
		return false;

	if (!inode && !page && !ino)
		return true;

	bio_for_each_segment_all(bvec, io->bio, i) {

		if (bvec->bv_page->mapping) {
			target = bvec->bv_page;
		} else {
			struct f2fs_crypto_ctx *ctx;

			/* encrypted page */
			ctx = (struct f2fs_crypto_ctx *)page_private(
								bvec->bv_page);
			target = ctx->w.control_page;
		}

		if (inode && inode == target->mapping->host)
			return true;
		if (page && page == target)
			return true;
		if (ino && ino == ino_of_node(target))
			return true;
	}

	return false;
}

static bool has_merged_page(struct f2fs_sb_info *sbi, struct inode *inode,
						struct page *page, nid_t ino,
						enum page_type type)
{
	enum page_type btype = PAGE_TYPE_OF_BIO(type);
	struct f2fs_bio_info *io = &sbi->write_io[btype];
	bool ret;

	down_read(&io->io_rwsem);
	ret = __has_merged_page(io, inode, page, ino);
	up_read(&io->io_rwsem);
	return ret;
}

static void __f2fs_submit_merged_bio(struct f2fs_sb_info *sbi,
				struct inode *inode, struct page *page,
				nid_t ino, enum page_type type, int rw)
{
	enum page_type btype = PAGE_TYPE_OF_BIO(type);
	struct f2fs_bio_info *io;

	io = is_read_io(rw) ? &sbi->read_io : &sbi->write_io[btype];

	down_write(&io->io_rwsem);

	if (!__has_merged_page(io, inode, page, ino))
		goto out;

	/* change META to META_FLUSH in the checkpoint procedure */
	if (type >= META_FLUSH) {
		io->fio.type = META_FLUSH;
		if (test_opt(sbi, NOBARRIER))
			io->fio.rw = WRITE_FLUSH | REQ_META | REQ_PRIO;
		else
			io->fio.rw = WRITE_FLUSH_FUA | REQ_META | REQ_PRIO;
	}
	__submit_merged_bio(io);
out:
	up_write(&io->io_rwsem);
}

void f2fs_submit_merged_bio(struct f2fs_sb_info *sbi, enum page_type type,
									int rw)
{
	__f2fs_submit_merged_bio(sbi, NULL, NULL, 0, type, rw);
}

void f2fs_submit_merged_bio_cond(struct f2fs_sb_info *sbi,
				struct inode *inode, struct page *page,
				nid_t ino, enum page_type type, int rw)
{
	if (has_merged_page(sbi, inode, page, ino, type))
		__f2fs_submit_merged_bio(sbi, inode, page, ino, type, rw);
}

/*
 * Fill the locked page with data located in the block address.
 * Return unlocked page.
 */
int f2fs_submit_page_bio(struct f2fs_io_info *fio)
{
	struct bio *bio;
	struct page *page = fio->encrypted_page ? fio->encrypted_page : fio->page;

	trace_f2fs_submit_page_bio(page, fio);
	f2fs_trace_ios(fio, 0);

	/* Allocate a new bio */
	bio = __bio_alloc(fio->sbi, fio->blk_addr, 1, is_read_io(fio->rw));

	if (bio_add_page(bio, page, PAGE_CACHE_SIZE, 0) < PAGE_CACHE_SIZE) {
		bio_put(bio);
		return -EFAULT;
	}

	submit_bio(fio->rw, bio);
	return 0;
}

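/*
 * Merged IO path: append the page to the in-flight bio for this log type and
 * submit it lazily, so physically contiguous blocks with the same request
 * flags end up in one bio. The pending bio is flushed first whenever the new
 * block is not contiguous with the previous one or the IO type differs.
 */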
void f2fs_submit_page_mbio(struct f2fs_io_info *fio)
{
	struct f2fs_sb_info *sbi = fio->sbi;
	enum page_type btype = PAGE_TYPE_OF_BIO(fio->type);
	struct f2fs_bio_info *io;
	bool is_read = is_read_io(fio->rw);
	struct page *bio_page;

	io = is_read ? &sbi->read_io : &sbi->write_io[btype];

	verify_block_addr(sbi, fio->blk_addr);

	down_write(&io->io_rwsem);

	if (!is_read)
		inc_page_count(sbi, F2FS_WRITEBACK);

	if (io->bio && (io->last_block_in_bio != fio->blk_addr - 1 ||
						io->fio.rw != fio->rw))
		__submit_merged_bio(io);
alloc_new:
	if (io->bio == NULL) {
		int bio_blocks = MAX_BIO_BLOCKS(sbi);

		io->bio = __bio_alloc(sbi, fio->blk_addr, bio_blocks, is_read);
		io->fio = *fio;
	}

	bio_page = fio->encrypted_page ? fio->encrypted_page : fio->page;

	if (bio_add_page(io->bio, bio_page, PAGE_CACHE_SIZE, 0) <
							PAGE_CACHE_SIZE) {
		__submit_merged_bio(io);
		goto alloc_new;
	}

	io->last_block_in_bio = fio->blk_addr;
	f2fs_trace_ios(fio, 0);

	up_write(&io->io_rwsem);
	trace_f2fs_submit_page_mbio(fio->page, fio);
}

/*
 * Lock ordering for the change of data block address:
 * ->data_page
 *  ->node_page
 *    update block addresses in the node page
 */
void set_data_blkaddr(struct dnode_of_data *dn)
{
	struct f2fs_node *rn;
	__le32 *addr_array;
	struct page *node_page = dn->node_page;
	unsigned int ofs_in_node = dn->ofs_in_node;

	f2fs_wait_on_page_writeback(node_page, NODE, true);

	rn = F2FS_NODE(node_page);

	/* Get physical address of data block */
	addr_array = blkaddr_in_node(rn);
	addr_array[ofs_in_node] = cpu_to_le32(dn->data_blkaddr);
	if (set_page_dirty(node_page))
		dn->node_changed = true;
}

int reserve_new_block(struct dnode_of_data *dn)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);

	if (unlikely(is_inode_flag_set(F2FS_I(dn->inode), FI_NO_ALLOC)))
		return -EPERM;
	if (unlikely(!inc_valid_block_count(sbi, dn->inode, 1)))
		return -ENOSPC;

	trace_f2fs_reserve_new_block(dn->inode, dn->nid, dn->ofs_in_node);

	dn->data_blkaddr = NEW_ADDR;
	set_data_blkaddr(dn);
	mark_inode_dirty(dn->inode);
	sync_inode_page(dn);
	return 0;
}

int f2fs_reserve_block(struct dnode_of_data *dn, pgoff_t index)
{
	bool need_put = dn->inode_page ? false : true;
	int err;

	err = get_dnode_of_data(dn, index, ALLOC_NODE);
	if (err)
		return err;

	if (dn->data_blkaddr == NULL_ADDR)
		err = reserve_new_block(dn);
	if (err || need_put)
		f2fs_put_dnode(dn);
	return err;
}

int f2fs_get_block(struct dnode_of_data *dn, pgoff_t index)
{
	struct extent_info ei;
	struct inode *inode = dn->inode;

	if (f2fs_lookup_extent_cache(inode, index, &ei)) {
		dn->data_blkaddr = ei.blk + index - ei.fofs;
		return 0;
	}

	return f2fs_reserve_block(dn, index);
}

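/*
 * Read one data page: resolve its block address (extent cache first, then the
 * dnode), return the page already uptodate for cached or NEW_ADDR cases, or
 * submit a read bio and return the page still locked; the read completion
 * unlocks it.
 */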
struct page *get_read_data_page(struct inode *inode, pgoff_t index,
						int rw, bool for_write)
{
	struct address_space *mapping = inode->i_mapping;
	struct dnode_of_data dn;
	struct page *page;
	struct extent_info ei;
	int err;
	struct f2fs_io_info fio = {
		.sbi = F2FS_I_SB(inode),
		.type = DATA,
		.rw = rw,
		.encrypted_page = NULL,
	};

	if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode))
		return read_mapping_page(mapping, index, NULL);

	page = f2fs_grab_cache_page(mapping, index, for_write);
	if (!page)
		return ERR_PTR(-ENOMEM);

	if (f2fs_lookup_extent_cache(inode, index, &ei)) {
		dn.data_blkaddr = ei.blk + index - ei.fofs;
		goto got_it;
	}

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, index, LOOKUP_NODE);
	if (err)
		goto put_err;
	f2fs_put_dnode(&dn);

	if (unlikely(dn.data_blkaddr == NULL_ADDR)) {
		err = -ENOENT;
		goto put_err;
	}
got_it:
	if (PageUptodate(page)) {
		unlock_page(page);
		return page;
	}

	/*
	 * A new dentry page is allocated but not able to be written, since its
	 * new inode page couldn't be allocated due to -ENOSPC.
	 * In such a case, its blkaddr can remain as NEW_ADDR.
	 * See f2fs_add_link -> get_new_data_page -> init_inode_metadata.
	 */
	if (dn.data_blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_CACHE_SIZE);
		SetPageUptodate(page);
		unlock_page(page);
		return page;
	}

	fio.blk_addr = dn.data_blkaddr;
	fio.page = page;
	err = f2fs_submit_page_bio(&fio);
	if (err)
		goto put_err;
	return page;

put_err:
	f2fs_put_page(page, 1);
	return ERR_PTR(err);
}

struct page *find_data_page(struct inode *inode, pgoff_t index)
{
	struct address_space *mapping = inode->i_mapping;
	struct page *page;

	page = find_get_page(mapping, index);
	if (page && PageUptodate(page))
		return page;
	f2fs_put_page(page, 0);

	page = get_read_data_page(inode, index, READ_SYNC, false);
	if (IS_ERR(page))
		return page;

	if (PageUptodate(page))
		return page;

	wait_on_page_locked(page);
	if (unlikely(!PageUptodate(page))) {
		f2fs_put_page(page, 0);
		return ERR_PTR(-EIO);
	}
	return page;
}

/*
 * If it tries to access a hole, return an error, because the callers
 * (functions in dir.c and GC) need to know whether this page exists.
 */
struct page *get_lock_data_page(struct inode *inode, pgoff_t index,
							bool for_write)
{
	struct address_space *mapping = inode->i_mapping;
	struct page *page;
repeat:
	page = get_read_data_page(inode, index, READ_SYNC, for_write);
	if (IS_ERR(page))
		return page;

	/* wait for read completion */
	lock_page(page);
	if (unlikely(!PageUptodate(page))) {
		f2fs_put_page(page, 1);
		return ERR_PTR(-EIO);
	}
	if (unlikely(page->mapping != mapping)) {
		f2fs_put_page(page, 1);
		goto repeat;
	}
	return page;
}

/*
 * Caller ensures that this data page is never allocated.
 * A new zero-filled data page is allocated in the page cache.
 *
 * Also, caller should grab and release a rwsem by calling f2fs_lock_op() and
 * f2fs_unlock_op().
 * Note that, ipage is set only by make_empty_dir, and if any error occurs,
 * ipage should be released by this function.
 */
struct page *get_new_data_page(struct inode *inode,
		struct page *ipage, pgoff_t index, bool new_i_size)
{
	struct address_space *mapping = inode->i_mapping;
	struct page *page;
	struct dnode_of_data dn;
	int err;

	page = f2fs_grab_cache_page(mapping, index, true);
	if (!page) {
		/*
		 * before exiting, we should make sure ipage will be released
		 * if any error occurs.
		 */
		f2fs_put_page(ipage, 1);
		return ERR_PTR(-ENOMEM);
	}

	set_new_dnode(&dn, inode, ipage, NULL, 0);
	err = f2fs_reserve_block(&dn, index);
	if (err) {
		f2fs_put_page(page, 1);
		return ERR_PTR(err);
	}
	if (!ipage)
		f2fs_put_dnode(&dn);

	if (PageUptodate(page))
		goto got_it;

	if (dn.data_blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_CACHE_SIZE);
		SetPageUptodate(page);
	} else {
		f2fs_put_page(page, 1);

		/* if ipage exists, blkaddr should be NEW_ADDR */
		f2fs_bug_on(F2FS_I_SB(inode), ipage);
		page = get_lock_data_page(inode, index, true);
		if (IS_ERR(page))
			return page;
	}
got_it:
	if (new_i_size && i_size_read(inode) <
				((loff_t)(index + 1) << PAGE_CACHE_SHIFT)) {
		i_size_write(inode, ((loff_t)(index + 1) << PAGE_CACHE_SHIFT));
		/* Only the directory inode sets new_i_size */
		set_inode_flag(F2FS_I(inode), FI_UPDATE_DIR);
	}
	return page;
}

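/*
 * Allocate a new on-disk block for the offset dn points at: pick a log
 * segment, publish the new block address into the node page, and push i_size
 * forward when the block lands beyond the current end of file.
 */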
static int __allocate_data_block(struct dnode_of_data *dn)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	struct f2fs_summary sum;
	struct node_info ni;
	int seg = CURSEG_WARM_DATA;
	pgoff_t fofs;

	if (unlikely(is_inode_flag_set(F2FS_I(dn->inode), FI_NO_ALLOC)))
		return -EPERM;

	dn->data_blkaddr = datablock_addr(dn->node_page, dn->ofs_in_node);
	if (dn->data_blkaddr == NEW_ADDR)
		goto alloc;

	if (unlikely(!inc_valid_block_count(sbi, dn->inode, 1)))
		return -ENOSPC;

alloc:
	get_node_info(sbi, dn->nid, &ni);
	set_summary(&sum, dn->nid, dn->ofs_in_node, ni.version);

	if (dn->ofs_in_node == 0 && dn->inode_page == dn->node_page)
		seg = CURSEG_DIRECT_IO;

	allocate_data_block(sbi, NULL, dn->data_blkaddr, &dn->data_blkaddr,
								&sum, seg);
	set_data_blkaddr(dn);

	/* update i_size */
	fofs = start_bidx_of_node(ofs_of_node(dn->node_page), dn->inode) +
							dn->ofs_in_node;
	if (i_size_read(dn->inode) < ((loff_t)(fofs + 1) << PAGE_CACHE_SHIFT))
		i_size_write(dn->inode,
				((loff_t)(fofs + 1) << PAGE_CACHE_SHIFT));
	return 0;
}

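/*
 * Called ahead of a write: convert inline data when it can no longer hold the
 * write and preallocate the blocks covering the write range, so the write
 * path itself does not have to allocate.
 */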
ssize_t f2fs_preallocate_blocks(struct kiocb *iocb, struct iov_iter *from)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	struct f2fs_map_blocks map;
	ssize_t ret = 0;

	map.m_lblk = F2FS_BYTES_TO_BLK(iocb->ki_pos);
	map.m_len = F2FS_BLK_ALIGN(iov_iter_count(from));
	map.m_next_pgofs = NULL;

	if (f2fs_encrypted_inode(inode))
		return 0;

	if (iocb->ki_flags & IOCB_DIRECT) {
		ret = f2fs_convert_inline_inode(inode);
		if (ret)
			return ret;
		return f2fs_map_blocks(inode, &map, 1, F2FS_GET_BLOCK_PRE_DIO);
	}
	if (iocb->ki_pos + iov_iter_count(from) > MAX_INLINE_DATA) {
		ret = f2fs_convert_inline_inode(inode);
		if (ret)
			return ret;
	}
	if (!f2fs_has_inline_data(inode))
		return f2fs_map_blocks(inode, &map, 1, F2FS_GET_BLOCK_PRE_AIO);
	return ret;
}

/*
 * f2fs_map_blocks() now supports readahead/bmap/rw direct_IO with
 * f2fs_map_blocks structure.
 * If original data blocks are allocated, then give them to blockdev.
 * Otherwise,
 *     a. preallocate requested block addresses
 *     b. do not use extent cache for better performance
 *     c. give the block addresses to blockdev
 */
int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map,
						int create, int flag)
{
	unsigned int maxblocks = map->m_len;
	struct dnode_of_data dn;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	int mode = create ? ALLOC_NODE : LOOKUP_NODE_RA;
	pgoff_t pgofs, end_offset;
	int err = 0, ofs = 1;
	struct extent_info ei;
	bool allocated = false;
	block_t blkaddr;

	map->m_len = 0;
	map->m_flags = 0;

	/* it only supports block size == page size */
	pgofs =	(pgoff_t)map->m_lblk;

	if (!create && f2fs_lookup_extent_cache(inode, pgofs, &ei)) {
		map->m_pblk = ei.blk + pgofs - ei.fofs;
		map->m_len = min((pgoff_t)maxblocks, ei.fofs + ei.len - pgofs);
		map->m_flags = F2FS_MAP_MAPPED;
		goto out;
	}

next_dnode:
	if (create)
		f2fs_lock_op(sbi);

	/* When reading holes, we need its node page */
	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, pgofs, mode);
	if (err) {
		if (err == -ENOENT) {
			err = 0;
			if (map->m_next_pgofs)
				*map->m_next_pgofs =
					get_next_page_offset(&dn, pgofs);
		}
		goto unlock_out;
	}

	end_offset = ADDRS_PER_PAGE(dn.node_page, inode);

next_block:
	blkaddr = datablock_addr(dn.node_page, dn.ofs_in_node);

	if (blkaddr == NEW_ADDR || blkaddr == NULL_ADDR) {
		if (create) {
			if (unlikely(f2fs_cp_error(sbi))) {
				err = -EIO;
				goto sync_out;
			}
			if (flag == F2FS_GET_BLOCK_PRE_AIO) {
				if (blkaddr == NULL_ADDR)
					err = reserve_new_block(&dn);
			} else {
				err = __allocate_data_block(&dn);
			}
			if (err)
				goto sync_out;
			allocated = true;
			map->m_flags = F2FS_MAP_NEW;
			blkaddr = dn.data_blkaddr;
		} else {
			if (flag == F2FS_GET_BLOCK_FIEMAP &&
						blkaddr == NULL_ADDR) {
				if (map->m_next_pgofs)
					*map->m_next_pgofs = pgofs + 1;
			}
			if (flag != F2FS_GET_BLOCK_FIEMAP ||
						blkaddr != NEW_ADDR) {
				if (flag == F2FS_GET_BLOCK_BMAP)
					err = -ENOENT;
				goto sync_out;
			}
		}
	}

	if (map->m_len == 0) {
		/* preallocated unwritten block should be mapped for fiemap. */
		if (blkaddr == NEW_ADDR)
			map->m_flags |= F2FS_MAP_UNWRITTEN;
		map->m_flags |= F2FS_MAP_MAPPED;

		map->m_pblk = blkaddr;
		map->m_len = 1;
	} else if ((map->m_pblk != NEW_ADDR &&
			blkaddr == (map->m_pblk + ofs)) ||
			(map->m_pblk == NEW_ADDR && blkaddr == NEW_ADDR) ||
			flag == F2FS_GET_BLOCK_PRE_DIO ||
			flag == F2FS_GET_BLOCK_PRE_AIO) {
		ofs++;
		map->m_len++;
	} else {
		goto sync_out;
	}

	dn.ofs_in_node++;
	pgofs++;

	if (map->m_len < maxblocks) {
		if (dn.ofs_in_node < end_offset)
			goto next_block;

		if (allocated)
			sync_inode_page(&dn);
		f2fs_put_dnode(&dn);

		if (create) {
			f2fs_unlock_op(sbi);
			f2fs_balance_fs(sbi, allocated);
		}
		allocated = false;
		goto next_dnode;
	}

sync_out:
	if (allocated)
		sync_inode_page(&dn);
	f2fs_put_dnode(&dn);
unlock_out:
	if (create) {
		f2fs_unlock_op(sbi);
		f2fs_balance_fs(sbi, allocated);
	}
out:
	trace_f2fs_map_blocks(inode, map, err);
	return err;
}

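/*
 * buffer_head based wrappers around f2fs_map_blocks() for the generic
 * get_block_t users (direct IO, bmap, fiemap).
 */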
static int __get_data_block(struct inode *inode, sector_t iblock,
			struct buffer_head *bh, int create, int flag,
			pgoff_t *next_pgofs)
{
	struct f2fs_map_blocks map;
	int ret;

	map.m_lblk = iblock;
	map.m_len = bh->b_size >> inode->i_blkbits;
	map.m_next_pgofs = next_pgofs;

	ret = f2fs_map_blocks(inode, &map, create, flag);
	if (!ret) {
		map_bh(bh, inode->i_sb, map.m_pblk);
		bh->b_state = (bh->b_state & ~F2FS_MAP_FLAGS) | map.m_flags;
		bh->b_size = map.m_len << inode->i_blkbits;
	}
	return ret;
}

static int get_data_block(struct inode *inode, sector_t iblock,
			struct buffer_head *bh_result, int create, int flag,
			pgoff_t *next_pgofs)
{
	return __get_data_block(inode, iblock, bh_result, create,
							flag, next_pgofs);
}

static int get_data_block_dio(struct inode *inode, sector_t iblock,
			struct buffer_head *bh_result, int create)
{
	return __get_data_block(inode, iblock, bh_result, create,
						F2FS_GET_BLOCK_DIO, NULL);
}

static int get_data_block_bmap(struct inode *inode, sector_t iblock,
			struct buffer_head *bh_result, int create)
{
	/* Block number less than F2FS MAX BLOCKS */
	if (unlikely(iblock >= F2FS_I_SB(inode)->max_file_blocks))
		return -EFBIG;

	return __get_data_block(inode, iblock, bh_result, create,
						F2FS_GET_BLOCK_BMAP, NULL);
}

static inline sector_t logical_to_blk(struct inode *inode, loff_t offset)
{
	return (offset >> inode->i_blkbits);
}

static inline loff_t blk_to_logical(struct inode *inode, sector_t blk)
{
	return (blk << inode->i_blkbits);
}

int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
		u64 start, u64 len)
{
	struct buffer_head map_bh;
	sector_t start_blk, last_blk;
	pgoff_t next_pgofs;
	loff_t isize;
	u64 logical = 0, phys = 0, size = 0;
	u32 flags = 0;
	int ret = 0;

	ret = fiemap_check_flags(fieinfo, FIEMAP_FLAG_SYNC);
	if (ret)
		return ret;

	if (f2fs_has_inline_data(inode)) {
		ret = f2fs_inline_data_fiemap(inode, fieinfo, start, len);
		if (ret != -EAGAIN)
			return ret;
	}

	inode_lock(inode);

	isize = i_size_read(inode);
	if (start >= isize)
		goto out;

	if (start + len > isize)
		len = isize - start;

	if (logical_to_blk(inode, len) == 0)
		len = blk_to_logical(inode, 1);

	start_blk = logical_to_blk(inode, start);
	last_blk = logical_to_blk(inode, start + len - 1);

next:
	memset(&map_bh, 0, sizeof(struct buffer_head));
	map_bh.b_size = len;

	ret = get_data_block(inode, start_blk, &map_bh, 0,
					F2FS_GET_BLOCK_FIEMAP, &next_pgofs);
	if (ret)
		goto out;

	/* HOLE */
	if (!buffer_mapped(&map_bh)) {
		start_blk = next_pgofs;
		/* Go through holes until passing the EOF */
		if (blk_to_logical(inode, start_blk) < isize)
			goto prep_next;
		/* Found a hole beyond isize means no more extents.
		 * Note that the premise is that filesystems don't
		 * punch holes beyond isize and keep size unchanged.
		 */
		flags |= FIEMAP_EXTENT_LAST;
	}

	if (size) {
		if (f2fs_encrypted_inode(inode))
			flags |= FIEMAP_EXTENT_DATA_ENCRYPTED;

		ret = fiemap_fill_next_extent(fieinfo, logical,
				phys, size, flags);
	}

	if (start_blk > last_blk || ret)
		goto out;

	logical = blk_to_logical(inode, start_blk);
	phys = blk_to_logical(inode, map_bh.b_blocknr);
	size = map_bh.b_size;
	flags = 0;
	if (buffer_unwritten(&map_bh))
		flags = FIEMAP_EXTENT_UNWRITTEN;

	start_blk += logical_to_blk(inode, size);

prep_next:
	cond_resched();
	if (fatal_signal_pending(current))
		ret = -EINTR;
	else
		goto next;
out:
	if (ret == 1)
		ret = 0;

	inode_unlock(inode);
	return ret;
}

/*
 * This function was originally taken from fs/mpage.c, and customized for f2fs.
 * Major change was from block_size == page_size in f2fs by default.
 */
static int f2fs_mpage_readpages(struct address_space *mapping,
			struct list_head *pages, struct page *page,
			unsigned nr_pages)
{
	struct bio *bio = NULL;
	unsigned page_idx;
	sector_t last_block_in_bio = 0;
	struct inode *inode = mapping->host;
	const unsigned blkbits = inode->i_blkbits;
	const unsigned blocksize = 1 << blkbits;
	sector_t block_in_file;
	sector_t last_block;
	sector_t last_block_in_file;
	sector_t block_nr;
	struct block_device *bdev = inode->i_sb->s_bdev;
	struct f2fs_map_blocks map;

	map.m_pblk = 0;
	map.m_lblk = 0;
	map.m_len = 0;
	map.m_flags = 0;
	map.m_next_pgofs = NULL;

	for (page_idx = 0; nr_pages; page_idx++, nr_pages--) {

		prefetchw(&page->flags);
		if (pages) {
			page = list_entry(pages->prev, struct page, lru);
			list_del(&page->lru);
			if (add_to_page_cache_lru(page, mapping,
						  page->index, GFP_KERNEL))
				goto next_page;
		}

		block_in_file = (sector_t)page->index;
		last_block = block_in_file + nr_pages;
		last_block_in_file = (i_size_read(inode) + blocksize - 1) >>
								blkbits;
		if (last_block > last_block_in_file)
			last_block = last_block_in_file;

		/*
		 * Map blocks using the previous result first.
		 */
		if ((map.m_flags & F2FS_MAP_MAPPED) &&
				block_in_file > map.m_lblk &&
				block_in_file < (map.m_lblk + map.m_len))
			goto got_it;

		/*
		 * Then do more f2fs_map_blocks() calls until we are
		 * done with this page.
		 */
		map.m_flags = 0;

		if (block_in_file < last_block) {
			map.m_lblk = block_in_file;
			map.m_len = last_block - block_in_file;

948
			if (f2fs_map_blocks(inode, &map, 0,
949
						F2FS_GET_BLOCK_READ))
J
Jaegeuk Kim 已提交
950 951 952 953 954 955 956 957 958 959 960 961 962 963 964 965 966 967 968 969 970 971 972 973 974 975 976 977
				goto set_error_page;
		}
got_it:
		if ((map.m_flags & F2FS_MAP_MAPPED)) {
			block_nr = map.m_pblk + block_in_file - map.m_lblk;
			SetPageMappedToDisk(page);

			if (!PageUptodate(page) && !cleancache_get_page(page)) {
				SetPageUptodate(page);
				goto confused;
			}
		} else {
			zero_user_segment(page, 0, PAGE_CACHE_SIZE);
			SetPageUptodate(page);
			unlock_page(page);
			goto next_page;
		}

		/*
		 * This page will go to BIO.  Do we need to send this
		 * BIO off first?
		 */
		if (bio && (last_block_in_bio != block_nr - 1)) {
submit_and_realloc:
			submit_bio(READ, bio);
			bio = NULL;
		}
		if (bio == NULL) {
			struct f2fs_crypto_ctx *ctx = NULL;

			if (f2fs_encrypted_inode(inode) &&
					S_ISREG(inode->i_mode)) {

				ctx = f2fs_get_crypto_ctx(inode);
				if (IS_ERR(ctx))
					goto set_error_page;

				/* wait the page to be moved by cleaning */
				f2fs_wait_on_encrypted_page_writeback(
						F2FS_I_SB(inode), block_nr);
			}

			bio = bio_alloc(GFP_KERNEL,
				min_t(int, nr_pages, BIO_MAX_PAGES));
			if (!bio) {
				if (ctx)
					f2fs_release_crypto_ctx(ctx);
J
Jaegeuk Kim 已提交
997
				goto set_error_page;
998
			}
J
Jaegeuk Kim 已提交
999 1000
			bio->bi_bdev = bdev;
			bio->bi_iter.bi_sector = SECTOR_FROM_BLOCK(block_nr);
			bio->bi_end_io = f2fs_read_end_io;
			bio->bi_private = ctx;
		}

		if (bio_add_page(bio, page, blocksize, 0) < blocksize)
			goto submit_and_realloc;

		last_block_in_bio = block_nr;
		goto next_page;
set_error_page:
		SetPageError(page);
		zero_user_segment(page, 0, PAGE_CACHE_SIZE);
		unlock_page(page);
		goto next_page;
confused:
		if (bio) {
			submit_bio(READ, bio);
			bio = NULL;
		}
		unlock_page(page);
next_page:
		if (pages)
			page_cache_release(page);
	}
	BUG_ON(pages && !list_empty(pages));
	if (bio)
		submit_bio(READ, bio);
	return 0;
}

static int f2fs_read_data_page(struct file *file, struct page *page)
{
	struct inode *inode = page->mapping->host;
	int ret = -EAGAIN;

	trace_f2fs_readpage(page, DATA);

	/* If the file has inline data, try to read it directly */
	if (f2fs_has_inline_data(inode))
		ret = f2fs_read_inline_data(inode, page);
	if (ret == -EAGAIN)
		ret = f2fs_mpage_readpages(page->mapping, NULL, page, 1);
	return ret;
}

static int f2fs_read_data_pages(struct file *file,
			struct address_space *mapping,
			struct list_head *pages, unsigned nr_pages)
{
	struct inode *inode = file->f_mapping->host;
	struct page *page = list_entry(pages->prev, struct page, lru);

	trace_f2fs_readpages(inode, page, nr_pages);

	/* If the file has inline data, skip readpages */
	if (f2fs_has_inline_data(inode))
		return 0;

	return f2fs_mpage_readpages(mapping, pages, NULL, nr_pages);
}

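/*
 * Write one data page to its final location: rewrite it in place (IPU) when
 * in-place update is preferred, otherwise allocate a new block and write out
 * of place (OPU), updating the node page and the extent cache.
 */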
int do_write_data_page(struct f2fs_io_info *fio)
{
	struct page *page = fio->page;
	struct inode *inode = page->mapping->host;
	struct dnode_of_data dn;
	int err = 0;

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, page->index, LOOKUP_NODE);
	if (err)
		return err;

	fio->blk_addr = dn.data_blkaddr;

	/* This page is already truncated */
	if (fio->blk_addr == NULL_ADDR) {
		ClearPageUptodate(page);
		goto out_writepage;
	}

	if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode)) {

		/* wait for GCed encrypted page writeback */
		f2fs_wait_on_encrypted_page_writeback(F2FS_I_SB(inode),
							fio->blk_addr);

		fio->encrypted_page = f2fs_encrypt(inode, fio->page);
		if (IS_ERR(fio->encrypted_page)) {
			err = PTR_ERR(fio->encrypted_page);
			goto out_writepage;
		}
	}

	set_page_writeback(page);

	/*
	 * If current allocation needs SSR,
	 * it is better to do in-place writes for updated data.
	 */
	if (unlikely(fio->blk_addr != NEW_ADDR &&
			!is_cold_data(page) &&
			!IS_ATOMIC_WRITTEN_PAGE(page) &&
			need_inplace_update(inode))) {
		rewrite_data_page(fio);
		set_inode_flag(F2FS_I(inode), FI_UPDATE_WRITE);
		trace_f2fs_do_write_data_page(page, IPU);
	} else {
		write_data_page(&dn, fio);
		set_data_blkaddr(&dn);
		f2fs_update_extent_cache(&dn);
		trace_f2fs_do_write_data_page(page, OPU);
		set_inode_flag(F2FS_I(inode), FI_APPEND_WRITE);
		if (page->index == 0)
			set_inode_flag(F2FS_I(inode), FI_FIRST_BLOCK_WRITTEN);
	}
out_writepage:
	f2fs_put_dnode(&dn);
	return err;
}

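/*
 * ->writepage for data pages: zero the tail of a partially valid last page,
 * write directory data under checkpoint control, and redirty the page when
 * the write cannot proceed right now.
 */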
static int f2fs_write_data_page(struct page *page,
					struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	loff_t i_size = i_size_read(inode);
	const pgoff_t end_index = ((unsigned long long) i_size)
							>> PAGE_CACHE_SHIFT;
	unsigned offset = 0;
	bool need_balance_fs = false;
	int err = 0;
	struct f2fs_io_info fio = {
		.sbi = sbi,
		.type = DATA,
		.rw = (wbc->sync_mode == WB_SYNC_ALL) ? WRITE_SYNC : WRITE,
		.page = page,
		.encrypted_page = NULL,
	};

	trace_f2fs_writepage(page, DATA);

	if (page->index < end_index)
		goto write;

	/*
	 * If the offset is out-of-range of file size,
	 * this page does not have to be written to disk.
	 */
	offset = i_size & (PAGE_CACHE_SIZE - 1);
	if ((page->index >= end_index + 1) || !offset)
		goto out;

	zero_user_segment(page, offset, PAGE_CACHE_SIZE);
write:
	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
		goto redirty_out;
	if (f2fs_is_drop_cache(inode))
		goto out;
	if (f2fs_is_volatile_file(inode) && !wbc->for_reclaim &&
			available_free_memory(sbi, BASE_CHECK))
		goto redirty_out;

	/* Dentry blocks are controlled by checkpoint */
	if (S_ISDIR(inode->i_mode)) {
		if (unlikely(f2fs_cp_error(sbi)))
			goto redirty_out;
		err = do_write_data_page(&fio);
		goto done;
	}

	/* we should bypass data pages to let the kworker jobs proceed */
	if (unlikely(f2fs_cp_error(sbi))) {
		SetPageError(page);
		goto out;
	}

	if (!wbc->for_reclaim)
		need_balance_fs = true;
	else if (has_not_enough_free_secs(sbi, 0))
		goto redirty_out;

	err = -EAGAIN;
	f2fs_lock_op(sbi);
	if (f2fs_has_inline_data(inode))
		err = f2fs_write_inline_data(inode, page);
	if (err == -EAGAIN)
		err = do_write_data_page(&fio);
	f2fs_unlock_op(sbi);
done:
	if (err && err != -ENOENT)
		goto redirty_out;

	clear_cold_data(page);
out:
	inode_dec_dirty_pages(inode);
	if (err)
		ClearPageUptodate(page);

	if (wbc->for_reclaim) {
		f2fs_submit_merged_bio_cond(sbi, NULL, page, 0, DATA, WRITE);
		remove_dirty_inode(inode);
	}

	unlock_page(page);
	f2fs_balance_fs(sbi, need_balance_fs);

	if (unlikely(f2fs_cp_error(sbi)))
		f2fs_submit_merged_bio(sbi, DATA, WRITE);

	return 0;

redirty_out:
	redirty_page_for_writepage(wbc, page);
	return AOP_WRITEPAGE_ACTIVATE;
}

static int __f2fs_writepage(struct page *page, struct writeback_control *wbc,
			void *data)
{
	struct address_space *mapping = data;
	int ret = mapping->a_ops->writepage(page, wbc);
	mapping_set_error(mapping, ret);
	return ret;
}

/*
 * This function was copied from write_cache_pages from mm/page-writeback.c.
 * The major change is making the write step of cold data pages separate
 * from that of warm/hot data pages.
 */
static int f2fs_write_cache_pages(struct address_space *mapping,
			struct writeback_control *wbc, writepage_t writepage,
			void *data)
{
	int ret = 0;
	int done = 0;
	struct pagevec pvec;
	int nr_pages;
	pgoff_t uninitialized_var(writeback_index);
	pgoff_t index;
	pgoff_t end;		/* Inclusive */
	pgoff_t done_index;
	int cycled;
	int range_whole = 0;
	int tag;
	int step = 0;

	pagevec_init(&pvec, 0);
next:
	if (wbc->range_cyclic) {
		writeback_index = mapping->writeback_index; /* prev offset */
		index = writeback_index;
		if (index == 0)
			cycled = 1;
		else
			cycled = 0;
		end = -1;
	} else {
		index = wbc->range_start >> PAGE_CACHE_SHIFT;
		end = wbc->range_end >> PAGE_CACHE_SHIFT;
		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
			range_whole = 1;
		cycled = 1; /* ignore range_cyclic tests */
	}
	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
		tag = PAGECACHE_TAG_TOWRITE;
	else
		tag = PAGECACHE_TAG_DIRTY;
retry:
	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
		tag_pages_for_writeback(mapping, index, end);
	done_index = index;
	while (!done && (index <= end)) {
		int i;

		nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag,
			      min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1);
		if (nr_pages == 0)
			break;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];

			if (page->index > end) {
				done = 1;
				break;
			}

			done_index = page->index;

			lock_page(page);

			if (unlikely(page->mapping != mapping)) {
continue_unlock:
				unlock_page(page);
				continue;
			}

			if (!PageDirty(page)) {
				/* someone wrote it for us */
				goto continue_unlock;
			}

			if (step == is_cold_data(page))
				goto continue_unlock;

			if (PageWriteback(page)) {
				if (wbc->sync_mode != WB_SYNC_NONE)
					f2fs_wait_on_page_writeback(page,
								DATA, true);
				else
					goto continue_unlock;
			}

			BUG_ON(PageWriteback(page));
			if (!clear_page_dirty_for_io(page))
				goto continue_unlock;

			ret = (*writepage)(page, wbc, data);
			if (unlikely(ret)) {
				if (ret == AOP_WRITEPAGE_ACTIVATE) {
					unlock_page(page);
					ret = 0;
				} else {
					done_index = page->index + 1;
					done = 1;
					break;
				}
			}

			if (--wbc->nr_to_write <= 0 &&
			    wbc->sync_mode == WB_SYNC_NONE) {
				done = 1;
				break;
			}
		}
		pagevec_release(&pvec);
		cond_resched();
	}

	if (step < 1) {
		step++;
		goto next;
	}

	if (!cycled && !done) {
		cycled = 1;
		index = 0;
		end = writeback_index - 1;
		goto retry;
	}
	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
		mapping->writeback_index = done_index;

	return ret;
}

static int f2fs_write_data_pages(struct address_space *mapping,
			    struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	bool locked = false;
	int ret;
	long diff;

	/* deal with chardevs and other special file */
	if (!mapping->a_ops->writepage)
		return 0;

	/* skip writing if there is no dirty page in this inode */
	if (!get_dirty_pages(inode) && wbc->sync_mode == WB_SYNC_NONE)
		return 0;

	if (S_ISDIR(inode->i_mode) && wbc->sync_mode == WB_SYNC_NONE &&
			get_dirty_pages(inode) < nr_pages_to_skip(sbi, DATA) &&
			available_free_memory(sbi, DIRTY_DENTS))
		goto skip_write;

	/* skip writing during file defragment */
	if (is_inode_flag_set(F2FS_I(inode), FI_DO_DEFRAG))
		goto skip_write;

	/* during POR, we don't need to trigger writepage at all. */
	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
		goto skip_write;

	trace_f2fs_writepages(mapping->host, wbc, DATA);

	diff = nr_pages_to_write(sbi, DATA, wbc);

	if (!S_ISDIR(inode->i_mode) && wbc->sync_mode == WB_SYNC_ALL) {
		mutex_lock(&sbi->writepages);
		locked = true;
	}
	ret = f2fs_write_cache_pages(mapping, wbc, __f2fs_writepage, mapping);
	f2fs_submit_merged_bio_cond(sbi, inode, NULL, 0, DATA, WRITE);
	if (locked)
		mutex_unlock(&sbi->writepages);

	remove_dirty_inode(inode);

	wbc->nr_to_write = max((long)0, wbc->nr_to_write - diff);
	return ret;

skip_write:
	wbc->pages_skipped += get_dirty_pages(inode);
	trace_f2fs_writepages(mapping->host, wbc, DATA);
	return 0;
}

static void f2fs_write_failed(struct address_space *mapping, loff_t to)
{
	struct inode *inode = mapping->host;
	loff_t i_size = i_size_read(inode);

	if (to > i_size) {
		truncate_pagecache(inode, i_size);
		truncate_blocks(inode, i_size, true);
	}
}

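/*
 * Resolve the block address that f2fs_write_begin() will operate on,
 * converting inline data or reserving a new block when needed; *node_changed
 * reports whether the node page was dirtied so the caller can rebalance.
 */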
static int prepare_write_begin(struct f2fs_sb_info *sbi,
			struct page *page, loff_t pos, unsigned len,
			block_t *blk_addr, bool *node_changed)
{
	struct inode *inode = page->mapping->host;
	pgoff_t index = page->index;
	struct dnode_of_data dn;
	struct page *ipage;
	bool locked = false;
	struct extent_info ei;
	int err = 0;

	/*
	 * we already allocated all the blocks, so we don't need to get
	 * the block addresses when there is no need to fill the page.
	 */
	if (!f2fs_has_inline_data(inode) && !f2fs_encrypted_inode(inode) &&
					len == PAGE_CACHE_SIZE)
		return 0;

	if (f2fs_has_inline_data(inode) ||
			(pos & PAGE_CACHE_MASK) >= i_size_read(inode)) {
		f2fs_lock_op(sbi);
		locked = true;
	}
restart:
	/* check inline_data */
	ipage = get_node_page(sbi, inode->i_ino);
	if (IS_ERR(ipage)) {
		err = PTR_ERR(ipage);
		goto unlock_out;
	}

	set_new_dnode(&dn, inode, ipage, ipage, 0);

	if (f2fs_has_inline_data(inode)) {
		if (pos + len <= MAX_INLINE_DATA) {
			read_inline_data(page, ipage);
			set_inode_flag(F2FS_I(inode), FI_DATA_EXIST);
			set_inline_node(ipage);
		} else {
			err = f2fs_convert_inline_page(&dn, page);
			if (err)
				goto out;
			if (dn.data_blkaddr == NULL_ADDR)
				err = f2fs_get_block(&dn, index);
		}
	} else if (locked) {
		err = f2fs_get_block(&dn, index);
	} else {
		if (f2fs_lookup_extent_cache(inode, index, &ei)) {
			dn.data_blkaddr = ei.blk + index - ei.fofs;
		} else {
			/* hole case */
			err = get_dnode_of_data(&dn, index, LOOKUP_NODE);
			if (err || (!err && dn.data_blkaddr == NULL_ADDR)) {
				f2fs_put_dnode(&dn);
				f2fs_lock_op(sbi);
				locked = true;
				goto restart;
			}
		}
	}

	/* convert_inline_page can make node_changed */
	*blk_addr = dn.data_blkaddr;
	*node_changed = dn.node_changed;
out:
	f2fs_put_dnode(&dn);
unlock_out:
	if (locked)
		f2fs_unlock_op(sbi);
	return err;
}

static int f2fs_write_begin(struct file *file, struct address_space *mapping,
		loff_t pos, unsigned len, unsigned flags,
		struct page **pagep, void **fsdata)
{
	struct inode *inode = mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct page *page = NULL;
	pgoff_t index = ((unsigned long long) pos) >> PAGE_CACHE_SHIFT;
	bool need_balance = false;
	block_t blkaddr = NULL_ADDR;
	int err = 0;

	trace_f2fs_write_begin(inode, pos, len, flags);

	/*
	 * We should check this at this moment to avoid deadlock on inode page
	 * and #0 page. The locking rule for inline_data conversion should be:
	 * lock_page(page #0) -> lock_page(inode_page)
	 */
	if (index != 0) {
		err = f2fs_convert_inline_inode(inode);
		if (err)
			goto fail;
	}
repeat:
	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page) {
		err = -ENOMEM;
		goto fail;
	}

	*pagep = page;

	err = prepare_write_begin(sbi, page, pos, len,
					&blkaddr, &need_balance);
	if (err)
		goto fail;

	if (need_balance && has_not_enough_free_secs(sbi, 0)) {
		unlock_page(page);
		f2fs_balance_fs(sbi, true);
		lock_page(page);
		if (page->mapping != mapping) {
			/* The page got truncated from under us */
			f2fs_put_page(page, 1);
			goto repeat;
		}
	}

	f2fs_wait_on_page_writeback(page, DATA, false);

	/* wait for GCed encrypted page writeback */
	if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode))
		f2fs_wait_on_encrypted_page_writeback(sbi, blkaddr);

	if (len == PAGE_CACHE_SIZE)
		goto out_update;
	if (PageUptodate(page))
		goto out_clear;

	if ((pos & PAGE_CACHE_MASK) >= i_size_read(inode)) {
		unsigned start = pos & (PAGE_CACHE_SIZE - 1);
		unsigned end = start + len;

		/* Reading beyond i_size is simple: memset to zero */
		zero_user_segments(page, 0, start, end, PAGE_CACHE_SIZE);
		goto out_update;
	}

	if (blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_CACHE_SIZE);
	} else {
		struct f2fs_io_info fio = {
			.sbi = sbi,
			.type = DATA,
			.rw = READ_SYNC,
			.blk_addr = blkaddr,
			.page = page,
			.encrypted_page = NULL,
		};
		err = f2fs_submit_page_bio(&fio);
		if (err)
			goto fail;

		lock_page(page);
		if (unlikely(!PageUptodate(page))) {
			err = -EIO;
			goto fail;
		}
		if (unlikely(page->mapping != mapping)) {
			f2fs_put_page(page, 1);
			goto repeat;
		}

		/* avoid symlink page */
		if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode)) {
			err = f2fs_decrypt(page);
			if (err)
				goto fail;
		}
	}
out_update:
	SetPageUptodate(page);
out_clear:
	clear_cold_data(page);
	return 0;

fail:
	f2fs_put_page(page, 1);
	f2fs_write_failed(mapping, pos + len);
	return err;
}

static int f2fs_write_end(struct file *file,
			struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	struct inode *inode = page->mapping->host;

	trace_f2fs_write_end(inode, pos, len, copied);

	set_page_dirty(page);

	if (pos + copied > i_size_read(inode)) {
		i_size_write(inode, pos + copied);
		mark_inode_dirty(inode);
	}

	f2fs_put_page(page, 1);
	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
	return copied;
}

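/*
 * Direct IO requires both the file offset and the user buffers to be
 * block-size aligned.
 */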
static int check_direct_IO(struct inode *inode, struct iov_iter *iter,
			   loff_t offset)
{
	unsigned blocksize_mask = inode->i_sb->s_blocksize - 1;

	if (offset & blocksize_mask)
		return -EINVAL;

	if (iov_iter_alignment(iter) & blocksize_mask)
		return -EINVAL;

	return 0;
}

static ssize_t f2fs_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
			      loff_t offset)
{
	struct address_space *mapping = iocb->ki_filp->f_mapping;
	struct inode *inode = mapping->host;
	size_t count = iov_iter_count(iter);
	int err;

	err = check_direct_IO(inode, iter, offset);
	if (err)
		return err;

	if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode))
		return 0;

	trace_f2fs_direct_IO_enter(inode, offset, count, iov_iter_rw(iter));

	err = blockdev_direct_IO(iocb, inode, iter, offset, get_data_block_dio);
	if (err < 0 && iov_iter_rw(iter) == WRITE)
		f2fs_write_failed(mapping, offset + count);

	trace_f2fs_direct_IO_exit(inode, offset, count, iov_iter_rw(iter), err);

	return err;
}

void f2fs_invalidate_page(struct page *page, unsigned int offset,
							unsigned int length)
{
	struct inode *inode = page->mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

	if (inode->i_ino >= F2FS_ROOT_INO(sbi) &&
		(offset % PAGE_CACHE_SIZE || length != PAGE_CACHE_SIZE))
		return;

	if (PageDirty(page)) {
		if (inode->i_ino == F2FS_META_INO(sbi))
			dec_page_count(sbi, F2FS_DIRTY_META);
		else if (inode->i_ino == F2FS_NODE_INO(sbi))
			dec_page_count(sbi, F2FS_DIRTY_NODES);
		else
			inode_dec_dirty_pages(inode);
	}

	/* This is atomic written page, keep Private */
	if (IS_ATOMIC_WRITTEN_PAGE(page))
		return;

	ClearPagePrivate(page);
}

int f2fs_release_page(struct page *page, gfp_t wait)
{
	/* If this is dirty page, keep PagePrivate */
	if (PageDirty(page))
		return 0;

	/* This is atomic written page, keep Private */
	if (IS_ATOMIC_WRITTEN_PAGE(page))
		return 0;

	ClearPagePrivate(page);
	return 1;
}

static int f2fs_set_data_page_dirty(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct inode *inode = mapping->host;

	trace_f2fs_set_page_dirty(page, DATA);

	SetPageUptodate(page);

	if (f2fs_is_atomic_file(inode)) {
		if (!IS_ATOMIC_WRITTEN_PAGE(page)) {
			register_inmem_page(inode, page);
			return 1;
		}
		/*
		 * Previously, this page has been registered, we just
		 * return here.
		 */
		return 0;
	}

	if (!PageDirty(page)) {
		__set_page_dirty_nobuffers(page);
		update_dirty_page(inode, page);
		return 1;
	}
	return 0;
}

static sector_t f2fs_bmap(struct address_space *mapping, sector_t block)
{
	struct inode *inode = mapping->host;

	if (f2fs_has_inline_data(inode))
		return 0;

	/* make sure allocating whole blocks */
	if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
		filemap_write_and_wait(mapping);

	return generic_block_bmap(mapping, block, get_data_block_bmap);
}

const struct address_space_operations f2fs_dblock_aops = {
	.readpage	= f2fs_read_data_page,
	.readpages	= f2fs_read_data_pages,
	.writepage	= f2fs_write_data_page,
	.writepages	= f2fs_write_data_pages,
	.write_begin	= f2fs_write_begin,
	.write_end	= f2fs_write_end,
	.set_page_dirty	= f2fs_set_data_page_dirty,
	.invalidatepage	= f2fs_invalidate_page,
	.releasepage	= f2fs_release_page,
	.direct_IO	= f2fs_direct_IO,
	.bmap		= f2fs_bmap,
};