/*
 * fs/f2fs/data.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/buffer_head.h>
#include <linux/mpage.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/pagevec.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/prefetch.h>
#include <linux/uio.h>
#include <linux/cleancache.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "trace.h"
#include <trace/events/f2fs.h>

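/*
 * Completion callback for read bios: hand encrypted pages over to the
 * crypto workqueue for decryption; otherwise mark each page uptodate
 * (or failed) and unlock it.
 */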
static void f2fs_read_end_io(struct bio *bio)
{
	struct bio_vec *bvec;
	int i;

	if (f2fs_bio_encrypted(bio)) {
		if (bio->bi_error) {
			f2fs_release_crypto_ctx(bio->bi_private);
		} else {
			f2fs_end_io_crypto_work(bio->bi_private, bio);
			return;
		}
	}

	bio_for_each_segment_all(bvec, bio, i) {
		struct page *page = bvec->bv_page;

		if (!bio->bi_error) {
			SetPageUptodate(page);
		} else {
			ClearPageUptodate(page);
			SetPageError(page);
		}
		unlock_page(page);
	}
	bio_put(bio);
}

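/*
 * Completion callback for write bios: restore encrypted control pages,
 * record EIO and stop checkpointing on failure, end page writeback, and
 * wake the checkpoint waiter once all writeback pages have drained.
 */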
static void f2fs_write_end_io(struct bio *bio)
{
	struct f2fs_sb_info *sbi = bio->bi_private;
	struct bio_vec *bvec;
	int i;

	bio_for_each_segment_all(bvec, bio, i) {
		struct page *page = bvec->bv_page;

		f2fs_restore_and_release_control_page(&page);

		if (unlikely(bio->bi_error)) {
			set_bit(AS_EIO, &page->mapping->flags);
			f2fs_stop_checkpoint(sbi);
		}
		end_page_writeback(page);
		dec_page_count(sbi, F2FS_WRITEBACK);
	}

	if (!get_pages(sbi, F2FS_WRITEBACK) && wq_has_sleeper(&sbi->cp_wait))
		wake_up(&sbi->cp_wait);

	bio_put(bio);
}

/*
 * Low-level block read/write IO operations.
 */
static struct bio *__bio_alloc(struct f2fs_sb_info *sbi, block_t blk_addr,
				int npages, bool is_read)
{
	struct bio *bio;

	bio = f2fs_bio_alloc(npages);

	bio->bi_bdev = sbi->sb->s_bdev;
	bio->bi_iter.bi_sector = SECTOR_FROM_BLOCK(blk_addr);
	bio->bi_end_io = is_read ? f2fs_read_end_io : f2fs_write_end_io;
	bio->bi_private = is_read ? NULL : sbi;

	return bio;
}

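/* Submit the bio currently cached in this io area, if there is one. */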
static void __submit_merged_bio(struct f2fs_bio_info *io)
{
	struct f2fs_io_info *fio = &io->fio;

	if (!io->bio)
		return;

	if (is_read_io(fio->rw))
		trace_f2fs_submit_read_bio(io->sbi->sb, fio, io->bio);
	else
		trace_f2fs_submit_write_bio(io->sbi->sb, fio, io->bio);

	submit_bio(fio->rw, io->bio);
	io->bio = NULL;
}

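/*
 * Check whether the cached bio already carries a page that belongs to
 * the given inode, page, or node ino; with no key given, any cached
 * bio matches.
 */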
static bool __has_merged_page(struct f2fs_bio_info *io, struct inode *inode,
						struct page *page, nid_t ino)
{
	struct bio_vec *bvec;
	struct page *target;
	int i;

	if (!io->bio)
		return false;

	if (!inode && !page && !ino)
		return true;

	bio_for_each_segment_all(bvec, io->bio, i) {

		if (bvec->bv_page->mapping) {
			target = bvec->bv_page;
		} else {
			struct f2fs_crypto_ctx *ctx;

			/* encrypted page */
			ctx = (struct f2fs_crypto_ctx *)page_private(
								bvec->bv_page);
			target = ctx->w.control_page;
		}

		if (inode && inode == target->mapping->host)
			return true;
		if (page && page == target)
			return true;
		if (ino && ino == ino_of_node(target))
			return true;
	}

	return false;
}

static bool has_merged_page(struct f2fs_sb_info *sbi, struct inode *inode,
						struct page *page, nid_t ino,
						enum page_type type)
{
	enum page_type btype = PAGE_TYPE_OF_BIO(type);
	struct f2fs_bio_info *io = &sbi->write_io[btype];
	bool ret;

	down_read(&io->io_rwsem);
	ret = __has_merged_page(io, inode, page, ino);
	up_read(&io->io_rwsem);
	return ret;
}

static void __f2fs_submit_merged_bio(struct f2fs_sb_info *sbi,
				struct inode *inode, struct page *page,
				nid_t ino, enum page_type type, int rw)
{
	enum page_type btype = PAGE_TYPE_OF_BIO(type);
	struct f2fs_bio_info *io;

	io = is_read_io(rw) ? &sbi->read_io : &sbi->write_io[btype];

	down_write(&io->io_rwsem);

	if (!__has_merged_page(io, inode, page, ino))
		goto out;

	/* change META to META_FLUSH in the checkpoint procedure */
	if (type >= META_FLUSH) {
		io->fio.type = META_FLUSH;
		if (test_opt(sbi, NOBARRIER))
			io->fio.rw = WRITE_FLUSH | REQ_META | REQ_PRIO;
		else
			io->fio.rw = WRITE_FLUSH_FUA | REQ_META | REQ_PRIO;
	}
	__submit_merged_bio(io);
out:
	up_write(&io->io_rwsem);
}

void f2fs_submit_merged_bio(struct f2fs_sb_info *sbi, enum page_type type,
									int rw)
{
	__f2fs_submit_merged_bio(sbi, NULL, NULL, 0, type, rw);
}

void f2fs_submit_merged_bio_cond(struct f2fs_sb_info *sbi,
				struct inode *inode, struct page *page,
				nid_t ino, enum page_type type, int rw)
{
	if (has_merged_page(sbi, inode, page, ino, type))
		__f2fs_submit_merged_bio(sbi, inode, page, ino, type, rw);
}

void f2fs_flush_merged_bios(struct f2fs_sb_info *sbi)
{
	f2fs_submit_merged_bio(sbi, DATA, WRITE);
	f2fs_submit_merged_bio(sbi, NODE, WRITE);
	f2fs_submit_merged_bio(sbi, META, WRITE);
}

/*
 * Fill the locked page with data located in the block address.
 * Return unlocked page.
 */
int f2fs_submit_page_bio(struct f2fs_io_info *fio)
{
	struct bio *bio;
	struct page *page = fio->encrypted_page ? fio->encrypted_page : fio->page;

	trace_f2fs_submit_page_bio(page, fio);
	f2fs_trace_ios(fio, 0);

	/* Allocate a new bio */
	bio = __bio_alloc(fio->sbi, fio->new_blkaddr, 1, is_read_io(fio->rw));

	if (bio_add_page(bio, page, PAGE_CACHE_SIZE, 0) < PAGE_CACHE_SIZE) {
		bio_put(bio);
		return -EFAULT;
	}

	submit_bio(fio->rw, bio);
	return 0;
}

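/*
 * Merged page submission: append the page to the io area's cached bio,
 * flushing the bio first when the target block is not contiguous, the
 * rw mode differs, or the bio is full.
 */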
void f2fs_submit_page_mbio(struct f2fs_io_info *fio)
{
	struct f2fs_sb_info *sbi = fio->sbi;
	enum page_type btype = PAGE_TYPE_OF_BIO(fio->type);
	struct f2fs_bio_info *io;
	bool is_read = is_read_io(fio->rw);
	struct page *bio_page;

	io = is_read ? &sbi->read_io : &sbi->write_io[btype];

	if (fio->old_blkaddr != NEW_ADDR)
		verify_block_addr(sbi, fio->old_blkaddr);
	verify_block_addr(sbi, fio->new_blkaddr);

	down_write(&io->io_rwsem);

	if (!is_read)
		inc_page_count(sbi, F2FS_WRITEBACK);

	if (io->bio && (io->last_block_in_bio != fio->new_blkaddr - 1 ||
						io->fio.rw != fio->rw))
		__submit_merged_bio(io);
alloc_new:
	if (io->bio == NULL) {
		int bio_blocks = MAX_BIO_BLOCKS(sbi);

		io->bio = __bio_alloc(sbi, fio->new_blkaddr,
						bio_blocks, is_read);
		io->fio = *fio;
	}

	bio_page = fio->encrypted_page ? fio->encrypted_page : fio->page;

	if (bio_add_page(io->bio, bio_page, PAGE_CACHE_SIZE, 0) <
							PAGE_CACHE_SIZE) {
		__submit_merged_bio(io);
		goto alloc_new;
	}

	io->last_block_in_bio = fio->new_blkaddr;
	f2fs_trace_ios(fio, 0);

	up_write(&io->io_rwsem);
	trace_f2fs_submit_page_mbio(fio->page, fio);
}

/*
 * Lock ordering for the change of data block address:
 * ->data_page
 *  ->node_page
 *    update block addresses in the node page
 */
void set_data_blkaddr(struct dnode_of_data *dn)
{
	struct f2fs_node *rn;
	__le32 *addr_array;
	struct page *node_page = dn->node_page;
	unsigned int ofs_in_node = dn->ofs_in_node;

	f2fs_wait_on_page_writeback(node_page, NODE, true);

	rn = F2FS_NODE(node_page);

	/* Get physical address of data block */
	addr_array = blkaddr_in_node(rn);
	addr_array[ofs_in_node] = cpu_to_le32(dn->data_blkaddr);
	if (set_page_dirty(node_page))
		dn->node_changed = true;
}

void f2fs_update_data_blkaddr(struct dnode_of_data *dn, block_t blkaddr)
{
	dn->data_blkaddr = blkaddr;
	set_data_blkaddr(dn);
	f2fs_update_extent_cache(dn);
}

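/* Reserve a new data block at dn->ofs_in_node and mark it NEW_ADDR. */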
int reserve_new_block(struct dnode_of_data *dn)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);

	if (unlikely(is_inode_flag_set(F2FS_I(dn->inode), FI_NO_ALLOC)))
		return -EPERM;
	if (unlikely(!inc_valid_block_count(sbi, dn->inode, 1)))
		return -ENOSPC;

	trace_f2fs_reserve_new_block(dn->inode, dn->nid, dn->ofs_in_node);

	dn->data_blkaddr = NEW_ADDR;
	set_data_blkaddr(dn);
	mark_inode_dirty(dn->inode);
	sync_inode_page(dn);
	return 0;
}

int f2fs_reserve_block(struct dnode_of_data *dn, pgoff_t index)
{
	bool need_put = dn->inode_page ? false : true;
	int err;

	err = get_dnode_of_data(dn, index, ALLOC_NODE);
	if (err)
		return err;

	if (dn->data_blkaddr == NULL_ADDR)
		err = reserve_new_block(dn);
	if (err || need_put)
		f2fs_put_dnode(dn);
	return err;
}

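/* Resolve the block address from the extent cache, or reserve a new block. */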
int f2fs_get_block(struct dnode_of_data *dn, pgoff_t index)
{
	struct extent_info ei;
	struct inode *inode = dn->inode;

	if (f2fs_lookup_extent_cache(inode, index, &ei)) {
		dn->data_blkaddr = ei.blk + index - ei.fofs;
		return 0;
	}

	return f2fs_reserve_block(dn, index);
}

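/*
 * Grab a data page and, unless it is already uptodate, resolve its block
 * address (extent cache first) and submit a read bio for it. Encrypted
 * regular files fall back to read_mapping_page().
 */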
struct page *get_read_data_page(struct inode *inode, pgoff_t index,
						int rw, bool for_write)
{
	struct address_space *mapping = inode->i_mapping;
	struct dnode_of_data dn;
	struct page *page;
	struct extent_info ei;
	int err;
	struct f2fs_io_info fio = {
		.sbi = F2FS_I_SB(inode),
		.type = DATA,
		.rw = rw,
		.encrypted_page = NULL,
	};

	if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode))
		return read_mapping_page(mapping, index, NULL);

	page = f2fs_grab_cache_page(mapping, index, for_write);
	if (!page)
		return ERR_PTR(-ENOMEM);

	if (f2fs_lookup_extent_cache(inode, index, &ei)) {
		dn.data_blkaddr = ei.blk + index - ei.fofs;
		goto got_it;
	}

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, index, LOOKUP_NODE);
	if (err)
		goto put_err;
	f2fs_put_dnode(&dn);

	if (unlikely(dn.data_blkaddr == NULL_ADDR)) {
		err = -ENOENT;
		goto put_err;
	}
got_it:
	if (PageUptodate(page)) {
		unlock_page(page);
		return page;
	}

	/*
	 * A new dentry page is allocated but not able to be written, since its
	 * new inode page couldn't be allocated due to -ENOSPC.
	 * In such a case, its blkaddr can remain as NEW_ADDR.
	 * See f2fs_add_link -> get_new_data_page -> init_inode_metadata.
	 */
	if (dn.data_blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_CACHE_SIZE);
		SetPageUptodate(page);
		unlock_page(page);
		return page;
	}

	fio.new_blkaddr = fio.old_blkaddr = dn.data_blkaddr;
	fio.page = page;
	err = f2fs_submit_page_bio(&fio);
	if (err)
		goto put_err;
	return page;

put_err:
	f2fs_put_page(page, 1);
	return ERR_PTR(err);
}

struct page *find_data_page(struct inode *inode, pgoff_t index)
{
	struct address_space *mapping = inode->i_mapping;
	struct page *page;

	page = find_get_page(mapping, index);
	if (page && PageUptodate(page))
		return page;
	f2fs_put_page(page, 0);

	page = get_read_data_page(inode, index, READ_SYNC, false);
	if (IS_ERR(page))
		return page;

	if (PageUptodate(page))
		return page;

	wait_on_page_locked(page);
	if (unlikely(!PageUptodate(page))) {
		f2fs_put_page(page, 0);
		return ERR_PTR(-EIO);
	}
	return page;
}

/*
 * If it tries to access a hole, return an error: the callers (functions
 * in dir.c and GC) need to know whether this page exists or not.
 */
struct page *get_lock_data_page(struct inode *inode, pgoff_t index,
							bool for_write)
{
	struct address_space *mapping = inode->i_mapping;
	struct page *page;
repeat:
	page = get_read_data_page(inode, index, READ_SYNC, for_write);
	if (IS_ERR(page))
		return page;

	/* wait for read completion */
	lock_page(page);
	if (unlikely(!PageUptodate(page))) {
		f2fs_put_page(page, 1);
		return ERR_PTR(-EIO);
	}
	if (unlikely(page->mapping != mapping)) {
		f2fs_put_page(page, 1);
		goto repeat;
	}
	return page;
}

/*
 * Caller ensures that this data page is never allocated.
 * A new zero-filled data page is allocated in the page cache.
 *
 * Also, caller should grab and release a rwsem by calling f2fs_lock_op() and
 * f2fs_unlock_op().
 * Note that ipage is set only by make_empty_dir, and if any error occurs,
 * ipage should be released by this function.
 */
struct page *get_new_data_page(struct inode *inode,
		struct page *ipage, pgoff_t index, bool new_i_size)
{
	struct address_space *mapping = inode->i_mapping;
	struct page *page;
	struct dnode_of_data dn;
	int err;

	page = f2fs_grab_cache_page(mapping, index, true);
	if (!page) {
		/*
		 * before exiting, we should make sure ipage will be released
		 * if any error occurs.
		 */
		f2fs_put_page(ipage, 1);
		return ERR_PTR(-ENOMEM);
	}

	set_new_dnode(&dn, inode, ipage, NULL, 0);
	err = f2fs_reserve_block(&dn, index);
	if (err) {
		f2fs_put_page(page, 1);
		return ERR_PTR(err);
	}
	if (!ipage)
		f2fs_put_dnode(&dn);

	if (PageUptodate(page))
		goto got_it;

	if (dn.data_blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_CACHE_SIZE);
		SetPageUptodate(page);
	} else {
		f2fs_put_page(page, 1);

		/* if ipage exists, blkaddr should be NEW_ADDR */
		f2fs_bug_on(F2FS_I_SB(inode), ipage);
		page = get_lock_data_page(inode, index, true);
		if (IS_ERR(page))
			return page;
	}
got_it:
	if (new_i_size && i_size_read(inode) <
				((loff_t)(index + 1) << PAGE_CACHE_SHIFT)) {
		i_size_write(inode, ((loff_t)(index + 1) << PAGE_CACHE_SHIFT));
		/* Only the directory inode sets new_i_size */
		set_inode_flag(F2FS_I(inode), FI_UPDATE_DIR);
	}
	return page;
}

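/*
 * Allocate an on-disk block for dn's data block through the active
 * segment and push i_size forward when the new block extends the file.
 */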
static int __allocate_data_block(struct dnode_of_data *dn)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	struct f2fs_summary sum;
	struct node_info ni;
	int seg = CURSEG_WARM_DATA;
	pgoff_t fofs;

	if (unlikely(is_inode_flag_set(F2FS_I(dn->inode), FI_NO_ALLOC)))
		return -EPERM;

	dn->data_blkaddr = datablock_addr(dn->node_page, dn->ofs_in_node);
	if (dn->data_blkaddr == NEW_ADDR)
		goto alloc;

	if (unlikely(!inc_valid_block_count(sbi, dn->inode, 1)))
		return -ENOSPC;

alloc:
	get_node_info(sbi, dn->nid, &ni);
	set_summary(&sum, dn->nid, dn->ofs_in_node, ni.version);

	if (dn->ofs_in_node == 0 && dn->inode_page == dn->node_page)
		seg = CURSEG_DIRECT_IO;

	allocate_data_block(sbi, NULL, dn->data_blkaddr, &dn->data_blkaddr,
								&sum, seg);
	set_data_blkaddr(dn);

	/* update i_size */
	fofs = start_bidx_of_node(ofs_of_node(dn->node_page), dn->inode) +
							dn->ofs_in_node;
	if (i_size_read(dn->inode) < ((loff_t)(fofs + 1) << PAGE_CACHE_SHIFT))
		i_size_write(dn->inode,
				((loff_t)(fofs + 1) << PAGE_CACHE_SHIFT));
	return 0;
}

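/*
 * Preallocate blocks for a coming write with one f2fs_map_blocks() call,
 * converting inline data first when the write cannot stay inline.
 */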
ssize_t f2fs_preallocate_blocks(struct kiocb *iocb, struct iov_iter *from)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	struct f2fs_map_blocks map;
	ssize_t ret = 0;

	map.m_lblk = F2FS_BYTES_TO_BLK(iocb->ki_pos);
	map.m_len = F2FS_BLK_ALIGN(iov_iter_count(from));
	map.m_next_pgofs = NULL;

	if (f2fs_encrypted_inode(inode))
		return 0;

	if (iocb->ki_flags & IOCB_DIRECT) {
		ret = f2fs_convert_inline_inode(inode);
		if (ret)
			return ret;
		return f2fs_map_blocks(inode, &map, 1, F2FS_GET_BLOCK_PRE_DIO);
	}
	if (iocb->ki_pos + iov_iter_count(from) > MAX_INLINE_DATA) {
		ret = f2fs_convert_inline_inode(inode);
		if (ret)
			return ret;
	}
	if (!f2fs_has_inline_data(inode))
		return f2fs_map_blocks(inode, &map, 1, F2FS_GET_BLOCK_PRE_AIO);
	return ret;
}

/*
 * f2fs_map_blocks() now supports readahead/bmap/rw direct_IO with the
 * f2fs_map_blocks structure.
 * If original data blocks are allocated, then give them to blockdev.
 * Otherwise,
 *     a. preallocate requested block addresses
 *     b. do not use extent cache for better performance
 *     c. give the block addresses to blockdev
 */
int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map,
						int create, int flag)
{
	unsigned int maxblocks = map->m_len;
	struct dnode_of_data dn;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	int mode = create ? ALLOC_NODE : LOOKUP_NODE_RA;
	pgoff_t pgofs, end_offset;
	int err = 0, ofs = 1;
	struct extent_info ei;
	bool allocated = false;
	block_t blkaddr;

	map->m_len = 0;
	map->m_flags = 0;

	/* it only supports block size == page size */
	pgofs = (pgoff_t)map->m_lblk;

	if (!create && f2fs_lookup_extent_cache(inode, pgofs, &ei)) {
		map->m_pblk = ei.blk + pgofs - ei.fofs;
		map->m_len = min((pgoff_t)maxblocks, ei.fofs + ei.len - pgofs);
		map->m_flags = F2FS_MAP_MAPPED;
		goto out;
	}

next_dnode:
	if (create)
		f2fs_lock_op(sbi);

	/* When reading holes, we need its node page */
	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, pgofs, mode);
	if (err) {
		if (err == -ENOENT) {
			err = 0;
			if (map->m_next_pgofs)
				*map->m_next_pgofs =
					get_next_page_offset(&dn, pgofs);
		}
		goto unlock_out;
	}

	end_offset = ADDRS_PER_PAGE(dn.node_page, inode);

next_block:
	blkaddr = datablock_addr(dn.node_page, dn.ofs_in_node);

	if (blkaddr == NEW_ADDR || blkaddr == NULL_ADDR) {
		if (create) {
			if (unlikely(f2fs_cp_error(sbi))) {
				err = -EIO;
				goto sync_out;
			}
			if (flag == F2FS_GET_BLOCK_PRE_AIO) {
				if (blkaddr == NULL_ADDR)
					err = reserve_new_block(&dn);
			} else {
				err = __allocate_data_block(&dn);
			}
			if (err)
				goto sync_out;
			allocated = true;
			map->m_flags = F2FS_MAP_NEW;
			blkaddr = dn.data_blkaddr;
		} else {
			if (flag == F2FS_GET_BLOCK_FIEMAP &&
						blkaddr == NULL_ADDR) {
				if (map->m_next_pgofs)
					*map->m_next_pgofs = pgofs + 1;
			}
			if (flag != F2FS_GET_BLOCK_FIEMAP ||
						blkaddr != NEW_ADDR) {
				if (flag == F2FS_GET_BLOCK_BMAP)
					err = -ENOENT;
				goto sync_out;
			}
		}
	}

	if (map->m_len == 0) {
		/* preallocated unwritten block should be mapped for fiemap. */
		if (blkaddr == NEW_ADDR)
			map->m_flags |= F2FS_MAP_UNWRITTEN;
		map->m_flags |= F2FS_MAP_MAPPED;

		map->m_pblk = blkaddr;
		map->m_len = 1;
	} else if ((map->m_pblk != NEW_ADDR &&
			blkaddr == (map->m_pblk + ofs)) ||
			(map->m_pblk == NEW_ADDR && blkaddr == NEW_ADDR) ||
			flag == F2FS_GET_BLOCK_PRE_DIO ||
			flag == F2FS_GET_BLOCK_PRE_AIO) {
		ofs++;
		map->m_len++;
	} else {
		goto sync_out;
	}

	dn.ofs_in_node++;
	pgofs++;

	if (map->m_len < maxblocks) {
		if (dn.ofs_in_node < end_offset)
			goto next_block;

		if (allocated)
			sync_inode_page(&dn);
		f2fs_put_dnode(&dn);

		if (create) {
			f2fs_unlock_op(sbi);
			f2fs_balance_fs(sbi, allocated);
		}
		allocated = false;
		goto next_dnode;
	}

sync_out:
	if (allocated)
		sync_inode_page(&dn);
	f2fs_put_dnode(&dn);
unlock_out:
	if (create) {
		f2fs_unlock_op(sbi);
		f2fs_balance_fs(sbi, allocated);
	}
out:
	trace_f2fs_map_blocks(inode, map, err);
	return err;
}

static int __get_data_block(struct inode *inode, sector_t iblock,
			struct buffer_head *bh, int create, int flag,
			pgoff_t *next_pgofs)
{
	struct f2fs_map_blocks map;
	int ret;

	map.m_lblk = iblock;
	map.m_len = bh->b_size >> inode->i_blkbits;
	map.m_next_pgofs = next_pgofs;

	ret = f2fs_map_blocks(inode, &map, create, flag);
	if (!ret) {
		map_bh(bh, inode->i_sb, map.m_pblk);
		bh->b_state = (bh->b_state & ~F2FS_MAP_FLAGS) | map.m_flags;
		bh->b_size = map.m_len << inode->i_blkbits;
	}
	return ret;
}

static int get_data_block(struct inode *inode, sector_t iblock,
			struct buffer_head *bh_result, int create, int flag,
			pgoff_t *next_pgofs)
{
	return __get_data_block(inode, iblock, bh_result, create,
							flag, next_pgofs);
}

static int get_data_block_dio(struct inode *inode, sector_t iblock,
			struct buffer_head *bh_result, int create)
{
	return __get_data_block(inode, iblock, bh_result, create,
						F2FS_GET_BLOCK_DIO, NULL);
}

static int get_data_block_bmap(struct inode *inode, sector_t iblock,
			struct buffer_head *bh_result, int create)
{
	/* Block number less than F2FS MAX BLOCKS */
	if (unlikely(iblock >= F2FS_I_SB(inode)->max_file_blocks))
		return -EFBIG;

	return __get_data_block(inode, iblock, bh_result, create,
						F2FS_GET_BLOCK_BMAP, NULL);
}

static inline sector_t logical_to_blk(struct inode *inode, loff_t offset)
{
	return (offset >> inode->i_blkbits);
}

static inline loff_t blk_to_logical(struct inode *inode, sector_t blk)
{
	return (blk << inode->i_blkbits);
}

int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
		u64 start, u64 len)
{
	struct buffer_head map_bh;
	sector_t start_blk, last_blk;
	pgoff_t next_pgofs;
	loff_t isize;
	u64 logical = 0, phys = 0, size = 0;
	u32 flags = 0;
	int ret = 0;

	ret = fiemap_check_flags(fieinfo, FIEMAP_FLAG_SYNC);
	if (ret)
		return ret;

	if (f2fs_has_inline_data(inode)) {
		ret = f2fs_inline_data_fiemap(inode, fieinfo, start, len);
		if (ret != -EAGAIN)
			return ret;
	}

	inode_lock(inode);

	isize = i_size_read(inode);
	if (start >= isize)
		goto out;

	if (start + len > isize)
		len = isize - start;

	if (logical_to_blk(inode, len) == 0)
		len = blk_to_logical(inode, 1);

	start_blk = logical_to_blk(inode, start);
	last_blk = logical_to_blk(inode, start + len - 1);

next:
	memset(&map_bh, 0, sizeof(struct buffer_head));
	map_bh.b_size = len;

	ret = get_data_block(inode, start_blk, &map_bh, 0,
					F2FS_GET_BLOCK_FIEMAP, &next_pgofs);
	if (ret)
		goto out;

	/* HOLE */
	if (!buffer_mapped(&map_bh)) {
		start_blk = next_pgofs;
		/* Go through holes until passing the EOF */
		if (blk_to_logical(inode, start_blk) < isize)
			goto prep_next;
		/* A hole beyond isize means no more extents.
		 * Note that the premise is that filesystems don't
		 * punch holes beyond isize and keep size unchanged.
		 */
		flags |= FIEMAP_EXTENT_LAST;
	}

	if (size) {
		if (f2fs_encrypted_inode(inode))
			flags |= FIEMAP_EXTENT_DATA_ENCRYPTED;

		ret = fiemap_fill_next_extent(fieinfo, logical,
				phys, size, flags);
	}

	if (start_blk > last_blk || ret)
		goto out;

	logical = blk_to_logical(inode, start_blk);
	phys = blk_to_logical(inode, map_bh.b_blocknr);
	size = map_bh.b_size;
	flags = 0;
	if (buffer_unwritten(&map_bh))
		flags = FIEMAP_EXTENT_UNWRITTEN;

	start_blk += logical_to_blk(inode, size);

prep_next:
	cond_resched();
	if (fatal_signal_pending(current))
		ret = -EINTR;
	else
		goto next;
out:
	if (ret == 1)
		ret = 0;

	inode_unlock(inode);
	return ret;
}

/*
 * This function was originally taken from fs/mpage.c, and customized for f2fs.
 * Major change was from block_size == page_size in f2fs by default.
 */
static int f2fs_mpage_readpages(struct address_space *mapping,
			struct list_head *pages, struct page *page,
			unsigned nr_pages)
{
	struct bio *bio = NULL;
	unsigned page_idx;
	sector_t last_block_in_bio = 0;
	struct inode *inode = mapping->host;
	const unsigned blkbits = inode->i_blkbits;
	const unsigned blocksize = 1 << blkbits;
	sector_t block_in_file;
	sector_t last_block;
	sector_t last_block_in_file;
	sector_t block_nr;
	struct block_device *bdev = inode->i_sb->s_bdev;
	struct f2fs_map_blocks map;

	map.m_pblk = 0;
	map.m_lblk = 0;
	map.m_len = 0;
	map.m_flags = 0;
	map.m_next_pgofs = NULL;

	for (page_idx = 0; nr_pages; page_idx++, nr_pages--) {

		prefetchw(&page->flags);
		if (pages) {
			page = list_entry(pages->prev, struct page, lru);
			list_del(&page->lru);
			if (add_to_page_cache_lru(page, mapping,
						  page->index, GFP_KERNEL))
				goto next_page;
		}

		block_in_file = (sector_t)page->index;
		last_block = block_in_file + nr_pages;
		last_block_in_file = (i_size_read(inode) + blocksize - 1) >>
								blkbits;
		if (last_block > last_block_in_file)
			last_block = last_block_in_file;

		/*
		 * Map blocks using the previous result first.
		 */
		if ((map.m_flags & F2FS_MAP_MAPPED) &&
				block_in_file > map.m_lblk &&
				block_in_file < (map.m_lblk + map.m_len))
			goto got_it;

		/*
		 * Then do more f2fs_map_blocks() calls until we are
		 * done with this page.
		 */
		map.m_flags = 0;

		if (block_in_file < last_block) {
			map.m_lblk = block_in_file;
			map.m_len = last_block - block_in_file;

			if (f2fs_map_blocks(inode, &map, 0,
						F2FS_GET_BLOCK_READ))
				goto set_error_page;
		}
got_it:
		if ((map.m_flags & F2FS_MAP_MAPPED)) {
			block_nr = map.m_pblk + block_in_file - map.m_lblk;
			SetPageMappedToDisk(page);

			if (!PageUptodate(page) && !cleancache_get_page(page)) {
				SetPageUptodate(page);
				goto confused;
			}
		} else {
			zero_user_segment(page, 0, PAGE_CACHE_SIZE);
			SetPageUptodate(page);
			unlock_page(page);
			goto next_page;
		}

		/*
		 * This page will go to BIO.  Do we need to send this
		 * BIO off first?
		 */
		if (bio && (last_block_in_bio != block_nr - 1)) {
submit_and_realloc:
			submit_bio(READ, bio);
			bio = NULL;
		}
		if (bio == NULL) {
			struct f2fs_crypto_ctx *ctx = NULL;

			if (f2fs_encrypted_inode(inode) &&
					S_ISREG(inode->i_mode)) {

				ctx = f2fs_get_crypto_ctx(inode);
				if (IS_ERR(ctx))
					goto set_error_page;

				/* wait the page to be moved by cleaning */
				f2fs_wait_on_encrypted_page_writeback(
						F2FS_I_SB(inode), block_nr);
			}

			bio = bio_alloc(GFP_KERNEL,
				min_t(int, nr_pages, BIO_MAX_PAGES));
			if (!bio) {
				if (ctx)
					f2fs_release_crypto_ctx(ctx);
				goto set_error_page;
			}
			bio->bi_bdev = bdev;
			bio->bi_iter.bi_sector = SECTOR_FROM_BLOCK(block_nr);
			bio->bi_end_io = f2fs_read_end_io;
			bio->bi_private = ctx;
		}

		if (bio_add_page(bio, page, blocksize, 0) < blocksize)
			goto submit_and_realloc;

		last_block_in_bio = block_nr;
		goto next_page;
set_error_page:
		SetPageError(page);
		zero_user_segment(page, 0, PAGE_CACHE_SIZE);
		unlock_page(page);
		goto next_page;
confused:
		if (bio) {
			submit_bio(READ, bio);
			bio = NULL;
		}
		unlock_page(page);
next_page:
		if (pages)
			page_cache_release(page);
	}
	BUG_ON(pages && !list_empty(pages));
	if (bio)
		submit_bio(READ, bio);
	return 0;
}

static int f2fs_read_data_page(struct file *file, struct page *page)
{
	struct inode *inode = page->mapping->host;
	int ret = -EAGAIN;

	trace_f2fs_readpage(page, DATA);

	/* If the file has inline data, try to read it directly */
	if (f2fs_has_inline_data(inode))
		ret = f2fs_read_inline_data(inode, page);
	if (ret == -EAGAIN)
		ret = f2fs_mpage_readpages(page->mapping, NULL, page, 1);
	return ret;
}

static int f2fs_read_data_pages(struct file *file,
			struct address_space *mapping,
			struct list_head *pages, unsigned nr_pages)
{
	struct inode *inode = file->f_mapping->host;
	struct page *page = list_entry(pages->prev, struct page, lru);

	trace_f2fs_readpages(inode, page, nr_pages);

	/* If the file has inline data, skip readpages */
	if (f2fs_has_inline_data(inode))
		return 0;

	return f2fs_mpage_readpages(mapping, pages, NULL, nr_pages);
}

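/*
 * Write one data page to disk: in-place update (IPU) when SSR allocation
 * prefers rewriting the existing block, otherwise out-of-place (OPU) to
 * a newly allocated block.
 */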
int do_write_data_page(struct f2fs_io_info *fio)
{
	struct page *page = fio->page;
	struct inode *inode = page->mapping->host;
	struct dnode_of_data dn;
	int err = 0;

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, page->index, LOOKUP_NODE);
	if (err)
		return err;

	fio->old_blkaddr = dn.data_blkaddr;

	/* This page is already truncated */
	if (fio->old_blkaddr == NULL_ADDR) {
		ClearPageUptodate(page);
		goto out_writepage;
	}

	if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode)) {

		/* wait for GCed encrypted page writeback */
		f2fs_wait_on_encrypted_page_writeback(F2FS_I_SB(inode),
							fio->old_blkaddr);

		fio->encrypted_page = f2fs_encrypt(inode, fio->page);
		if (IS_ERR(fio->encrypted_page)) {
			err = PTR_ERR(fio->encrypted_page);
			goto out_writepage;
		}
	}

	set_page_writeback(page);

	/*
	 * If the current allocation needs SSR, it is better to do in-place
	 * writes for the updated data.
	 */
	if (unlikely(fio->old_blkaddr != NEW_ADDR &&
			!is_cold_data(page) &&
			!IS_ATOMIC_WRITTEN_PAGE(page) &&
			need_inplace_update(inode))) {
		rewrite_data_page(fio);
		set_inode_flag(F2FS_I(inode), FI_UPDATE_WRITE);
		trace_f2fs_do_write_data_page(page, IPU);
	} else {
		write_data_page(&dn, fio);
		trace_f2fs_do_write_data_page(page, OPU);
		set_inode_flag(F2FS_I(inode), FI_APPEND_WRITE);
		if (page->index == 0)
			set_inode_flag(F2FS_I(inode), FI_FIRST_BLOCK_WRITTEN);
	}
out_writepage:
	f2fs_put_dnode(&dn);
	return err;
}

static int f2fs_write_data_page(struct page *page,
					struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	loff_t i_size = i_size_read(inode);
	const pgoff_t end_index = ((unsigned long long) i_size)
							>> PAGE_CACHE_SHIFT;
	unsigned offset = 0;
	bool need_balance_fs = false;
	int err = 0;
	struct f2fs_io_info fio = {
		.sbi = sbi,
		.type = DATA,
		.rw = (wbc->sync_mode == WB_SYNC_ALL) ? WRITE_SYNC : WRITE,
		.page = page,
		.encrypted_page = NULL,
	};

	trace_f2fs_writepage(page, DATA);

	if (page->index < end_index)
		goto write;

	/*
	 * If the offset is out-of-range of file size,
	 * this page does not have to be written to disk.
	 */
	offset = i_size & (PAGE_CACHE_SIZE - 1);
	if ((page->index >= end_index + 1) || !offset)
		goto out;

	zero_user_segment(page, offset, PAGE_CACHE_SIZE);
write:
	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
		goto redirty_out;
	if (f2fs_is_drop_cache(inode))
		goto out;
	if (f2fs_is_volatile_file(inode) && !wbc->for_reclaim &&
			available_free_memory(sbi, BASE_CHECK))
		goto redirty_out;

	/* Dentry blocks are controlled by checkpoint */
	if (S_ISDIR(inode->i_mode)) {
		if (unlikely(f2fs_cp_error(sbi)))
			goto redirty_out;
		err = do_write_data_page(&fio);
		goto done;
	}

	/* we should bypass data pages to let the kworker jobs proceed */
	if (unlikely(f2fs_cp_error(sbi))) {
		SetPageError(page);
		goto out;
	}

	if (!wbc->for_reclaim)
		need_balance_fs = true;
	else if (has_not_enough_free_secs(sbi, 0))
		goto redirty_out;

	err = -EAGAIN;
	f2fs_lock_op(sbi);
	if (f2fs_has_inline_data(inode))
		err = f2fs_write_inline_data(inode, page);
	if (err == -EAGAIN)
		err = do_write_data_page(&fio);
	f2fs_unlock_op(sbi);
done:
	if (err && err != -ENOENT)
		goto redirty_out;

	clear_cold_data(page);
out:
	inode_dec_dirty_pages(inode);
	if (err)
		ClearPageUptodate(page);

	if (wbc->for_reclaim) {
		f2fs_submit_merged_bio_cond(sbi, NULL, page, 0, DATA, WRITE);
		remove_dirty_inode(inode);
	}

	unlock_page(page);
	f2fs_balance_fs(sbi, need_balance_fs);

	if (unlikely(f2fs_cp_error(sbi)))
		f2fs_submit_merged_bio(sbi, DATA, WRITE);

	return 0;

redirty_out:
	redirty_page_for_writepage(wbc, page);
	return AOP_WRITEPAGE_ACTIVATE;
}

static int __f2fs_writepage(struct page *page, struct writeback_control *wbc,
			void *data)
{
	struct address_space *mapping = data;
	int ret = mapping->a_ops->writepage(page, wbc);
	mapping_set_error(mapping, ret);
	return ret;
}

/*
 * This function was copied from write_cache_pages from mm/page-writeback.c.
 * The major change is that it writes cold data pages in a step separate
 * from warm/hot data pages.
 */
static int f2fs_write_cache_pages(struct address_space *mapping,
			struct writeback_control *wbc, writepage_t writepage,
			void *data)
{
	int ret = 0;
	int done = 0;
	struct pagevec pvec;
	int nr_pages;
	pgoff_t uninitialized_var(writeback_index);
	pgoff_t index;
	pgoff_t end;		/* Inclusive */
	pgoff_t done_index;
	int cycled;
	int range_whole = 0;
	int tag;
	int step = 0;

	pagevec_init(&pvec, 0);
next:
	if (wbc->range_cyclic) {
		writeback_index = mapping->writeback_index; /* prev offset */
		index = writeback_index;
		if (index == 0)
			cycled = 1;
		else
			cycled = 0;
		end = -1;
	} else {
		index = wbc->range_start >> PAGE_CACHE_SHIFT;
		end = wbc->range_end >> PAGE_CACHE_SHIFT;
		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
			range_whole = 1;
		cycled = 1; /* ignore range_cyclic tests */
	}
	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
		tag = PAGECACHE_TAG_TOWRITE;
	else
		tag = PAGECACHE_TAG_DIRTY;
retry:
	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
		tag_pages_for_writeback(mapping, index, end);
	done_index = index;
	while (!done && (index <= end)) {
		int i;

		nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag,
			      min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1);
		if (nr_pages == 0)
			break;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];

			if (page->index > end) {
				done = 1;
				break;
			}

			done_index = page->index;

			lock_page(page);

			if (unlikely(page->mapping != mapping)) {
continue_unlock:
				unlock_page(page);
				continue;
			}

			if (!PageDirty(page)) {
				/* someone wrote it for us */
				goto continue_unlock;
			}

			if (step == is_cold_data(page))
				goto continue_unlock;

			if (PageWriteback(page)) {
				if (wbc->sync_mode != WB_SYNC_NONE)
					f2fs_wait_on_page_writeback(page,
								DATA, true);
				else
					goto continue_unlock;
			}

			BUG_ON(PageWriteback(page));
			if (!clear_page_dirty_for_io(page))
				goto continue_unlock;

			ret = (*writepage)(page, wbc, data);
			if (unlikely(ret)) {
				if (ret == AOP_WRITEPAGE_ACTIVATE) {
					unlock_page(page);
					ret = 0;
				} else {
					done_index = page->index + 1;
					done = 1;
					break;
				}
			}

			if (--wbc->nr_to_write <= 0 &&
			    wbc->sync_mode == WB_SYNC_NONE) {
				done = 1;
				break;
			}
		}
		pagevec_release(&pvec);
		cond_resched();
	}

	if (step < 1) {
		step++;
		goto next;
	}

	if (!cycled && !done) {
		cycled = 1;
		index = 0;
		end = writeback_index - 1;
		goto retry;
	}
	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
		mapping->writeback_index = done_index;

	return ret;
}

static int f2fs_write_data_pages(struct address_space *mapping,
			    struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	bool locked = false;
	int ret;
	long diff;

	/* deal with chardevs and other special files */
	if (!mapping->a_ops->writepage)
		return 0;

	/* skip writing if there is no dirty page in this inode */
	if (!get_dirty_pages(inode) && wbc->sync_mode == WB_SYNC_NONE)
		return 0;

	if (S_ISDIR(inode->i_mode) && wbc->sync_mode == WB_SYNC_NONE &&
			get_dirty_pages(inode) < nr_pages_to_skip(sbi, DATA) &&
			available_free_memory(sbi, DIRTY_DENTS))
		goto skip_write;

	/* skip writing during file defragment */
	if (is_inode_flag_set(F2FS_I(inode), FI_DO_DEFRAG))
		goto skip_write;

	/* during POR, we don't need to trigger writepage at all. */
	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
		goto skip_write;

	trace_f2fs_writepages(mapping->host, wbc, DATA);

	diff = nr_pages_to_write(sbi, DATA, wbc);

	if (!S_ISDIR(inode->i_mode) && wbc->sync_mode == WB_SYNC_ALL) {
		mutex_lock(&sbi->writepages);
		locked = true;
	}
	ret = f2fs_write_cache_pages(mapping, wbc, __f2fs_writepage, mapping);
	f2fs_submit_merged_bio_cond(sbi, inode, NULL, 0, DATA, WRITE);
	if (locked)
		mutex_unlock(&sbi->writepages);

	remove_dirty_inode(inode);

	wbc->nr_to_write = max((long)0, wbc->nr_to_write - diff);
	return ret;

skip_write:
	wbc->pages_skipped += get_dirty_pages(inode);
	trace_f2fs_writepages(mapping->host, wbc, DATA);
	return 0;
}

static void f2fs_write_failed(struct address_space *mapping, loff_t to)
{
	struct inode *inode = mapping->host;
	loff_t i_size = i_size_read(inode);

	if (to > i_size) {
		truncate_pagecache(inode, i_size);
		truncate_blocks(inode, i_size, true);
	}
}

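/*
 * Resolve (or allocate) the block backing the page being written, taking
 * f2fs_lock_op() only when inline conversion or block allocation may be
 * needed.
 */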
static int prepare_write_begin(struct f2fs_sb_info *sbi,
			struct page *page, loff_t pos, unsigned len,
			block_t *blk_addr, bool *node_changed)
{
	struct inode *inode = page->mapping->host;
	pgoff_t index = page->index;
	struct dnode_of_data dn;
	struct page *ipage;
	bool locked = false;
	struct extent_info ei;
	int err = 0;

	/*
	 * we already allocated all the blocks, so we don't need to get
	 * the block addresses when there is no need to fill the page.
	 */
	if (!f2fs_has_inline_data(inode) && !f2fs_encrypted_inode(inode) &&
					len == PAGE_CACHE_SIZE)
		return 0;

	if (f2fs_has_inline_data(inode) ||
			(pos & PAGE_CACHE_MASK) >= i_size_read(inode)) {
		f2fs_lock_op(sbi);
		locked = true;
	}
restart:
	/* check inline_data */
	ipage = get_node_page(sbi, inode->i_ino);
	if (IS_ERR(ipage)) {
		err = PTR_ERR(ipage);
		goto unlock_out;
	}

	set_new_dnode(&dn, inode, ipage, ipage, 0);

	if (f2fs_has_inline_data(inode)) {
		if (pos + len <= MAX_INLINE_DATA) {
			read_inline_data(page, ipage);
			set_inode_flag(F2FS_I(inode), FI_DATA_EXIST);
			set_inline_node(ipage);
		} else {
			err = f2fs_convert_inline_page(&dn, page);
			if (err)
				goto out;
			if (dn.data_blkaddr == NULL_ADDR)
				err = f2fs_get_block(&dn, index);
		}
	} else if (locked) {
		err = f2fs_get_block(&dn, index);
	} else {
		if (f2fs_lookup_extent_cache(inode, index, &ei)) {
			dn.data_blkaddr = ei.blk + index - ei.fofs;
		} else {
			/* hole case */
			err = get_dnode_of_data(&dn, index, LOOKUP_NODE);
			if (err || (!err && dn.data_blkaddr == NULL_ADDR)) {
				f2fs_put_dnode(&dn);
				f2fs_lock_op(sbi);
				locked = true;
				goto restart;
			}
		}
	}

	/* convert_inline_page can make node_changed */
	*blk_addr = dn.data_blkaddr;
	*node_changed = dn.node_changed;
out:
	f2fs_put_dnode(&dn);
unlock_out:
	if (locked)
		f2fs_unlock_op(sbi);
	return err;
}

static int f2fs_write_begin(struct file *file, struct address_space *mapping,
		loff_t pos, unsigned len, unsigned flags,
		struct page **pagep, void **fsdata)
{
	struct inode *inode = mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct page *page = NULL;
	pgoff_t index = ((unsigned long long) pos) >> PAGE_CACHE_SHIFT;
	bool need_balance = false;
	block_t blkaddr = NULL_ADDR;
	int err = 0;

	trace_f2fs_write_begin(inode, pos, len, flags);

	/*
	 * We should check this at this moment to avoid deadlock on inode page
	 * and #0 page. The locking rule for inline_data conversion should be:
	 * lock_page(page #0) -> lock_page(inode_page)
	 */
	if (index != 0) {
		err = f2fs_convert_inline_inode(inode);
		if (err)
			goto fail;
	}
repeat:
	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page) {
		err = -ENOMEM;
		goto fail;
	}

	*pagep = page;

	err = prepare_write_begin(sbi, page, pos, len,
					&blkaddr, &need_balance);
	if (err)
		goto fail;

	if (need_balance && has_not_enough_free_secs(sbi, 0)) {
		unlock_page(page);
		f2fs_balance_fs(sbi, true);
		lock_page(page);
		if (page->mapping != mapping) {
			/* The page got truncated from under us */
			f2fs_put_page(page, 1);
			goto repeat;
		}
	}

	f2fs_wait_on_page_writeback(page, DATA, false);

	/* wait for GCed encrypted page writeback */
	if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode))
		f2fs_wait_on_encrypted_page_writeback(sbi, blkaddr);

	if (len == PAGE_CACHE_SIZE)
		goto out_update;
	if (PageUptodate(page))
		goto out_clear;

	if ((pos & PAGE_CACHE_MASK) >= i_size_read(inode)) {
		unsigned start = pos & (PAGE_CACHE_SIZE - 1);
		unsigned end = start + len;

		/* Reading beyond i_size is simple: memset to zero */
		zero_user_segments(page, 0, start, end, PAGE_CACHE_SIZE);
		goto out_update;
	}

	if (blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_CACHE_SIZE);
	} else {
		struct f2fs_io_info fio = {
			.sbi = sbi,
			.type = DATA,
			.rw = READ_SYNC,
			.old_blkaddr = blkaddr,
			.new_blkaddr = blkaddr,
			.page = page,
			.encrypted_page = NULL,
		};
		err = f2fs_submit_page_bio(&fio);
		if (err)
			goto fail;

		lock_page(page);
		if (unlikely(!PageUptodate(page))) {
			err = -EIO;
			goto fail;
		}
		if (unlikely(page->mapping != mapping)) {
			f2fs_put_page(page, 1);
			goto repeat;
		}

		/* avoid symlink page */
		if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode)) {
			err = f2fs_decrypt(page);
			if (err)
				goto fail;
		}
	}
out_update:
	SetPageUptodate(page);
out_clear:
	clear_cold_data(page);
	return 0;

fail:
	f2fs_put_page(page, 1);
	f2fs_write_failed(mapping, pos + len);
	return err;
}

static int f2fs_write_end(struct file *file,
			struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	struct inode *inode = page->mapping->host;

	trace_f2fs_write_end(inode, pos, len, copied);

	set_page_dirty(page);

	if (pos + copied > i_size_read(inode)) {
		i_size_write(inode, pos + copied);
		mark_inode_dirty(inode);
	}

	f2fs_put_page(page, 1);
	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
	return copied;
}

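/* Direct IO requires the file offset and the iovec to be block aligned. */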
static int check_direct_IO(struct inode *inode, struct iov_iter *iter,
			   loff_t offset)
{
	unsigned blocksize_mask = inode->i_sb->s_blocksize - 1;

	if (offset & blocksize_mask)
		return -EINVAL;

	if (iov_iter_alignment(iter) & blocksize_mask)
		return -EINVAL;

	return 0;
}

static ssize_t f2fs_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
			      loff_t offset)
{
	struct address_space *mapping = iocb->ki_filp->f_mapping;
	struct inode *inode = mapping->host;
	size_t count = iov_iter_count(iter);
	int err;

	err = check_direct_IO(inode, iter, offset);
	if (err)
		return err;

	if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode))
		return 0;

	trace_f2fs_direct_IO_enter(inode, offset, count, iov_iter_rw(iter));

	err = blockdev_direct_IO(iocb, inode, iter, offset, get_data_block_dio);
	if (err < 0 && iov_iter_rw(iter) == WRITE)
		f2fs_write_failed(mapping, offset + count);

	trace_f2fs_direct_IO_exit(inode, offset, count, iov_iter_rw(iter), err);

	return err;
}

void f2fs_invalidate_page(struct page *page, unsigned int offset,
							unsigned int length)
{
	struct inode *inode = page->mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

	if (inode->i_ino >= F2FS_ROOT_INO(sbi) &&
		(offset % PAGE_CACHE_SIZE || length != PAGE_CACHE_SIZE))
		return;

	if (PageDirty(page)) {
		if (inode->i_ino == F2FS_META_INO(sbi))
			dec_page_count(sbi, F2FS_DIRTY_META);
		else if (inode->i_ino == F2FS_NODE_INO(sbi))
			dec_page_count(sbi, F2FS_DIRTY_NODES);
		else
			inode_dec_dirty_pages(inode);
	}

	/* This is an atomic written page, keep Private */
	if (IS_ATOMIC_WRITTEN_PAGE(page))
		return;

	ClearPagePrivate(page);
}

int f2fs_release_page(struct page *page, gfp_t wait)
{
	/* If this is a dirty page, keep PagePrivate */
	if (PageDirty(page))
		return 0;

	/* This is an atomic written page, keep Private */
	if (IS_ATOMIC_WRITTEN_PAGE(page))
		return 0;

	ClearPagePrivate(page);
	return 1;
}

static int f2fs_set_data_page_dirty(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct inode *inode = mapping->host;

	trace_f2fs_set_page_dirty(page, DATA);

	SetPageUptodate(page);

	if (f2fs_is_atomic_file(inode)) {
		if (!IS_ATOMIC_WRITTEN_PAGE(page)) {
			register_inmem_page(inode, page);
			return 1;
		}
		/*
		 * Previously, this page has been registered, so we just
		 * return here.
		 */
		return 0;
	}

	if (!PageDirty(page)) {
		__set_page_dirty_nobuffers(page);
		update_dirty_page(inode, page);
		return 1;
	}
	return 0;
}
static sector_t f2fs_bmap(struct address_space *mapping, sector_t block)
{
	struct inode *inode = mapping->host;

	if (f2fs_has_inline_data(inode))
		return 0;

	/* make sure allocating whole blocks */
	if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
		filemap_write_and_wait(mapping);

	return generic_block_bmap(mapping, block, get_data_block_bmap);
}

const struct address_space_operations f2fs_dblock_aops = {
	.readpage	= f2fs_read_data_page,
	.readpages	= f2fs_read_data_pages,
	.writepage	= f2fs_write_data_page,
	.writepages	= f2fs_write_data_pages,
	.write_begin	= f2fs_write_begin,
	.write_end	= f2fs_write_end,
	.set_page_dirty	= f2fs_set_data_page_dirty,
	.invalidatepage	= f2fs_invalidate_page,
	.releasepage	= f2fs_release_page,
	.direct_IO	= f2fs_direct_IO,
	.bmap		= f2fs_bmap,
};