/*
 * fs/f2fs/data.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/buffer_head.h>
#include <linux/mpage.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/pagevec.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/prefetch.h>
#include <linux/uio.h>
#include <linux/cleancache.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "trace.h"
#include <trace/events/f2fs.h>

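/*
 * Read completion handler: decrypt the bio's pages if it was encrypted,
 * then mark each page uptodate (or as an error) and unlock it.
 */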
static void f2fs_read_end_io(struct bio *bio)
{
	struct bio_vec *bvec;
	int i;

	if (f2fs_bio_encrypted(bio)) {
		if (bio->bi_error) {
			fscrypt_release_ctx(bio->bi_private);
		} else {
			fscrypt_decrypt_bio_pages(bio->bi_private, bio);
			return;
		}
	}

	bio_for_each_segment_all(bvec, bio, i) {
		struct page *page = bvec->bv_page;

		if (!bio->bi_error) {
			SetPageUptodate(page);
		} else {
			ClearPageUptodate(page);
			SetPageError(page);
		}
		unlock_page(page);
	}
	bio_put(bio);
}

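/*
 * Write completion handler: report I/O errors to the mapping and stop
 * checkpointing on failure, end page writeback, and wake up a waiting
 * checkpoint once the last in-flight write bio completes.
 */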
static void f2fs_write_end_io(struct bio *bio)
{
	struct f2fs_sb_info *sbi = bio->bi_private;
	struct bio_vec *bvec;
	int i;

	bio_for_each_segment_all(bvec, bio, i) {
		struct page *page = bvec->bv_page;

		fscrypt_pullback_bio_page(&page, true);

		if (unlikely(bio->bi_error)) {
			set_bit(AS_EIO, &page->mapping->flags);
			f2fs_stop_checkpoint(sbi, true);
		}
		end_page_writeback(page);
	}
	if (atomic_dec_and_test(&sbi->nr_wb_bios) &&
				wq_has_sleeper(&sbi->cp_wait))
		wake_up(&sbi->cp_wait);

	bio_put(bio);
}

/*
 * Low-level block read/write IO operations.
 */
static struct bio *__bio_alloc(struct f2fs_sb_info *sbi, block_t blk_addr,
				int npages, bool is_read)
{
	struct bio *bio;

	bio = f2fs_bio_alloc(npages);

	bio->bi_bdev = sbi->sb->s_bdev;
	bio->bi_iter.bi_sector = SECTOR_FROM_BLOCK(blk_addr);
	bio->bi_end_io = is_read ? f2fs_read_end_io : f2fs_write_end_io;
	bio->bi_private = is_read ? NULL : sbi;

	return bio;
}

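/* Account in-flight write bios before handing the bio to the block layer. */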
static inline void __submit_bio(struct f2fs_sb_info *sbi, int rw,
						struct bio *bio)
{
	if (!is_read_io(rw))
		atomic_inc(&sbi->nr_wb_bios);
	submit_bio(rw, bio);
}

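/* Submit the bio currently cached in @io, tracing it as a read or a write. */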
static void __submit_merged_bio(struct f2fs_bio_info *io)
{
	struct f2fs_io_info *fio = &io->fio;

	if (!io->bio)
		return;

	if (is_read_io(fio->rw))
		trace_f2fs_submit_read_bio(io->sbi->sb, fio, io->bio);
	else
		trace_f2fs_submit_write_bio(io->sbi->sb, fio, io->bio);

	__submit_bio(io->sbi, fio->rw, io->bio);
	io->bio = NULL;
}

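/*
 * Check whether the cached bio already contains a page belonging to the
 * given inode, the given page, or the given node ino.
 */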
static bool __has_merged_page(struct f2fs_bio_info *io, struct inode *inode,
						struct page *page, nid_t ino)
{
	struct bio_vec *bvec;
	struct page *target;
	int i;

	if (!io->bio)
		return false;

	if (!inode && !page && !ino)
		return true;

	bio_for_each_segment_all(bvec, io->bio, i) {

		if (bvec->bv_page->mapping)
			target = bvec->bv_page;
		else
			target = fscrypt_control_page(bvec->bv_page);

		if (inode && inode == target->mapping->host)
			return true;
		if (page && page == target)
			return true;
		if (ino && ino == ino_of_node(target))
			return true;
	}

	return false;
}

static bool has_merged_page(struct f2fs_sb_info *sbi, struct inode *inode,
						struct page *page, nid_t ino,
						enum page_type type)
{
	enum page_type btype = PAGE_TYPE_OF_BIO(type);
	struct f2fs_bio_info *io = &sbi->write_io[btype];
	bool ret;

	down_read(&io->io_rwsem);
	ret = __has_merged_page(io, inode, page, ino);
	up_read(&io->io_rwsem);
	return ret;
}

static void __f2fs_submit_merged_bio(struct f2fs_sb_info *sbi,
				struct inode *inode, struct page *page,
				nid_t ino, enum page_type type, int rw)
{
	enum page_type btype = PAGE_TYPE_OF_BIO(type);
	struct f2fs_bio_info *io;

	io = is_read_io(rw) ? &sbi->read_io : &sbi->write_io[btype];

	down_write(&io->io_rwsem);

	if (!__has_merged_page(io, inode, page, ino))
		goto out;

	/* change META to META_FLUSH in the checkpoint procedure */
	if (type >= META_FLUSH) {
		io->fio.type = META_FLUSH;
		if (test_opt(sbi, NOBARRIER))
			io->fio.rw = WRITE_FLUSH | REQ_META | REQ_PRIO;
		else
			io->fio.rw = WRITE_FLUSH_FUA | REQ_META | REQ_PRIO;
	}
	__submit_merged_bio(io);
out:
	up_write(&io->io_rwsem);
}

void f2fs_submit_merged_bio(struct f2fs_sb_info *sbi, enum page_type type,
									int rw)
{
	__f2fs_submit_merged_bio(sbi, NULL, NULL, 0, type, rw);
}

void f2fs_submit_merged_bio_cond(struct f2fs_sb_info *sbi,
				struct inode *inode, struct page *page,
				nid_t ino, enum page_type type, int rw)
{
	if (has_merged_page(sbi, inode, page, ino, type))
		__f2fs_submit_merged_bio(sbi, inode, page, ino, type, rw);
}

void f2fs_flush_merged_bios(struct f2fs_sb_info *sbi)
{
	f2fs_submit_merged_bio(sbi, DATA, WRITE);
	f2fs_submit_merged_bio(sbi, NODE, WRITE);
	f2fs_submit_merged_bio(sbi, META, WRITE);
}

/*
 * Fill the locked page with data located in the block address.
 * Return unlocked page.
 */
int f2fs_submit_page_bio(struct f2fs_io_info *fio)
{
	struct bio *bio;
	struct page *page = fio->encrypted_page ?
			fio->encrypted_page : fio->page;

	trace_f2fs_submit_page_bio(page, fio);
	f2fs_trace_ios(fio, 0);

	/* Allocate a new bio */
	bio = __bio_alloc(fio->sbi, fio->new_blkaddr, 1, is_read_io(fio->rw));

	if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
		bio_put(bio);
		return -EFAULT;
	}

	__submit_bio(fio->sbi, fio->rw, bio);
	return 0;
}

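/*
 * Merge the page into the per-type cached bio, submitting the cached bio
 * first whenever the new block is not contiguous or the I/O attributes
 * differ from the pending one.
 */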
void f2fs_submit_page_mbio(struct f2fs_io_info *fio)
{
	struct f2fs_sb_info *sbi = fio->sbi;
	enum page_type btype = PAGE_TYPE_OF_BIO(fio->type);
	struct f2fs_bio_info *io;
	bool is_read = is_read_io(fio->rw);
	struct page *bio_page;

	io = is_read ? &sbi->read_io : &sbi->write_io[btype];

	if (fio->old_blkaddr != NEW_ADDR)
		verify_block_addr(sbi, fio->old_blkaddr);
	verify_block_addr(sbi, fio->new_blkaddr);

	down_write(&io->io_rwsem);

	if (io->bio && (io->last_block_in_bio != fio->new_blkaddr - 1 ||
						io->fio.rw != fio->rw))
		__submit_merged_bio(io);
alloc_new:
	if (io->bio == NULL) {
		int bio_blocks = MAX_BIO_BLOCKS(sbi);

		io->bio = __bio_alloc(sbi, fio->new_blkaddr,
						bio_blocks, is_read);
		io->fio = *fio;
	}

	bio_page = fio->encrypted_page ? fio->encrypted_page : fio->page;

	if (bio_add_page(io->bio, bio_page, PAGE_SIZE, 0) <
							PAGE_SIZE) {
		__submit_merged_bio(io);
		goto alloc_new;
	}

	io->last_block_in_bio = fio->new_blkaddr;
	f2fs_trace_ios(fio, 0);

	up_write(&io->io_rwsem);
	trace_f2fs_submit_page_mbio(fio->page, fio);
}

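/* Write dn->data_blkaddr into the address slot of the dnode's node page. */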
static void __set_data_blkaddr(struct dnode_of_data *dn)
{
	struct f2fs_node *rn = F2FS_NODE(dn->node_page);
	__le32 *addr_array;

	/* Get physical address of data block */
	addr_array = blkaddr_in_node(rn);
	addr_array[dn->ofs_in_node] = cpu_to_le32(dn->data_blkaddr);
}

/*
 * Lock ordering for the change of data block address:
 * ->data_page
 *  ->node_page
 *    update block addresses in the node page
 */
void set_data_blkaddr(struct dnode_of_data *dn)
{
	f2fs_wait_on_page_writeback(dn->node_page, NODE, true);
	__set_data_blkaddr(dn);
	if (set_page_dirty(dn->node_page))
		dn->node_changed = true;
}

void f2fs_update_data_blkaddr(struct dnode_of_data *dn, block_t blkaddr)
{
	dn->data_blkaddr = blkaddr;
	set_data_blkaddr(dn);
	f2fs_update_extent_cache(dn);
}

/* dn->ofs_in_node will be returned with up-to-date last block pointer */
int reserve_new_blocks(struct dnode_of_data *dn, blkcnt_t count)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);

	if (!count)
		return 0;

	if (unlikely(is_inode_flag_set(dn->inode, FI_NO_ALLOC)))
		return -EPERM;
	if (unlikely(!inc_valid_block_count(sbi, dn->inode, &count)))
		return -ENOSPC;

	trace_f2fs_reserve_new_blocks(dn->inode, dn->nid,
						dn->ofs_in_node, count);

	f2fs_wait_on_page_writeback(dn->node_page, NODE, true);

	for (; count > 0; dn->ofs_in_node++) {
		block_t blkaddr =
			datablock_addr(dn->node_page, dn->ofs_in_node);
		if (blkaddr == NULL_ADDR) {
			dn->data_blkaddr = NEW_ADDR;
			__set_data_blkaddr(dn);
			count--;
		}
	}

	if (set_page_dirty(dn->node_page))
		dn->node_changed = true;
	return 0;
}

/* Should keep dn->ofs_in_node unchanged */
int reserve_new_block(struct dnode_of_data *dn)
{
	unsigned int ofs_in_node = dn->ofs_in_node;
	int ret;

	ret = reserve_new_blocks(dn, 1);
	dn->ofs_in_node = ofs_in_node;
	return ret;
}

int f2fs_reserve_block(struct dnode_of_data *dn, pgoff_t index)
{
	bool need_put = dn->inode_page ? false : true;
	int err;

	err = get_dnode_of_data(dn, index, ALLOC_NODE);
	if (err)
		return err;

	if (dn->data_blkaddr == NULL_ADDR)
		err = reserve_new_block(dn);
	if (err || need_put)
		f2fs_put_dnode(dn);
	return err;
}

int f2fs_get_block(struct dnode_of_data *dn, pgoff_t index)
{
	struct extent_info ei;
	struct inode *inode = dn->inode;

	if (f2fs_lookup_extent_cache(inode, index, &ei)) {
		dn->data_blkaddr = ei.blk + index - ei.fofs;
		return 0;
	}

	return f2fs_reserve_block(dn, index);
}

struct page *get_read_data_page(struct inode *inode, pgoff_t index,
						int rw, bool for_write)
{
	struct address_space *mapping = inode->i_mapping;
	struct dnode_of_data dn;
	struct page *page;
	struct extent_info ei;
	int err;
	struct f2fs_io_info fio = {
		.sbi = F2FS_I_SB(inode),
		.type = DATA,
		.rw = rw,
		.encrypted_page = NULL,
	};

	if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode))
		return read_mapping_page(mapping, index, NULL);

	page = f2fs_grab_cache_page(mapping, index, for_write);
	if (!page)
		return ERR_PTR(-ENOMEM);

	if (f2fs_lookup_extent_cache(inode, index, &ei)) {
		dn.data_blkaddr = ei.blk + index - ei.fofs;
		goto got_it;
	}

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, index, LOOKUP_NODE);
	if (err)
		goto put_err;
	f2fs_put_dnode(&dn);

	if (unlikely(dn.data_blkaddr == NULL_ADDR)) {
		err = -ENOENT;
		goto put_err;
	}
got_it:
	if (PageUptodate(page)) {
		unlock_page(page);
		return page;
	}

	/*
	 * A new dentry page is allocated but not able to be written, since its
	 * new inode page couldn't be allocated due to -ENOSPC.
	 * In such a case, its blkaddr can remain as NEW_ADDR.
	 * see, f2fs_add_link -> get_new_data_page -> init_inode_metadata.
	 */
	if (dn.data_blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_SIZE);
		SetPageUptodate(page);
		unlock_page(page);
		return page;
	}

	fio.new_blkaddr = fio.old_blkaddr = dn.data_blkaddr;
	fio.page = page;
	err = f2fs_submit_page_bio(&fio);
	if (err)
		goto put_err;
	return page;

put_err:
	f2fs_put_page(page, 1);
	return ERR_PTR(err);
}

struct page *find_data_page(struct inode *inode, pgoff_t index)
{
	struct address_space *mapping = inode->i_mapping;
	struct page *page;

	page = find_get_page(mapping, index);
	if (page && PageUptodate(page))
		return page;
	f2fs_put_page(page, 0);

	page = get_read_data_page(inode, index, READ_SYNC, false);
	if (IS_ERR(page))
		return page;

	if (PageUptodate(page))
		return page;

	wait_on_page_locked(page);
	if (unlikely(!PageUptodate(page))) {
		f2fs_put_page(page, 0);
		return ERR_PTR(-EIO);
	}
	return page;
}

/*
 * If it tries to access a hole, return an error.
 * Because the callers, functions in dir.c and GC, should be able to know
 * whether this page exists or not.
 */
struct page *get_lock_data_page(struct inode *inode, pgoff_t index,
							bool for_write)
{
	struct address_space *mapping = inode->i_mapping;
	struct page *page;
repeat:
	page = get_read_data_page(inode, index, READ_SYNC, for_write);
	if (IS_ERR(page))
		return page;

	/* wait for read completion */
	lock_page(page);
	if (unlikely(!PageUptodate(page))) {
		f2fs_put_page(page, 1);
		return ERR_PTR(-EIO);
	}
	if (unlikely(page->mapping != mapping)) {
		f2fs_put_page(page, 1);
		goto repeat;
	}
	return page;
}

/*
 * Caller ensures that this data page is never allocated.
 * A new zero-filled data page is allocated in the page cache.
 *
 * Also, caller should grab and release a rwsem by calling f2fs_lock_op() and
 * f2fs_unlock_op().
 * Note that, ipage is set only by make_empty_dir, and if any error occurs,
 * ipage should be released by this function.
 */
struct page *get_new_data_page(struct inode *inode,
		struct page *ipage, pgoff_t index, bool new_i_size)
{
	struct address_space *mapping = inode->i_mapping;
	struct page *page;
	struct dnode_of_data dn;
	int err;

	page = f2fs_grab_cache_page(mapping, index, true);
	if (!page) {
		/*
		 * before exiting, we should make sure ipage will be released
		 * if any error occurs.
		 */
		f2fs_put_page(ipage, 1);
		return ERR_PTR(-ENOMEM);
	}

	set_new_dnode(&dn, inode, ipage, NULL, 0);
	err = f2fs_reserve_block(&dn, index);
	if (err) {
		f2fs_put_page(page, 1);
		return ERR_PTR(err);
	}
	if (!ipage)
		f2fs_put_dnode(&dn);

	if (PageUptodate(page))
		goto got_it;

	if (dn.data_blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_SIZE);
		SetPageUptodate(page);
	} else {
		f2fs_put_page(page, 1);

		/* if ipage exists, blkaddr should be NEW_ADDR */
		f2fs_bug_on(F2FS_I_SB(inode), ipage);
		page = get_lock_data_page(inode, index, true);
		if (IS_ERR(page))
			return page;
	}
got_it:
	if (new_i_size && i_size_read(inode) <
				((loff_t)(index + 1) << PAGE_SHIFT))
		f2fs_i_size_write(inode, ((loff_t)(index + 1) << PAGE_SHIFT));
	return page;
}

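/*
 * Allocate one new data block for the dnode entry: charge the block count,
 * pick a segment, assign the new address, and push i_size forward if the
 * block extends the file.
 */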
static int __allocate_data_block(struct dnode_of_data *dn)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	struct f2fs_summary sum;
	struct node_info ni;
	int seg = CURSEG_WARM_DATA;
	pgoff_t fofs;
	blkcnt_t count = 1;

	if (unlikely(is_inode_flag_set(dn->inode, FI_NO_ALLOC)))
		return -EPERM;

	dn->data_blkaddr = datablock_addr(dn->node_page, dn->ofs_in_node);
	if (dn->data_blkaddr == NEW_ADDR)
		goto alloc;

	if (unlikely(!inc_valid_block_count(sbi, dn->inode, &count)))
		return -ENOSPC;

alloc:
	get_node_info(sbi, dn->nid, &ni);
	set_summary(&sum, dn->nid, dn->ofs_in_node, ni.version);

	if (dn->ofs_in_node == 0 && dn->inode_page == dn->node_page)
		seg = CURSEG_DIRECT_IO;

	allocate_data_block(sbi, NULL, dn->data_blkaddr, &dn->data_blkaddr,
								&sum, seg);
	set_data_blkaddr(dn);

	/* update i_size */
	fofs = start_bidx_of_node(ofs_of_node(dn->node_page), dn->inode) +
							dn->ofs_in_node;
	if (i_size_read(dn->inode) < ((loff_t)(fofs + 1) << PAGE_SHIFT))
		f2fs_i_size_write(dn->inode,
				((loff_t)(fofs + 1) << PAGE_SHIFT));
	return 0;
}

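/*
 * Preallocate blocks for a direct or buffered write: convert inline data
 * when needed and map the write range up front via f2fs_map_blocks().
 */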
ssize_t f2fs_preallocate_blocks(struct kiocb *iocb, struct iov_iter *from)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	struct f2fs_map_blocks map;
	ssize_t ret = 0;

	map.m_lblk = F2FS_BLK_ALIGN(iocb->ki_pos);
	map.m_len = F2FS_BYTES_TO_BLK(iov_iter_count(from));
	map.m_next_pgofs = NULL;

	if (f2fs_encrypted_inode(inode))
		return 0;

	if (iocb->ki_flags & IOCB_DIRECT) {
		ret = f2fs_convert_inline_inode(inode);
		if (ret)
			return ret;
		return f2fs_map_blocks(inode, &map, 1, F2FS_GET_BLOCK_PRE_DIO);
	}
	if (iocb->ki_pos + iov_iter_count(from) > MAX_INLINE_DATA) {
		ret = f2fs_convert_inline_inode(inode);
		if (ret)
			return ret;
	}
	if (!f2fs_has_inline_data(inode))
		return f2fs_map_blocks(inode, &map, 1, F2FS_GET_BLOCK_PRE_AIO);
	return ret;
}

/*
 * f2fs_map_blocks() now supports readahead/bmap/rw direct_IO with
 * f2fs_map_blocks structure.
 * If original data blocks are allocated, then give them to blockdev.
 * Otherwise,
 *     a. preallocate requested block addresses
 *     b. do not use extent cache for better performance
 *     c. give the block addresses to blockdev
 */
int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map,
						int create, int flag)
{
	unsigned int maxblocks = map->m_len;
	struct dnode_of_data dn;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	int mode = create ? ALLOC_NODE : LOOKUP_NODE_RA;
	pgoff_t pgofs, end_offset, end;
	int err = 0, ofs = 1;
	unsigned int ofs_in_node, last_ofs_in_node;
	blkcnt_t prealloc;
	struct extent_info ei;
	bool allocated = false;
	block_t blkaddr;

	map->m_len = 0;
	map->m_flags = 0;

	/* it only supports block size == page size */
	pgofs =	(pgoff_t)map->m_lblk;
	end = pgofs + maxblocks;

	if (!create && f2fs_lookup_extent_cache(inode, pgofs, &ei)) {
		map->m_pblk = ei.blk + pgofs - ei.fofs;
		map->m_len = min((pgoff_t)maxblocks, ei.fofs + ei.len - pgofs);
		map->m_flags = F2FS_MAP_MAPPED;
		goto out;
	}

next_dnode:
	if (create)
		f2fs_lock_op(sbi);

	/* When reading holes, we need its node page */
	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, pgofs, mode);
	if (err) {
		if (flag == F2FS_GET_BLOCK_BMAP)
			map->m_pblk = 0;
		if (err == -ENOENT) {
			err = 0;
			if (map->m_next_pgofs)
				*map->m_next_pgofs =
					get_next_page_offset(&dn, pgofs);
		}
		goto unlock_out;
	}

	prealloc = 0;
	ofs_in_node = dn.ofs_in_node;
	end_offset = ADDRS_PER_PAGE(dn.node_page, inode);

next_block:
	blkaddr = datablock_addr(dn.node_page, dn.ofs_in_node);

	if (blkaddr == NEW_ADDR || blkaddr == NULL_ADDR) {
		if (create) {
			if (unlikely(f2fs_cp_error(sbi))) {
				err = -EIO;
				goto sync_out;
			}
			if (flag == F2FS_GET_BLOCK_PRE_AIO) {
				if (blkaddr == NULL_ADDR) {
					prealloc++;
					last_ofs_in_node = dn.ofs_in_node;
				}
			} else {
				err = __allocate_data_block(&dn);
				if (!err) {
					set_inode_flag(inode, FI_APPEND_WRITE);
					allocated = true;
				}
			}
			if (err)
				goto sync_out;
			map->m_flags = F2FS_MAP_NEW;
			blkaddr = dn.data_blkaddr;
		} else {
			if (flag == F2FS_GET_BLOCK_BMAP) {
				map->m_pblk = 0;
				goto sync_out;
			}
			if (flag == F2FS_GET_BLOCK_FIEMAP &&
						blkaddr == NULL_ADDR) {
				if (map->m_next_pgofs)
					*map->m_next_pgofs = pgofs + 1;
			}
			if (flag != F2FS_GET_BLOCK_FIEMAP ||
						blkaddr != NEW_ADDR)
				goto sync_out;
		}
	}

	if (flag == F2FS_GET_BLOCK_PRE_AIO)
		goto skip;

	if (map->m_len == 0) {
		/* preallocated unwritten block should be mapped for fiemap. */
		if (blkaddr == NEW_ADDR)
			map->m_flags |= F2FS_MAP_UNWRITTEN;
		map->m_flags |= F2FS_MAP_MAPPED;

		map->m_pblk = blkaddr;
		map->m_len = 1;
	} else if ((map->m_pblk != NEW_ADDR &&
			blkaddr == (map->m_pblk + ofs)) ||
			(map->m_pblk == NEW_ADDR && blkaddr == NEW_ADDR) ||
			flag == F2FS_GET_BLOCK_PRE_DIO) {
		ofs++;
		map->m_len++;
	} else {
		goto sync_out;
	}

skip:
	dn.ofs_in_node++;
	pgofs++;

	/* preallocate blocks in batch for one dnode page */
	if (flag == F2FS_GET_BLOCK_PRE_AIO &&
			(pgofs == end || dn.ofs_in_node == end_offset)) {

		dn.ofs_in_node = ofs_in_node;
		err = reserve_new_blocks(&dn, prealloc);
		if (err)
			goto sync_out;

		map->m_len += dn.ofs_in_node - ofs_in_node;
		if (prealloc && dn.ofs_in_node != last_ofs_in_node + 1) {
			err = -ENOSPC;
			goto sync_out;
		}
		dn.ofs_in_node = end_offset;
	}

	if (pgofs >= end)
		goto sync_out;
	else if (dn.ofs_in_node < end_offset)
		goto next_block;

	f2fs_put_dnode(&dn);

	if (create) {
		f2fs_unlock_op(sbi);
		f2fs_balance_fs(sbi, allocated);
	}
	allocated = false;
	goto next_dnode;

sync_out:
	f2fs_put_dnode(&dn);
unlock_out:
	if (create) {
		f2fs_unlock_op(sbi);
		f2fs_balance_fs(sbi, allocated);
	}
out:
	trace_f2fs_map_blocks(inode, map, err);
	return err;
}

static int __get_data_block(struct inode *inode, sector_t iblock,
			struct buffer_head *bh, int create, int flag,
			pgoff_t *next_pgofs)
{
	struct f2fs_map_blocks map;
	int ret;

	map.m_lblk = iblock;
	map.m_len = bh->b_size >> inode->i_blkbits;
	map.m_next_pgofs = next_pgofs;

	ret = f2fs_map_blocks(inode, &map, create, flag);
	if (!ret) {
		map_bh(bh, inode->i_sb, map.m_pblk);
		bh->b_state = (bh->b_state & ~F2FS_MAP_FLAGS) | map.m_flags;
		bh->b_size = map.m_len << inode->i_blkbits;
	}
	return ret;
}

static int get_data_block(struct inode *inode, sector_t iblock,
			struct buffer_head *bh_result, int create, int flag,
			pgoff_t *next_pgofs)
{
	return __get_data_block(inode, iblock, bh_result, create,
							flag, next_pgofs);
}

static int get_data_block_dio(struct inode *inode, sector_t iblock,
			struct buffer_head *bh_result, int create)
{
	return __get_data_block(inode, iblock, bh_result, create,
						F2FS_GET_BLOCK_DIO, NULL);
}

static int get_data_block_bmap(struct inode *inode, sector_t iblock,
			struct buffer_head *bh_result, int create)
{
	/* Block number less than F2FS MAX BLOCKS */
	if (unlikely(iblock >= F2FS_I_SB(inode)->max_file_blocks))
		return -EFBIG;

	return __get_data_block(inode, iblock, bh_result, create,
						F2FS_GET_BLOCK_BMAP, NULL);
}

static inline sector_t logical_to_blk(struct inode *inode, loff_t offset)
{
	return (offset >> inode->i_blkbits);
}

static inline loff_t blk_to_logical(struct inode *inode, sector_t blk)
{
	return (blk << inode->i_blkbits);
}

int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
		u64 start, u64 len)
{
	struct buffer_head map_bh;
	sector_t start_blk, last_blk;
	pgoff_t next_pgofs;
	loff_t isize;
	u64 logical = 0, phys = 0, size = 0;
	u32 flags = 0;
	int ret = 0;

	ret = fiemap_check_flags(fieinfo, FIEMAP_FLAG_SYNC);
	if (ret)
		return ret;

	if (f2fs_has_inline_data(inode)) {
		ret = f2fs_inline_data_fiemap(inode, fieinfo, start, len);
		if (ret != -EAGAIN)
			return ret;
	}

	inode_lock(inode);

	isize = i_size_read(inode);
	if (start >= isize)
		goto out;

	if (start + len > isize)
		len = isize - start;

	if (logical_to_blk(inode, len) == 0)
		len = blk_to_logical(inode, 1);

	start_blk = logical_to_blk(inode, start);
	last_blk = logical_to_blk(inode, start + len - 1);

next:
	memset(&map_bh, 0, sizeof(struct buffer_head));
	map_bh.b_size = len;

	ret = get_data_block(inode, start_blk, &map_bh, 0,
					F2FS_GET_BLOCK_FIEMAP, &next_pgofs);
	if (ret)
		goto out;

	/* HOLE */
	if (!buffer_mapped(&map_bh)) {
		start_blk = next_pgofs;
		/* Go through holes until passing the EOF */
		if (blk_to_logical(inode, start_blk) < isize)
			goto prep_next;
		/* A hole beyond isize means there are no more extents.
		 * Note that the premise is that filesystems don't
		 * punch holes beyond isize and keep size unchanged.
		 */
		flags |= FIEMAP_EXTENT_LAST;
	}

	if (size) {
		if (f2fs_encrypted_inode(inode))
			flags |= FIEMAP_EXTENT_DATA_ENCRYPTED;

		ret = fiemap_fill_next_extent(fieinfo, logical,
				phys, size, flags);
	}

	if (start_blk > last_blk || ret)
		goto out;

	logical = blk_to_logical(inode, start_blk);
	phys = blk_to_logical(inode, map_bh.b_blocknr);
	size = map_bh.b_size;
	flags = 0;
	if (buffer_unwritten(&map_bh))
		flags = FIEMAP_EXTENT_UNWRITTEN;

	start_blk += logical_to_blk(inode, size);

prep_next:
	cond_resched();
	if (fatal_signal_pending(current))
		ret = -EINTR;
	else
		goto next;
out:
	if (ret == 1)
		ret = 0;

	inode_unlock(inode);
	return ret;
}

/*
 * This function was originally taken from fs/mpage.c, and customized for f2fs.
 * Major change was from block_size == page_size in f2fs by default.
 */
static int f2fs_mpage_readpages(struct address_space *mapping,
			struct list_head *pages, struct page *page,
			unsigned nr_pages)
{
	struct bio *bio = NULL;
	unsigned page_idx;
	sector_t last_block_in_bio = 0;
	struct inode *inode = mapping->host;
	const unsigned blkbits = inode->i_blkbits;
	const unsigned blocksize = 1 << blkbits;
	sector_t block_in_file;
	sector_t last_block;
	sector_t last_block_in_file;
	sector_t block_nr;
	struct block_device *bdev = inode->i_sb->s_bdev;
	struct f2fs_map_blocks map;

	map.m_pblk = 0;
	map.m_lblk = 0;
	map.m_len = 0;
	map.m_flags = 0;
	map.m_next_pgofs = NULL;

	for (page_idx = 0; nr_pages; page_idx++, nr_pages--) {

		prefetchw(&page->flags);
		if (pages) {
			page = list_entry(pages->prev, struct page, lru);
			list_del(&page->lru);
			if (add_to_page_cache_lru(page, mapping,
						  page->index, GFP_KERNEL))
				goto next_page;
		}

		block_in_file = (sector_t)page->index;
		last_block = block_in_file + nr_pages;
		last_block_in_file = (i_size_read(inode) + blocksize - 1) >>
								blkbits;
		if (last_block > last_block_in_file)
			last_block = last_block_in_file;

		/*
		 * Map blocks using the previous result first.
		 */
		if ((map.m_flags & F2FS_MAP_MAPPED) &&
				block_in_file > map.m_lblk &&
				block_in_file < (map.m_lblk + map.m_len))
			goto got_it;

		/*
		 * Then do more f2fs_map_blocks() calls until we are
		 * done with this page.
		 */
		map.m_flags = 0;

		if (block_in_file < last_block) {
			map.m_lblk = block_in_file;
			map.m_len = last_block - block_in_file;

			if (f2fs_map_blocks(inode, &map, 0,
						F2FS_GET_BLOCK_READ))
				goto set_error_page;
		}
got_it:
		if ((map.m_flags & F2FS_MAP_MAPPED)) {
			block_nr = map.m_pblk + block_in_file - map.m_lblk;
			SetPageMappedToDisk(page);

			if (!PageUptodate(page) && !cleancache_get_page(page)) {
				SetPageUptodate(page);
				goto confused;
			}
		} else {
			zero_user_segment(page, 0, PAGE_SIZE);
			SetPageUptodate(page);
			unlock_page(page);
			goto next_page;
		}

		/*
		 * This page will go to BIO.  Do we need to send this
		 * BIO off first?
		 */
		if (bio && (last_block_in_bio != block_nr - 1)) {
submit_and_realloc:
			__submit_bio(F2FS_I_SB(inode), READ, bio);
			bio = NULL;
		}
		if (bio == NULL) {
			struct fscrypt_ctx *ctx = NULL;

			if (f2fs_encrypted_inode(inode) &&
					S_ISREG(inode->i_mode)) {

				ctx = fscrypt_get_ctx(inode, GFP_NOFS);
				if (IS_ERR(ctx))
					goto set_error_page;

				/* wait the page to be moved by cleaning */
				f2fs_wait_on_encrypted_page_writeback(
						F2FS_I_SB(inode), block_nr);
			}

			bio = bio_alloc(GFP_KERNEL,
				min_t(int, nr_pages, BIO_MAX_PAGES));
			if (!bio) {
				if (ctx)
					fscrypt_release_ctx(ctx);
				goto set_error_page;
			}
			bio->bi_bdev = bdev;
			bio->bi_iter.bi_sector = SECTOR_FROM_BLOCK(block_nr);
			bio->bi_end_io = f2fs_read_end_io;
			bio->bi_private = ctx;
		}

		if (bio_add_page(bio, page, blocksize, 0) < blocksize)
			goto submit_and_realloc;

		last_block_in_bio = block_nr;
		goto next_page;
set_error_page:
		SetPageError(page);
		zero_user_segment(page, 0, PAGE_SIZE);
		unlock_page(page);
		goto next_page;
confused:
		if (bio) {
			__submit_bio(F2FS_I_SB(inode), READ, bio);
			bio = NULL;
		}
		unlock_page(page);
next_page:
		if (pages)
			put_page(page);
	}
	BUG_ON(pages && !list_empty(pages));
	if (bio)
		__submit_bio(F2FS_I_SB(inode), READ, bio);
	return 0;
}

static int f2fs_read_data_page(struct file *file, struct page *page)
{
	struct inode *inode = page->mapping->host;
	int ret = -EAGAIN;

	trace_f2fs_readpage(page, DATA);

	/* If the file has inline data, try to read it directly */
	if (f2fs_has_inline_data(inode))
		ret = f2fs_read_inline_data(inode, page);
	if (ret == -EAGAIN)
		ret = f2fs_mpage_readpages(page->mapping, NULL, page, 1);
	return ret;
}

static int f2fs_read_data_pages(struct file *file,
			struct address_space *mapping,
			struct list_head *pages, unsigned nr_pages)
{
	struct inode *inode = file->f_mapping->host;
	struct page *page = list_entry(pages->prev, struct page, lru);

	trace_f2fs_readpages(inode, page, nr_pages);

	/* If the file has inline data, skip readpages */
	if (f2fs_has_inline_data(inode))
		return 0;

	return f2fs_mpage_readpages(mapping, pages, NULL, nr_pages);
}

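/*
 * Write one data page: look up its block address, encrypt the page if the
 * inode is encrypted, then either rewrite it in place (IPU) or allocate a
 * new block (OPU).
 */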
int do_write_data_page(struct f2fs_io_info *fio)
{
	struct page *page = fio->page;
	struct inode *inode = page->mapping->host;
	struct dnode_of_data dn;
	int err = 0;

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, page->index, LOOKUP_NODE);
	if (err)
		return err;

	fio->old_blkaddr = dn.data_blkaddr;

	/* This page is already truncated */
	if (fio->old_blkaddr == NULL_ADDR) {
		ClearPageUptodate(page);
		goto out_writepage;
	}

	if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode)) {
		gfp_t gfp_flags = GFP_NOFS;

		/* wait for GCed encrypted page writeback */
		f2fs_wait_on_encrypted_page_writeback(F2FS_I_SB(inode),
							fio->old_blkaddr);
retry_encrypt:
		fio->encrypted_page = fscrypt_encrypt_page(inode, fio->page,
								gfp_flags);
		if (IS_ERR(fio->encrypted_page)) {
			err = PTR_ERR(fio->encrypted_page);
			if (err == -ENOMEM) {
				/* flush pending ios and wait for a while */
				f2fs_flush_merged_bios(F2FS_I_SB(inode));
				congestion_wait(BLK_RW_ASYNC, HZ/50);
				gfp_flags |= __GFP_NOFAIL;
				err = 0;
				goto retry_encrypt;
			}
			goto out_writepage;
		}
	}

	set_page_writeback(page);

	/*
	 * If current allocation needs SSR,
	 * it is better to do in-place writes for updated data.
	 */
	if (unlikely(fio->old_blkaddr != NEW_ADDR &&
			!is_cold_data(page) &&
			!IS_ATOMIC_WRITTEN_PAGE(page) &&
			need_inplace_update(inode))) {
		rewrite_data_page(fio);
		set_inode_flag(inode, FI_UPDATE_WRITE);
		trace_f2fs_do_write_data_page(page, IPU);
	} else {
		write_data_page(&dn, fio);
		trace_f2fs_do_write_data_page(page, OPU);
		set_inode_flag(inode, FI_APPEND_WRITE);
		if (page->index == 0)
			set_inode_flag(inode, FI_FIRST_BLOCK_WRITTEN);
	}
out_writepage:
	f2fs_put_dnode(&dn);
	return err;
}

static int f2fs_write_data_page(struct page *page,
					struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	loff_t i_size = i_size_read(inode);
	const pgoff_t end_index = ((unsigned long long) i_size)
							>> PAGE_SHIFT;
	loff_t psize = (page->index + 1) << PAGE_SHIFT;
	unsigned offset = 0;
	bool need_balance_fs = false;
	int err = 0;
	struct f2fs_io_info fio = {
		.sbi = sbi,
		.type = DATA,
		.rw = (wbc->sync_mode == WB_SYNC_ALL) ? WRITE_SYNC : WRITE,
		.page = page,
		.encrypted_page = NULL,
	};

	trace_f2fs_writepage(page, DATA);

	if (page->index < end_index)
		goto write;

	/*
	 * If the offset is out-of-range of file size,
	 * this page does not have to be written to disk.
	 */
	offset = i_size & (PAGE_SIZE - 1);
	if ((page->index >= end_index + 1) || !offset)
		goto out;

	zero_user_segment(page, offset, PAGE_SIZE);
write:
	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
		goto redirty_out;
	if (f2fs_is_drop_cache(inode))
		goto out;
	/* we should not write 0'th page having journal header */
	if (f2fs_is_volatile_file(inode) && (!page->index ||
			(!wbc->for_reclaim &&
			available_free_memory(sbi, BASE_CHECK))))
		goto redirty_out;

	/* Dentry blocks are controlled by checkpoint */
	if (S_ISDIR(inode->i_mode)) {
		if (unlikely(f2fs_cp_error(sbi)))
			goto redirty_out;
		err = do_write_data_page(&fio);
		goto done;
	}

	/* we should bypass data pages to proceed with the kworker jobs */
	if (unlikely(f2fs_cp_error(sbi))) {
		SetPageError(page);
		goto out;
	}

	if (!wbc->for_reclaim)
		need_balance_fs = true;
	else if (has_not_enough_free_secs(sbi, 0))
		goto redirty_out;

	err = -EAGAIN;
	f2fs_lock_op(sbi);
	if (f2fs_has_inline_data(inode))
		err = f2fs_write_inline_data(inode, page);
	if (err == -EAGAIN)
		err = do_write_data_page(&fio);
	if (F2FS_I(inode)->last_disk_size < psize)
		F2FS_I(inode)->last_disk_size = psize;
	f2fs_unlock_op(sbi);
done:
	if (err && err != -ENOENT)
		goto redirty_out;

	clear_cold_data(page);
out:
	inode_dec_dirty_pages(inode);
	if (err)
		ClearPageUptodate(page);

	if (wbc->for_reclaim) {
		f2fs_submit_merged_bio_cond(sbi, NULL, page, 0, DATA, WRITE);
		remove_dirty_inode(inode);
	}

	unlock_page(page);
	f2fs_balance_fs(sbi, need_balance_fs);

	if (unlikely(f2fs_cp_error(sbi)))
		f2fs_submit_merged_bio(sbi, DATA, WRITE);

	return 0;

redirty_out:
	redirty_page_for_writepage(wbc, page);
	return AOP_WRITEPAGE_ACTIVATE;
}

static int __f2fs_writepage(struct page *page, struct writeback_control *wbc,
			void *data)
{
	struct address_space *mapping = data;
	int ret = mapping->a_ops->writepage(page, wbc);
	mapping_set_error(mapping, ret);
	return ret;
}

/*
 * This function was copied from write_cache_pages in mm/page-writeback.c.
 * The major change is writing cold data pages in a step separate from
 * warm/hot data pages.
 */
static int f2fs_write_cache_pages(struct address_space *mapping,
			struct writeback_control *wbc, writepage_t writepage,
			void *data)
{
	int ret = 0;
	int done = 0;
	struct pagevec pvec;
	int nr_pages;
	pgoff_t uninitialized_var(writeback_index);
	pgoff_t index;
	pgoff_t end;		/* Inclusive */
	pgoff_t done_index;
	int cycled;
	int range_whole = 0;
	int tag;
	int step = 0;

	pagevec_init(&pvec, 0);
next:
	if (wbc->range_cyclic) {
		writeback_index = mapping->writeback_index; /* prev offset */
		index = writeback_index;
		if (index == 0)
			cycled = 1;
		else
			cycled = 0;
		end = -1;
	} else {
		index = wbc->range_start >> PAGE_SHIFT;
		end = wbc->range_end >> PAGE_SHIFT;
		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
			range_whole = 1;
		cycled = 1; /* ignore range_cyclic tests */
	}
	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
		tag = PAGECACHE_TAG_TOWRITE;
	else
		tag = PAGECACHE_TAG_DIRTY;
retry:
	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
		tag_pages_for_writeback(mapping, index, end);
	done_index = index;
	while (!done && (index <= end)) {
		int i;

		nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag,
			      min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1);
		if (nr_pages == 0)
			break;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];

			if (page->index > end) {
				done = 1;
				break;
			}

			done_index = page->index;

			lock_page(page);

			if (unlikely(page->mapping != mapping)) {
continue_unlock:
				unlock_page(page);
				continue;
			}

			if (!PageDirty(page)) {
				/* someone wrote it for us */
				goto continue_unlock;
			}

			if (step == is_cold_data(page))
				goto continue_unlock;

			if (PageWriteback(page)) {
				if (wbc->sync_mode != WB_SYNC_NONE)
					f2fs_wait_on_page_writeback(page,
								DATA, true);
				else
					goto continue_unlock;
			}

			BUG_ON(PageWriteback(page));
			if (!clear_page_dirty_for_io(page))
				goto continue_unlock;

			ret = (*writepage)(page, wbc, data);
			if (unlikely(ret)) {
				if (ret == AOP_WRITEPAGE_ACTIVATE) {
					unlock_page(page);
					ret = 0;
				} else {
					done_index = page->index + 1;
					done = 1;
					break;
				}
			}

			if (--wbc->nr_to_write <= 0 &&
			    wbc->sync_mode == WB_SYNC_NONE) {
				done = 1;
				break;
			}
		}
		pagevec_release(&pvec);
		cond_resched();
	}

	if (step < 1) {
		step++;
		goto next;
	}

	if (!cycled && !done) {
		cycled = 1;
		index = 0;
		end = writeback_index - 1;
		goto retry;
	}
	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
		mapping->writeback_index = done_index;

	return ret;
}

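/*
 * writepages() entry: skip writeback when it is not needed (no dirty pages,
 * a directory with few dirty pages, defragmentation or recovery in
 * progress), otherwise push the dirty pages through f2fs_write_cache_pages()
 * and flush the merged write bios.
 */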
static int f2fs_write_data_pages(struct address_space *mapping,
			    struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	int ret;
	long diff;

	/* deal with chardevs and other special files */
	if (!mapping->a_ops->writepage)
		return 0;

	/* skip writing if there is no dirty page in this inode */
	if (!get_dirty_pages(inode) && wbc->sync_mode == WB_SYNC_NONE)
		return 0;

	if (S_ISDIR(inode->i_mode) && wbc->sync_mode == WB_SYNC_NONE &&
			get_dirty_pages(inode) < nr_pages_to_skip(sbi, DATA) &&
			available_free_memory(sbi, DIRTY_DENTS))
		goto skip_write;

	/* skip writing during file defragment */
	if (is_inode_flag_set(inode, FI_DO_DEFRAG))
		goto skip_write;

	/* during POR, we don't need to trigger writepage at all. */
	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
		goto skip_write;

	trace_f2fs_writepages(mapping->host, wbc, DATA);

	diff = nr_pages_to_write(sbi, DATA, wbc);

	ret = f2fs_write_cache_pages(mapping, wbc, __f2fs_writepage, mapping);
	f2fs_submit_merged_bio_cond(sbi, inode, NULL, 0, DATA, WRITE);

	remove_dirty_inode(inode);

	wbc->nr_to_write = max((long)0, wbc->nr_to_write - diff);
	return ret;

skip_write:
	wbc->pages_skipped += get_dirty_pages(inode);
	trace_f2fs_writepages(mapping->host, wbc, DATA);
	return 0;
}

static void f2fs_write_failed(struct address_space *mapping, loff_t to)
{
	struct inode *inode = mapping->host;
	loff_t i_size = i_size_read(inode);

	if (to > i_size) {
		truncate_pagecache(inode, i_size);
		truncate_blocks(inode, i_size, true);
	}
}

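/*
 * Figure out the block address backing the page being written: handle
 * inline data conversion and hole allocation, taking f2fs_lock_op() only
 * when a new block may have to be reserved.
 */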
static int prepare_write_begin(struct f2fs_sb_info *sbi,
			struct page *page, loff_t pos, unsigned len,
			block_t *blk_addr, bool *node_changed)
{
	struct inode *inode = page->mapping->host;
	pgoff_t index = page->index;
	struct dnode_of_data dn;
	struct page *ipage;
	bool locked = false;
	struct extent_info ei;
	int err = 0;

	/*
	 * we already allocated all the blocks, so we don't need to get
	 * the block addresses when there is no need to fill the page.
	 */
	if (!f2fs_has_inline_data(inode) && !f2fs_encrypted_inode(inode) &&
					len == PAGE_SIZE)
		return 0;

	if (f2fs_has_inline_data(inode) ||
			(pos & PAGE_MASK) >= i_size_read(inode)) {
		f2fs_lock_op(sbi);
		locked = true;
	}
restart:
	/* check inline_data */
	ipage = get_node_page(sbi, inode->i_ino);
	if (IS_ERR(ipage)) {
		err = PTR_ERR(ipage);
		goto unlock_out;
	}

	set_new_dnode(&dn, inode, ipage, ipage, 0);

	if (f2fs_has_inline_data(inode)) {
		if (pos + len <= MAX_INLINE_DATA) {
			read_inline_data(page, ipage);
			set_inode_flag(inode, FI_DATA_EXIST);
			if (inode->i_nlink)
				set_inline_node(ipage);
		} else {
			err = f2fs_convert_inline_page(&dn, page);
			if (err)
				goto out;
			if (dn.data_blkaddr == NULL_ADDR)
				err = f2fs_get_block(&dn, index);
		}
	} else if (locked) {
		err = f2fs_get_block(&dn, index);
	} else {
		if (f2fs_lookup_extent_cache(inode, index, &ei)) {
			dn.data_blkaddr = ei.blk + index - ei.fofs;
		} else {
			/* hole case */
			err = get_dnode_of_data(&dn, index, LOOKUP_NODE);
			if (err || dn.data_blkaddr == NULL_ADDR) {
				f2fs_put_dnode(&dn);
				f2fs_lock_op(sbi);
				locked = true;
				goto restart;
			}
		}
	}

	/* convert_inline_page can make node_changed */
	*blk_addr = dn.data_blkaddr;
	*node_changed = dn.node_changed;
out:
	f2fs_put_dnode(&dn);
unlock_out:
	if (locked)
		f2fs_unlock_op(sbi);
	return err;
}

static int f2fs_write_begin(struct file *file, struct address_space *mapping,
		loff_t pos, unsigned len, unsigned flags,
		struct page **pagep, void **fsdata)
{
	struct inode *inode = mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct page *page = NULL;
	pgoff_t index = ((unsigned long long) pos) >> PAGE_SHIFT;
	bool need_balance = false;
	block_t blkaddr = NULL_ADDR;
	int err = 0;

	trace_f2fs_write_begin(inode, pos, len, flags);

	/*
	 * We should check this at this moment to avoid deadlock on inode page
	 * and #0 page. The locking rule for inline_data conversion should be:
	 * lock_page(page #0) -> lock_page(inode_page)
	 */
	if (index != 0) {
		err = f2fs_convert_inline_inode(inode);
		if (err)
			goto fail;
	}
repeat:
	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page) {
		err = -ENOMEM;
		goto fail;
	}

	*pagep = page;

	err = prepare_write_begin(sbi, page, pos, len,
					&blkaddr, &need_balance);
	if (err)
		goto fail;

	if (need_balance && has_not_enough_free_secs(sbi, 0)) {
		unlock_page(page);
		f2fs_balance_fs(sbi, true);
		lock_page(page);
		if (page->mapping != mapping) {
			/* The page got truncated from under us */
			f2fs_put_page(page, 1);
			goto repeat;
		}
	}

	f2fs_wait_on_page_writeback(page, DATA, false);

	/* wait for GCed encrypted page writeback */
	if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode))
		f2fs_wait_on_encrypted_page_writeback(sbi, blkaddr);

	if (len == PAGE_SIZE)
		goto out_update;
	if (PageUptodate(page))
		goto out_clear;

	if ((pos & PAGE_MASK) >= i_size_read(inode)) {
		unsigned start = pos & (PAGE_SIZE - 1);
		unsigned end = start + len;

		/* Reading beyond i_size is simple: memset to zero */
		zero_user_segments(page, 0, start, end, PAGE_SIZE);
		goto out_update;
	}

	if (blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_SIZE);
	} else {
		struct f2fs_io_info fio = {
			.sbi = sbi,
			.type = DATA,
			.rw = READ_SYNC,
			.old_blkaddr = blkaddr,
			.new_blkaddr = blkaddr,
			.page = page,
			.encrypted_page = NULL,
		};
		err = f2fs_submit_page_bio(&fio);
		if (err)
			goto fail;

		lock_page(page);
		if (unlikely(!PageUptodate(page))) {
			err = -EIO;
			goto fail;
		}
		if (unlikely(page->mapping != mapping)) {
			f2fs_put_page(page, 1);
			goto repeat;
		}

		/* avoid symlink page */
		if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode)) {
			err = fscrypt_decrypt_page(page);
			if (err)
				goto fail;
		}
	}
out_update:
	SetPageUptodate(page);
out_clear:
	clear_cold_data(page);
	return 0;

fail:
	f2fs_put_page(page, 1);
	f2fs_write_failed(mapping, pos + len);
	return err;
}

static int f2fs_write_end(struct file *file,
			struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	struct inode *inode = page->mapping->host;

	trace_f2fs_write_end(inode, pos, len, copied);

	set_page_dirty(page);

	if (pos + copied > i_size_read(inode))
		f2fs_i_size_write(inode, pos + copied);

	f2fs_put_page(page, 1);
	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
	return copied;
}

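/* Direct I/O requires both the file offset and the iov to be block aligned. */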
static int check_direct_IO(struct inode *inode, struct iov_iter *iter,
			   loff_t offset)
{
	unsigned blocksize_mask = inode->i_sb->s_blocksize - 1;

	if (offset & blocksize_mask)
		return -EINVAL;

	if (iov_iter_alignment(iter) & blocksize_mask)
		return -EINVAL;

	return 0;
}

static ssize_t f2fs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
{
	struct address_space *mapping = iocb->ki_filp->f_mapping;
	struct inode *inode = mapping->host;
	size_t count = iov_iter_count(iter);
	loff_t offset = iocb->ki_pos;
	int err;

	err = check_direct_IO(inode, iter, offset);
	if (err)
		return err;

	if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode))
		return 0;

	trace_f2fs_direct_IO_enter(inode, offset, count, iov_iter_rw(iter));

	err = blockdev_direct_IO(iocb, inode, iter, get_data_block_dio);
	if (iov_iter_rw(iter) == WRITE) {
		if (err > 0)
			set_inode_flag(inode, FI_UPDATE_WRITE);
		else if (err < 0)
			f2fs_write_failed(mapping, offset + count);
	}

	trace_f2fs_direct_IO_exit(inode, offset, count, iov_iter_rw(iter), err);

	return err;
}

void f2fs_invalidate_page(struct page *page, unsigned int offset,
							unsigned int length)
{
	struct inode *inode = page->mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

	if (inode->i_ino >= F2FS_ROOT_INO(sbi) &&
		(offset % PAGE_SIZE || length != PAGE_SIZE))
		return;

	if (PageDirty(page)) {
		if (inode->i_ino == F2FS_META_INO(sbi))
			dec_page_count(sbi, F2FS_DIRTY_META);
		else if (inode->i_ino == F2FS_NODE_INO(sbi))
			dec_page_count(sbi, F2FS_DIRTY_NODES);
		else
			inode_dec_dirty_pages(inode);
	}

	/* This is atomic written page, keep Private */
	if (IS_ATOMIC_WRITTEN_PAGE(page))
		return;

	set_page_private(page, 0);
	ClearPagePrivate(page);
}

int f2fs_release_page(struct page *page, gfp_t wait)
{
	/* If this is dirty page, keep PagePrivate */
	if (PageDirty(page))
		return 0;

	/* This is atomic written page, keep Private */
	if (IS_ATOMIC_WRITTEN_PAGE(page))
		return 0;

	set_page_private(page, 0);
	ClearPagePrivate(page);
	return 1;
}

static int f2fs_set_data_page_dirty(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct inode *inode = mapping->host;

	trace_f2fs_set_page_dirty(page, DATA);

	SetPageUptodate(page);

	if (f2fs_is_atomic_file(inode)) {
		if (!IS_ATOMIC_WRITTEN_PAGE(page)) {
			register_inmem_page(inode, page);
			return 1;
		}
		/*
		 * This page has been registered previously, so we just
		 * return here.
		 */
		return 0;
	}

	if (!PageDirty(page)) {
		__set_page_dirty_nobuffers(page);
		update_dirty_page(inode, page);
		return 1;
	}
	return 0;
}

static sector_t f2fs_bmap(struct address_space *mapping, sector_t block)
{
	struct inode *inode = mapping->host;

	if (f2fs_has_inline_data(inode))
		return 0;

	/* make sure allocating whole blocks */
	if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
		filemap_write_and_wait(mapping);

	return generic_block_bmap(mapping, block, get_data_block_bmap);
}

const struct address_space_operations f2fs_dblock_aops = {
	.readpage	= f2fs_read_data_page,
	.readpages	= f2fs_read_data_pages,
	.writepage	= f2fs_write_data_page,
	.writepages	= f2fs_write_data_pages,
	.write_begin	= f2fs_write_begin,
	.write_end	= f2fs_write_end,
	.set_page_dirty	= f2fs_set_data_page_dirty,
	.invalidatepage	= f2fs_invalidate_page,
	.releasepage	= f2fs_release_page,
	.direct_IO	= f2fs_direct_IO,
	.bmap		= f2fs_bmap,
};