/*
 * fs/f2fs/data.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/buffer_head.h>
#include <linux/mpage.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/pagevec.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/prefetch.h>
#include <linux/uio.h>
#include <linux/cleancache.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "trace.h"
#include <trace/events/f2fs.h>

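/*
 * Completion callbacks for the bios built in this file.  As a sketch of
 * the read path (matching the fscrypt usage below): an encrypted bio
 * carries its fscrypt_ctx in bio->bi_private, so completion either
 * releases the context on error or hands the pages to the decryption
 * work, which unlocks them later; plain bios mark each page uptodate
 * and unlock it here.
 */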
static void f2fs_read_end_io(struct bio *bio)
{
	struct bio_vec *bvec;
	int i;

	if (f2fs_bio_encrypted(bio)) {
		if (bio->bi_error) {
			fscrypt_release_ctx(bio->bi_private);
		} else {
			fscrypt_decrypt_bio_pages(bio->bi_private, bio);
			return;
		}
	}

	bio_for_each_segment_all(bvec, bio, i) {
		struct page *page = bvec->bv_page;

		if (!bio->bi_error) {
			SetPageUptodate(page);
		} else {
			ClearPageUptodate(page);
			SetPageError(page);
		}
		unlock_page(page);
	}
	bio_put(bio);
}

static void f2fs_write_end_io(struct bio *bio)
{
	struct f2fs_sb_info *sbi = bio->bi_private;
	struct bio_vec *bvec;
	int i;

	bio_for_each_segment_all(bvec, bio, i) {
		struct page *page = bvec->bv_page;

		fscrypt_pullback_bio_page(&page, true);

		if (unlikely(bio->bi_error)) {
			set_bit(AS_EIO, &page->mapping->flags);
			f2fs_stop_checkpoint(sbi);
		}
		end_page_writeback(page);
		dec_page_count(sbi, F2FS_WRITEBACK);
	}

	if (!get_pages(sbi, F2FS_WRITEBACK) && wq_has_sleeper(&sbi->cp_wait))
		wake_up(&sbi->cp_wait);

	bio_put(bio);
}

/*
 * Low-level block read/write IO operations.
 */
static struct bio *__bio_alloc(struct f2fs_sb_info *sbi, block_t blk_addr,
				int npages, bool is_read)
{
	struct bio *bio;

	bio = f2fs_bio_alloc(npages);

	bio->bi_bdev = sbi->sb->s_bdev;
	bio->bi_iter.bi_sector = SECTOR_FROM_BLOCK(blk_addr);
	bio->bi_end_io = is_read ? f2fs_read_end_io : f2fs_write_end_io;
	bio->bi_private = is_read ? NULL : sbi;

	return bio;
}

static void __submit_merged_bio(struct f2fs_bio_info *io)
{
	struct f2fs_io_info *fio = &io->fio;

	if (!io->bio)
		return;

	if (is_read_io(fio->rw))
		trace_f2fs_submit_read_bio(io->sbi->sb, fio, io->bio);
	else
		trace_f2fs_submit_write_bio(io->sbi->sb, fio, io->bio);

	submit_bio(fio->rw, io->bio);
	io->bio = NULL;
}

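/*
 * __has_merged_page() reports whether the pending merged bio already
 * holds a page of interest; a NULL inode/page/ino matches any page,
 * which is how unconditional flushes are expressed.
 */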
static bool __has_merged_page(struct f2fs_bio_info *io, struct inode *inode,
						struct page *page, nid_t ino)
{
	struct bio_vec *bvec;
	struct page *target;
	int i;

	if (!io->bio)
		return false;

	if (!inode && !page && !ino)
		return true;

	bio_for_each_segment_all(bvec, io->bio, i) {

		if (bvec->bv_page->mapping)
			target = bvec->bv_page;
		else
			target = fscrypt_control_page(bvec->bv_page);

		if (inode && inode == target->mapping->host)
			return true;
		if (page && page == target)
			return true;
		if (ino && ino == ino_of_node(target))
			return true;
	}

	return false;
}

static bool has_merged_page(struct f2fs_sb_info *sbi, struct inode *inode,
						struct page *page, nid_t ino,
						enum page_type type)
{
	enum page_type btype = PAGE_TYPE_OF_BIO(type);
	struct f2fs_bio_info *io = &sbi->write_io[btype];
	bool ret;

	down_read(&io->io_rwsem);
	ret = __has_merged_page(io, inode, page, ino);
	up_read(&io->io_rwsem);
	return ret;
}

static void __f2fs_submit_merged_bio(struct f2fs_sb_info *sbi,
				struct inode *inode, struct page *page,
				nid_t ino, enum page_type type, int rw)
{
	enum page_type btype = PAGE_TYPE_OF_BIO(type);
	struct f2fs_bio_info *io;

	io = is_read_io(rw) ? &sbi->read_io : &sbi->write_io[btype];

	down_write(&io->io_rwsem);

	if (!__has_merged_page(io, inode, page, ino))
		goto out;

	/* change META to META_FLUSH in the checkpoint procedure */
	if (type >= META_FLUSH) {
		io->fio.type = META_FLUSH;
		if (test_opt(sbi, NOBARRIER))
			io->fio.rw = WRITE_FLUSH | REQ_META | REQ_PRIO;
		else
			io->fio.rw = WRITE_FLUSH_FUA | REQ_META | REQ_PRIO;
	}
	__submit_merged_bio(io);
out:
	up_write(&io->io_rwsem);
}

void f2fs_submit_merged_bio(struct f2fs_sb_info *sbi, enum page_type type,
									int rw)
{
	__f2fs_submit_merged_bio(sbi, NULL, NULL, 0, type, rw);
}

void f2fs_submit_merged_bio_cond(struct f2fs_sb_info *sbi,
				struct inode *inode, struct page *page,
				nid_t ino, enum page_type type, int rw)
{
	if (has_merged_page(sbi, inode, page, ino, type))
		__f2fs_submit_merged_bio(sbi, inode, page, ino, type, rw);
}

void f2fs_flush_merged_bios(struct f2fs_sb_info *sbi)
{
	f2fs_submit_merged_bio(sbi, DATA, WRITE);
	f2fs_submit_merged_bio(sbi, NODE, WRITE);
	f2fs_submit_merged_bio(sbi, META, WRITE);
}

/*
 * Fill the locked page with data located in the block address.
 * Return the unlocked page.
 */
int f2fs_submit_page_bio(struct f2fs_io_info *fio)
{
	struct bio *bio;
	struct page *page = fio->encrypted_page ?
			fio->encrypted_page : fio->page;

	trace_f2fs_submit_page_bio(page, fio);
	f2fs_trace_ios(fio, 0);

	/* Allocate a new bio */
	bio = __bio_alloc(fio->sbi, fio->new_blkaddr, 1, is_read_io(fio->rw));

	if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
		bio_put(bio);
		return -EFAULT;
	}

	submit_bio(fio->rw, bio);
	return 0;
}

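/*
 * A minimal usage sketch for f2fs_submit_page_bio() above (mirroring
 * get_read_data_page() later in this file):
 *
 *	struct f2fs_io_info fio = {
 *		.sbi = F2FS_I_SB(inode), .type = DATA,
 *		.rw = READ_SYNC, .page = page, .encrypted_page = NULL,
 *	};
 *	fio.new_blkaddr = fio.old_blkaddr = dn.data_blkaddr;
 *	err = f2fs_submit_page_bio(&fio);
 *
 * f2fs_submit_page_mbio() below instead queues the page into a per-type
 * merged bio and only submits when merging is impossible.
 */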
void f2fs_submit_page_mbio(struct f2fs_io_info *fio)
{
	struct f2fs_sb_info *sbi = fio->sbi;
	enum page_type btype = PAGE_TYPE_OF_BIO(fio->type);
	struct f2fs_bio_info *io;
	bool is_read = is_read_io(fio->rw);
	struct page *bio_page;

	io = is_read ? &sbi->read_io : &sbi->write_io[btype];

	if (fio->old_blkaddr != NEW_ADDR)
		verify_block_addr(sbi, fio->old_blkaddr);
	verify_block_addr(sbi, fio->new_blkaddr);

	down_write(&io->io_rwsem);

	if (!is_read)
		inc_page_count(sbi, F2FS_WRITEBACK);

	if (io->bio && (io->last_block_in_bio != fio->new_blkaddr - 1 ||
						io->fio.rw != fio->rw))
		__submit_merged_bio(io);
alloc_new:
	if (io->bio == NULL) {
		int bio_blocks = MAX_BIO_BLOCKS(sbi);

		io->bio = __bio_alloc(sbi, fio->new_blkaddr,
						bio_blocks, is_read);
		io->fio = *fio;
	}

	bio_page = fio->encrypted_page ? fio->encrypted_page : fio->page;

	if (bio_add_page(io->bio, bio_page, PAGE_SIZE, 0) <
							PAGE_SIZE) {
		__submit_merged_bio(io);
		goto alloc_new;
	}

	io->last_block_in_bio = fio->new_blkaddr;
	f2fs_trace_ios(fio, 0);

	up_write(&io->io_rwsem);
	trace_f2fs_submit_page_mbio(fio->page, fio);
}

static void __set_data_blkaddr(struct dnode_of_data *dn)
{
	struct f2fs_node *rn = F2FS_NODE(dn->node_page);
	__le32 *addr_array;

	/* Get physical address of data block */
	addr_array = blkaddr_in_node(rn);
	addr_array[dn->ofs_in_node] = cpu_to_le32(dn->data_blkaddr);
}

/*
 * Lock ordering for the change of data block address:
 * ->data_page
 *  ->node_page
 *    update block addresses in the node page
 */
void set_data_blkaddr(struct dnode_of_data *dn)
{
	f2fs_wait_on_page_writeback(dn->node_page, NODE, true);
	__set_data_blkaddr(dn);
	if (set_page_dirty(dn->node_page))
		dn->node_changed = true;
}

void f2fs_update_data_blkaddr(struct dnode_of_data *dn, block_t blkaddr)
{
	dn->data_blkaddr = blkaddr;
	set_data_blkaddr(dn);
	f2fs_update_extent_cache(dn);
}

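/*
 * Block reservation: a reserved-but-unwritten block is recorded as
 * NEW_ADDR in the dnode; a real block address is assigned later, at
 * allocation time (see __allocate_data_block() below).
 */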
/* dn->ofs_in_node will be returned with up-to-date last block pointer */
int reserve_new_blocks(struct dnode_of_data *dn, blkcnt_t count)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);

	if (!count)
		return 0;

	if (unlikely(is_inode_flag_set(F2FS_I(dn->inode), FI_NO_ALLOC)))
		return -EPERM;
	if (unlikely(!inc_valid_block_count(sbi, dn->inode, &count)))
		return -ENOSPC;

	trace_f2fs_reserve_new_blocks(dn->inode, dn->nid,
						dn->ofs_in_node, count);

	f2fs_wait_on_page_writeback(dn->node_page, NODE, true);

	for (; count > 0; dn->ofs_in_node++) {
		block_t blkaddr =
			datablock_addr(dn->node_page, dn->ofs_in_node);
		if (blkaddr == NULL_ADDR) {
			dn->data_blkaddr = NEW_ADDR;
			__set_data_blkaddr(dn);
			count--;
		}
	}

	if (set_page_dirty(dn->node_page))
		dn->node_changed = true;

	mark_inode_dirty(dn->inode);
	sync_inode_page(dn);
	return 0;
}

/* Should keep dn->ofs_in_node unchanged */
int reserve_new_block(struct dnode_of_data *dn)
{
	unsigned int ofs_in_node = dn->ofs_in_node;
	int ret;

	ret = reserve_new_blocks(dn, 1);
	dn->ofs_in_node = ofs_in_node;
	return ret;
}

int f2fs_reserve_block(struct dnode_of_data *dn, pgoff_t index)
{
	bool need_put = dn->inode_page ? false : true;
	int err;

	err = get_dnode_of_data(dn, index, ALLOC_NODE);
	if (err)
		return err;

	if (dn->data_blkaddr == NULL_ADDR)
		err = reserve_new_block(dn);
	if (err || need_put)
		f2fs_put_dnode(dn);
	return err;
}

int f2fs_get_block(struct dnode_of_data *dn, pgoff_t index)
{
	struct extent_info ei;
	struct inode *inode = dn->inode;

	if (f2fs_lookup_extent_cache(inode, index, &ei)) {
		dn->data_blkaddr = ei.blk + index - ei.fofs;
		return 0;
	}

	return f2fs_reserve_block(dn, index);
}

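/*
 * Read one data page, consulting the extent cache before walking the
 * dnode.  Encrypted regular files go through read_mapping_page() so
 * that decryption happens in the ->readpage path.
 */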
struct page *get_read_data_page(struct inode *inode, pgoff_t index,
						int rw, bool for_write)
{
	struct address_space *mapping = inode->i_mapping;
	struct dnode_of_data dn;
	struct page *page;
	struct extent_info ei;
	int err;
	struct f2fs_io_info fio = {
		.sbi = F2FS_I_SB(inode),
		.type = DATA,
		.rw = rw,
		.encrypted_page = NULL,
	};

	if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode))
		return read_mapping_page(mapping, index, NULL);

	page = f2fs_grab_cache_page(mapping, index, for_write);
	if (!page)
		return ERR_PTR(-ENOMEM);

	if (f2fs_lookup_extent_cache(inode, index, &ei)) {
		dn.data_blkaddr = ei.blk + index - ei.fofs;
		goto got_it;
	}

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, index, LOOKUP_NODE);
	if (err)
		goto put_err;
	f2fs_put_dnode(&dn);

	if (unlikely(dn.data_blkaddr == NULL_ADDR)) {
		err = -ENOENT;
		goto put_err;
	}
got_it:
	if (PageUptodate(page)) {
		unlock_page(page);
		return page;
	}

	/*
	 * A new dentry page is allocated but not able to be written, since its
	 * new inode page couldn't be allocated due to -ENOSPC.
	 * In such a case, its blkaddr remains NEW_ADDR.
	 * see, f2fs_add_link -> get_new_data_page -> init_inode_metadata.
	 */
	if (dn.data_blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_SIZE);
		SetPageUptodate(page);
		unlock_page(page);
		return page;
	}

	fio.new_blkaddr = fio.old_blkaddr = dn.data_blkaddr;
	fio.page = page;
	err = f2fs_submit_page_bio(&fio);
	if (err)
		goto put_err;
	return page;

put_err:
	f2fs_put_page(page, 1);
	return ERR_PTR(err);
}

struct page *find_data_page(struct inode *inode, pgoff_t index)
{
	struct address_space *mapping = inode->i_mapping;
	struct page *page;

	page = find_get_page(mapping, index);
	if (page && PageUptodate(page))
		return page;
	f2fs_put_page(page, 0);

	page = get_read_data_page(inode, index, READ_SYNC, false);
	if (IS_ERR(page))
		return page;

	if (PageUptodate(page))
		return page;

	wait_on_page_locked(page);
	if (unlikely(!PageUptodate(page))) {
		f2fs_put_page(page, 0);
		return ERR_PTR(-EIO);
	}
	return page;
}

/*
 * If it tries to access a hole, return an error.
 * Because the callers, functions in dir.c and GC, should be able to know
 * whether this page exists or not.
 */
struct page *get_lock_data_page(struct inode *inode, pgoff_t index,
							bool for_write)
{
	struct address_space *mapping = inode->i_mapping;
	struct page *page;
repeat:
	page = get_read_data_page(inode, index, READ_SYNC, for_write);
	if (IS_ERR(page))
		return page;

	/* wait for read completion */
	lock_page(page);
	if (unlikely(!PageUptodate(page))) {
		f2fs_put_page(page, 1);
		return ERR_PTR(-EIO);
	}
	if (unlikely(page->mapping != mapping)) {
		f2fs_put_page(page, 1);
		goto repeat;
	}
	return page;
}

/*
 * Caller ensures that this data page is never allocated.
 * A new zero-filled data page is allocated in the page cache.
 *
 * Also, caller should grab and release a rwsem by calling f2fs_lock_op() and
 * f2fs_unlock_op().
 * Note that, ipage is set only by make_empty_dir, and if any error occurs,
 * ipage should be released by this function.
 */
struct page *get_new_data_page(struct inode *inode,
		struct page *ipage, pgoff_t index, bool new_i_size)
{
	struct address_space *mapping = inode->i_mapping;
	struct page *page;
	struct dnode_of_data dn;
	int err;

	page = f2fs_grab_cache_page(mapping, index, true);
	if (!page) {
		/*
		 * before exiting, we should make sure ipage will be released
		 * if any error occurs.
		 */
		f2fs_put_page(ipage, 1);
		return ERR_PTR(-ENOMEM);
	}

	set_new_dnode(&dn, inode, ipage, NULL, 0);
	err = f2fs_reserve_block(&dn, index);
	if (err) {
		f2fs_put_page(page, 1);
		return ERR_PTR(err);
	}
	if (!ipage)
		f2fs_put_dnode(&dn);

	if (PageUptodate(page))
		goto got_it;

	if (dn.data_blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_SIZE);
		SetPageUptodate(page);
	} else {
		f2fs_put_page(page, 1);

		/* if ipage exists, blkaddr should be NEW_ADDR */
		f2fs_bug_on(F2FS_I_SB(inode), ipage);
		page = get_lock_data_page(inode, index, true);
		if (IS_ERR(page))
			return page;
	}
got_it:
	if (new_i_size && i_size_read(inode) <
				((loff_t)(index + 1) << PAGE_SHIFT)) {
		i_size_write(inode, ((loff_t)(index + 1) << PAGE_SHIFT));
		/* Only the directory inode sets new_i_size */
		set_inode_flag(F2FS_I(inode), FI_UPDATE_DIR);
	}
	return page;
}

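/*
 * Turn a hole or a reserved NEW_ADDR block into a real on-disk block:
 * pick a log type (CURSEG_DIRECT_IO when the block is addressed from
 * the inode page itself, CURSEG_WARM_DATA otherwise), allocate, and
 * push the new address back into the dnode and i_size.
 */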
static int __allocate_data_block(struct dnode_of_data *dn)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	struct f2fs_summary sum;
	struct node_info ni;
	int seg = CURSEG_WARM_DATA;
	pgoff_t fofs;
	blkcnt_t count = 1;

	if (unlikely(is_inode_flag_set(F2FS_I(dn->inode), FI_NO_ALLOC)))
		return -EPERM;

	dn->data_blkaddr = datablock_addr(dn->node_page, dn->ofs_in_node);
	if (dn->data_blkaddr == NEW_ADDR)
		goto alloc;

	if (unlikely(!inc_valid_block_count(sbi, dn->inode, &count)))
		return -ENOSPC;

alloc:
	get_node_info(sbi, dn->nid, &ni);
	set_summary(&sum, dn->nid, dn->ofs_in_node, ni.version);

	if (dn->ofs_in_node == 0 && dn->inode_page == dn->node_page)
		seg = CURSEG_DIRECT_IO;

	allocate_data_block(sbi, NULL, dn->data_blkaddr, &dn->data_blkaddr,
								&sum, seg);
	set_data_blkaddr(dn);

	/* update i_size */
	fofs = start_bidx_of_node(ofs_of_node(dn->node_page), dn->inode) +
							dn->ofs_in_node;
	if (i_size_read(dn->inode) < ((loff_t)(fofs + 1) << PAGE_SHIFT))
		i_size_write(dn->inode,
				((loff_t)(fofs + 1) << PAGE_SHIFT));
	return 0;
}

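/*
 * Preallocation entry for the write path: direct I/O maps its range
 * with F2FS_GET_BLOCK_PRE_DIO after converting away inline data, while
 * buffered writes that can no longer stay inline use the batched
 * F2FS_GET_BLOCK_PRE_AIO mode.
 */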
ssize_t f2fs_preallocate_blocks(struct kiocb *iocb, struct iov_iter *from)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	struct f2fs_map_blocks map;
	ssize_t ret = 0;

	map.m_lblk = F2FS_BLK_ALIGN(iocb->ki_pos);
	map.m_len = F2FS_BYTES_TO_BLK(iov_iter_count(from));
	map.m_next_pgofs = NULL;

	if (f2fs_encrypted_inode(inode))
		return 0;

	if (iocb->ki_flags & IOCB_DIRECT) {
		ret = f2fs_convert_inline_inode(inode);
		if (ret)
			return ret;
		return f2fs_map_blocks(inode, &map, 1, F2FS_GET_BLOCK_PRE_DIO);
	}
	if (iocb->ki_pos + iov_iter_count(from) > MAX_INLINE_DATA) {
		ret = f2fs_convert_inline_inode(inode);
		if (ret)
			return ret;
	}
	if (!f2fs_has_inline_data(inode))
		return f2fs_map_blocks(inode, &map, 1, F2FS_GET_BLOCK_PRE_AIO);
	return ret;
}

/*
 * f2fs_map_blocks() now supports readahead/bmap/rw direct_IO with the
 * f2fs_map_blocks structure.
 * If original data blocks are allocated, then give them to blockdev.
 * Otherwise,
 *     a. preallocate requested block addresses
 *     b. do not use extent cache for better performance
 *     c. give the block addresses to blockdev
 */
int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map,
						int create, int flag)
{
	unsigned int maxblocks = map->m_len;
	struct dnode_of_data dn;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	int mode = create ? ALLOC_NODE : LOOKUP_NODE_RA;
	pgoff_t pgofs, end_offset, end;
	int err = 0, ofs = 1;
	unsigned int ofs_in_node, last_ofs_in_node;
	blkcnt_t prealloc;
	struct extent_info ei;
	bool allocated = false;
	block_t blkaddr;

	map->m_len = 0;
	map->m_flags = 0;

	/* it only supports block size == page size */
	pgofs = (pgoff_t)map->m_lblk;
	end = pgofs + maxblocks;

	if (!create && f2fs_lookup_extent_cache(inode, pgofs, &ei)) {
		map->m_pblk = ei.blk + pgofs - ei.fofs;
		map->m_len = min((pgoff_t)maxblocks, ei.fofs + ei.len - pgofs);
		map->m_flags = F2FS_MAP_MAPPED;
		goto out;
	}

next_dnode:
	if (create)
		f2fs_lock_op(sbi);

	/* When reading holes, we need its node page */
	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, pgofs, mode);
	if (err) {
		if (flag == F2FS_GET_BLOCK_BMAP)
			map->m_pblk = 0;
		if (err == -ENOENT) {
			err = 0;
			if (map->m_next_pgofs)
				*map->m_next_pgofs =
					get_next_page_offset(&dn, pgofs);
		}
		goto unlock_out;
	}

	prealloc = 0;
	ofs_in_node = dn.ofs_in_node;
	end_offset = ADDRS_PER_PAGE(dn.node_page, inode);

next_block:
	blkaddr = datablock_addr(dn.node_page, dn.ofs_in_node);

	if (blkaddr == NEW_ADDR || blkaddr == NULL_ADDR) {
		if (create) {
			if (unlikely(f2fs_cp_error(sbi))) {
				err = -EIO;
				goto sync_out;
			}
			if (flag == F2FS_GET_BLOCK_PRE_AIO) {
				if (blkaddr == NULL_ADDR) {
					prealloc++;
					last_ofs_in_node = dn.ofs_in_node;
				}
			} else {
				err = __allocate_data_block(&dn);
				if (!err) {
					set_inode_flag(F2FS_I(inode),
							FI_APPEND_WRITE);
					allocated = true;
				}
			}
			if (err)
				goto sync_out;
			map->m_flags = F2FS_MAP_NEW;
			blkaddr = dn.data_blkaddr;
		} else {
			if (flag == F2FS_GET_BLOCK_BMAP) {
				map->m_pblk = 0;
				goto sync_out;
			}
			if (flag == F2FS_GET_BLOCK_FIEMAP &&
						blkaddr == NULL_ADDR) {
				if (map->m_next_pgofs)
					*map->m_next_pgofs = pgofs + 1;
			}
			if (flag != F2FS_GET_BLOCK_FIEMAP ||
						blkaddr != NEW_ADDR)
				goto sync_out;
		}
	}

	if (flag == F2FS_GET_BLOCK_PRE_AIO)
		goto skip;

	if (map->m_len == 0) {
		/* preallocated unwritten block should be mapped for fiemap. */
		if (blkaddr == NEW_ADDR)
			map->m_flags |= F2FS_MAP_UNWRITTEN;
		map->m_flags |= F2FS_MAP_MAPPED;

		map->m_pblk = blkaddr;
		map->m_len = 1;
	} else if ((map->m_pblk != NEW_ADDR &&
			blkaddr == (map->m_pblk + ofs)) ||
			(map->m_pblk == NEW_ADDR && blkaddr == NEW_ADDR) ||
			flag == F2FS_GET_BLOCK_PRE_DIO) {
		ofs++;
		map->m_len++;
	} else {
		goto sync_out;
	}

skip:
	dn.ofs_in_node++;
	pgofs++;

	/* preallocate blocks in batch for one dnode page */
	if (flag == F2FS_GET_BLOCK_PRE_AIO &&
			(pgofs == end || dn.ofs_in_node == end_offset)) {

		dn.ofs_in_node = ofs_in_node;
		err = reserve_new_blocks(&dn, prealloc);
		if (err)
			goto sync_out;

		map->m_len += dn.ofs_in_node - ofs_in_node;
		if (prealloc && dn.ofs_in_node != last_ofs_in_node + 1) {
			err = -ENOSPC;
			goto sync_out;
		}
		dn.ofs_in_node = end_offset;
	}

	if (pgofs >= end)
		goto sync_out;
	else if (dn.ofs_in_node < end_offset)
		goto next_block;

	if (allocated)
		sync_inode_page(&dn);
	f2fs_put_dnode(&dn);

	if (create) {
		f2fs_unlock_op(sbi);
		f2fs_balance_fs(sbi, allocated);
	}
	allocated = false;
	goto next_dnode;

sync_out:
	if (allocated)
		sync_inode_page(&dn);
	f2fs_put_dnode(&dn);
unlock_out:
	if (create) {
		f2fs_unlock_op(sbi);
		f2fs_balance_fs(sbi, allocated);
	}
out:
	trace_f2fs_map_blocks(inode, map, err);
	return err;
}

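/*
 * The helpers below bridge f2fs_map_blocks() to the buffer_head
 * interface used by generic code.  Direct callers fill the map
 * themselves; a sketch based on f2fs_preallocate_blocks() above:
 *
 *	struct f2fs_map_blocks map;
 *
 *	map.m_lblk = F2FS_BLK_ALIGN(iocb->ki_pos);
 *	map.m_len = F2FS_BYTES_TO_BLK(iov_iter_count(from));
 *	map.m_next_pgofs = NULL;
 *	err = f2fs_map_blocks(inode, &map, 1, F2FS_GET_BLOCK_PRE_DIO);
 */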
static int __get_data_block(struct inode *inode, sector_t iblock,
			struct buffer_head *bh, int create, int flag,
			pgoff_t *next_pgofs)
{
	struct f2fs_map_blocks map;
	int ret;

	map.m_lblk = iblock;
	map.m_len = bh->b_size >> inode->i_blkbits;
	map.m_next_pgofs = next_pgofs;

	ret = f2fs_map_blocks(inode, &map, create, flag);
	if (!ret) {
		map_bh(bh, inode->i_sb, map.m_pblk);
		bh->b_state = (bh->b_state & ~F2FS_MAP_FLAGS) | map.m_flags;
		bh->b_size = map.m_len << inode->i_blkbits;
	}
	return ret;
}

static int get_data_block(struct inode *inode, sector_t iblock,
			struct buffer_head *bh_result, int create, int flag,
			pgoff_t *next_pgofs)
{
	return __get_data_block(inode, iblock, bh_result, create,
							flag, next_pgofs);
}

static int get_data_block_dio(struct inode *inode, sector_t iblock,
			struct buffer_head *bh_result, int create)
{
	return __get_data_block(inode, iblock, bh_result, create,
						F2FS_GET_BLOCK_DIO, NULL);
}

static int get_data_block_bmap(struct inode *inode, sector_t iblock,
			struct buffer_head *bh_result, int create)
{
	/* Block number less than F2FS MAX BLOCKS */
	if (unlikely(iblock >= F2FS_I_SB(inode)->max_file_blocks))
		return -EFBIG;

	return __get_data_block(inode, iblock, bh_result, create,
						F2FS_GET_BLOCK_BMAP, NULL);
}

static inline sector_t logical_to_blk(struct inode *inode, loff_t offset)
{
	return (offset >> inode->i_blkbits);
}

static inline loff_t blk_to_logical(struct inode *inode, sector_t blk)
{
	return (blk << inode->i_blkbits);
}

int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
		u64 start, u64 len)
{
	struct buffer_head map_bh;
	sector_t start_blk, last_blk;
	pgoff_t next_pgofs;
	loff_t isize;
	u64 logical = 0, phys = 0, size = 0;
	u32 flags = 0;
	int ret = 0;

	ret = fiemap_check_flags(fieinfo, FIEMAP_FLAG_SYNC);
	if (ret)
		return ret;

	if (f2fs_has_inline_data(inode)) {
		ret = f2fs_inline_data_fiemap(inode, fieinfo, start, len);
		if (ret != -EAGAIN)
			return ret;
	}

	inode_lock(inode);

	isize = i_size_read(inode);
	if (start >= isize)
		goto out;

	if (start + len > isize)
		len = isize - start;

	if (logical_to_blk(inode, len) == 0)
		len = blk_to_logical(inode, 1);

	start_blk = logical_to_blk(inode, start);
	last_blk = logical_to_blk(inode, start + len - 1);

next:
	memset(&map_bh, 0, sizeof(struct buffer_head));
	map_bh.b_size = len;

	ret = get_data_block(inode, start_blk, &map_bh, 0,
					F2FS_GET_BLOCK_FIEMAP, &next_pgofs);
	if (ret)
		goto out;

	/* HOLE */
	if (!buffer_mapped(&map_bh)) {
		start_blk = next_pgofs;
		/* Go through holes until passing the EOF */
		if (blk_to_logical(inode, start_blk) < isize)
			goto prep_next;
		/* A hole beyond isize means there are no more extents.
		 * Note that the premise is that filesystems don't
		 * punch holes beyond isize and keep size unchanged.
		 */
		flags |= FIEMAP_EXTENT_LAST;
	}

	if (size) {
		if (f2fs_encrypted_inode(inode))
			flags |= FIEMAP_EXTENT_DATA_ENCRYPTED;

		ret = fiemap_fill_next_extent(fieinfo, logical,
				phys, size, flags);
	}

	if (start_blk > last_blk || ret)
		goto out;

	logical = blk_to_logical(inode, start_blk);
	phys = blk_to_logical(inode, map_bh.b_blocknr);
	size = map_bh.b_size;
	flags = 0;
	if (buffer_unwritten(&map_bh))
		flags = FIEMAP_EXTENT_UNWRITTEN;

	start_blk += logical_to_blk(inode, size);

prep_next:
	cond_resched();
	if (fatal_signal_pending(current))
		ret = -EINTR;
	else
		goto next;
out:
	if (ret == 1)
		ret = 0;

	inode_unlock(inode);
	return ret;
}

/*
 * This function was originally taken from fs/mpage.c, and customized for f2fs.
 * The major change is that block_size == page_size in f2fs by default.
 */
static int f2fs_mpage_readpages(struct address_space *mapping,
			struct list_head *pages, struct page *page,
			unsigned nr_pages)
{
	struct bio *bio = NULL;
	unsigned page_idx;
	sector_t last_block_in_bio = 0;
	struct inode *inode = mapping->host;
	const unsigned blkbits = inode->i_blkbits;
	const unsigned blocksize = 1 << blkbits;
	sector_t block_in_file;
	sector_t last_block;
	sector_t last_block_in_file;
	sector_t block_nr;
	struct block_device *bdev = inode->i_sb->s_bdev;
	struct f2fs_map_blocks map;

	map.m_pblk = 0;
	map.m_lblk = 0;
	map.m_len = 0;
	map.m_flags = 0;
	map.m_next_pgofs = NULL;

	for (page_idx = 0; nr_pages; page_idx++, nr_pages--) {

		prefetchw(&page->flags);
		if (pages) {
			page = list_entry(pages->prev, struct page, lru);
			list_del(&page->lru);
			if (add_to_page_cache_lru(page, mapping,
						  page->index, GFP_KERNEL))
				goto next_page;
		}

		block_in_file = (sector_t)page->index;
		last_block = block_in_file + nr_pages;
		last_block_in_file = (i_size_read(inode) + blocksize - 1) >>
								blkbits;
		if (last_block > last_block_in_file)
			last_block = last_block_in_file;

		/*
		 * Map blocks using the previous result first.
		 */
		if ((map.m_flags & F2FS_MAP_MAPPED) &&
				block_in_file > map.m_lblk &&
				block_in_file < (map.m_lblk + map.m_len))
			goto got_it;

		/*
		 * Then do more f2fs_map_blocks() calls until we are
		 * done with this page.
		 */
		map.m_flags = 0;

		if (block_in_file < last_block) {
			map.m_lblk = block_in_file;
			map.m_len = last_block - block_in_file;

			if (f2fs_map_blocks(inode, &map, 0,
						F2FS_GET_BLOCK_READ))
				goto set_error_page;
		}
got_it:
		if ((map.m_flags & F2FS_MAP_MAPPED)) {
			block_nr = map.m_pblk + block_in_file - map.m_lblk;
			SetPageMappedToDisk(page);

			if (!PageUptodate(page) && !cleancache_get_page(page)) {
				SetPageUptodate(page);
				goto confused;
			}
		} else {
			zero_user_segment(page, 0, PAGE_SIZE);
			SetPageUptodate(page);
			unlock_page(page);
			goto next_page;
		}

		/*
		 * This page will go to BIO.  Do we need to send this
		 * BIO off first?
		 */
		if (bio && (last_block_in_bio != block_nr - 1)) {
submit_and_realloc:
			submit_bio(READ, bio);
			bio = NULL;
		}
		if (bio == NULL) {
			struct fscrypt_ctx *ctx = NULL;

			if (f2fs_encrypted_inode(inode) &&
					S_ISREG(inode->i_mode)) {

				ctx = fscrypt_get_ctx(inode, GFP_NOFS);
				if (IS_ERR(ctx))
					goto set_error_page;

				/* wait for the page to be moved by cleaning */
				f2fs_wait_on_encrypted_page_writeback(
						F2FS_I_SB(inode), block_nr);
			}

			bio = bio_alloc(GFP_KERNEL,
				min_t(int, nr_pages, BIO_MAX_PAGES));
			if (!bio) {
				if (ctx)
					fscrypt_release_ctx(ctx);
				goto set_error_page;
			}
			bio->bi_bdev = bdev;
			bio->bi_iter.bi_sector = SECTOR_FROM_BLOCK(block_nr);
			bio->bi_end_io = f2fs_read_end_io;
			bio->bi_private = ctx;
		}

		if (bio_add_page(bio, page, blocksize, 0) < blocksize)
			goto submit_and_realloc;

		last_block_in_bio = block_nr;
		goto next_page;
set_error_page:
		SetPageError(page);
		zero_user_segment(page, 0, PAGE_SIZE);
		unlock_page(page);
		goto next_page;
confused:
		if (bio) {
			submit_bio(READ, bio);
			bio = NULL;
		}
		unlock_page(page);
next_page:
		if (pages)
			put_page(page);
	}
	BUG_ON(pages && !list_empty(pages));
	if (bio)
		submit_bio(READ, bio);
	return 0;
}

static int f2fs_read_data_page(struct file *file, struct page *page)
{
	struct inode *inode = page->mapping->host;
	int ret = -EAGAIN;

	trace_f2fs_readpage(page, DATA);

	/* If the file has inline data, try to read it directly */
	if (f2fs_has_inline_data(inode))
		ret = f2fs_read_inline_data(inode, page);
	if (ret == -EAGAIN)
		ret = f2fs_mpage_readpages(page->mapping, NULL, page, 1);
	return ret;
}

static int f2fs_read_data_pages(struct file *file,
			struct address_space *mapping,
			struct list_head *pages, unsigned nr_pages)
{
	struct inode *inode = file->f_mapping->host;
	struct page *page = list_entry(pages->prev, struct page, lru);

	trace_f2fs_readpages(inode, page, nr_pages);

	/* If the file has inline data, skip readpages */
	if (f2fs_has_inline_data(inode))
		return 0;

	return f2fs_mpage_readpages(mapping, pages, NULL, nr_pages);
}

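/*
 * do_write_data_page() chooses between in-place update (IPU) and
 * out-of-place update (OPU): when SSR wants it, an already-allocated,
 * non-cold, non-atomic page is rewritten at its old block address;
 * otherwise a new block is allocated and written.
 */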
int do_write_data_page(struct f2fs_io_info *fio)
{
	struct page *page = fio->page;
	struct inode *inode = page->mapping->host;
	struct dnode_of_data dn;
	int err = 0;

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, page->index, LOOKUP_NODE);
	if (err)
		return err;

	fio->old_blkaddr = dn.data_blkaddr;

	/* This page is already truncated */
	if (fio->old_blkaddr == NULL_ADDR) {
		ClearPageUptodate(page);
		goto out_writepage;
	}

	if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode)) {
		gfp_t gfp_flags = GFP_NOFS;

		/* wait for GCed encrypted page writeback */
		f2fs_wait_on_encrypted_page_writeback(F2FS_I_SB(inode),
							fio->old_blkaddr);
retry_encrypt:
		fio->encrypted_page = fscrypt_encrypt_page(inode, fio->page,
								gfp_flags);
		if (IS_ERR(fio->encrypted_page)) {
			err = PTR_ERR(fio->encrypted_page);
			if (err == -ENOMEM) {
				/* flush pending ios and wait for a while */
				f2fs_flush_merged_bios(F2FS_I_SB(inode));
				congestion_wait(BLK_RW_ASYNC, HZ/50);
				gfp_flags |= __GFP_NOFAIL;
				err = 0;
				goto retry_encrypt;
			}
			goto out_writepage;
		}
	}

	set_page_writeback(page);

	/*
	 * If current allocation needs SSR,
	 * it is better to do in-place writes for updated data.
	 */
	if (unlikely(fio->old_blkaddr != NEW_ADDR &&
			!is_cold_data(page) &&
			!IS_ATOMIC_WRITTEN_PAGE(page) &&
			need_inplace_update(inode))) {
		rewrite_data_page(fio);
		set_inode_flag(F2FS_I(inode), FI_UPDATE_WRITE);
		trace_f2fs_do_write_data_page(page, IPU);
	} else {
		write_data_page(&dn, fio);
		trace_f2fs_do_write_data_page(page, OPU);
		set_inode_flag(F2FS_I(inode), FI_APPEND_WRITE);
		if (page->index == 0)
			set_inode_flag(F2FS_I(inode), FI_FIRST_BLOCK_WRITTEN);
	}
out_writepage:
	f2fs_put_dnode(&dn);
	return err;
}

static int f2fs_write_data_page(struct page *page,
					struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	loff_t i_size = i_size_read(inode);
	const pgoff_t end_index = ((unsigned long long) i_size)
							>> PAGE_SHIFT;
	unsigned offset = 0;
	bool need_balance_fs = false;
	int err = 0;
	struct f2fs_io_info fio = {
		.sbi = sbi,
		.type = DATA,
		.rw = (wbc->sync_mode == WB_SYNC_ALL) ? WRITE_SYNC : WRITE,
		.page = page,
		.encrypted_page = NULL,
	};

	trace_f2fs_writepage(page, DATA);

	if (page->index < end_index)
		goto write;

	/*
	 * If the offset is out-of-range of file size,
	 * this page does not have to be written to disk.
	 */
	offset = i_size & (PAGE_SIZE - 1);
	if ((page->index >= end_index + 1) || !offset)
		goto out;

	zero_user_segment(page, offset, PAGE_SIZE);
write:
	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
		goto redirty_out;
	if (f2fs_is_drop_cache(inode))
		goto out;
	/* we should not write 0'th page having journal header */
	if (f2fs_is_volatile_file(inode) && (!page->index ||
			(!wbc->for_reclaim &&
			available_free_memory(sbi, BASE_CHECK))))
		goto redirty_out;

	/* Dentry blocks are controlled by checkpoint */
	if (S_ISDIR(inode->i_mode)) {
		if (unlikely(f2fs_cp_error(sbi)))
			goto redirty_out;
		err = do_write_data_page(&fio);
		goto done;
	}

	/* we should bypass data pages to proceed the kworker jobs */
	if (unlikely(f2fs_cp_error(sbi))) {
		SetPageError(page);
		goto out;
	}

	if (!wbc->for_reclaim)
		need_balance_fs = true;
	else if (has_not_enough_free_secs(sbi, 0))
		goto redirty_out;

	err = -EAGAIN;
	f2fs_lock_op(sbi);
	if (f2fs_has_inline_data(inode))
		err = f2fs_write_inline_data(inode, page);
	if (err == -EAGAIN)
		err = do_write_data_page(&fio);
	f2fs_unlock_op(sbi);
done:
	if (err && err != -ENOENT)
		goto redirty_out;

	clear_cold_data(page);
out:
	inode_dec_dirty_pages(inode);
	if (err)
		ClearPageUptodate(page);

	if (wbc->for_reclaim) {
		f2fs_submit_merged_bio_cond(sbi, NULL, page, 0, DATA, WRITE);
		remove_dirty_inode(inode);
	}

	unlock_page(page);
	f2fs_balance_fs(sbi, need_balance_fs);

	if (unlikely(f2fs_cp_error(sbi)))
		f2fs_submit_merged_bio(sbi, DATA, WRITE);

	return 0;

redirty_out:
	redirty_page_for_writepage(wbc, page);
	return AOP_WRITEPAGE_ACTIVATE;
}

static int __f2fs_writepage(struct page *page, struct writeback_control *wbc,
			void *data)
{
	struct address_space *mapping = data;
	int ret = mapping->a_ops->writepage(page, wbc);
	mapping_set_error(mapping, ret);
	return ret;
}

/*
 * This function was copied from write_cache_pages from mm/page-writeback.c.
 * The major change is that it writes cold data pages in a separate step
 * from warm/hot data pages.
 */
static int f2fs_write_cache_pages(struct address_space *mapping,
			struct writeback_control *wbc, writepage_t writepage,
			void *data)
{
	int ret = 0;
	int done = 0;
	struct pagevec pvec;
	int nr_pages;
	pgoff_t uninitialized_var(writeback_index);
	pgoff_t index;
	pgoff_t end;		/* Inclusive */
	pgoff_t done_index;
	int cycled;
	int range_whole = 0;
	int tag;
	int step = 0;

	pagevec_init(&pvec, 0);
next:
	if (wbc->range_cyclic) {
		writeback_index = mapping->writeback_index; /* prev offset */
		index = writeback_index;
		if (index == 0)
			cycled = 1;
		else
			cycled = 0;
		end = -1;
	} else {
		index = wbc->range_start >> PAGE_SHIFT;
		end = wbc->range_end >> PAGE_SHIFT;
		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
			range_whole = 1;
		cycled = 1; /* ignore range_cyclic tests */
	}
	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
		tag = PAGECACHE_TAG_TOWRITE;
	else
		tag = PAGECACHE_TAG_DIRTY;
retry:
	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
		tag_pages_for_writeback(mapping, index, end);
	done_index = index;
	while (!done && (index <= end)) {
		int i;

		nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag,
			      min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1);
		if (nr_pages == 0)
			break;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];

			if (page->index > end) {
				done = 1;
				break;
			}

			done_index = page->index;

			lock_page(page);

			if (unlikely(page->mapping != mapping)) {
continue_unlock:
				unlock_page(page);
				continue;
			}

			if (!PageDirty(page)) {
				/* someone wrote it for us */
				goto continue_unlock;
			}

			if (step == is_cold_data(page))
				goto continue_unlock;

			if (PageWriteback(page)) {
				if (wbc->sync_mode != WB_SYNC_NONE)
					f2fs_wait_on_page_writeback(page,
								DATA, true);
				else
					goto continue_unlock;
			}

			BUG_ON(PageWriteback(page));
			if (!clear_page_dirty_for_io(page))
				goto continue_unlock;

			ret = (*writepage)(page, wbc, data);
			if (unlikely(ret)) {
				if (ret == AOP_WRITEPAGE_ACTIVATE) {
					unlock_page(page);
					ret = 0;
				} else {
					done_index = page->index + 1;
					done = 1;
					break;
				}
			}

			if (--wbc->nr_to_write <= 0 &&
			    wbc->sync_mode == WB_SYNC_NONE) {
				done = 1;
				break;
			}
		}
		pagevec_release(&pvec);
		cond_resched();
	}

	if (step < 1) {
		step++;
		goto next;
	}

	if (!cycled && !done) {
		cycled = 1;
		index = 0;
		end = writeback_index - 1;
		goto retry;
	}
	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
		mapping->writeback_index = done_index;

	return ret;
}

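/*
 * ->writepages entry: it filters out special files and the defragment
 * and recovery windows, then drives f2fs_write_cache_pages() and
 * flushes the merged DATA bio for this inode.
 */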
static int f2fs_write_data_pages(struct address_space *mapping,
			    struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	bool locked = false;
	int ret;
	long diff;

	/* deal with chardevs and other special file */
	if (!mapping->a_ops->writepage)
		return 0;

	/* skip writing if there is no dirty page in this inode */
	if (!get_dirty_pages(inode) && wbc->sync_mode == WB_SYNC_NONE)
		return 0;

	if (S_ISDIR(inode->i_mode) && wbc->sync_mode == WB_SYNC_NONE &&
			get_dirty_pages(inode) < nr_pages_to_skip(sbi, DATA) &&
			available_free_memory(sbi, DIRTY_DENTS))
		goto skip_write;

	/* skip writing during file defragment */
	if (is_inode_flag_set(F2FS_I(inode), FI_DO_DEFRAG))
		goto skip_write;

	/* during POR, we don't need to trigger writepage at all. */
	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
		goto skip_write;

	trace_f2fs_writepages(mapping->host, wbc, DATA);

	diff = nr_pages_to_write(sbi, DATA, wbc);

	if (!S_ISDIR(inode->i_mode) && wbc->sync_mode == WB_SYNC_ALL) {
		mutex_lock(&sbi->writepages);
		locked = true;
	}
	ret = f2fs_write_cache_pages(mapping, wbc, __f2fs_writepage, mapping);
	f2fs_submit_merged_bio_cond(sbi, inode, NULL, 0, DATA, WRITE);
	if (locked)
		mutex_unlock(&sbi->writepages);

	remove_dirty_inode(inode);

	wbc->nr_to_write = max((long)0, wbc->nr_to_write - diff);
	return ret;

skip_write:
	wbc->pages_skipped += get_dirty_pages(inode);
	trace_f2fs_writepages(mapping->host, wbc, DATA);
	return 0;
}

static void f2fs_write_failed(struct address_space *mapping, loff_t to)
{
	struct inode *inode = mapping->host;
	loff_t i_size = i_size_read(inode);

	if (to > i_size) {
		truncate_pagecache(inode, i_size);
		truncate_blocks(inode, i_size, true);
	}
}

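/*
 * Work out the block address backing the page being written and whether
 * the node chain changed, taking f2fs_lock_op() only in the inline-data
 * and beyond-EOF cases that may allocate.
 */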
static int prepare_write_begin(struct f2fs_sb_info *sbi,
			struct page *page, loff_t pos, unsigned len,
			block_t *blk_addr, bool *node_changed)
{
	struct inode *inode = page->mapping->host;
	pgoff_t index = page->index;
	struct dnode_of_data dn;
	struct page *ipage;
	bool locked = false;
	struct extent_info ei;
	int err = 0;

	/*
	 * we already allocated all the blocks, so we don't need to get
	 * the block addresses when there is no need to fill the page.
	 */
	if (!f2fs_has_inline_data(inode) && !f2fs_encrypted_inode(inode) &&
					len == PAGE_SIZE)
		return 0;

	if (f2fs_has_inline_data(inode) ||
			(pos & PAGE_MASK) >= i_size_read(inode)) {
		f2fs_lock_op(sbi);
		locked = true;
	}
restart:
	/* check inline_data */
	ipage = get_node_page(sbi, inode->i_ino);
	if (IS_ERR(ipage)) {
		err = PTR_ERR(ipage);
		goto unlock_out;
	}

	set_new_dnode(&dn, inode, ipage, ipage, 0);

	if (f2fs_has_inline_data(inode)) {
		if (pos + len <= MAX_INLINE_DATA) {
			read_inline_data(page, ipage);
			set_inode_flag(F2FS_I(inode), FI_DATA_EXIST);
			set_inline_node(ipage);
		} else {
			err = f2fs_convert_inline_page(&dn, page);
			if (err)
				goto out;
			if (dn.data_blkaddr == NULL_ADDR)
				err = f2fs_get_block(&dn, index);
		}
	} else if (locked) {
		err = f2fs_get_block(&dn, index);
	} else {
		if (f2fs_lookup_extent_cache(inode, index, &ei)) {
			dn.data_blkaddr = ei.blk + index - ei.fofs;
		} else {
			/* hole case */
			err = get_dnode_of_data(&dn, index, LOOKUP_NODE);
			if (err || dn.data_blkaddr == NULL_ADDR) {
				f2fs_put_dnode(&dn);
				f2fs_lock_op(sbi);
				locked = true;
				goto restart;
			}
		}
	}

	/* convert_inline_page can make node_changed */
	*blk_addr = dn.data_blkaddr;
	*node_changed = dn.node_changed;
out:
	f2fs_put_dnode(&dn);
unlock_out:
	if (locked)
		f2fs_unlock_op(sbi);
	return err;
}

static int f2fs_write_begin(struct file *file, struct address_space *mapping,
		loff_t pos, unsigned len, unsigned flags,
		struct page **pagep, void **fsdata)
{
	struct inode *inode = mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct page *page = NULL;
	pgoff_t index = ((unsigned long long) pos) >> PAGE_SHIFT;
	bool need_balance = false;
	block_t blkaddr = NULL_ADDR;
	int err = 0;

	trace_f2fs_write_begin(inode, pos, len, flags);

	/*
	 * We should check this at this moment to avoid deadlock on inode page
	 * and #0 page. The locking rule for inline_data conversion should be:
	 * lock_page(page #0) -> lock_page(inode_page)
	 */
	if (index != 0) {
		err = f2fs_convert_inline_inode(inode);
		if (err)
			goto fail;
	}
repeat:
	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page) {
		err = -ENOMEM;
		goto fail;
	}

	*pagep = page;

	err = prepare_write_begin(sbi, page, pos, len,
					&blkaddr, &need_balance);
	if (err)
		goto fail;

	if (need_balance && has_not_enough_free_secs(sbi, 0)) {
		unlock_page(page);
		f2fs_balance_fs(sbi, true);
		lock_page(page);
		if (page->mapping != mapping) {
			/* The page got truncated from under us */
			f2fs_put_page(page, 1);
			goto repeat;
		}
	}

	f2fs_wait_on_page_writeback(page, DATA, false);

	/* wait for GCed encrypted page writeback */
	if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode))
		f2fs_wait_on_encrypted_page_writeback(sbi, blkaddr);

	if (len == PAGE_SIZE)
		goto out_update;
	if (PageUptodate(page))
		goto out_clear;

	if ((pos & PAGE_MASK) >= i_size_read(inode)) {
		unsigned start = pos & (PAGE_SIZE - 1);
		unsigned end = start + len;

		/* Reading beyond i_size is simple: memset to zero */
		zero_user_segments(page, 0, start, end, PAGE_SIZE);
		goto out_update;
	}

	if (blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_SIZE);
	} else {
		struct f2fs_io_info fio = {
			.sbi = sbi,
			.type = DATA,
			.rw = READ_SYNC,
			.old_blkaddr = blkaddr,
			.new_blkaddr = blkaddr,
			.page = page,
			.encrypted_page = NULL,
		};
		err = f2fs_submit_page_bio(&fio);
		if (err)
			goto fail;

		lock_page(page);
		if (unlikely(!PageUptodate(page))) {
			err = -EIO;
			goto fail;
		}
		if (unlikely(page->mapping != mapping)) {
			f2fs_put_page(page, 1);
			goto repeat;
		}

		/* avoid symlink page */
		if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode)) {
			err = fscrypt_decrypt_page(page);
			if (err)
				goto fail;
		}
	}
out_update:
	SetPageUptodate(page);
out_clear:
	clear_cold_data(page);
	return 0;

fail:
	f2fs_put_page(page, 1);
	f2fs_write_failed(mapping, pos + len);
	return err;
}

static int f2fs_write_end(struct file *file,
			struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	struct inode *inode = page->mapping->host;

	trace_f2fs_write_end(inode, pos, len, copied);

	set_page_dirty(page);

	if (pos + copied > i_size_read(inode)) {
		i_size_write(inode, pos + copied);
		mark_inode_dirty(inode);
	}

	f2fs_put_page(page, 1);
	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
	return copied;
}

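/*
 * Direct I/O requires block-aligned offsets and iovecs.  As a worked
 * example with the default 4KB block size, blocksize_mask is 0xfff:
 * offset 8192 passes the check below, offset 8193 fails with -EINVAL.
 */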
static int check_direct_IO(struct inode *inode, struct iov_iter *iter,
			   loff_t offset)
{
	unsigned blocksize_mask = inode->i_sb->s_blocksize - 1;

	if (offset & blocksize_mask)
		return -EINVAL;

	if (iov_iter_alignment(iter) & blocksize_mask)
		return -EINVAL;

	return 0;
}

static ssize_t f2fs_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
			      loff_t offset)
{
	struct address_space *mapping = iocb->ki_filp->f_mapping;
	struct inode *inode = mapping->host;
	size_t count = iov_iter_count(iter);
	int err;

	err = check_direct_IO(inode, iter, offset);
	if (err)
		return err;

	if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode))
		return 0;

	trace_f2fs_direct_IO_enter(inode, offset, count, iov_iter_rw(iter));

	err = blockdev_direct_IO(iocb, inode, iter, offset, get_data_block_dio);
	if (iov_iter_rw(iter) == WRITE) {
		if (err > 0)
			set_inode_flag(F2FS_I(inode), FI_UPDATE_WRITE);
		else if (err < 0)
			f2fs_write_failed(mapping, offset + count);
	}

	trace_f2fs_direct_IO_exit(inode, offset, count, iov_iter_rw(iter), err);

	return err;
}

void f2fs_invalidate_page(struct page *page, unsigned int offset,
							unsigned int length)
{
	struct inode *inode = page->mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

	if (inode->i_ino >= F2FS_ROOT_INO(sbi) &&
		(offset % PAGE_SIZE || length != PAGE_SIZE))
		return;

	if (PageDirty(page)) {
		if (inode->i_ino == F2FS_META_INO(sbi))
			dec_page_count(sbi, F2FS_DIRTY_META);
		else if (inode->i_ino == F2FS_NODE_INO(sbi))
			dec_page_count(sbi, F2FS_DIRTY_NODES);
		else
			inode_dec_dirty_pages(inode);
	}

	/* This is an atomically written page, keep Private */
	if (IS_ATOMIC_WRITTEN_PAGE(page))
		return;

	set_page_private(page, 0);
	ClearPagePrivate(page);
}

int f2fs_release_page(struct page *page, gfp_t wait)
{
	/* If this is a dirty page, keep PagePrivate */
	if (PageDirty(page))
		return 0;

	/* This is an atomically written page, keep Private */
	if (IS_ATOMIC_WRITTEN_PAGE(page))
		return 0;

	set_page_private(page, 0);
	ClearPagePrivate(page);
	return 1;
}

static int f2fs_set_data_page_dirty(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct inode *inode = mapping->host;

	trace_f2fs_set_page_dirty(page, DATA);

	SetPageUptodate(page);

	if (f2fs_is_atomic_file(inode)) {
		if (!IS_ATOMIC_WRITTEN_PAGE(page)) {
			register_inmem_page(inode, page);
			return 1;
		}
		/*
		 * This page has already been registered, so we just
		 * return here.
		 */
		return 0;
	}

	if (!PageDirty(page)) {
		__set_page_dirty_nobuffers(page);
		update_dirty_page(inode, page);
		return 1;
	}
	return 0;
}

static sector_t f2fs_bmap(struct address_space *mapping, sector_t block)
{
	struct inode *inode = mapping->host;

	if (f2fs_has_inline_data(inode))
		return 0;

	/* make sure allocating whole blocks */
	if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
		filemap_write_and_wait(mapping);

	return generic_block_bmap(mapping, block, get_data_block_bmap);
}

const struct address_space_operations f2fs_dblock_aops = {
	.readpage	= f2fs_read_data_page,
	.readpages	= f2fs_read_data_pages,
	.writepage	= f2fs_write_data_page,
	.writepages	= f2fs_write_data_pages,
	.write_begin	= f2fs_write_begin,
	.write_end	= f2fs_write_end,
	.set_page_dirty	= f2fs_set_data_page_dirty,
	.invalidatepage	= f2fs_invalidate_page,
	.releasepage	= f2fs_release_page,
	.direct_IO	= f2fs_direct_IO,
	.bmap		= f2fs_bmap,
};