/*
 * fs/f2fs/data.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/buffer_head.h>
#include <linux/mpage.h>
#include <linux/aio.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/prefetch.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include <trace/events/f2fs.h>

static void f2fs_read_end_io(struct bio *bio, int err)
{
	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;

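	/*
	 * Walk the pages from the last bvec backwards; prefetchw() on the
	 * next page's flags word hides part of the cost of the page-flag
	 * updates done below.
	 */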
	do {
		struct page *page = bvec->bv_page;

		if (--bvec >= bio->bi_io_vec)
			prefetchw(&bvec->bv_page->flags);

		if (unlikely(!uptodate)) {
			ClearPageUptodate(page);
			SetPageError(page);
		} else {
			SetPageUptodate(page);
		}
		unlock_page(page);
	} while (bvec >= bio->bi_io_vec);

	bio_put(bio);
}

static void f2fs_write_end_io(struct bio *bio, int err)
{
	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
	struct f2fs_sb_info *sbi = F2FS_SB(bvec->bv_page->mapping->host->i_sb);

	do {
		struct page *page = bvec->bv_page;

		if (--bvec >= bio->bi_io_vec)
			prefetchw(&bvec->bv_page->flags);

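		/*
		 * A lost data writeback is fatal for consistency: record it
		 * in the checkpoint via CP_ERROR_FLAG and force a read-only
		 * filesystem.
		 */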
		if (unlikely(!uptodate)) {
			SetPageError(page);
			set_bit(AS_EIO, &page->mapping->flags);
			set_ckpt_flags(sbi->ckpt, CP_ERROR_FLAG);
			sbi->sb->s_flags |= MS_RDONLY;
		}
		end_page_writeback(page);
		dec_page_count(sbi, F2FS_WRITEBACK);
	} while (bvec >= bio->bi_io_vec);

	if (bio->bi_private)
		complete(bio->bi_private);

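	/* Wake up the checkpoint procedure once all writeback has drained. */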
	if (!get_pages(sbi, F2FS_WRITEBACK) &&
			!list_empty(&sbi->cp_wait.task_list))
		wake_up(&sbi->cp_wait);

	bio_put(bio);
}

/*
 * Low-level block read/write IO operations.
 */
static struct bio *__bio_alloc(struct f2fs_sb_info *sbi, block_t blk_addr,
				int npages, bool is_read)
{
	struct bio *bio;

	/* No failure on bio allocation */
	bio = bio_alloc(GFP_NOIO, npages);

	bio->bi_bdev = sbi->sb->s_bdev;
	bio->bi_sector = SECTOR_FROM_BLOCK(sbi, blk_addr);
	bio->bi_end_io = is_read ? f2fs_read_end_io : f2fs_write_end_io;

	return bio;
}

static void __submit_merged_bio(struct f2fs_bio_info *io)
{
	struct f2fs_io_info *fio = &io->fio;
	int rw;

	if (!io->bio)
		return;

	rw = fio->rw;

	if (is_read_io(rw)) {
		trace_f2fs_submit_read_bio(io->sbi->sb, rw,
						fio->type, io->bio);
		submit_bio(rw, io->bio);
	} else {
		trace_f2fs_submit_write_bio(io->sbi->sb, rw,
						fio->type, io->bio);
		/*
		 * META_FLUSH is issued only by the checkpoint procedure, and
		 * we must wait for this metadata bio for FS consistency.
		 */
		if (fio->type == META_FLUSH) {
			DECLARE_COMPLETION_ONSTACK(wait);
			io->bio->bi_private = &wait;
			submit_bio(rw, io->bio);
			wait_for_completion(&wait);
		} else {
			submit_bio(rw, io->bio);
		}
	}

	io->bio = NULL;
}

void f2fs_submit_merged_bio(struct f2fs_sb_info *sbi,
				enum page_type type, int rw)
{
	enum page_type btype = PAGE_TYPE_OF_BIO(type);
	struct f2fs_bio_info *io;

	io = is_read_io(rw) ? &sbi->read_io : &sbi->write_io[btype];

	mutex_lock(&io->io_mutex);

	/* change META to META_FLUSH in the checkpoint procedure */
	if (type >= META_FLUSH) {
		io->fio.type = META_FLUSH;
		io->fio.rw = WRITE_FLUSH_FUA | REQ_META | REQ_PRIO;
	}
	__submit_merged_bio(io);
	mutex_unlock(&io->io_mutex);
}

/*
 * Fill the locked page with data located in the block address.
 * Return the unlocked page.
 */
int f2fs_submit_page_bio(struct f2fs_sb_info *sbi, struct page *page,
					block_t blk_addr, int rw)
{
	struct bio *bio;

	trace_f2fs_submit_page_bio(page, blk_addr, rw);

	/* Allocate a new bio */
	bio = __bio_alloc(sbi, blk_addr, 1, is_read_io(rw));

	if (bio_add_page(bio, page, PAGE_CACHE_SIZE, 0) < PAGE_CACHE_SIZE) {
		bio_put(bio);
		f2fs_put_page(page, 1);
		return -EFAULT;
	}

	submit_bio(rw, bio);
	return 0;
}

void f2fs_submit_page_mbio(struct f2fs_sb_info *sbi, struct page *page,
			block_t blk_addr, struct f2fs_io_info *fio)
{
	enum page_type btype = PAGE_TYPE_OF_BIO(fio->type);
	struct f2fs_bio_info *io;
	bool is_read = is_read_io(fio->rw);

	io = is_read ? &sbi->read_io : &sbi->write_io[btype];

	verify_block_addr(sbi, blk_addr);

	mutex_lock(&io->io_mutex);

	if (!is_read)
		inc_page_count(sbi, F2FS_WRITEBACK);

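	/*
	 * Pages are merged into the in-flight bio only when the new block is
	 * physically contiguous with the last one and carries the same rw
	 * flags; anything else flushes the bio built so far.
	 */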
	if (io->bio && (io->last_block_in_bio != blk_addr - 1 ||
						io->fio.rw != fio->rw))
		__submit_merged_bio(io);
alloc_new:
	if (io->bio == NULL) {
		int bio_blocks = MAX_BIO_BLOCKS(max_hw_blocks(sbi));

		io->bio = __bio_alloc(sbi, blk_addr, bio_blocks, is_read);
		io->fio = *fio;
	}

	if (bio_add_page(io->bio, page, PAGE_CACHE_SIZE, 0) <
							PAGE_CACHE_SIZE) {
		__submit_merged_bio(io);
		goto alloc_new;
	}

	io->last_block_in_bio = blk_addr;

	mutex_unlock(&io->io_mutex);
	trace_f2fs_submit_page_mbio(page, fio->rw, fio->type, blk_addr);
}

/*
 * Lock ordering for the change of data block address:
 * ->data_page
 *  ->node_page
 *    update block addresses in the node page
 */
static void __set_data_blkaddr(struct dnode_of_data *dn, block_t new_addr)
{
	struct f2fs_node *rn;
	__le32 *addr_array;
	struct page *node_page = dn->node_page;
	unsigned int ofs_in_node = dn->ofs_in_node;

	f2fs_wait_on_page_writeback(node_page, NODE);

	rn = F2FS_NODE(node_page);

	/* Get physical address of data block */
	addr_array = blkaddr_in_node(rn);
	addr_array[ofs_in_node] = cpu_to_le32(new_addr);
	set_page_dirty(node_page);
}

int reserve_new_block(struct dnode_of_data *dn)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);

	if (unlikely(is_inode_flag_set(F2FS_I(dn->inode), FI_NO_ALLOC)))
		return -EPERM;
	if (unlikely(!inc_valid_block_count(sbi, dn->inode, 1)))
		return -ENOSPC;

	trace_f2fs_reserve_new_block(dn->inode, dn->nid, dn->ofs_in_node);

	__set_data_blkaddr(dn, NEW_ADDR);
	dn->data_blkaddr = NEW_ADDR;
	sync_inode_page(dn);
	return 0;
}

int f2fs_reserve_block(struct dnode_of_data *dn, pgoff_t index)
{
	bool need_put = dn->inode_page ? false : true;
	int err;

	/* if inode_page exists, index should be zero */
	f2fs_bug_on(!need_put && index);

	err = get_dnode_of_data(dn, index, ALLOC_NODE);
	if (err)
		return err;

	if (dn->data_blkaddr == NULL_ADDR)
		err = reserve_new_block(dn);
	if (err || need_put)
		f2fs_put_dnode(dn);
	return err;
}

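/*
 * f2fs caches a single extent per inode: file offsets [fofs, fofs + len)
 * map to blocks [blk_addr, blk_addr + len). On a hit, fill @bh_result
 * without walking the node page and return 1; return 0 on a miss.
 */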
static int check_extent_cache(struct inode *inode, pgoff_t pgofs,
					struct buffer_head *bh_result)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);
	pgoff_t start_fofs, end_fofs;
	block_t start_blkaddr;

	if (is_inode_flag_set(fi, FI_NO_EXTENT))
		return 0;

	read_lock(&fi->ext.ext_lock);
	if (fi->ext.len == 0) {
		read_unlock(&fi->ext.ext_lock);
		return 0;
	}

	stat_inc_total_hit(inode->i_sb);

	start_fofs = fi->ext.fofs;
	end_fofs = fi->ext.fofs + fi->ext.len - 1;
	start_blkaddr = fi->ext.blk_addr;

	if (pgofs >= start_fofs && pgofs <= end_fofs) {
		unsigned int blkbits = inode->i_sb->s_blocksize_bits;
		size_t count;

		clear_buffer_new(bh_result);
		map_bh(bh_result, inode->i_sb,
				start_blkaddr + pgofs - start_fofs);
		count = end_fofs - pgofs + 1;
		if (count < (UINT_MAX >> blkbits))
			bh_result->b_size = (count << blkbits);
		else
			bh_result->b_size = UINT_MAX;

		stat_inc_read_hit(inode->i_sb);
		read_unlock(&fi->ext.ext_lock);
		return 1;
	}
	read_unlock(&fi->ext.ext_lock);
	return 0;
}

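/*
 * Record the new block address in the node page and keep the one-extent
 * cache coherent: the new mapping may extend, split, or drop the cached
 * extent, and a too-fragmented extent disables caching via FI_NO_EXTENT.
 */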
void update_extent_cache(block_t blk_addr, struct dnode_of_data *dn)
{
	struct f2fs_inode_info *fi = F2FS_I(dn->inode);
	pgoff_t fofs, start_fofs, end_fofs;
	block_t start_blkaddr, end_blkaddr;
	int need_update = true;

	f2fs_bug_on(blk_addr == NEW_ADDR);
	fofs = start_bidx_of_node(ofs_of_node(dn->node_page), fi) +
							dn->ofs_in_node;

	/* Update the page address in the parent node */
	__set_data_blkaddr(dn, blk_addr);

	if (is_inode_flag_set(fi, FI_NO_EXTENT))
		return;

	write_lock(&fi->ext.ext_lock);

	start_fofs = fi->ext.fofs;
	end_fofs = fi->ext.fofs + fi->ext.len - 1;
	start_blkaddr = fi->ext.blk_addr;
	end_blkaddr = fi->ext.blk_addr + fi->ext.len - 1;

	/* Drop and initialize the matched extent */
	if (fi->ext.len == 1 && fofs == start_fofs)
		fi->ext.len = 0;

	/* Initial extent */
	if (fi->ext.len == 0) {
		if (blk_addr != NULL_ADDR) {
			fi->ext.fofs = fofs;
			fi->ext.blk_addr = blk_addr;
			fi->ext.len = 1;
		}
		goto end_update;
	}

	/* Front merge */
	if (fofs == start_fofs - 1 && blk_addr == start_blkaddr - 1) {
		fi->ext.fofs--;
		fi->ext.blk_addr--;
		fi->ext.len++;
		goto end_update;
	}

	/* Back merge */
	if (fofs == end_fofs + 1 && blk_addr == end_blkaddr + 1) {
		fi->ext.len++;
		goto end_update;
	}

	/* Split the existing extent */
	if (fi->ext.len > 1 &&
		fofs >= start_fofs && fofs <= end_fofs) {
		if ((end_fofs - fofs) < (fi->ext.len >> 1)) {
			fi->ext.len = fofs - start_fofs;
		} else {
			fi->ext.fofs = fofs + 1;
			fi->ext.blk_addr = start_blkaddr +
					fofs - start_fofs + 1;
			fi->ext.len -= fofs - start_fofs + 1;
		}
	} else {
		need_update = false;
	}

	/* Finally, if the extent is very fragmented, let's drop the cache. */
	if (fi->ext.len < F2FS_MIN_EXTENT_LEN) {
		fi->ext.len = 0;
		set_inode_flag(fi, FI_NO_EXTENT);
		need_update = true;
	}
end_update:
	write_unlock(&fi->ext.ext_lock);
	if (need_update)
		sync_inode_page(dn);
	return;
}

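/*
 * Find a data page without creating one: return the cached page if it is
 * uptodate, otherwise read it from its block address. When @sync is false
 * the read is issued as readahead, so the page may not be uptodate yet.
 */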
struct page *find_data_page(struct inode *inode, pgoff_t index, bool sync)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	struct address_space *mapping = inode->i_mapping;
	struct dnode_of_data dn;
	struct page *page;
	int err;

	page = find_get_page(mapping, index);
	if (page && PageUptodate(page))
		return page;
	f2fs_put_page(page, 0);

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, index, LOOKUP_NODE);
	if (err)
		return ERR_PTR(err);
	f2fs_put_dnode(&dn);

	if (dn.data_blkaddr == NULL_ADDR)
		return ERR_PTR(-ENOENT);

	/* A block preallocated by fallocate() has NEW_ADDR but no cached page */
	if (unlikely(dn.data_blkaddr == NEW_ADDR))
		return ERR_PTR(-EINVAL);

	page = grab_cache_page_write_begin(mapping, index, AOP_FLAG_NOFS);
	if (!page)
		return ERR_PTR(-ENOMEM);

	if (PageUptodate(page)) {
		unlock_page(page);
		return page;
	}

	err = f2fs_submit_page_bio(sbi, page, dn.data_blkaddr,
					sync ? READ_SYNC : READA);
	if (err)
		return ERR_PTR(err);

	if (sync) {
		wait_on_page_locked(page);
		if (unlikely(!PageUptodate(page))) {
			f2fs_put_page(page, 0);
			return ERR_PTR(-EIO);
		}
	}
	return page;
}

/*
 * If it tries to access a hole, return an error, because the callers
 * (functions in dir.c and GC) should be able to know whether this page
 * exists or not.
 */
struct page *get_lock_data_page(struct inode *inode, pgoff_t index)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	struct address_space *mapping = inode->i_mapping;
	struct dnode_of_data dn;
	struct page *page;
	int err;

repeat:
	page = grab_cache_page_write_begin(mapping, index, AOP_FLAG_NOFS);
	if (!page)
		return ERR_PTR(-ENOMEM);

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, index, LOOKUP_NODE);
	if (err) {
		f2fs_put_page(page, 1);
		return ERR_PTR(err);
	}
	f2fs_put_dnode(&dn);

	if (unlikely(dn.data_blkaddr == NULL_ADDR)) {
		f2fs_put_page(page, 1);
		return ERR_PTR(-ENOENT);
	}

	if (PageUptodate(page))
		return page;

	/*
	 * A new dentry page may be allocated but not writable, because its
	 * new inode page could not be allocated due to -ENOSPC.
	 * In that case, its blkaddr stays NEW_ADDR; see f2fs_add_link ->
	 * get_new_data_page -> init_inode_metadata.
	 */
	if (dn.data_blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_CACHE_SIZE);
		SetPageUptodate(page);
		return page;
	}

	err = f2fs_submit_page_bio(sbi, page, dn.data_blkaddr, READ_SYNC);
	if (err)
		return ERR_PTR(err);

	lock_page(page);
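	/*
	 * lock_page() may have slept across a truncation: a read failure
	 * shows up as !PageUptodate, while a changed ->mapping means the
	 * page was reclaimed, so look it up again.
	 */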
	if (unlikely(!PageUptodate(page))) {
		f2fs_put_page(page, 1);
		return ERR_PTR(-EIO);
	}
	if (unlikely(page->mapping != mapping)) {
		f2fs_put_page(page, 1);
		goto repeat;
	}
	return page;
}

/*
 * Caller ensures that this data page is never allocated.
 * A new zero-filled data page is allocated in the page cache.
 *
 * Also, the caller should grab and release a rwsem by calling f2fs_lock_op()
 * and f2fs_unlock_op().
 * Note that ipage is set only by make_empty_dir.
 */
struct page *get_new_data_page(struct inode *inode,
		struct page *ipage, pgoff_t index, bool new_i_size)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	struct address_space *mapping = inode->i_mapping;
	struct page *page;
	struct dnode_of_data dn;
	int err;

	set_new_dnode(&dn, inode, ipage, NULL, 0);
	err = f2fs_reserve_block(&dn, index);
	if (err)
		return ERR_PTR(err);
repeat:
	page = grab_cache_page(mapping, index);
	if (!page) {
		err = -ENOMEM;
		goto put_err;
	}

	if (PageUptodate(page))
		return page;

	if (dn.data_blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_CACHE_SIZE);
		SetPageUptodate(page);
	} else {
		err = f2fs_submit_page_bio(sbi, page, dn.data_blkaddr,
								READ_SYNC);
		if (err)
			goto put_err;

		lock_page(page);
		if (unlikely(!PageUptodate(page))) {
			f2fs_put_page(page, 1);
			err = -EIO;
			goto put_err;
		}
		if (unlikely(page->mapping != mapping)) {
			f2fs_put_page(page, 1);
			goto repeat;
		}
	}

	if (new_i_size &&
		i_size_read(inode) < ((index + 1) << PAGE_CACHE_SHIFT)) {
		i_size_write(inode, ((index + 1) << PAGE_CACHE_SHIFT));
		/* Only the directory inode sets new_i_size */
		set_inode_flag(F2FS_I(inode), FI_UPDATE_DIR);
		mark_inode_dirty_sync(inode);
	}
	return page;

put_err:
	f2fs_put_dnode(&dn);
	return ERR_PTR(err);
}

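/*
 * Allocate one on-disk block for direct IO: charge the block to the inode,
 * mark the dnode entry NEW_ADDR, then take a block from the WARM_DATA log.
 * The extent cache is bypassed by toggling FI_NO_EXTENT around the update.
 */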
static int __allocate_data_block(struct dnode_of_data *dn)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
	struct f2fs_summary sum;
	block_t new_blkaddr;
	struct node_info ni;
	int type;

	if (unlikely(is_inode_flag_set(F2FS_I(dn->inode), FI_NO_ALLOC)))
		return -EPERM;
	if (unlikely(!inc_valid_block_count(sbi, dn->inode, 1)))
		return -ENOSPC;

	__set_data_blkaddr(dn, NEW_ADDR);
	dn->data_blkaddr = NEW_ADDR;

	get_node_info(sbi, dn->nid, &ni);
	set_summary(&sum, dn->nid, dn->ofs_in_node, ni.version);

	type = CURSEG_WARM_DATA;

	allocate_data_block(sbi, NULL, NULL_ADDR, &new_blkaddr, &sum, type);

	/* direct IO doesn't use extent cache to maximize the performance */
	set_inode_flag(F2FS_I(dn->inode), FI_NO_EXTENT);
	update_extent_cache(new_blkaddr, dn);
	clear_inode_flag(F2FS_I(dn->inode), FI_NO_EXTENT);

	dn->data_blkaddr = new_blkaddr;
	return 0;
}

/*
 * get_data_block() now supports readahead/bmap/rw direct_IO with mapped bh.
 * If original data blocks are allocated, then give them to blockdev.
 * Otherwise,
 *     a. preallocate requested block addresses
 *     b. do not use extent cache for better performance
 *     c. give the block addresses to blockdev
 */
static int get_data_block(struct inode *inode, sector_t iblock,
			struct buffer_head *bh_result, int create)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	unsigned int blkbits = inode->i_sb->s_blocksize_bits;
	unsigned maxblocks = bh_result->b_size >> blkbits;
	struct dnode_of_data dn;
	int mode = create ? ALLOC_NODE : LOOKUP_NODE_RA;
	pgoff_t pgofs, end_offset;
	int err = 0, ofs = 1;
	bool allocated = false;

	/* Get the page offset from the block offset (iblock) */
	pgofs = (pgoff_t)(iblock >> (PAGE_CACHE_SHIFT - blkbits));

	if (check_extent_cache(inode, pgofs, bh_result))
		goto out;

	if (create)
		f2fs_lock_op(sbi);

	/* When reading holes, we need its node page */
	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, pgofs, mode);
	if (err) {
		if (err == -ENOENT)
			err = 0;
		goto unlock_out;
	}
	if (dn.data_blkaddr == NEW_ADDR)
		goto put_out;

	if (dn.data_blkaddr != NULL_ADDR) {
		map_bh(bh_result, inode->i_sb, dn.data_blkaddr);
	} else if (create) {
		err = __allocate_data_block(&dn);
		if (err)
			goto put_out;
		allocated = true;
		map_bh(bh_result, inode->i_sb, dn.data_blkaddr);
	} else {
		goto put_out;
	}

	end_offset = IS_INODE(dn.node_page) ?
			ADDRS_PER_INODE(F2FS_I(inode)) : ADDRS_PER_BLOCK;
	bh_result->b_size = (((size_t)1) << blkbits);
	dn.ofs_in_node++;
	pgofs++;

get_next:
	if (dn.ofs_in_node >= end_offset) {
		if (allocated)
			sync_inode_page(&dn);
		allocated = false;
		f2fs_put_dnode(&dn);

		set_new_dnode(&dn, inode, NULL, NULL, 0);
		err = get_dnode_of_data(&dn, pgofs, mode);
		if (err) {
			if (err == -ENOENT)
				err = 0;
			goto unlock_out;
		}
		if (dn.data_blkaddr == NEW_ADDR)
			goto put_out;

		end_offset = IS_INODE(dn.node_page) ?
			ADDRS_PER_INODE(F2FS_I(inode)) : ADDRS_PER_BLOCK;
	}

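	/*
	 * Extend bh_result while the next block address is exactly one past
	 * the last mapped block, so callers see a single large extent.
	 */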
	if (maxblocks > (bh_result->b_size >> blkbits)) {
		block_t blkaddr = datablock_addr(dn.node_page, dn.ofs_in_node);
		if (blkaddr == NULL_ADDR && create) {
			err = __allocate_data_block(&dn);
			if (err)
				goto sync_out;
			allocated = true;
			blkaddr = dn.data_blkaddr;
		}
		/* Give more consecutive addresses for readahead */
		if (blkaddr == (bh_result->b_blocknr + ofs)) {
			ofs++;
			dn.ofs_in_node++;
			pgofs++;
			bh_result->b_size += (((size_t)1) << blkbits);
			goto get_next;
		}
	}
sync_out:
	if (allocated)
		sync_inode_page(&dn);
put_out:
	f2fs_put_dnode(&dn);
unlock_out:
	if (create)
		f2fs_unlock_op(sbi);
out:
	trace_f2fs_get_data_block(inode, iblock, bh_result, err);
	return err;
}

static int f2fs_read_data_page(struct file *file, struct page *page)
{
	struct inode *inode = page->mapping->host;
	int ret;

	/* If the file has inline data, try to read it directly */
	if (f2fs_has_inline_data(inode))
		ret = f2fs_read_inline_data(inode, page);
	else
		ret = mpage_readpage(page, get_data_block);

	return ret;
}

static int f2fs_read_data_pages(struct file *file,
			struct address_space *mapping,
			struct list_head *pages, unsigned nr_pages)
{
	struct inode *inode = file->f_mapping->host;

	/* If the file has inline data, skip readpages */
	if (f2fs_has_inline_data(inode))
		return 0;

	return mpage_readpages(mapping, pages, nr_pages, get_data_block);
}

int do_write_data_page(struct page *page, struct f2fs_io_info *fio)
{
	struct inode *inode = page->mapping->host;
	block_t old_blkaddr, new_blkaddr;
	struct dnode_of_data dn;
	int err = 0;

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, page->index, LOOKUP_NODE);
	if (err)
		return err;

	old_blkaddr = dn.data_blkaddr;

	/* This page is already truncated */
	if (old_blkaddr == NULL_ADDR)
		goto out_writepage;

	set_page_writeback(page);

	/*
	 * If current allocation needs SSR,
	 * it had better in-place writes for updated data.
	 */
	if (unlikely(old_blkaddr != NEW_ADDR &&
			!is_cold_data(page) &&
			need_inplace_update(inode))) {
		rewrite_data_page(page, old_blkaddr, fio);
	} else {
		write_data_page(page, &dn, &new_blkaddr, fio);
		update_extent_cache(new_blkaddr, &dn);
	}
out_writepage:
	f2fs_put_dnode(&dn);
	return err;
}

static int f2fs_write_data_page(struct page *page,
					struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	loff_t i_size = i_size_read(inode);
	const pgoff_t end_index = ((unsigned long long) i_size)
							>> PAGE_CACHE_SHIFT;
	unsigned offset = 0;
	bool need_balance_fs = false;
	int err = 0;
	struct f2fs_io_info fio = {
		.type = DATA,
		.rw = (wbc->sync_mode == WB_SYNC_ALL) ? WRITE_SYNC : WRITE,
	};

	if (page->index < end_index)
		goto write;

	/*
	 * If the offset is beyond the end of the file, this page does not
	 * have to be written to disk.
	 */
	offset = i_size & (PAGE_CACHE_SIZE - 1);
	if ((page->index >= end_index + 1) || !offset) {
		if (S_ISDIR(inode->i_mode)) {
			dec_page_count(sbi, F2FS_DIRTY_DENTS);
			inode_dec_dirty_dents(inode);
		}
		goto out;
	}

	zero_user_segment(page, offset, PAGE_CACHE_SIZE);
write:
	if (unlikely(sbi->por_doing)) {
		err = AOP_WRITEPAGE_ACTIVATE;
		goto redirty_out;
	}

	/* Dentry blocks are controlled by checkpoint */
	if (S_ISDIR(inode->i_mode)) {
		dec_page_count(sbi, F2FS_DIRTY_DENTS);
		inode_dec_dirty_dents(inode);
		err = do_write_data_page(page, &fio);
	} else {
		f2fs_lock_op(sbi);

		if (f2fs_has_inline_data(inode) || f2fs_may_inline(inode)) {
			err = f2fs_write_inline_data(inode, page, offset);
			f2fs_unlock_op(sbi);
			goto out;
		} else {
			err = do_write_data_page(page, &fio);
		}

		f2fs_unlock_op(sbi);
		need_balance_fs = true;
	}
	if (err == -ENOENT)
		goto out;
	else if (err)
		goto redirty_out;

	if (wbc->for_reclaim) {
		f2fs_submit_merged_bio(sbi, DATA, WRITE);
		need_balance_fs = false;
	}

	clear_cold_data(page);
out:
	unlock_page(page);
	if (need_balance_fs)
		f2fs_balance_fs(sbi);
	return 0;

redirty_out:
	wbc->pages_skipped++;
	set_page_dirty(page);
	return err;
}

#define MAX_DESIRED_PAGES_WP	4096

static int __f2fs_writepage(struct page *page, struct writeback_control *wbc,
			void *data)
{
	struct address_space *mapping = data;
	int ret = mapping->a_ops->writepage(page, wbc);
	mapping_set_error(mapping, ret);
	return ret;
}

static int f2fs_write_data_pages(struct address_space *mapping,
			    struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	bool locked = false;
	int ret;
	long excess_nrtw = 0, desired_nrtw;

	/* deal with chardevs and other special files */
	if (!mapping->a_ops->writepage)
		return 0;

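	/*
	 * Ask for a larger batch per round, which suits f2fs's sequential
	 * logging; the artificial excess is subtracted back at the end.
	 */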
	if (wbc->nr_to_write < MAX_DESIRED_PAGES_WP) {
		desired_nrtw = MAX_DESIRED_PAGES_WP;
		excess_nrtw = desired_nrtw - wbc->nr_to_write;
		wbc->nr_to_write = desired_nrtw;
	}

	if (!S_ISDIR(inode->i_mode)) {
		mutex_lock(&sbi->writepages);
		locked = true;
	}
	ret = write_cache_pages(mapping, wbc, __f2fs_writepage, mapping);
	if (locked)
		mutex_unlock(&sbi->writepages);

	f2fs_submit_merged_bio(sbi, DATA, WRITE);

	remove_dirty_dir_inode(inode);

	wbc->nr_to_write -= excess_nrtw;
	return ret;
}

static int f2fs_write_begin(struct file *file, struct address_space *mapping,
		loff_t pos, unsigned len, unsigned flags,
		struct page **pagep, void **fsdata)
{
	struct inode *inode = mapping->host;
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	struct page *page;
	pgoff_t index = ((unsigned long long) pos) >> PAGE_CACHE_SHIFT;
	struct dnode_of_data dn;
	int err = 0;

	f2fs_balance_fs(sbi);
repeat:
	err = f2fs_convert_inline_data(inode, pos + len);
	if (err)
		return err;

	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page)
		return -ENOMEM;
	*pagep = page;

	if (f2fs_has_inline_data(inode) && (pos + len) <= MAX_INLINE_DATA)
		goto inline_data;

	f2fs_lock_op(sbi);
	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = f2fs_reserve_block(&dn, index);
	f2fs_unlock_op(sbi);

	if (err) {
		f2fs_put_page(page, 1);
		return err;
	}
inline_data:
	if ((len == PAGE_CACHE_SIZE) || PageUptodate(page))
		return 0;

	if ((pos & PAGE_CACHE_MASK) >= i_size_read(inode)) {
		unsigned start = pos & (PAGE_CACHE_SIZE - 1);
		unsigned end = start + len;

		/* Reading beyond i_size is simple: memset to zero */
		zero_user_segments(page, 0, start, end, PAGE_CACHE_SIZE);
		goto out;
	}

	if (dn.data_blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_CACHE_SIZE);
	} else {
		if (f2fs_has_inline_data(inode))
			err = f2fs_read_inline_data(inode, page);
		else
			err = f2fs_submit_page_bio(sbi, page, dn.data_blkaddr,
							READ_SYNC);
		if (err)
			return err;
		lock_page(page);
		if (unlikely(!PageUptodate(page))) {
			f2fs_put_page(page, 1);
			return -EIO;
		}
		if (unlikely(page->mapping != mapping)) {
			f2fs_put_page(page, 1);
			goto repeat;
		}
	}
out:
	SetPageUptodate(page);
	clear_cold_data(page);
	return 0;
}

static int f2fs_write_end(struct file *file,
			struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	struct inode *inode = page->mapping->host;

	SetPageUptodate(page);
	set_page_dirty(page);

	if (pos + copied > i_size_read(inode)) {
		i_size_write(inode, pos + copied);
		mark_inode_dirty(inode);
		update_inode_page(inode);
	}

	f2fs_put_page(page, 1);
	return copied;
}

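/*
 * Direct writes must be block-aligned in the file offset and in every iovec
 * length; reads are always allowed. A non-zero return here makes
 * f2fs_direct_IO() return 0, so the caller falls back to buffered IO.
 */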
static int check_direct_IO(struct inode *inode, int rw,
		const struct iovec *iov, loff_t offset, unsigned long nr_segs)
{
	unsigned blocksize_mask = inode->i_sb->s_blocksize - 1;
	int i;

	if (rw == READ)
		return 0;

	if (offset & blocksize_mask)
		return -EINVAL;

	for (i = 0; i < nr_segs; i++)
		if (iov[i].iov_len & blocksize_mask)
			return -EINVAL;
	return 0;
}

static ssize_t f2fs_direct_IO(int rw, struct kiocb *iocb,
		const struct iovec *iov, loff_t offset, unsigned long nr_segs)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file->f_mapping->host;

	/* Let buffer I/O handle the inline data case. */
	if (f2fs_has_inline_data(inode))
		return 0;

	if (check_direct_IO(inode, rw, iov, offset, nr_segs))
		return 0;

	return blockdev_direct_IO(rw, iocb, inode, iov, offset, nr_segs,
							get_data_block);
}

static void f2fs_invalidate_data_page(struct page *page, unsigned int offset,
				      unsigned int length)
{
	struct inode *inode = page->mapping->host;
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);

	if (S_ISDIR(inode->i_mode) && PageDirty(page)) {
		dec_page_count(sbi, F2FS_DIRTY_DENTS);
		inode_dec_dirty_dents(inode);
	}
	ClearPagePrivate(page);
}

static int f2fs_release_data_page(struct page *page, gfp_t wait)
{
	ClearPagePrivate(page);
	return 1;
}

static int f2fs_set_data_page_dirty(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct inode *inode = mapping->host;

	trace_f2fs_set_page_dirty(page, DATA);

	SetPageUptodate(page);
	if (!PageDirty(page)) {
		__set_page_dirty_nobuffers(page);
		set_dirty_dir_page(inode, page);
		return 1;
	}
	return 0;
}

static sector_t f2fs_bmap(struct address_space *mapping, sector_t block)
{
	return generic_block_bmap(mapping, block, get_data_block);
}

const struct address_space_operations f2fs_dblock_aops = {
	.readpage	= f2fs_read_data_page,
	.readpages	= f2fs_read_data_pages,
	.writepage	= f2fs_write_data_page,
	.writepages	= f2fs_write_data_pages,
	.write_begin	= f2fs_write_begin,
	.write_end	= f2fs_write_end,
	.set_page_dirty	= f2fs_set_data_page_dirty,
	.invalidatepage	= f2fs_invalidate_data_page,
	.releasepage	= f2fs_release_data_page,
	.direct_IO	= f2fs_direct_IO,
	.bmap		= f2fs_bmap,
};