/*
 * fs/f2fs/data.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/buffer_head.h>
#include <linux/mpage.h>
#include <linux/aio.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/prefetch.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include <trace/events/f2fs.h>

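/*
 * Read-side bio completion: mark each page uptodate on success or flag
 * an error, then unlock it so waiting readers can proceed.
 */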
static void f2fs_read_end_io(struct bio *bio, int err)
{
	struct bio_vec *bvec;
	int i;

	bio_for_each_segment_all(bvec, bio, i) {
		struct page *page = bvec->bv_page;

		if (!err) {
			SetPageUptodate(page);
		} else {
			ClearPageUptodate(page);
			SetPageError(page);
		}
		unlock_page(page);
	}
	bio_put(bio);
}

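/*
 * Write-side bio completion. A write error is treated as fatal: the
 * checkpoint is flagged with CP_ERROR_FLAG and the filesystem drops to
 * read-only, since the dirty page can no longer be made durable.
 */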
static void f2fs_write_end_io(struct bio *bio, int err)
{
	struct f2fs_sb_info *sbi =
		F2FS_SB(bio->bi_io_vec->bv_page->mapping->host->i_sb);
	struct bio_vec *bvec;
	int i;

	bio_for_each_segment_all(bvec, bio, i) {
		struct page *page = bvec->bv_page;

		if (unlikely(err)) {
			SetPageError(page);
			set_bit(AS_EIO, &page->mapping->flags);
			set_ckpt_flags(sbi->ckpt, CP_ERROR_FLAG);
			sbi->sb->s_flags |= MS_RDONLY;
		}
		end_page_writeback(page);
		dec_page_count(sbi, F2FS_WRITEBACK);
	}

	if (bio->bi_private)
		complete(bio->bi_private);

	if (!get_pages(sbi, F2FS_WRITEBACK) &&
			!list_empty(&sbi->cp_wait.task_list))
		wake_up(&sbi->cp_wait);

	bio_put(bio);
}

/*
 * Low-level block read/write IO operations.
 */
static struct bio *__bio_alloc(struct f2fs_sb_info *sbi, block_t blk_addr,
				int npages, bool is_read)
{
	struct bio *bio;

	/* No failure on bio allocation */
	bio = bio_alloc(GFP_NOIO, npages);

	bio->bi_bdev = sbi->sb->s_bdev;
	bio->bi_iter.bi_sector = SECTOR_FROM_BLOCK(sbi, blk_addr);
	bio->bi_end_io = is_read ? f2fs_read_end_io : f2fs_write_end_io;

	return bio;
}

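/*
 * Flush the bio currently being merged in @io, if any, and reset the
 * merge state so a new bio can be started.
 */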
static void __submit_merged_bio(struct f2fs_bio_info *io)
{
	struct f2fs_io_info *fio = &io->fio;
	int rw;

	if (!io->bio)
		return;

	rw = fio->rw;

	if (is_read_io(rw)) {
		trace_f2fs_submit_read_bio(io->sbi->sb, rw,
						fio->type, io->bio);
		submit_bio(rw, io->bio);
	} else {
		trace_f2fs_submit_write_bio(io->sbi->sb, rw,
						fio->type, io->bio);
		/*
		 * META_FLUSH is issued only by the checkpoint procedure, and
		 * we must wait on this metadata bio for FS consistency.
		 */
		if (fio->type == META_FLUSH) {
			DECLARE_COMPLETION_ONSTACK(wait);
			io->bio->bi_private = &wait;
			submit_bio(rw, io->bio);
			wait_for_completion(&wait);
		} else {
			submit_bio(rw, io->bio);
		}
	}

	io->bio = NULL;
}

void f2fs_submit_merged_bio(struct f2fs_sb_info *sbi,
				enum page_type type, int rw)
{
	enum page_type btype = PAGE_TYPE_OF_BIO(type);
	struct f2fs_bio_info *io;

	io = is_read_io(rw) ? &sbi->read_io : &sbi->write_io[btype];

	mutex_lock(&io->io_mutex);

	/* change META to META_FLUSH in the checkpoint procedure */
	if (type >= META_FLUSH) {
		io->fio.type = META_FLUSH;
		io->fio.rw = WRITE_FLUSH_FUA | REQ_META | REQ_PRIO;
	}
	__submit_merged_bio(io);
	mutex_unlock(&io->io_mutex);
}

/*
 * Fill the locked page with data located in the block address.
 * Return unlocked page.
 */
int f2fs_submit_page_bio(struct f2fs_sb_info *sbi, struct page *page,
					block_t blk_addr, int rw)
{
	struct bio *bio;

	trace_f2fs_submit_page_bio(page, blk_addr, rw);

	/* Allocate a new bio */
	bio = __bio_alloc(sbi, blk_addr, 1, is_read_io(rw));

	if (bio_add_page(bio, page, PAGE_CACHE_SIZE, 0) < PAGE_CACHE_SIZE) {
		bio_put(bio);
		f2fs_put_page(page, 1);
		return -EFAULT;
	}

	submit_bio(rw, bio);
	return 0;
}

void f2fs_submit_page_mbio(struct f2fs_sb_info *sbi, struct page *page,
			block_t blk_addr, struct f2fs_io_info *fio)
{
	enum page_type btype = PAGE_TYPE_OF_BIO(fio->type);
	struct f2fs_bio_info *io;
	bool is_read = is_read_io(fio->rw);

	io = is_read ? &sbi->read_io : &sbi->write_io[btype];

	verify_block_addr(sbi, blk_addr);

	mutex_lock(&io->io_mutex);

	if (!is_read)
		inc_page_count(sbi, F2FS_WRITEBACK);

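	/*
	 * Merge into the pending bio only if this block is physically
	 * contiguous with the last one and carries the same rw flags;
	 * otherwise flush whatever has been merged so far.
	 */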
	if (io->bio && (io->last_block_in_bio != blk_addr - 1 ||
						io->fio.rw != fio->rw))
		__submit_merged_bio(io);
alloc_new:
	if (io->bio == NULL) {
		int bio_blocks = MAX_BIO_BLOCKS(max_hw_blocks(sbi));

		io->bio = __bio_alloc(sbi, blk_addr, bio_blocks, is_read);
		io->fio = *fio;
	}

	if (bio_add_page(io->bio, page, PAGE_CACHE_SIZE, 0) <
							PAGE_CACHE_SIZE) {
		__submit_merged_bio(io);
		goto alloc_new;
	}

	io->last_block_in_bio = blk_addr;

	mutex_unlock(&io->io_mutex);
	trace_f2fs_submit_page_mbio(page, fio->rw, fio->type, blk_addr);
}

/*
 * Lock ordering for the change of data block address:
 * ->data_page
 *  ->node_page
 *    update block addresses in the node page
 */
static void __set_data_blkaddr(struct dnode_of_data *dn, block_t new_addr)
{
	struct f2fs_node *rn;
	__le32 *addr_array;
	struct page *node_page = dn->node_page;
	unsigned int ofs_in_node = dn->ofs_in_node;

	f2fs_wait_on_page_writeback(node_page, NODE);

	rn = F2FS_NODE(node_page);

	/* Get physical address of data block */
	addr_array = blkaddr_in_node(rn);
	addr_array[ofs_in_node] = cpu_to_le32(new_addr);
	set_page_dirty(node_page);
}

int reserve_new_block(struct dnode_of_data *dn)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);

	if (unlikely(is_inode_flag_set(F2FS_I(dn->inode), FI_NO_ALLOC)))
		return -EPERM;
	if (unlikely(!inc_valid_block_count(sbi, dn->inode, 1)))
		return -ENOSPC;

	trace_f2fs_reserve_new_block(dn->inode, dn->nid, dn->ofs_in_node);

	__set_data_blkaddr(dn, NEW_ADDR);
	dn->data_blkaddr = NEW_ADDR;
	mark_inode_dirty(dn->inode);
	sync_inode_page(dn);
	return 0;
}

int f2fs_reserve_block(struct dnode_of_data *dn, pgoff_t index)
{
	bool need_put = dn->inode_page ? false : true;
	int err;

	/* if inode_page exists, index should be zero */
	f2fs_bug_on(!need_put && index);

	err = get_dnode_of_data(dn, index, ALLOC_NODE);
	if (err)
		return err;

	if (dn->data_blkaddr == NULL_ADDR)
		err = reserve_new_block(dn);
	if (err || need_put)
		f2fs_put_dnode(dn);
	return err;
}

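/*
 * f2fs caches a single (fofs, blk_addr, len) extent per inode. On a hit,
 * the buffer_head is mapped to as many contiguous blocks as the extent
 * still covers, avoiding a node-tree lookup.
 */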
static int check_extent_cache(struct inode *inode, pgoff_t pgofs,
					struct buffer_head *bh_result)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);
	pgoff_t start_fofs, end_fofs;
	block_t start_blkaddr;

	if (is_inode_flag_set(fi, FI_NO_EXTENT))
		return 0;

	read_lock(&fi->ext.ext_lock);
	if (fi->ext.len == 0) {
		read_unlock(&fi->ext.ext_lock);
		return 0;
	}

	stat_inc_total_hit(inode->i_sb);

	start_fofs = fi->ext.fofs;
	end_fofs = fi->ext.fofs + fi->ext.len - 1;
	start_blkaddr = fi->ext.blk_addr;

	if (pgofs >= start_fofs && pgofs <= end_fofs) {
		unsigned int blkbits = inode->i_sb->s_blocksize_bits;
		size_t count;

		clear_buffer_new(bh_result);
		map_bh(bh_result, inode->i_sb,
				start_blkaddr + pgofs - start_fofs);
		count = end_fofs - pgofs + 1;
		if (count < (UINT_MAX >> blkbits))
			bh_result->b_size = (count << blkbits);
		else
			bh_result->b_size = UINT_MAX;

		stat_inc_read_hit(inode->i_sb);
		read_unlock(&fi->ext.ext_lock);
		return 1;
	}
	read_unlock(&fi->ext.ext_lock);
	return 0;
}

void update_extent_cache(block_t blk_addr, struct dnode_of_data *dn)
{
	struct f2fs_inode_info *fi = F2FS_I(dn->inode);
	pgoff_t fofs, start_fofs, end_fofs;
	block_t start_blkaddr, end_blkaddr;
	int need_update = true;

	f2fs_bug_on(blk_addr == NEW_ADDR);
	fofs = start_bidx_of_node(ofs_of_node(dn->node_page), fi) +
							dn->ofs_in_node;

	/* Update the page address in the parent node */
	__set_data_blkaddr(dn, blk_addr);

	if (is_inode_flag_set(fi, FI_NO_EXTENT))
		return;

	write_lock(&fi->ext.ext_lock);

	start_fofs = fi->ext.fofs;
	end_fofs = fi->ext.fofs + fi->ext.len - 1;
	start_blkaddr = fi->ext.blk_addr;
	end_blkaddr = fi->ext.blk_addr + fi->ext.len - 1;

	/* Drop and initialize the matched extent */
	if (fi->ext.len == 1 && fofs == start_fofs)
		fi->ext.len = 0;

	/* Initial extent */
	if (fi->ext.len == 0) {
		if (blk_addr != NULL_ADDR) {
			fi->ext.fofs = fofs;
			fi->ext.blk_addr = blk_addr;
			fi->ext.len = 1;
		}
		goto end_update;
	}

	/* Front merge */
	if (fofs == start_fofs - 1 && blk_addr == start_blkaddr - 1) {
		fi->ext.fofs--;
		fi->ext.blk_addr--;
		fi->ext.len++;
		goto end_update;
	}

	/* Back merge */
	if (fofs == end_fofs + 1 && blk_addr == end_blkaddr + 1) {
		fi->ext.len++;
		goto end_update;
	}

	/* Split the existing extent */
	if (fi->ext.len > 1 &&
		fofs >= start_fofs && fofs <= end_fofs) {
		if ((end_fofs - fofs) < (fi->ext.len >> 1)) {
			fi->ext.len = fofs - start_fofs;
		} else {
			fi->ext.fofs = fofs + 1;
			fi->ext.blk_addr = start_blkaddr +
					fofs - start_fofs + 1;
			fi->ext.len -= fofs - start_fofs + 1;
		}
	} else {
		need_update = false;
	}

	/* Finally, if the extent is very fragmented, let's drop the cache. */
	if (fi->ext.len < F2FS_MIN_EXTENT_LEN) {
		fi->ext.len = 0;
		set_inode_flag(fi, FI_NO_EXTENT);
		need_update = true;
	}
end_update:
	write_unlock(&fi->ext.ext_lock);
	if (need_update)
		sync_inode_page(dn);
	return;
}

struct page *find_data_page(struct inode *inode, pgoff_t index, bool sync)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	struct address_space *mapping = inode->i_mapping;
	struct dnode_of_data dn;
	struct page *page;
	int err;

	page = find_get_page(mapping, index);
	if (page && PageUptodate(page))
		return page;
	f2fs_put_page(page, 0);

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, index, LOOKUP_NODE);
	if (err)
		return ERR_PTR(err);
	f2fs_put_dnode(&dn);

	if (dn.data_blkaddr == NULL_ADDR)
		return ERR_PTR(-ENOENT);

	/* A block preallocated by fallocate() has NEW_ADDR but no cached page */
	if (unlikely(dn.data_blkaddr == NEW_ADDR))
		return ERR_PTR(-EINVAL);

	page = grab_cache_page_write_begin(mapping, index, AOP_FLAG_NOFS);
	if (!page)
		return ERR_PTR(-ENOMEM);

	if (PageUptodate(page)) {
		unlock_page(page);
		return page;
	}

	err = f2fs_submit_page_bio(sbi, page, dn.data_blkaddr,
					sync ? READ_SYNC : READA);
	if (err)
		return ERR_PTR(err);

	if (sync) {
		wait_on_page_locked(page);
		if (unlikely(!PageUptodate(page))) {
			f2fs_put_page(page, 0);
			return ERR_PTR(-EIO);
		}
	}
	return page;
}

/*
 * If this function tries to access a hole, it returns an error, because
 * the callers (functions in dir.c and GC) need to know whether the page
 * exists or not.
 */
struct page *get_lock_data_page(struct inode *inode, pgoff_t index)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	struct address_space *mapping = inode->i_mapping;
	struct dnode_of_data dn;
	struct page *page;
	int err;

repeat:
	page = grab_cache_page_write_begin(mapping, index, AOP_FLAG_NOFS);
	if (!page)
		return ERR_PTR(-ENOMEM);

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, index, LOOKUP_NODE);
	if (err) {
		f2fs_put_page(page, 1);
		return ERR_PTR(err);
	}
	f2fs_put_dnode(&dn);

	if (unlikely(dn.data_blkaddr == NULL_ADDR)) {
		f2fs_put_page(page, 1);
		return ERR_PTR(-ENOENT);
	}

	if (PageUptodate(page))
		return page;

	/*
	 * A new dentry page is allocated but not able to be written, since its
	 * new inode page couldn't be allocated due to -ENOSPC.
	 * In that case, its blkaddr remains NEW_ADDR.
	 * See f2fs_add_link -> get_new_data_page -> init_inode_metadata.
	 */
	if (dn.data_blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_CACHE_SIZE);
		SetPageUptodate(page);
		return page;
	}

	err = f2fs_submit_page_bio(sbi, page, dn.data_blkaddr, READ_SYNC);
	if (err)
		return ERR_PTR(err);

	lock_page(page);
	if (unlikely(!PageUptodate(page))) {
		f2fs_put_page(page, 1);
		return ERR_PTR(-EIO);
	}
	if (unlikely(page->mapping != mapping)) {
		f2fs_put_page(page, 1);
		goto repeat;
	}
	return page;
}

/*
 * Caller ensures that this data page is never allocated.
 * A new zero-filled data page is allocated in the page cache.
 *
 * Also, caller should grab and release a rwsem by calling f2fs_lock_op()
 * and f2fs_unlock_op().
 * Note that ipage is set only by make_empty_dir.
 */
struct page *get_new_data_page(struct inode *inode,
		struct page *ipage, pgoff_t index, bool new_i_size)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	struct address_space *mapping = inode->i_mapping;
	struct page *page;
	struct dnode_of_data dn;
	int err;

	set_new_dnode(&dn, inode, ipage, NULL, 0);
	err = f2fs_reserve_block(&dn, index);
	if (err)
		return ERR_PTR(err);
repeat:
	page = grab_cache_page(mapping, index);
	if (!page) {
		err = -ENOMEM;
		goto put_err;
	}

	if (PageUptodate(page))
		return page;

	if (dn.data_blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_CACHE_SIZE);
		SetPageUptodate(page);
	} else {
		err = f2fs_submit_page_bio(sbi, page, dn.data_blkaddr,
								READ_SYNC);
		if (err)
			goto put_err;

		lock_page(page);
		if (unlikely(!PageUptodate(page))) {
			f2fs_put_page(page, 1);
			err = -EIO;
			goto put_err;
		}
		if (unlikely(page->mapping != mapping)) {
			f2fs_put_page(page, 1);
			goto repeat;
		}
	}

	if (new_i_size &&
		i_size_read(inode) < ((index + 1) << PAGE_CACHE_SHIFT)) {
		i_size_write(inode, ((index + 1) << PAGE_CACHE_SHIFT));
		/* Only the directory inode sets new_i_size */
		set_inode_flag(F2FS_I(inode), FI_UPDATE_DIR);
	}
	return page;

put_err:
	f2fs_put_dnode(&dn);
	return ERR_PTR(err);
}

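/*
 * Preallocate one data block for direct I/O: reserve the block in the
 * node page, then take a new address from the warm data log. The extent
 * cache is bypassed via FI_NO_EXTENT around the update.
 */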
static int __allocate_data_block(struct dnode_of_data *dn)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
	struct f2fs_summary sum;
	block_t new_blkaddr;
	struct node_info ni;
	int type;

	if (unlikely(is_inode_flag_set(F2FS_I(dn->inode), FI_NO_ALLOC)))
		return -EPERM;
	if (unlikely(!inc_valid_block_count(sbi, dn->inode, 1)))
		return -ENOSPC;

	__set_data_blkaddr(dn, NEW_ADDR);
	dn->data_blkaddr = NEW_ADDR;

	get_node_info(sbi, dn->nid, &ni);
	set_summary(&sum, dn->nid, dn->ofs_in_node, ni.version);

	type = CURSEG_WARM_DATA;

	allocate_data_block(sbi, NULL, NULL_ADDR, &new_blkaddr, &sum, type);

	/* direct IO doesn't use extent cache to maximize the performance */
	set_inode_flag(F2FS_I(dn->inode), FI_NO_EXTENT);
	update_extent_cache(new_blkaddr, dn);
	clear_inode_flag(F2FS_I(dn->inode), FI_NO_EXTENT);

	dn->data_blkaddr = new_blkaddr;
	return 0;
}

/*
 * get_data_block() now supports readahead/bmap/rw direct_IO with a mapped bh.
 * If original data blocks are allocated, then give them to blockdev.
 * Otherwise,
 *     a. preallocate requested block addresses
 *     b. do not use extent cache for better performance
 *     c. give the block addresses to blockdev
 */
static int get_data_block(struct inode *inode, sector_t iblock,
			struct buffer_head *bh_result, int create)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	unsigned int blkbits = inode->i_sb->s_blocksize_bits;
	unsigned maxblocks = bh_result->b_size >> blkbits;
	struct dnode_of_data dn;
	int mode = create ? ALLOC_NODE : LOOKUP_NODE_RA;
	pgoff_t pgofs, end_offset;
	int err = 0, ofs = 1;
	bool allocated = false;

	/* Get the page offset from the block offset(iblock) */
	pgofs = (pgoff_t)(iblock >> (PAGE_CACHE_SHIFT - blkbits));

	if (check_extent_cache(inode, pgofs, bh_result))
		goto out;

	if (create)
		f2fs_lock_op(sbi);

	/* When reading holes, we need its node page */
	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, pgofs, mode);
	if (err) {
		if (err == -ENOENT)
			err = 0;
		goto unlock_out;
	}
	if (dn.data_blkaddr == NEW_ADDR)
		goto put_out;

	if (dn.data_blkaddr != NULL_ADDR) {
		map_bh(bh_result, inode->i_sb, dn.data_blkaddr);
	} else if (create) {
		err = __allocate_data_block(&dn);
		if (err)
			goto put_out;
		allocated = true;
		map_bh(bh_result, inode->i_sb, dn.data_blkaddr);
	} else {
		goto put_out;
	}

	end_offset = IS_INODE(dn.node_page) ?
			ADDRS_PER_INODE(F2FS_I(inode)) : ADDRS_PER_BLOCK;
	bh_result->b_size = (((size_t)1) << blkbits);
	dn.ofs_in_node++;
	pgofs++;

get_next:
	if (dn.ofs_in_node >= end_offset) {
		if (allocated)
			sync_inode_page(&dn);
		allocated = false;
		f2fs_put_dnode(&dn);

		set_new_dnode(&dn, inode, NULL, NULL, 0);
		err = get_dnode_of_data(&dn, pgofs, mode);
		if (err) {
			if (err == -ENOENT)
				err = 0;
			goto unlock_out;
		}
		if (dn.data_blkaddr == NEW_ADDR)
			goto put_out;

		end_offset = IS_INODE(dn.node_page) ?
			ADDRS_PER_INODE(F2FS_I(inode)) : ADDRS_PER_BLOCK;
	}

	if (maxblocks > (bh_result->b_size >> blkbits)) {
		block_t blkaddr = datablock_addr(dn.node_page, dn.ofs_in_node);

		if (blkaddr == NULL_ADDR && create) {
			err = __allocate_data_block(&dn);
			if (err)
				goto sync_out;
			allocated = true;
			blkaddr = dn.data_blkaddr;
		}
		/* Give more consecutive addresses for the read ahead */
		if (blkaddr == (bh_result->b_blocknr + ofs)) {
			ofs++;
			dn.ofs_in_node++;
			pgofs++;
			bh_result->b_size += (((size_t)1) << blkbits);
			goto get_next;
		}
	}
sync_out:
	if (allocated)
		sync_inode_page(&dn);
put_out:
	f2fs_put_dnode(&dn);
unlock_out:
	if (create)
		f2fs_unlock_op(sbi);
out:
	trace_f2fs_get_data_block(inode, iblock, bh_result, err);
	return err;
}

static int f2fs_read_data_page(struct file *file, struct page *page)
{
	struct inode *inode = page->mapping->host;
	int ret;

	/* If the file has inline data, try to read it directly */
	if (f2fs_has_inline_data(inode))
		ret = f2fs_read_inline_data(inode, page);
	else
		ret = mpage_readpage(page, get_data_block);

	return ret;
}

static int f2fs_read_data_pages(struct file *file,
			struct address_space *mapping,
			struct list_head *pages, unsigned nr_pages)
{
	struct inode *inode = file->f_mapping->host;

	/* If the file has inline data, skip readpages */
	if (f2fs_has_inline_data(inode))
		return 0;

	return mpage_readpages(mapping, pages, nr_pages, get_data_block);
}

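/*
 * Write one data page: in place at the old address when SSR favors an
 * in-place update, otherwise out of place to a new address followed by
 * an extent cache update.
 */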
int do_write_data_page(struct page *page, struct f2fs_io_info *fio)
{
	struct inode *inode = page->mapping->host;
	block_t old_blkaddr, new_blkaddr;
	struct dnode_of_data dn;
	int err = 0;

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, page->index, LOOKUP_NODE);
	if (err)
		return err;

	old_blkaddr = dn.data_blkaddr;

	/* This page is already truncated */
	if (old_blkaddr == NULL_ADDR)
		goto out_writepage;

	set_page_writeback(page);

	/*
	 * If the current allocation needs SSR, it is better to do in-place
	 * writes for the updated data.
	 */
	if (unlikely(old_blkaddr != NEW_ADDR &&
			!is_cold_data(page) &&
			need_inplace_update(inode))) {
		rewrite_data_page(page, old_blkaddr, fio);
	} else {
		write_data_page(page, &dn, &new_blkaddr, fio);
		update_extent_cache(new_blkaddr, &dn);
	}
out_writepage:
	f2fs_put_dnode(&dn);
	return err;
}

static int f2fs_write_data_page(struct page *page,
					struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	loff_t i_size = i_size_read(inode);
	const pgoff_t end_index = ((unsigned long long) i_size)
							>> PAGE_CACHE_SHIFT;
	unsigned offset = 0;
	bool need_balance_fs = false;
	int err = 0;
	struct f2fs_io_info fio = {
		.type = DATA,
		.rw = (wbc->sync_mode == WB_SYNC_ALL) ? WRITE_SYNC : WRITE,
	};

	if (page->index < end_index)
		goto write;

	/*
	 * If the offset is out-of-range of file size,
	 * this page does not have to be written to disk.
	 */
	offset = i_size & (PAGE_CACHE_SIZE - 1);
	if ((page->index >= end_index + 1) || !offset) {
		if (S_ISDIR(inode->i_mode)) {
			dec_page_count(sbi, F2FS_DIRTY_DENTS);
			inode_dec_dirty_dents(inode);
		}
		goto out;
	}

	zero_user_segment(page, offset, PAGE_CACHE_SIZE);
write:
	if (unlikely(sbi->por_doing)) {
		err = AOP_WRITEPAGE_ACTIVATE;
		goto redirty_out;
	}

	/* Dentry blocks are controlled by checkpoint */
	if (S_ISDIR(inode->i_mode)) {
		dec_page_count(sbi, F2FS_DIRTY_DENTS);
		inode_dec_dirty_dents(inode);
		err = do_write_data_page(page, &fio);
	} else {
		f2fs_lock_op(sbi);

		if (f2fs_has_inline_data(inode) || f2fs_may_inline(inode)) {
			err = f2fs_write_inline_data(inode, page, offset);
			f2fs_unlock_op(sbi);
			goto out;
		} else {
			err = do_write_data_page(page, &fio);
		}

		f2fs_unlock_op(sbi);
		need_balance_fs = true;
	}
	if (err == -ENOENT)
		goto out;
	else if (err)
		goto redirty_out;

	if (wbc->for_reclaim) {
		f2fs_submit_merged_bio(sbi, DATA, WRITE);
		need_balance_fs = false;
	}

	clear_cold_data(page);
out:
	unlock_page(page);
	if (need_balance_fs)
		f2fs_balance_fs(sbi);
	return 0;

redirty_out:
	wbc->pages_skipped++;
	set_page_dirty(page);
	return err;
}

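/*
 * Inflate wbc->nr_to_write up to this many pages per writepages pass;
 * the unused excess is subtracted back once the pass completes.
 */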
#define MAX_DESIRED_PAGES_WP	4096

static int __f2fs_writepage(struct page *page, struct writeback_control *wbc,
			void *data)
{
	struct address_space *mapping = data;
	int ret = mapping->a_ops->writepage(page, wbc);
	mapping_set_error(mapping, ret);
	return ret;
}

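/*
 * Non-directory writeback is serialized on sbi->writepages, presumably
 * to keep data block allocation contiguous across concurrent writers.
 */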
static int f2fs_write_data_pages(struct address_space *mapping,
			    struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	bool locked = false;
	int ret;
	long excess_nrtw = 0, desired_nrtw;

	/* deal with chardevs and other special files */
	if (!mapping->a_ops->writepage)
		return 0;

	if (wbc->nr_to_write < MAX_DESIRED_PAGES_WP) {
		desired_nrtw = MAX_DESIRED_PAGES_WP;
		excess_nrtw = desired_nrtw - wbc->nr_to_write;
		wbc->nr_to_write = desired_nrtw;
	}

	if (!S_ISDIR(inode->i_mode)) {
		mutex_lock(&sbi->writepages);
		locked = true;
	}
	ret = write_cache_pages(mapping, wbc, __f2fs_writepage, mapping);
	if (locked)
		mutex_unlock(&sbi->writepages);

	f2fs_submit_merged_bio(sbi, DATA, WRITE);

	remove_dirty_dir_inode(inode);

	wbc->nr_to_write -= excess_nrtw;
	return ret;
}

static int f2fs_write_begin(struct file *file, struct address_space *mapping,
		loff_t pos, unsigned len, unsigned flags,
		struct page **pagep, void **fsdata)
{
	struct inode *inode = mapping->host;
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	struct page *page;
	pgoff_t index = ((unsigned long long) pos) >> PAGE_CACHE_SHIFT;
	struct dnode_of_data dn;
	int err = 0;

	f2fs_balance_fs(sbi);
repeat:
	err = f2fs_convert_inline_data(inode, pos + len);
	if (err)
		return err;

	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page)
		return -ENOMEM;
	*pagep = page;

	if (f2fs_has_inline_data(inode) && (pos + len) <= MAX_INLINE_DATA)
		goto inline_data;

	f2fs_lock_op(sbi);
	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = f2fs_reserve_block(&dn, index);
	f2fs_unlock_op(sbi);

	if (err) {
		f2fs_put_page(page, 1);
		return err;
	}
inline_data:
	if ((len == PAGE_CACHE_SIZE) || PageUptodate(page))
		return 0;

	if ((pos & PAGE_CACHE_MASK) >= i_size_read(inode)) {
		unsigned start = pos & (PAGE_CACHE_SIZE - 1);
		unsigned end = start + len;

		/* Reading beyond i_size is simple: memset to zero */
		zero_user_segments(page, 0, start, end, PAGE_CACHE_SIZE);
		goto out;
	}

	if (dn.data_blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_CACHE_SIZE);
	} else {
		if (f2fs_has_inline_data(inode))
			err = f2fs_read_inline_data(inode, page);
		else
			err = f2fs_submit_page_bio(sbi, page, dn.data_blkaddr,
							READ_SYNC);
		if (err)
			return err;
		lock_page(page);
		if (unlikely(!PageUptodate(page))) {
			f2fs_put_page(page, 1);
			return -EIO;
		}
		if (unlikely(page->mapping != mapping)) {
			f2fs_put_page(page, 1);
			goto repeat;
		}
	}
out:
	SetPageUptodate(page);
	clear_cold_data(page);
	return 0;
}

static int f2fs_write_end(struct file *file,
			struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	struct inode *inode = page->mapping->host;

	SetPageUptodate(page);
	set_page_dirty(page);

	if (pos + copied > i_size_read(inode)) {
		i_size_write(inode, pos + copied);
		mark_inode_dirty(inode);
		update_inode_page(inode);
	}

	f2fs_put_page(page, 1);
	return copied;
}

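/*
 * Direct writes must be block-aligned. A nonzero return here makes
 * f2fs_direct_IO return 0, so the VFS falls back to buffered I/O
 * rather than failing the request.
 */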
static int check_direct_IO(struct inode *inode, int rw,
		const struct iovec *iov, loff_t offset, unsigned long nr_segs)
{
	unsigned blocksize_mask = inode->i_sb->s_blocksize - 1;
	int i;

	if (rw == READ)
		return 0;

	if (offset & blocksize_mask)
		return -EINVAL;

	for (i = 0; i < nr_segs; i++)
		if (iov[i].iov_len & blocksize_mask)
			return -EINVAL;
	return 0;
}

static ssize_t f2fs_direct_IO(int rw, struct kiocb *iocb,
		const struct iovec *iov, loff_t offset, unsigned long nr_segs)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file->f_mapping->host;

	/* Let buffered I/O handle the inline data case. */
	if (f2fs_has_inline_data(inode))
		return 0;

	if (check_direct_IO(inode, rw, iov, offset, nr_segs))
		return 0;

	return blockdev_direct_IO(rw, iocb, inode, iov, offset, nr_segs,
							get_data_block);
}

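/*
 * When a dirty dentry page is invalidated, its dirty-dent accounting
 * must be dropped as well, since the page will never reach writepage.
 */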
static void f2fs_invalidate_data_page(struct page *page, unsigned int offset,
				      unsigned int length)
{
	struct inode *inode = page->mapping->host;
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);

	if (S_ISDIR(inode->i_mode) && PageDirty(page)) {
		dec_page_count(sbi, F2FS_DIRTY_DENTS);
		inode_dec_dirty_dents(inode);
	}
	ClearPagePrivate(page);
}

static int f2fs_release_data_page(struct page *page, gfp_t wait)
{
	ClearPagePrivate(page);
	return 1;
}

static int f2fs_set_data_page_dirty(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct inode *inode = mapping->host;

	trace_f2fs_set_page_dirty(page, DATA);

	SetPageUptodate(page);
	mark_inode_dirty(inode);

	if (!PageDirty(page)) {
		__set_page_dirty_nobuffers(page);
		set_dirty_dir_page(inode, page);
		return 1;
	}
	return 0;
}

static sector_t f2fs_bmap(struct address_space *mapping, sector_t block)
{
	return generic_block_bmap(mapping, block, get_data_block);
}

const struct address_space_operations f2fs_dblock_aops = {
	.readpage	= f2fs_read_data_page,
	.readpages	= f2fs_read_data_pages,
	.writepage	= f2fs_write_data_page,
	.writepages	= f2fs_write_data_pages,
	.write_begin	= f2fs_write_begin,
	.write_end	= f2fs_write_end,
	.set_page_dirty	= f2fs_set_data_page_dirty,
	.invalidatepage	= f2fs_invalidate_data_page,
	.releasepage	= f2fs_release_data_page,
	.direct_IO	= f2fs_direct_IO,
	.bmap		= f2fs_bmap,
};