/*
 * fs/f2fs/data.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/buffer_head.h>
#include <linux/mpage.h>
#include <linux/aio.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/prefetch.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include <trace/events/f2fs.h>

/*
 * Lock ordering for the change of data block address:
 * ->data_page
 *  ->node_page
 *    update block addresses in the node page
 */
static void __set_data_blkaddr(struct dnode_of_data *dn, block_t new_addr)
{
	struct f2fs_node *rn;
	__le32 *addr_array;
	struct page *node_page = dn->node_page;
	unsigned int ofs_in_node = dn->ofs_in_node;

	f2fs_wait_on_page_writeback(node_page, NODE, false);

	rn = F2FS_NODE(node_page);

	/* Get physical address of data block */
	addr_array = blkaddr_in_node(rn);
	addr_array[ofs_in_node] = cpu_to_le32(new_addr);
	set_page_dirty(node_page);
}

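/*
 * Reserve one new data block for the dnode: charge a valid block to the
 * inode and record NEW_ADDR in the node page until the block is actually
 * allocated at write time.
 */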
int reserve_new_block(struct dnode_of_data *dn)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);

	if (is_inode_flag_set(F2FS_I(dn->inode), FI_NO_ALLOC))
		return -EPERM;
	if (!inc_valid_block_count(sbi, dn->inode, 1))
		return -ENOSPC;

	trace_f2fs_reserve_new_block(dn->inode, dn->nid, dn->ofs_in_node);

	__set_data_blkaddr(dn, NEW_ADDR);
	dn->data_blkaddr = NEW_ADDR;
	sync_inode_page(dn);
	return 0;
}

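/*
 * Look up the page offset in the inode's single-extent cache.  On a hit,
 * map bh_result to the cached block address and return 1; on a miss,
 * return 0 so the caller falls back to a node page lookup.
 */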
static int check_extent_cache(struct inode *inode, pgoff_t pgofs,
					struct buffer_head *bh_result)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);
	pgoff_t start_fofs, end_fofs;
	block_t start_blkaddr;

	read_lock(&fi->ext.ext_lock);
	if (fi->ext.len == 0) {
		read_unlock(&fi->ext.ext_lock);
		return 0;
	}

	stat_inc_total_hit(inode->i_sb);

	start_fofs = fi->ext.fofs;
	end_fofs = fi->ext.fofs + fi->ext.len - 1;
	start_blkaddr = fi->ext.blk_addr;

	if (pgofs >= start_fofs && pgofs <= end_fofs) {
		unsigned int blkbits = inode->i_sb->s_blocksize_bits;
		size_t count;

		clear_buffer_new(bh_result);
		map_bh(bh_result, inode->i_sb,
				start_blkaddr + pgofs - start_fofs);
		count = end_fofs - pgofs + 1;
		if (count < (UINT_MAX >> blkbits))
			bh_result->b_size = (count << blkbits);
		else
			bh_result->b_size = UINT_MAX;

		stat_inc_read_hit(inode->i_sb);
		read_unlock(&fi->ext.ext_lock);
		return 1;
	}
	read_unlock(&fi->ext.ext_lock);
	return 0;
}

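/*
 * Record the new block address in the node page and keep the in-memory
 * extent cache consistent: drop, extend (front/back merge) or split the
 * cached extent around the updated offset.
 */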
void update_extent_cache(block_t blk_addr, struct dnode_of_data *dn)
{
	struct f2fs_inode_info *fi = F2FS_I(dn->inode);
	pgoff_t fofs, start_fofs, end_fofs;
	block_t start_blkaddr, end_blkaddr;

	f2fs_bug_on(blk_addr == NEW_ADDR);
	fofs = start_bidx_of_node(ofs_of_node(dn->node_page), fi) +
							dn->ofs_in_node;

	/* Update the page address in the parent node */
	__set_data_blkaddr(dn, blk_addr);

	write_lock(&fi->ext.ext_lock);

	start_fofs = fi->ext.fofs;
	end_fofs = fi->ext.fofs + fi->ext.len - 1;
	start_blkaddr = fi->ext.blk_addr;
	end_blkaddr = fi->ext.blk_addr + fi->ext.len - 1;

	/* Drop and initialize the matched extent */
	if (fi->ext.len == 1 && fofs == start_fofs)
		fi->ext.len = 0;

	/* Initial extent */
	if (fi->ext.len == 0) {
		if (blk_addr != NULL_ADDR) {
			fi->ext.fofs = fofs;
			fi->ext.blk_addr = blk_addr;
			fi->ext.len = 1;
		}
		goto end_update;
	}

	/* Front merge */
	if (fofs == start_fofs - 1 && blk_addr == start_blkaddr - 1) {
		fi->ext.fofs--;
		fi->ext.blk_addr--;
		fi->ext.len++;
		goto end_update;
	}

	/* Back merge */
	if (fofs == end_fofs + 1 && blk_addr == end_blkaddr + 1) {
		fi->ext.len++;
		goto end_update;
	}

	/* Split the existing extent */
	if (fi->ext.len > 1 &&
		fofs >= start_fofs && fofs <= end_fofs) {
		if ((end_fofs - fofs) < (fi->ext.len >> 1)) {
			fi->ext.len = fofs - start_fofs;
		} else {
			fi->ext.fofs = fofs + 1;
			fi->ext.blk_addr = start_blkaddr +
					fofs - start_fofs + 1;
			fi->ext.len -= fofs - start_fofs + 1;
		}
		goto end_update;
	}
	write_unlock(&fi->ext.ext_lock);
	return;

end_update:
	write_unlock(&fi->ext.ext_lock);
	sync_inode_page(dn);
}

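/*
 * Find the data page at the given index, issuing a read (READ_SYNC or
 * READA depending on @sync) when it is not already up to date.
 * Returns -ENOENT for holes and -EINVAL for blocks preallocated by
 * fallocate() (NEW_ADDR).
 */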
struct page *find_data_page(struct inode *inode, pgoff_t index, bool sync)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	struct address_space *mapping = inode->i_mapping;
	struct dnode_of_data dn;
	struct page *page;
	int err;

	page = find_get_page(mapping, index);
	if (page && PageUptodate(page))
		return page;
	f2fs_put_page(page, 0);

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, index, LOOKUP_NODE);
	if (err)
		return ERR_PTR(err);
	f2fs_put_dnode(&dn);

	if (dn.data_blkaddr == NULL_ADDR)
		return ERR_PTR(-ENOENT);

	/* A block preallocated by fallocate() has NEW_ADDR but no cached page */
	if (dn.data_blkaddr == NEW_ADDR)
		return ERR_PTR(-EINVAL);

	page = grab_cache_page_write_begin(mapping, index, AOP_FLAG_NOFS);
	if (!page)
		return ERR_PTR(-ENOMEM);

	if (PageUptodate(page)) {
		unlock_page(page);
		return page;
	}

	err = f2fs_readpage(sbi, page, dn.data_blkaddr,
					sync ? READ_SYNC : READA);
	if (sync) {
		wait_on_page_locked(page);
		if (!PageUptodate(page)) {
			f2fs_put_page(page, 0);
			return ERR_PTR(-EIO);
		}
	}
	return page;
}

/*
 * If it tries to access a hole, return an error, because the callers
 * (functions in dir.c and GC) need to know whether this page exists or not.
 */
struct page *get_lock_data_page(struct inode *inode, pgoff_t index)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	struct address_space *mapping = inode->i_mapping;
	struct dnode_of_data dn;
	struct page *page;
	int err;

repeat:
	page = grab_cache_page_write_begin(mapping, index, AOP_FLAG_NOFS);
	if (!page)
		return ERR_PTR(-ENOMEM);

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, index, LOOKUP_NODE);
	if (err) {
		f2fs_put_page(page, 1);
		return ERR_PTR(err);
	}
	f2fs_put_dnode(&dn);

	if (dn.data_blkaddr == NULL_ADDR) {
		f2fs_put_page(page, 1);
		return ERR_PTR(-ENOENT);
	}

	if (PageUptodate(page))
		return page;

	/*
	 * A new dentry page is allocated but not able to be written, since its
	 * new inode page couldn't be allocated due to -ENOSPC.
	 * In such a case, its blkaddr may remain as NEW_ADDR.
	 * See f2fs_add_link -> get_new_data_page -> init_inode_metadata.
	 */
	if (dn.data_blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_CACHE_SIZE);
		SetPageUptodate(page);
		return page;
	}

	err = f2fs_readpage(sbi, page, dn.data_blkaddr, READ_SYNC);
	if (err)
		return ERR_PTR(err);

	lock_page(page);
	if (!PageUptodate(page)) {
		f2fs_put_page(page, 1);
		return ERR_PTR(-EIO);
	}
	if (page->mapping != mapping) {
		f2fs_put_page(page, 1);
		goto repeat;
	}
	return page;
}

/*
 * Caller ensures that this data page is never allocated.
 * A new zero-filled data page is allocated in the page cache.
 *
 * Also, the caller should grab and release a lock by calling
 * f2fs_lock_op() and f2fs_unlock_op().
 * Note that npage is set only by make_empty_dir.
 */
struct page *get_new_data_page(struct inode *inode,
		struct page *npage, pgoff_t index, bool new_i_size)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	struct address_space *mapping = inode->i_mapping;
	struct page *page;
	struct dnode_of_data dn;
	int err;

	set_new_dnode(&dn, inode, npage, npage, 0);
	err = get_dnode_of_data(&dn, index, ALLOC_NODE);
	if (err)
		return ERR_PTR(err);

	if (dn.data_blkaddr == NULL_ADDR) {
		if (reserve_new_block(&dn)) {
			if (!npage)
				f2fs_put_dnode(&dn);
			return ERR_PTR(-ENOSPC);
		}
	}
	if (!npage)
		f2fs_put_dnode(&dn);
repeat:
	page = grab_cache_page(mapping, index);
	if (!page)
		return ERR_PTR(-ENOMEM);

	if (PageUptodate(page))
		return page;

	if (dn.data_blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_CACHE_SIZE);
		SetPageUptodate(page);
	} else {
		err = f2fs_readpage(sbi, page, dn.data_blkaddr, READ_SYNC);
		if (err)
			return ERR_PTR(err);
		lock_page(page);
		if (!PageUptodate(page)) {
			f2fs_put_page(page, 1);
			return ERR_PTR(-EIO);
		}
		if (page->mapping != mapping) {
			f2fs_put_page(page, 1);
			goto repeat;
		}
	}

	if (new_i_size &&
		i_size_read(inode) < ((index + 1) << PAGE_CACHE_SHIFT)) {
		i_size_write(inode, ((index + 1) << PAGE_CACHE_SHIFT));
		/* Only the directory inode sets new_i_size */
		set_inode_flag(F2FS_I(inode), FI_UPDATE_DIR);
		mark_inode_dirty_sync(inode);
	}
	return page;
}

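/*
 * bio completion handler for reads: mark each page up to date on
 * success (or flag an error) and unlock it.
 */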
static void read_end_io(struct bio *bio, int err)
{
	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;

	do {
		struct page *page = bvec->bv_page;

		if (--bvec >= bio->bi_io_vec)
			prefetchw(&bvec->bv_page->flags);

		if (uptodate) {
			SetPageUptodate(page);
		} else {
			ClearPageUptodate(page);
			SetPageError(page);
		}
		unlock_page(page);
	} while (bvec >= bio->bi_io_vec);
	bio_put(bio);
}

/*
 * Fill the locked page with the data located at the block address.
 * Return an unlocked page.
 */
int f2fs_readpage(struct f2fs_sb_info *sbi, struct page *page,
					block_t blk_addr, int type)
{
	struct block_device *bdev = sbi->sb->s_bdev;
	struct bio *bio;

	trace_f2fs_readpage(page, blk_addr, type);

	/* Allocate a new bio */
	bio = f2fs_bio_alloc(bdev, 1);

	/* Initialize the bio */
	bio->bi_sector = SECTOR_FROM_BLOCK(sbi, blk_addr);
	bio->bi_end_io = read_end_io;

	if (bio_add_page(bio, page, PAGE_CACHE_SIZE, 0) < PAGE_CACHE_SIZE) {
		bio_put(bio);
		f2fs_put_page(page, 1);
		return -EFAULT;
	}

	submit_bio(type, bio);
	return 0;
}

/*
 * This function should be used by the data read flow only, since it
 * does not check the "create" flag that indicates block allocation.
 * The reason for this special functionality is to exploit the VFS
 * readahead mechanism.
 */
static int get_data_block_ro(struct inode *inode, sector_t iblock,
			struct buffer_head *bh_result, int create)
{
	unsigned int blkbits = inode->i_sb->s_blocksize_bits;
	unsigned maxblocks = bh_result->b_size >> blkbits;
	struct dnode_of_data dn;
	pgoff_t pgofs;
	int err;

	/* Get the page offset from the block offset (iblock) */
	pgofs =	(pgoff_t)(iblock >> (PAGE_CACHE_SHIFT - blkbits));

	if (check_extent_cache(inode, pgofs, bh_result)) {
		trace_f2fs_get_data_block(inode, iblock, bh_result, 0);
		return 0;
	}

	/* When reading holes, we need its node page */
	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, pgofs, LOOKUP_NODE_RA);
	if (err) {
		trace_f2fs_get_data_block(inode, iblock, bh_result, err);
		return (err == -ENOENT) ? 0 : err;
	}

	/* It does not support data allocation */
	f2fs_bug_on(create);

	if (dn.data_blkaddr != NEW_ADDR && dn.data_blkaddr != NULL_ADDR) {
		int i;
		unsigned int end_offset;

		end_offset = IS_INODE(dn.node_page) ?
				ADDRS_PER_INODE(F2FS_I(inode)) :
				ADDRS_PER_BLOCK;

		clear_buffer_new(bh_result);

		/* Give more consecutive addresses for the read ahead */
		for (i = 0; i < end_offset - dn.ofs_in_node; i++)
			if (((datablock_addr(dn.node_page,
							dn.ofs_in_node + i))
				!= (dn.data_blkaddr + i)) || maxblocks == i)
				break;
		map_bh(bh_result, inode->i_sb, dn.data_blkaddr);
		bh_result->b_size = (i << blkbits);
	}
	f2fs_put_dnode(&dn);
	trace_f2fs_get_data_block(inode, iblock, bh_result, 0);
	return 0;
}

static int f2fs_read_data_page(struct file *file, struct page *page)
{
	return mpage_readpage(page, get_data_block_ro);
}

static int f2fs_read_data_pages(struct file *file,
			struct address_space *mapping,
			struct list_head *pages, unsigned nr_pages)
{
	return mpage_readpages(mapping, pages, nr_pages, get_data_block_ro);
}

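/*
 * Write out one dirty data page.  Pages already truncated (NULL_ADDR)
 * are skipped; otherwise the data is rewritten in place when the current
 * allocation would need SSR, or written to a newly allocated block, in
 * which case the extent cache is updated.
 */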
int do_write_data_page(struct page *page)
{
	struct inode *inode = page->mapping->host;
	block_t old_blk_addr, new_blk_addr;
	struct dnode_of_data dn;
	int err = 0;

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, page->index, LOOKUP_NODE);
	if (err)
		return err;

	old_blk_addr = dn.data_blkaddr;

	/* This page is already truncated */
	if (old_blk_addr == NULL_ADDR)
		goto out_writepage;

	set_page_writeback(page);

	/*
	 * If the current allocation needs SSR, it is better to do in-place
	 * writes for the updated data.
	 */
	if (unlikely(old_blk_addr != NEW_ADDR &&
			!is_cold_data(page) &&
			need_inplace_update(inode))) {
		rewrite_data_page(F2FS_SB(inode->i_sb), page,
						old_blk_addr);
	} else {
		write_data_page(inode, page, &dn,
				old_blk_addr, &new_blk_addr);
		update_extent_cache(new_blk_addr, &dn);
	}
out_writepage:
	f2fs_put_dnode(&dn);
	return err;
}

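/*
 * writepage entry point for data pages.  Pages fully beyond i_size are
 * dropped, dentry pages are written under checkpoint control, and regular
 * data pages are written under f2fs_lock_op(), possibly triggering
 * f2fs_balance_fs() afterwards.
 */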
static int f2fs_write_data_page(struct page *page,
					struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	loff_t i_size = i_size_read(inode);
	const pgoff_t end_index = ((unsigned long long) i_size)
							>> PAGE_CACHE_SHIFT;
	unsigned offset;
	bool need_balance_fs = false;
	int err = 0;

	if (page->index < end_index)
		goto write;

	/*
	 * If the offset is out of range of the file size,
	 * this page does not have to be written to disk.
	 */
	offset = i_size & (PAGE_CACHE_SIZE - 1);
	if ((page->index >= end_index + 1) || !offset) {
		if (S_ISDIR(inode->i_mode)) {
			dec_page_count(sbi, F2FS_DIRTY_DENTS);
			inode_dec_dirty_dents(inode);
		}
		goto out;
	}

	zero_user_segment(page, offset, PAGE_CACHE_SIZE);
write:
	if (sbi->por_doing) {
		err = AOP_WRITEPAGE_ACTIVATE;
		goto redirty_out;
	}

	/* Dentry blocks are controlled by checkpoint */
	if (S_ISDIR(inode->i_mode)) {
		dec_page_count(sbi, F2FS_DIRTY_DENTS);
		inode_dec_dirty_dents(inode);
		err = do_write_data_page(page);
	} else {
		f2fs_lock_op(sbi);
		err = do_write_data_page(page);
		f2fs_unlock_op(sbi);
		need_balance_fs = true;
	}
	if (err == -ENOENT)
		goto out;
	else if (err)
		goto redirty_out;

	if (wbc->for_reclaim)
		f2fs_submit_bio(sbi, DATA, true);

	clear_cold_data(page);
out:
	unlock_page(page);
	if (need_balance_fs)
		f2fs_balance_fs(sbi);
	return 0;

redirty_out:
	wbc->pages_skipped++;
	set_page_dirty(page);
	return err;
}

#define MAX_DESIRED_PAGES_WP	4096

static int __f2fs_writepage(struct page *page, struct writeback_control *wbc,
			void *data)
{
	struct address_space *mapping = data;
	int ret = mapping->a_ops->writepage(page, wbc);
	mapping_set_error(mapping, ret);
	return ret;
}

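/*
 * writepages entry point.  Raise wbc->nr_to_write up to
 * MAX_DESIRED_PAGES_WP so that enough dirty pages are batched per call,
 * serialize non-directory writeback via sbi->writepages, and submit the
 * pending data bio when done.
 */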
static int f2fs_write_data_pages(struct address_space *mapping,
			    struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	bool locked = false;
	int ret;
	long excess_nrtw = 0, desired_nrtw;

	/* deal with chardevs and other special files */
	if (!mapping->a_ops->writepage)
		return 0;

	if (wbc->nr_to_write < MAX_DESIRED_PAGES_WP) {
		desired_nrtw = MAX_DESIRED_PAGES_WP;
		excess_nrtw = desired_nrtw - wbc->nr_to_write;
		wbc->nr_to_write = desired_nrtw;
	}

	if (!S_ISDIR(inode->i_mode)) {
		mutex_lock(&sbi->writepages);
		locked = true;
	}
	ret = write_cache_pages(mapping, wbc, __f2fs_writepage, mapping);
	if (locked)
		mutex_unlock(&sbi->writepages);
	f2fs_submit_bio(sbi, DATA, (wbc->sync_mode == WB_SYNC_ALL));

	remove_dirty_dir_inode(inode);

	wbc->nr_to_write -= excess_nrtw;
	return ret;
}

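/*
 * Prepare a page for a buffered write: reserve the data block under
 * f2fs_lock_op() and bring the page up to date, either by zeroing the
 * untouched part or by reading the existing block from disk.
 */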
static int f2fs_write_begin(struct file *file, struct address_space *mapping,
		loff_t pos, unsigned len, unsigned flags,
		struct page **pagep, void **fsdata)
{
	struct inode *inode = mapping->host;
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	struct page *page;
	pgoff_t index = ((unsigned long long) pos) >> PAGE_CACHE_SHIFT;
	struct dnode_of_data dn;
	int err = 0;

	f2fs_balance_fs(sbi);
repeat:
	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page)
		return -ENOMEM;
	*pagep = page;

	f2fs_lock_op(sbi);

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, index, ALLOC_NODE);
	if (err)
		goto err;

	if (dn.data_blkaddr == NULL_ADDR)
		err = reserve_new_block(&dn);

	f2fs_put_dnode(&dn);
	if (err)
		goto err;

	f2fs_unlock_op(sbi);

	if ((len == PAGE_CACHE_SIZE) || PageUptodate(page))
		return 0;

	if ((pos & PAGE_CACHE_MASK) >= i_size_read(inode)) {
		unsigned start = pos & (PAGE_CACHE_SIZE - 1);
		unsigned end = start + len;

		/* Reading beyond i_size is simple: memset to zero */
		zero_user_segments(page, 0, start, end, PAGE_CACHE_SIZE);
		goto out;
	}

	if (dn.data_blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_CACHE_SIZE);
	} else {
		err = f2fs_readpage(sbi, page, dn.data_blkaddr, READ_SYNC);
		if (err)
			return err;
		lock_page(page);
		if (!PageUptodate(page)) {
			f2fs_put_page(page, 1);
			return -EIO;
		}
		if (page->mapping != mapping) {
			f2fs_put_page(page, 1);
			goto repeat;
		}
	}
out:
	SetPageUptodate(page);
	clear_cold_data(page);
	return 0;

err:
	f2fs_unlock_op(sbi);
	f2fs_put_page(page, 1);
	return err;
}

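/*
 * Complete a buffered write: mark the page up to date and dirty, and
 * extend i_size when the write goes past the current end of file.
 */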
static int f2fs_write_end(struct file *file,
			struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	struct inode *inode = page->mapping->host;

	SetPageUptodate(page);
	set_page_dirty(page);

	if (pos + copied > i_size_read(inode)) {
		i_size_write(inode, pos + copied);
		mark_inode_dirty(inode);
		update_inode_page(inode);
	}

	f2fs_put_page(page, 1);
	return copied;
}

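/*
 * Direct I/O is performed for reads only; direct writes return 0 so
 * that the VFS falls back to buffered I/O.
 */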
static ssize_t f2fs_direct_IO(int rw, struct kiocb *iocb,
		const struct iovec *iov, loff_t offset, unsigned long nr_segs)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file->f_mapping->host;

	if (rw == WRITE)
		return 0;

	/* Needs synchronization with the cleaner */
	return blockdev_direct_IO(rw, iocb, inode, iov, offset, nr_segs,
						  get_data_block_ro);
}

static void f2fs_invalidate_data_page(struct page *page, unsigned int offset,
				      unsigned int length)
{
	struct inode *inode = page->mapping->host;
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	if (S_ISDIR(inode->i_mode) && PageDirty(page)) {
		dec_page_count(sbi, F2FS_DIRTY_DENTS);
		inode_dec_dirty_dents(inode);
	}
	ClearPagePrivate(page);
}

static int f2fs_release_data_page(struct page *page, gfp_t wait)
{
	ClearPagePrivate(page);
	return 1;
}

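/*
 * set_page_dirty hook: mark the page dirty without buffers and track
 * dirty directory pages for checkpoint.
 */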
static int f2fs_set_data_page_dirty(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct inode *inode = mapping->host;

	trace_f2fs_set_page_dirty(page, DATA);

	SetPageUptodate(page);
	if (!PageDirty(page)) {
		__set_page_dirty_nobuffers(page);
		set_dirty_dir_page(inode, page);
		return 1;
	}
	return 0;
}

static sector_t f2fs_bmap(struct address_space *mapping, sector_t block)
{
	return generic_block_bmap(mapping, block, get_data_block_ro);
}

const struct address_space_operations f2fs_dblock_aops = {
	.readpage	= f2fs_read_data_page,
	.readpages	= f2fs_read_data_pages,
	.writepage	= f2fs_write_data_page,
	.writepages	= f2fs_write_data_pages,
	.write_begin	= f2fs_write_begin,
	.write_end	= f2fs_write_end,
	.set_page_dirty	= f2fs_set_data_page_dirty,
	.invalidatepage	= f2fs_invalidate_data_page,
	.releasepage	= f2fs_release_data_page,
	.direct_IO	= f2fs_direct_IO,
	.bmap		= f2fs_bmap,
};