/*
 * fs/f2fs/data.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/buffer_head.h>
#include <linux/mpage.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/pagevec.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/prefetch.h>
#include <linux/uio.h>
#include <linux/cleancache.h>
#include <linux/sched/signal.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "trace.h"
#include <trace/events/f2fs.h>

#define NUM_PREALLOC_POST_READ_CTXS	128

static struct kmem_cache *bio_post_read_ctx_cache;
static mempool_t *bio_post_read_ctx_pool;

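/*
 * Return true if writeback of this page is guaranteed to be covered by a
 * checkpoint: meta and node inode pages, directory data, and cold data.
 */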
static bool __is_cp_guaranteed(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct inode *inode;
	struct f2fs_sb_info *sbi;

	if (!mapping)
		return false;

	inode = mapping->host;
	sbi = F2FS_I_SB(inode);

	if (inode->i_ino == F2FS_META_INO(sbi) ||
			inode->i_ino ==  F2FS_NODE_INO(sbi) ||
			S_ISDIR(inode->i_mode) ||
			is_cold_data(page))
		return true;
	return false;
}

/* postprocessing steps for read bios */
enum bio_post_read_step {
	STEP_INITIAL = 0,
	STEP_DECRYPT,
};

struct bio_post_read_ctx {
	struct bio *bio;
	struct work_struct work;
	unsigned int cur_step;
	unsigned int enabled_steps;
};

static void __read_end_io(struct bio *bio)
{
	struct page *page;
	struct bio_vec *bv;
	int i;

	bio_for_each_segment_all(bv, bio, i) {
		page = bv->bv_page;

		/* PG_error was set if any post_read step failed */
		if (bio->bi_status || PageError(page)) {
			ClearPageUptodate(page);
			SetPageError(page);
		} else {
			SetPageUptodate(page);
		}
		unlock_page(page);
	}
	if (bio->bi_private)
		mempool_free(bio->bi_private, bio_post_read_ctx_pool);
	bio_put(bio);
}

static void bio_post_read_processing(struct bio_post_read_ctx *ctx);

static void decrypt_work(struct work_struct *work)
{
	struct bio_post_read_ctx *ctx =
		container_of(work, struct bio_post_read_ctx, work);

	fscrypt_decrypt_bio(ctx->bio);

	bio_post_read_processing(ctx);
}

static void bio_post_read_processing(struct bio_post_read_ctx *ctx)
{
	switch (++ctx->cur_step) {
	case STEP_DECRYPT:
		if (ctx->enabled_steps & (1 << STEP_DECRYPT)) {
			INIT_WORK(&ctx->work, decrypt_work);
			fscrypt_enqueue_decrypt_work(&ctx->work);
			return;
		}
		ctx->cur_step++;
		/* fall-through */
	default:
		__read_end_io(ctx->bio);
	}
}

static bool f2fs_bio_post_read_required(struct bio *bio)
{
	return bio->bi_private && !bio->bi_status;
}

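/*
 * Read completion handler: run the post-read pipeline when one is
 * attached, otherwise finish the pages directly.
 */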
static void f2fs_read_end_io(struct bio *bio)
{
#ifdef CONFIG_F2FS_FAULT_INJECTION
	if (time_to_inject(F2FS_P_SB(bio_first_page_all(bio)), FAULT_IO)) {
		f2fs_show_injection_info(FAULT_IO);
		bio->bi_status = BLK_STS_IOERR;
	}
#endif

	if (f2fs_bio_post_read_required(bio)) {
		struct bio_post_read_ctx *ctx = bio->bi_private;

		ctx->cur_step = STEP_INITIAL;
		bio_post_read_processing(ctx);
		return;
	}

	__read_end_io(bio);
}

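/*
 * Write completion handler: release dummy pages, propagate IO errors,
 * and wake a checkpoint waiter once all CP data writeback has drained.
 */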
static void f2fs_write_end_io(struct bio *bio)
{
	struct f2fs_sb_info *sbi = bio->bi_private;
	struct bio_vec *bvec;
	int i;

	bio_for_each_segment_all(bvec, bio, i) {
		struct page *page = bvec->bv_page;
		enum count_type type = WB_DATA_TYPE(page);

		if (IS_DUMMY_WRITTEN_PAGE(page)) {
			set_page_private(page, (unsigned long)NULL);
			ClearPagePrivate(page);
			unlock_page(page);
			mempool_free(page, sbi->write_io_dummy);

			if (unlikely(bio->bi_status))
				f2fs_stop_checkpoint(sbi, true);
			continue;
		}

		fscrypt_pullback_bio_page(&page, true);

		if (unlikely(bio->bi_status)) {
			mapping_set_error(page->mapping, -EIO);
			if (type == F2FS_WB_CP_DATA)
				f2fs_stop_checkpoint(sbi, true);
		}

		f2fs_bug_on(sbi, page->mapping == NODE_MAPPING(sbi) &&
					page->index != nid_of_node(page));

		dec_page_count(sbi, type);
		clear_cold_data(page);
		end_page_writeback(page);
	}
	if (!get_pages(sbi, F2FS_WB_CP_DATA) &&
				wq_has_sleeper(&sbi->cp_wait))
		wake_up(&sbi->cp_wait);

	bio_put(bio);
}

/*
 * Map a block address to its target device; if a bio is given, also set
 * the bio's block device and sector to match.
 */
struct block_device *f2fs_target_device(struct f2fs_sb_info *sbi,
				block_t blk_addr, struct bio *bio)
{
	struct block_device *bdev = sbi->sb->s_bdev;
	int i;

	for (i = 0; i < sbi->s_ndevs; i++) {
		if (FDEV(i).start_blk <= blk_addr &&
					FDEV(i).end_blk >= blk_addr) {
			blk_addr -= FDEV(i).start_blk;
			bdev = FDEV(i).bdev;
			break;
		}
	}
	if (bio) {
		bio_set_dev(bio, bdev);
		bio->bi_iter.bi_sector = SECTOR_FROM_BLOCK(blk_addr);
	}
	return bdev;
}

int f2fs_target_device_index(struct f2fs_sb_info *sbi, block_t blkaddr)
{
	int i;

	for (i = 0; i < sbi->s_ndevs; i++)
		if (FDEV(i).start_blk <= blkaddr && FDEV(i).end_blk >= blkaddr)
			return i;
	return 0;
}

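/* Return true if @blk_addr maps to the same disk and partition as @bio. */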
static bool __same_bdev(struct f2fs_sb_info *sbi,
				block_t blk_addr, struct bio *bio)
{
	struct block_device *b = f2fs_target_device(sbi, blk_addr, NULL);
	return bio->bi_disk == b->bd_disk && bio->bi_partno == b->bd_partno;
}

/*
 * Low-level block read/write IO operations.
 */
static struct bio *__bio_alloc(struct f2fs_sb_info *sbi, block_t blk_addr,
				struct writeback_control *wbc,
				int npages, bool is_read,
				enum page_type type, enum temp_type temp)
{
	struct bio *bio;

	bio = f2fs_bio_alloc(sbi, npages, true);

	f2fs_target_device(sbi, blk_addr, bio);
	if (is_read) {
		bio->bi_end_io = f2fs_read_end_io;
		bio->bi_private = NULL;
	} else {
		bio->bi_end_io = f2fs_write_end_io;
		bio->bi_private = sbi;
		bio->bi_write_hint = io_type_to_rw_hint(sbi, type, temp);
	}
	if (wbc)
		wbc_init_bio(wbc, bio);

	return bio;
}

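/*
 * Submit a bio; unaligned DATA/NODE writes are padded with locked dummy
 * pages so the request size stays aligned to F2FS_IO_SIZE(sbi).
 */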
static inline void __submit_bio(struct f2fs_sb_info *sbi,
				struct bio *bio, enum page_type type)
{
	if (!is_read_io(bio_op(bio))) {
		unsigned int start;

		if (type != DATA && type != NODE)
			goto submit_io;

		if (f2fs_sb_has_blkzoned(sbi->sb) && current->plug)
			blk_finish_plug(current->plug);

		start = bio->bi_iter.bi_size >> F2FS_BLKSIZE_BITS;
		start %= F2FS_IO_SIZE(sbi);

		if (start == 0)
			goto submit_io;

		/* fill dummy pages */
		for (; start < F2FS_IO_SIZE(sbi); start++) {
			struct page *page =
				mempool_alloc(sbi->write_io_dummy,
					GFP_NOIO | __GFP_ZERO | __GFP_NOFAIL);
			f2fs_bug_on(sbi, !page);

			SetPagePrivate(page);
			set_page_private(page, (unsigned long)DUMMY_WRITTEN_PAGE);
			lock_page(page);
			if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE)
				f2fs_bug_on(sbi, 1);
		}
		/*
		 * In the NODE case, we lose next block address chain. So, we
		 * need to do checkpoint in f2fs_sync_file.
		 */
		if (type == NODE)
			set_sbi_flag(sbi, SBI_NEED_CP);
	}
submit_io:
	if (is_read_io(bio_op(bio)))
		trace_f2fs_submit_read_bio(sbi->sb, type, bio);
	else
		trace_f2fs_submit_write_bio(sbi->sb, type, bio);
	submit_bio(bio);
}

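/* Flush the bio cached in @io, if any, using the op and flags in io->fio. */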
static void __submit_merged_bio(struct f2fs_bio_info *io)
{
	struct f2fs_io_info *fio = &io->fio;

	if (!io->bio)
		return;

	bio_set_op_attrs(io->bio, fio->op, fio->op_flags);

	if (is_read_io(fio->op))
		trace_f2fs_prepare_read_bio(io->sbi->sb, fio->type, io->bio);
	else
		trace_f2fs_prepare_write_bio(io->sbi->sb, fio->type, io->bio);

	__submit_bio(io->sbi, io->bio, fio->type);
	io->bio = NULL;
}

static bool __has_merged_page(struct f2fs_bio_info *io,
				struct inode *inode, nid_t ino, pgoff_t idx)
{
	struct bio_vec *bvec;
	struct page *target;
	int i;

	if (!io->bio)
		return false;

	if (!inode && !ino)
		return true;

	bio_for_each_segment_all(bvec, io->bio, i) {

		if (bvec->bv_page->mapping)
			target = bvec->bv_page;
		else
			target = fscrypt_control_page(bvec->bv_page);

		if (idx != target->index)
			continue;

		if (inode && inode == target->mapping->host)
			return true;
		if (ino && ino == ino_of_node(target))
			return true;
	}

	return false;
}

static bool has_merged_page(struct f2fs_sb_info *sbi, struct inode *inode,
				nid_t ino, pgoff_t idx, enum page_type type)
{
	enum page_type btype = PAGE_TYPE_OF_BIO(type);
	enum temp_type temp;
	struct f2fs_bio_info *io;
	bool ret = false;

	for (temp = HOT; temp < NR_TEMP_TYPE; temp++) {
		io = sbi->write_io[btype] + temp;

		down_read(&io->io_rwsem);
		ret = __has_merged_page(io, inode, ino, idx);
		up_read(&io->io_rwsem);

		/* TODO: use HOT temp only for meta pages now. */
		if (ret || btype == META)
			break;
	}
	return ret;
}

static void __f2fs_submit_merged_write(struct f2fs_sb_info *sbi,
				enum page_type type, enum temp_type temp)
{
	enum page_type btype = PAGE_TYPE_OF_BIO(type);
	struct f2fs_bio_info *io = sbi->write_io[btype] + temp;

	down_write(&io->io_rwsem);

	/* change META to META_FLUSH in the checkpoint procedure */
	if (type >= META_FLUSH) {
		io->fio.type = META_FLUSH;
		io->fio.op = REQ_OP_WRITE;
		io->fio.op_flags = REQ_META | REQ_PRIO | REQ_SYNC;
		if (!test_opt(sbi, NOBARRIER))
			io->fio.op_flags |= REQ_PREFLUSH | REQ_FUA;
	}
	__submit_merged_bio(io);
	up_write(&io->io_rwsem);
}

static void __submit_merged_write_cond(struct f2fs_sb_info *sbi,
				struct inode *inode, nid_t ino, pgoff_t idx,
				enum page_type type, bool force)
{
	enum temp_type temp;

	if (!force && !has_merged_page(sbi, inode, ino, idx, type))
		return;

	for (temp = HOT; temp < NR_TEMP_TYPE; temp++) {

		__f2fs_submit_merged_write(sbi, type, temp);

		/* TODO: use HOT temp only for meta pages now. */
		if (type >= META)
			break;
	}
}

void f2fs_submit_merged_write(struct f2fs_sb_info *sbi, enum page_type type)
{
	__submit_merged_write_cond(sbi, NULL, 0, 0, type, true);
}

void f2fs_submit_merged_write_cond(struct f2fs_sb_info *sbi,
				struct inode *inode, nid_t ino, pgoff_t idx,
				enum page_type type)
{
	__submit_merged_write_cond(sbi, inode, ino, idx, type, false);
}

void f2fs_flush_merged_writes(struct f2fs_sb_info *sbi)
{
	f2fs_submit_merged_write(sbi, DATA);
	f2fs_submit_merged_write(sbi, NODE);
	f2fs_submit_merged_write(sbi, META);
}

/*
 * Fill the locked page with data located in the block address.
 * A caller needs to unlock the page on failure.
 */
int f2fs_submit_page_bio(struct f2fs_io_info *fio)
{
	struct bio *bio;
	struct page *page = fio->encrypted_page ?
			fio->encrypted_page : fio->page;

	verify_block_addr(fio, fio->new_blkaddr);
	trace_f2fs_submit_page_bio(page, fio);
	f2fs_trace_ios(fio, 0);

	/* Allocate a new bio */
	bio = __bio_alloc(fio->sbi, fio->new_blkaddr, fio->io_wbc,
				1, is_read_io(fio->op), fio->type, fio->temp);

	if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
		bio_put(bio);
		return -EFAULT;
	}
	bio_set_op_attrs(bio, fio->op, fio->op_flags);

	__submit_bio(fio->sbi, bio, fio->type);

	if (!is_read_io(fio->op))
		inc_page_count(fio->sbi, WB_DATA_TYPE(fio->page));
	return 0;
}

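/*
 * Merged write path: append the page to the per-temperature bio for this
 * log, flushing first when the target block is not contiguous or the op,
 * flags, or device differ.
 */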
int f2fs_submit_page_write(struct f2fs_io_info *fio)
{
	struct f2fs_sb_info *sbi = fio->sbi;
	enum page_type btype = PAGE_TYPE_OF_BIO(fio->type);
	struct f2fs_bio_info *io = sbi->write_io[btype] + fio->temp;
	struct page *bio_page;
	int err = 0;

	f2fs_bug_on(sbi, is_read_io(fio->op));

	down_write(&io->io_rwsem);
next:
	if (fio->in_list) {
		spin_lock(&io->io_lock);
		if (list_empty(&io->io_list)) {
			spin_unlock(&io->io_lock);
			goto out_fail;
		}
		fio = list_first_entry(&io->io_list,
						struct f2fs_io_info, list);
		list_del(&fio->list);
		spin_unlock(&io->io_lock);
	}

	if (fio->old_blkaddr != NEW_ADDR)
		verify_block_addr(fio, fio->old_blkaddr);
	verify_block_addr(fio, fio->new_blkaddr);

	bio_page = fio->encrypted_page ? fio->encrypted_page : fio->page;

	/* set submitted = true as a return value */
	fio->submitted = true;

	inc_page_count(sbi, WB_DATA_TYPE(bio_page));

	if (io->bio && (io->last_block_in_bio != fio->new_blkaddr - 1 ||
	    (io->fio.op != fio->op || io->fio.op_flags != fio->op_flags) ||
			!__same_bdev(sbi, fio->new_blkaddr, io->bio)))
		__submit_merged_bio(io);
alloc_new:
	if (io->bio == NULL) {
		if ((fio->type == DATA || fio->type == NODE) &&
				fio->new_blkaddr & F2FS_IO_SIZE_MASK(sbi)) {
			err = -EAGAIN;
			dec_page_count(sbi, WB_DATA_TYPE(bio_page));
			goto out_fail;
		}
		io->bio = __bio_alloc(sbi, fio->new_blkaddr, fio->io_wbc,
						BIO_MAX_PAGES, false,
						fio->type, fio->temp);
		io->fio = *fio;
	}

	if (bio_add_page(io->bio, bio_page, PAGE_SIZE, 0) < PAGE_SIZE) {
		__submit_merged_bio(io);
		goto alloc_new;
	}

	if (fio->io_wbc)
		wbc_account_io(fio->io_wbc, bio_page, PAGE_SIZE);

	io->last_block_in_bio = fio->new_blkaddr;
	f2fs_trace_ios(fio, 0);

	trace_f2fs_submit_page_write(fio->page, fio);

	if (fio->in_list)
		goto next;
out_fail:
	up_write(&io->io_rwsem);
	return err;
}

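/* Build a read bio for @blkaddr, attaching a post-read context if needed. */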
static struct bio *f2fs_grab_read_bio(struct inode *inode, block_t blkaddr,
							 unsigned nr_pages)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct bio *bio;
	struct bio_post_read_ctx *ctx;
	unsigned int post_read_steps = 0;

	bio = f2fs_bio_alloc(sbi, min_t(int, nr_pages, BIO_MAX_PAGES), false);
	if (!bio)
		return ERR_PTR(-ENOMEM);
	f2fs_target_device(sbi, blkaddr, bio);
	bio->bi_end_io = f2fs_read_end_io;
	bio_set_op_attrs(bio, REQ_OP_READ, 0);

	if (f2fs_encrypted_file(inode))
		post_read_steps |= 1 << STEP_DECRYPT;
	if (post_read_steps) {
		ctx = mempool_alloc(bio_post_read_ctx_pool, GFP_NOFS);
		if (!ctx) {
			bio_put(bio);
			return ERR_PTR(-ENOMEM);
		}
		ctx->bio = bio;
		ctx->enabled_steps = post_read_steps;
		bio->bi_private = ctx;

		/* wait for the page to be moved by cleaning */
		f2fs_wait_on_block_writeback(sbi, blkaddr);
	}

	return bio;
}

/* This can handle encryption stuff */
static int f2fs_submit_page_read(struct inode *inode, struct page *page,
							block_t blkaddr)
{
	struct bio *bio = f2fs_grab_read_bio(inode, blkaddr, 1);

	if (IS_ERR(bio))
		return PTR_ERR(bio);

	if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
		bio_put(bio);
		return -EFAULT;
	}
	__submit_bio(F2FS_I_SB(inode), bio, DATA);
	return 0;
}

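/* Write dn->data_blkaddr into the raw node page at dn->ofs_in_node. */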
static void __set_data_blkaddr(struct dnode_of_data *dn)
{
	struct f2fs_node *rn = F2FS_NODE(dn->node_page);
	__le32 *addr_array;
	int base = 0;

	if (IS_INODE(dn->node_page) && f2fs_has_extra_attr(dn->inode))
		base = get_extra_isize(dn->inode);

	/* Get physical address of data block */
	addr_array = blkaddr_in_node(rn);
	addr_array[base + dn->ofs_in_node] = cpu_to_le32(dn->data_blkaddr);
}

/*
 * Lock ordering for the change of data block address:
 * ->data_page
 *  ->node_page
 *    update block addresses in the node page
 */
void set_data_blkaddr(struct dnode_of_data *dn)
{
	f2fs_wait_on_page_writeback(dn->node_page, NODE, true);
	__set_data_blkaddr(dn);
	if (set_page_dirty(dn->node_page))
		dn->node_changed = true;
}

void f2fs_update_data_blkaddr(struct dnode_of_data *dn, block_t blkaddr)
{
	dn->data_blkaddr = blkaddr;
	set_data_blkaddr(dn);
	f2fs_update_extent_cache(dn);
}

/* dn->ofs_in_node will be returned with up-to-date last block pointer */
int reserve_new_blocks(struct dnode_of_data *dn, blkcnt_t count)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	int err;

	if (!count)
		return 0;

	if (unlikely(is_inode_flag_set(dn->inode, FI_NO_ALLOC)))
		return -EPERM;
	if (unlikely((err = inc_valid_block_count(sbi, dn->inode, &count))))
		return err;

	trace_f2fs_reserve_new_blocks(dn->inode, dn->nid,
						dn->ofs_in_node, count);

	f2fs_wait_on_page_writeback(dn->node_page, NODE, true);

	for (; count > 0; dn->ofs_in_node++) {
		block_t blkaddr = datablock_addr(dn->inode,
					dn->node_page, dn->ofs_in_node);
		if (blkaddr == NULL_ADDR) {
			dn->data_blkaddr = NEW_ADDR;
			__set_data_blkaddr(dn);
			count--;
		}
	}

	if (set_page_dirty(dn->node_page))
		dn->node_changed = true;
	return 0;
}

/* Should keep dn->ofs_in_node unchanged */
int reserve_new_block(struct dnode_of_data *dn)
{
	unsigned int ofs_in_node = dn->ofs_in_node;
	int ret;

	ret = reserve_new_blocks(dn, 1);
	dn->ofs_in_node = ofs_in_node;
	return ret;
}

int f2fs_reserve_block(struct dnode_of_data *dn, pgoff_t index)
{
	bool need_put = dn->inode_page ? false : true;
	int err;

	err = get_dnode_of_data(dn, index, ALLOC_NODE);
	if (err)
		return err;

	if (dn->data_blkaddr == NULL_ADDR)
		err = reserve_new_block(dn);
	if (err || need_put)
		f2fs_put_dnode(dn);
	return err;
}

int f2fs_get_block(struct dnode_of_data *dn, pgoff_t index)
{
	struct extent_info ei  = {0,0,0};
	struct inode *inode = dn->inode;

	if (f2fs_lookup_extent_cache(inode, index, &ei)) {
		dn->data_blkaddr = ei.blk + index - ei.fofs;
		return 0;
	}

	return f2fs_reserve_block(dn, index);
}

struct page *get_read_data_page(struct inode *inode, pgoff_t index,
						int op_flags, bool for_write)
{
	struct address_space *mapping = inode->i_mapping;
	struct dnode_of_data dn;
	struct page *page;
	struct extent_info ei = {0,0,0};
	int err;

	page = f2fs_grab_cache_page(mapping, index, for_write);
	if (!page)
		return ERR_PTR(-ENOMEM);

	if (f2fs_lookup_extent_cache(inode, index, &ei)) {
		dn.data_blkaddr = ei.blk + index - ei.fofs;
		goto got_it;
	}

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, index, LOOKUP_NODE);
	if (err)
		goto put_err;
	f2fs_put_dnode(&dn);

	if (unlikely(dn.data_blkaddr == NULL_ADDR)) {
		err = -ENOENT;
		goto put_err;
	}
got_it:
	if (PageUptodate(page)) {
		unlock_page(page);
		return page;
	}

	/*
	 * A new dentry page is allocated but not able to be written, since its
	 * new inode page couldn't be allocated due to -ENOSPC.
	 * In such a case, its blkaddr can remain NEW_ADDR.
	 * see, f2fs_add_link -> get_new_data_page -> init_inode_metadata.
	 */
	if (dn.data_blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_SIZE);
		if (!PageUptodate(page))
			SetPageUptodate(page);
		unlock_page(page);
		return page;
	}

	err = f2fs_submit_page_read(inode, page, dn.data_blkaddr);
	if (err)
		goto put_err;
	return page;

put_err:
	f2fs_put_page(page, 1);
	return ERR_PTR(err);
}

struct page *find_data_page(struct inode *inode, pgoff_t index)
{
	struct address_space *mapping = inode->i_mapping;
	struct page *page;

	page = find_get_page(mapping, index);
	if (page && PageUptodate(page))
		return page;
	f2fs_put_page(page, 0);

	page = get_read_data_page(inode, index, 0, false);
	if (IS_ERR(page))
		return page;

	if (PageUptodate(page))
		return page;

	wait_on_page_locked(page);
	if (unlikely(!PageUptodate(page))) {
		f2fs_put_page(page, 0);
		return ERR_PTR(-EIO);
	}
	return page;
}

/*
 * If it tries to access a hole, return an error.
 * The callers, functions in dir.c and GC, should be able to know
 * whether this page exists or not.
 */
struct page *get_lock_data_page(struct inode *inode, pgoff_t index,
							bool for_write)
{
	struct address_space *mapping = inode->i_mapping;
	struct page *page;
repeat:
	page = get_read_data_page(inode, index, 0, for_write);
	if (IS_ERR(page))
		return page;

	/* wait for read completion */
	lock_page(page);
	if (unlikely(page->mapping != mapping)) {
		f2fs_put_page(page, 1);
		goto repeat;
	}
	if (unlikely(!PageUptodate(page))) {
		f2fs_put_page(page, 1);
		return ERR_PTR(-EIO);
	}
	return page;
}

/*
 * Caller ensures that this data page is never allocated.
 * A new zero-filled data page is allocated in the page cache.
 *
 * Also, caller should grab and release a rwsem by calling f2fs_lock_op() and
 * f2fs_unlock_op().
 * Note that ipage is set only by make_empty_dir, and if any error occurs,
 * ipage should be released by this function.
 */
struct page *get_new_data_page(struct inode *inode,
		struct page *ipage, pgoff_t index, bool new_i_size)
{
	struct address_space *mapping = inode->i_mapping;
	struct page *page;
	struct dnode_of_data dn;
	int err;

	page = f2fs_grab_cache_page(mapping, index, true);
	if (!page) {
		/*
		 * before exiting, we should make sure ipage will be released
		 * if any error occur.
		 */
		f2fs_put_page(ipage, 1);
		return ERR_PTR(-ENOMEM);
	}

	set_new_dnode(&dn, inode, ipage, NULL, 0);
	err = f2fs_reserve_block(&dn, index);
	if (err) {
		f2fs_put_page(page, 1);
		return ERR_PTR(err);
	}
	if (!ipage)
		f2fs_put_dnode(&dn);

	if (PageUptodate(page))
		goto got_it;

	if (dn.data_blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_SIZE);
		if (!PageUptodate(page))
			SetPageUptodate(page);
	} else {
		f2fs_put_page(page, 1);

		/* if ipage exists, blkaddr should be NEW_ADDR */
		f2fs_bug_on(F2FS_I_SB(inode), ipage);
		page = get_lock_data_page(inode, index, true);
		if (IS_ERR(page))
			return page;
	}
got_it:
	if (new_i_size && i_size_read(inode) <
				((loff_t)(index + 1) << PAGE_SHIFT))
		f2fs_i_size_write(inode, ((loff_t)(index + 1) << PAGE_SHIFT));
	return page;
}

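/*
 * Allocate a new data block for @dn and push i_size forward when the new
 * block extends the file.
 */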
static int __allocate_data_block(struct dnode_of_data *dn, int seg_type)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	struct f2fs_summary sum;
	struct node_info ni;
	pgoff_t fofs;
	blkcnt_t count = 1;
	int err;

	if (unlikely(is_inode_flag_set(dn->inode, FI_NO_ALLOC)))
		return -EPERM;

	dn->data_blkaddr = datablock_addr(dn->inode,
				dn->node_page, dn->ofs_in_node);
	if (dn->data_blkaddr == NEW_ADDR)
		goto alloc;

	if (unlikely((err = inc_valid_block_count(sbi, dn->inode, &count))))
		return err;

alloc:
	get_node_info(sbi, dn->nid, &ni);
	set_summary(&sum, dn->nid, dn->ofs_in_node, ni.version);

	allocate_data_block(sbi, NULL, dn->data_blkaddr, &dn->data_blkaddr,
					&sum, seg_type, NULL, false);
	set_data_blkaddr(dn);

	/* update i_size */
	fofs = start_bidx_of_node(ofs_of_node(dn->node_page), dn->inode) +
							dn->ofs_in_node;
	if (i_size_read(dn->inode) < ((loff_t)(fofs + 1) << PAGE_SHIFT))
		f2fs_i_size_write(dn->inode,
				((loff_t)(fofs + 1) << PAGE_SHIFT));
	return 0;
}

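/* Preallocate blocks for an upcoming buffered or direct write. */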
int f2fs_preallocate_blocks(struct kiocb *iocb, struct iov_iter *from)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	struct f2fs_map_blocks map;
	int flag;
	int err = 0;
	bool direct_io = iocb->ki_flags & IOCB_DIRECT;

	/* convert inline data for Direct I/O */
	if (direct_io) {
		err = f2fs_convert_inline_inode(inode);
		if (err)
			return err;
	}

	if (is_inode_flag_set(inode, FI_NO_PREALLOC))
		return 0;

	map.m_lblk = F2FS_BLK_ALIGN(iocb->ki_pos);
	map.m_len = F2FS_BYTES_TO_BLK(iocb->ki_pos + iov_iter_count(from));
	if (map.m_len > map.m_lblk)
		map.m_len -= map.m_lblk;
	else
		map.m_len = 0;

	map.m_next_pgofs = NULL;
	map.m_next_extent = NULL;
	map.m_seg_type = NO_CHECK_TYPE;

	if (direct_io) {
		map.m_seg_type = rw_hint_to_seg_type(iocb->ki_hint);
		flag = f2fs_force_buffered_io(inode, WRITE) ?
					F2FS_GET_BLOCK_PRE_AIO :
					F2FS_GET_BLOCK_PRE_DIO;
		goto map_blocks;
	}
	if (iocb->ki_pos + iov_iter_count(from) > MAX_INLINE_DATA(inode)) {
		err = f2fs_convert_inline_inode(inode);
		if (err)
			return err;
	}
	if (f2fs_has_inline_data(inode))
		return err;

	flag = F2FS_GET_BLOCK_PRE_AIO;

map_blocks:
	err = f2fs_map_blocks(inode, &map, 1, flag);
	if (map.m_len > 0 && err == -ENOSPC) {
		if (!direct_io)
			set_inode_flag(inode, FI_NO_PREALLOC);
		err = 0;
	}
	return err;
}

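/*
 * Serialize block mapping against checkpoint: take the node_change lock
 * for F2FS_GET_BLOCK_PRE_AIO, or f2fs_lock_op() otherwise.
 */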
static inline void __do_map_lock(struct f2fs_sb_info *sbi, int flag, bool lock)
{
	if (flag == F2FS_GET_BLOCK_PRE_AIO) {
		if (lock)
			down_read(&sbi->node_change);
		else
			up_read(&sbi->node_change);
	} else {
		if (lock)
			f2fs_lock_op(sbi);
		else
			f2fs_unlock_op(sbi);
	}
}

/*
 * f2fs_map_blocks() now supports readahead/bmap/rw direct_IO with
 * the f2fs_map_blocks structure.
 * If original data blocks are allocated, then give them to blockdev.
 * Otherwise,
 *     a. preallocate requested block addresses
 *     b. do not use extent cache for better performance
 *     c. give the block addresses to blockdev
 */
int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map,
						int create, int flag)
{
	unsigned int maxblocks = map->m_len;
	struct dnode_of_data dn;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	int mode = create ? ALLOC_NODE : LOOKUP_NODE;
	pgoff_t pgofs, end_offset, end;
	int err = 0, ofs = 1;
	unsigned int ofs_in_node, last_ofs_in_node;
	blkcnt_t prealloc;
	struct extent_info ei = {0,0,0};
	block_t blkaddr;
	unsigned int start_pgofs;

	if (!maxblocks)
		return 0;

	map->m_len = 0;
	map->m_flags = 0;

	/* it only supports block size == page size */
	pgofs =	(pgoff_t)map->m_lblk;
	end = pgofs + maxblocks;

	if (!create && f2fs_lookup_extent_cache(inode, pgofs, &ei)) {
		map->m_pblk = ei.blk + pgofs - ei.fofs;
		map->m_len = min((pgoff_t)maxblocks, ei.fofs + ei.len - pgofs);
		map->m_flags = F2FS_MAP_MAPPED;
		if (map->m_next_extent)
			*map->m_next_extent = pgofs + map->m_len;
		goto out;
	}

next_dnode:
	if (create)
		__do_map_lock(sbi, flag, true);

	/* When reading holes, we need its node page */
	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, pgofs, mode);
	if (err) {
		if (flag == F2FS_GET_BLOCK_BMAP)
			map->m_pblk = 0;
		if (err == -ENOENT) {
			err = 0;
			if (map->m_next_pgofs)
				*map->m_next_pgofs =
					get_next_page_offset(&dn, pgofs);
			if (map->m_next_extent)
				*map->m_next_extent =
					get_next_page_offset(&dn, pgofs);
		}
		goto unlock_out;
	}

	start_pgofs = pgofs;
	prealloc = 0;
	last_ofs_in_node = ofs_in_node = dn.ofs_in_node;
	end_offset = ADDRS_PER_PAGE(dn.node_page, inode);

next_block:
	blkaddr = datablock_addr(dn.inode, dn.node_page, dn.ofs_in_node);

	if (blkaddr == NEW_ADDR || blkaddr == NULL_ADDR) {
		if (create) {
			if (unlikely(f2fs_cp_error(sbi))) {
				err = -EIO;
				goto sync_out;
			}
			if (flag == F2FS_GET_BLOCK_PRE_AIO) {
				if (blkaddr == NULL_ADDR) {
					prealloc++;
					last_ofs_in_node = dn.ofs_in_node;
				}
			} else {
				err = __allocate_data_block(&dn,
							map->m_seg_type);
				if (!err)
					set_inode_flag(inode, FI_APPEND_WRITE);
			}
			if (err)
				goto sync_out;
			map->m_flags |= F2FS_MAP_NEW;
			blkaddr = dn.data_blkaddr;
		} else {
			if (flag == F2FS_GET_BLOCK_BMAP) {
				map->m_pblk = 0;
				goto sync_out;
			}
			if (flag == F2FS_GET_BLOCK_PRECACHE)
				goto sync_out;
			if (flag == F2FS_GET_BLOCK_FIEMAP &&
						blkaddr == NULL_ADDR) {
				if (map->m_next_pgofs)
					*map->m_next_pgofs = pgofs + 1;
				goto sync_out;
			}
			if (flag != F2FS_GET_BLOCK_FIEMAP) {
				/* for defragment case */
				if (map->m_next_pgofs)
					*map->m_next_pgofs = pgofs + 1;
				goto sync_out;
			}
		}
	}

	if (flag == F2FS_GET_BLOCK_PRE_AIO)
		goto skip;

	if (map->m_len == 0) {
		/* preallocated unwritten block should be mapped for fiemap. */
		if (blkaddr == NEW_ADDR)
			map->m_flags |= F2FS_MAP_UNWRITTEN;
		map->m_flags |= F2FS_MAP_MAPPED;

		map->m_pblk = blkaddr;
		map->m_len = 1;
	} else if ((map->m_pblk != NEW_ADDR &&
			blkaddr == (map->m_pblk + ofs)) ||
			(map->m_pblk == NEW_ADDR && blkaddr == NEW_ADDR) ||
			flag == F2FS_GET_BLOCK_PRE_DIO) {
		ofs++;
		map->m_len++;
	} else {
		goto sync_out;
	}

skip:
	dn.ofs_in_node++;
	pgofs++;

	/* preallocate blocks in batch for one dnode page */
	if (flag == F2FS_GET_BLOCK_PRE_AIO &&
			(pgofs == end || dn.ofs_in_node == end_offset)) {

		dn.ofs_in_node = ofs_in_node;
		err = reserve_new_blocks(&dn, prealloc);
		if (err)
			goto sync_out;

		map->m_len += dn.ofs_in_node - ofs_in_node;
		if (prealloc && dn.ofs_in_node != last_ofs_in_node + 1) {
			err = -ENOSPC;
			goto sync_out;
		}
		dn.ofs_in_node = end_offset;
	}

	if (pgofs >= end)
		goto sync_out;
	else if (dn.ofs_in_node < end_offset)
		goto next_block;

	if (flag == F2FS_GET_BLOCK_PRECACHE) {
		if (map->m_flags & F2FS_MAP_MAPPED) {
			unsigned int ofs = start_pgofs - map->m_lblk;

			f2fs_update_extent_cache_range(&dn,
				start_pgofs, map->m_pblk + ofs,
				map->m_len - ofs);
		}
	}

	f2fs_put_dnode(&dn);

	if (create) {
		__do_map_lock(sbi, flag, false);
		f2fs_balance_fs(sbi, dn.node_changed);
	}
	goto next_dnode;

sync_out:
	if (flag == F2FS_GET_BLOCK_PRECACHE) {
		if (map->m_flags & F2FS_MAP_MAPPED) {
			unsigned int ofs = start_pgofs - map->m_lblk;

			f2fs_update_extent_cache_range(&dn,
				start_pgofs, map->m_pblk + ofs,
				map->m_len - ofs);
		}
		if (map->m_next_extent)
			*map->m_next_extent = pgofs + 1;
	}
	f2fs_put_dnode(&dn);
unlock_out:
	if (create) {
		__do_map_lock(sbi, flag, false);
		f2fs_balance_fs(sbi, dn.node_changed);
	}
out:
	trace_f2fs_map_blocks(inode, map, err);
	return err;
}

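/* Return true if [pos, pos + len) lies entirely over allocated blocks. */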
bool f2fs_overwrite_io(struct inode *inode, loff_t pos, size_t len)
{
	struct f2fs_map_blocks map;
	block_t last_lblk;
	int err;

	if (pos + len > i_size_read(inode))
		return false;

	map.m_lblk = F2FS_BYTES_TO_BLK(pos);
	map.m_next_pgofs = NULL;
	map.m_next_extent = NULL;
	map.m_seg_type = NO_CHECK_TYPE;
	last_lblk = F2FS_BLK_ALIGN(pos + len);

	while (map.m_lblk < last_lblk) {
		map.m_len = last_lblk - map.m_lblk;
		err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_DEFAULT);
		if (err || map.m_len == 0)
			return false;
		map.m_lblk += map.m_len;
	}
	return true;
}

static int __get_data_block(struct inode *inode, sector_t iblock,
			struct buffer_head *bh, int create, int flag,
			pgoff_t *next_pgofs, int seg_type)
{
	struct f2fs_map_blocks map;
	int err;

	map.m_lblk = iblock;
	map.m_len = bh->b_size >> inode->i_blkbits;
	map.m_next_pgofs = next_pgofs;
	map.m_next_extent = NULL;
	map.m_seg_type = seg_type;

	err = f2fs_map_blocks(inode, &map, create, flag);
	if (!err) {
		map_bh(bh, inode->i_sb, map.m_pblk);
		bh->b_state = (bh->b_state & ~F2FS_MAP_FLAGS) | map.m_flags;
		bh->b_size = (u64)map.m_len << inode->i_blkbits;
	}
	return err;
}

static int get_data_block(struct inode *inode, sector_t iblock,
			struct buffer_head *bh_result, int create, int flag,
			pgoff_t *next_pgofs)
{
	return __get_data_block(inode, iblock, bh_result, create,
							flag, next_pgofs,
							NO_CHECK_TYPE);
}

static int get_data_block_dio(struct inode *inode, sector_t iblock,
			struct buffer_head *bh_result, int create)
{
	return __get_data_block(inode, iblock, bh_result, create,
						F2FS_GET_BLOCK_DEFAULT, NULL,
						rw_hint_to_seg_type(
							inode->i_write_hint));
}

static int get_data_block_bmap(struct inode *inode, sector_t iblock,
			struct buffer_head *bh_result, int create)
{
	/* Block number less than F2FS MAX BLOCKS */
	if (unlikely(iblock >= F2FS_I_SB(inode)->max_file_blocks))
		return -EFBIG;

	return __get_data_block(inode, iblock, bh_result, create,
						F2FS_GET_BLOCK_BMAP, NULL,
						NO_CHECK_TYPE);
}

static inline sector_t logical_to_blk(struct inode *inode, loff_t offset)
{
	return (offset >> inode->i_blkbits);
}

static inline loff_t blk_to_logical(struct inode *inode, sector_t blk)
{
	return (blk << inode->i_blkbits);
}

static int f2fs_xattr_fiemap(struct inode *inode,
				struct fiemap_extent_info *fieinfo)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct page *page;
	struct node_info ni;
	__u64 phys = 0, len;
	__u32 flags;
	nid_t xnid = F2FS_I(inode)->i_xattr_nid;
	int err = 0;

	if (f2fs_has_inline_xattr(inode)) {
		int offset;

		page = f2fs_grab_cache_page(NODE_MAPPING(sbi),
						inode->i_ino, false);
		if (!page)
			return -ENOMEM;

		get_node_info(sbi, inode->i_ino, &ni);

		phys = (__u64)blk_to_logical(inode, ni.blk_addr);
		offset = offsetof(struct f2fs_inode, i_addr) +
					sizeof(__le32) * (DEF_ADDRS_PER_INODE -
					get_inline_xattr_addrs(inode));

		phys += offset;
		len = inline_xattr_size(inode);

		f2fs_put_page(page, 1);

		flags = FIEMAP_EXTENT_DATA_INLINE | FIEMAP_EXTENT_NOT_ALIGNED;

		if (!xnid)
			flags |= FIEMAP_EXTENT_LAST;

		err = fiemap_fill_next_extent(fieinfo, 0, phys, len, flags);
		if (err || err == 1)
			return err;
	}

	if (xnid) {
		page = f2fs_grab_cache_page(NODE_MAPPING(sbi), xnid, false);
		if (!page)
			return -ENOMEM;

		get_node_info(sbi, xnid, &ni);

		phys = (__u64)blk_to_logical(inode, ni.blk_addr);
		len = inode->i_sb->s_blocksize;

		f2fs_put_page(page, 1);

		flags = FIEMAP_EXTENT_LAST;
	}

	if (phys)
		err = fiemap_fill_next_extent(fieinfo, 0, phys, len, flags);

	return (err < 0 ? err : 0);
}

int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
		u64 start, u64 len)
{
	struct buffer_head map_bh;
	sector_t start_blk, last_blk;
	pgoff_t next_pgofs;
	u64 logical = 0, phys = 0, size = 0;
	u32 flags = 0;
	int ret = 0;

	if (fieinfo->fi_flags & FIEMAP_FLAG_CACHE) {
		ret = f2fs_precache_extents(inode);
		if (ret)
			return ret;
	}

	ret = fiemap_check_flags(fieinfo, FIEMAP_FLAG_SYNC | FIEMAP_FLAG_XATTR);
	if (ret)
		return ret;

	inode_lock(inode);

	if (fieinfo->fi_flags & FIEMAP_FLAG_XATTR) {
		ret = f2fs_xattr_fiemap(inode, fieinfo);
		goto out;
	}

	if (f2fs_has_inline_data(inode)) {
		ret = f2fs_inline_data_fiemap(inode, fieinfo, start, len);
		if (ret != -EAGAIN)
			goto out;
	}

	if (logical_to_blk(inode, len) == 0)
		len = blk_to_logical(inode, 1);

	start_blk = logical_to_blk(inode, start);
	last_blk = logical_to_blk(inode, start + len - 1);

next:
	memset(&map_bh, 0, sizeof(struct buffer_head));
	map_bh.b_size = len;

	ret = get_data_block(inode, start_blk, &map_bh, 0,
					F2FS_GET_BLOCK_FIEMAP, &next_pgofs);
	if (ret)
		goto out;

	/* HOLE */
	if (!buffer_mapped(&map_bh)) {
		start_blk = next_pgofs;

		if (blk_to_logical(inode, start_blk) < blk_to_logical(inode,
					F2FS_I_SB(inode)->max_file_blocks))
			goto prep_next;

		flags |= FIEMAP_EXTENT_LAST;
	}

	if (size) {
		if (f2fs_encrypted_inode(inode))
			flags |= FIEMAP_EXTENT_DATA_ENCRYPTED;

		ret = fiemap_fill_next_extent(fieinfo, logical,
				phys, size, flags);
	}

	if (start_blk > last_blk || ret)
		goto out;

	logical = blk_to_logical(inode, start_blk);
	phys = blk_to_logical(inode, map_bh.b_blocknr);
	size = map_bh.b_size;
	flags = 0;
	if (buffer_unwritten(&map_bh))
		flags = FIEMAP_EXTENT_UNWRITTEN;

	start_blk += logical_to_blk(inode, size);

prep_next:
	cond_resched();
	if (fatal_signal_pending(current))
		ret = -EINTR;
	else
		goto next;
out:
	if (ret == 1)
		ret = 0;

	inode_unlock(inode);
	return ret;
}

/*
 * This function was originally taken from fs/mpage.c, and customized for f2fs.
 * Major change was from block_size == page_size in f2fs by default.
 */
static int f2fs_mpage_readpages(struct address_space *mapping,
			struct list_head *pages, struct page *page,
			unsigned nr_pages)
{
	struct bio *bio = NULL;
	sector_t last_block_in_bio = 0;
	struct inode *inode = mapping->host;
	const unsigned blkbits = inode->i_blkbits;
	const unsigned blocksize = 1 << blkbits;
	sector_t block_in_file;
	sector_t last_block;
	sector_t last_block_in_file;
	sector_t block_nr;
	struct f2fs_map_blocks map;

	map.m_pblk = 0;
	map.m_lblk = 0;
	map.m_len = 0;
	map.m_flags = 0;
	map.m_next_pgofs = NULL;
	map.m_next_extent = NULL;
	map.m_seg_type = NO_CHECK_TYPE;

	for (; nr_pages; nr_pages--) {
		if (pages) {
			page = list_last_entry(pages, struct page, lru);

			prefetchw(&page->flags);
			list_del(&page->lru);
			if (add_to_page_cache_lru(page, mapping,
						  page->index,
						  readahead_gfp_mask(mapping)))
				goto next_page;
		}

		block_in_file = (sector_t)page->index;
		last_block = block_in_file + nr_pages;
		last_block_in_file = (i_size_read(inode) + blocksize - 1) >>
								blkbits;
		if (last_block > last_block_in_file)
			last_block = last_block_in_file;

		/*
		 * Map blocks using the previous result first.
		 */
		if ((map.m_flags & F2FS_MAP_MAPPED) &&
				block_in_file > map.m_lblk &&
				block_in_file < (map.m_lblk + map.m_len))
			goto got_it;

		/*
		 * Then do more f2fs_map_blocks() calls until we are
		 * done with this page.
		 */
		map.m_flags = 0;

		if (block_in_file < last_block) {
			map.m_lblk = block_in_file;
			map.m_len = last_block - block_in_file;

			if (f2fs_map_blocks(inode, &map, 0,
						F2FS_GET_BLOCK_DEFAULT))
				goto set_error_page;
		}
got_it:
		if ((map.m_flags & F2FS_MAP_MAPPED)) {
			block_nr = map.m_pblk + block_in_file - map.m_lblk;
			SetPageMappedToDisk(page);

			if (!PageUptodate(page) && !cleancache_get_page(page)) {
				SetPageUptodate(page);
				goto confused;
			}
		} else {
			zero_user_segment(page, 0, PAGE_SIZE);
			if (!PageUptodate(page))
				SetPageUptodate(page);
			unlock_page(page);
			goto next_page;
		}

		/*
		 * This page will go to BIO.  Do we need to send this
		 * BIO off first?
		 */
		if (bio && (last_block_in_bio != block_nr - 1 ||
			!__same_bdev(F2FS_I_SB(inode), block_nr, bio))) {
submit_and_realloc:
			__submit_bio(F2FS_I_SB(inode), bio, DATA);
			bio = NULL;
		}
		if (bio == NULL) {
			bio = f2fs_grab_read_bio(inode, block_nr, nr_pages);
			if (IS_ERR(bio)) {
				bio = NULL;
				goto set_error_page;
			}
		}

		if (bio_add_page(bio, page, blocksize, 0) < blocksize)
			goto submit_and_realloc;

		last_block_in_bio = block_nr;
		goto next_page;
set_error_page:
		SetPageError(page);
		zero_user_segment(page, 0, PAGE_SIZE);
		unlock_page(page);
		goto next_page;
confused:
		if (bio) {
			__submit_bio(F2FS_I_SB(inode), bio, DATA);
			bio = NULL;
		}
		unlock_page(page);
next_page:
		if (pages)
			put_page(page);
	}
	BUG_ON(pages && !list_empty(pages));
	if (bio)
		__submit_bio(F2FS_I_SB(inode), bio, DATA);
	return 0;
}

static int f2fs_read_data_page(struct file *file, struct page *page)
{
	struct inode *inode = page->mapping->host;
	int ret = -EAGAIN;

	trace_f2fs_readpage(page, DATA);

	/* If the file has inline data, try to read it directly */
	if (f2fs_has_inline_data(inode))
		ret = f2fs_read_inline_data(inode, page);
	if (ret == -EAGAIN)
		ret = f2fs_mpage_readpages(page->mapping, NULL, page, 1);
	return ret;
}

static int f2fs_read_data_pages(struct file *file,
			struct address_space *mapping,
			struct list_head *pages, unsigned nr_pages)
{
	struct inode *inode = mapping->host;
	struct page *page = list_last_entry(pages, struct page, lru);

	trace_f2fs_readpages(inode, page, nr_pages);

	/* If the file has inline data, skip readpages */
	if (f2fs_has_inline_data(inode))
		return 0;

	return f2fs_mpage_readpages(mapping, pages, NULL, nr_pages);
}

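/* Encrypt one data page into fio->encrypted_page, retrying on ENOMEM. */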
static int encrypt_one_page(struct f2fs_io_info *fio)
{
	struct inode *inode = fio->page->mapping->host;
	gfp_t gfp_flags = GFP_NOFS;

	if (!f2fs_encrypted_file(inode))
		return 0;

	/* wait for GCed page writeback via META_MAPPING */
	f2fs_wait_on_block_writeback(fio->sbi, fio->old_blkaddr);

retry_encrypt:
	fio->encrypted_page = fscrypt_encrypt_page(inode, fio->page,
			PAGE_SIZE, 0, fio->page->index, gfp_flags);
	if (!IS_ERR(fio->encrypted_page))
		return 0;

	/* flush pending IOs and wait for a while in the ENOMEM case */
	if (PTR_ERR(fio->encrypted_page) == -ENOMEM) {
		f2fs_flush_merged_writes(fio->sbi);
		congestion_wait(BLK_RW_ASYNC, HZ/50);
		gfp_flags |= __GFP_NOFAIL;
		goto retry_encrypt;
	}
	return PTR_ERR(fio->encrypted_page);
}

static inline bool check_inplace_update_policy(struct inode *inode,
				struct f2fs_io_info *fio)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	unsigned int policy = SM_I(sbi)->ipu_policy;

	if (policy & (0x1 << F2FS_IPU_FORCE))
		return true;
	if (policy & (0x1 << F2FS_IPU_SSR) && need_SSR(sbi))
		return true;
	if (policy & (0x1 << F2FS_IPU_UTIL) &&
			utilization(sbi) > SM_I(sbi)->min_ipu_util)
		return true;
	if (policy & (0x1 << F2FS_IPU_SSR_UTIL) && need_SSR(sbi) &&
			utilization(sbi) > SM_I(sbi)->min_ipu_util)
		return true;

	/*
	 * IPU for rewrite async pages
	 */
	if (policy & (0x1 << F2FS_IPU_ASYNC) &&
			fio && fio->op == REQ_OP_WRITE &&
			!(fio->op_flags & REQ_SYNC) &&
			!f2fs_encrypted_inode(inode))
		return true;

	/* this is only set during fdatasync */
	if (policy & (0x1 << F2FS_IPU_FSYNC) &&
			is_inode_flag_set(inode, FI_NEED_IPU))
		return true;

	return false;
}

bool should_update_inplace(struct inode *inode, struct f2fs_io_info *fio)
{
	if (f2fs_is_pinned_file(inode))
		return true;

	/* if this is cold file, we should overwrite to avoid fragmentation */
	if (file_is_cold(inode))
		return true;

	return check_inplace_update_policy(inode, fio);
}

bool should_update_outplace(struct inode *inode, struct f2fs_io_info *fio)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

	if (test_opt(sbi, LFS))
		return true;
	if (S_ISDIR(inode->i_mode))
		return true;
	if (f2fs_is_atomic_file(inode))
		return true;
	if (fio) {
		if (is_cold_data(fio->page))
			return true;
		if (IS_ATOMIC_WRITTEN_PAGE(fio->page))
			return true;
	}
	return false;
}

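/* Decide whether this write should update the block in place. */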
static inline bool need_inplace_update(struct f2fs_io_info *fio)
{
	struct inode *inode = fio->page->mapping->host;

	if (should_update_outplace(inode, fio))
		return false;

	return should_update_inplace(inode, fio);
}

static inline bool valid_ipu_blkaddr(struct f2fs_io_info *fio)
{
	if (fio->old_blkaddr == NEW_ADDR)
		return false;
	if (fio->old_blkaddr == NULL_ADDR)
		return false;
	return true;
}

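/*
 * Write one data page, either in place over the old block (IPU) or to a
 * newly allocated block (OPU), handling encryption and lock ordering.
 */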
int do_write_data_page(struct f2fs_io_info *fio)
{
	struct page *page = fio->page;
	struct inode *inode = page->mapping->host;
	struct dnode_of_data dn;
	struct extent_info ei = {0,0,0};
	bool ipu_force = false;
	int err = 0;

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	if (need_inplace_update(fio) &&
			f2fs_lookup_extent_cache(inode, page->index, &ei)) {
		fio->old_blkaddr = ei.blk + page->index - ei.fofs;

		if (valid_ipu_blkaddr(fio)) {
			ipu_force = true;
			fio->need_lock = LOCK_DONE;
			goto got_it;
		}
	}

	/* Deadlock between page->lock and f2fs_lock_op */
	if (fio->need_lock == LOCK_REQ && !f2fs_trylock_op(fio->sbi))
		return -EAGAIN;

	err = get_dnode_of_data(&dn, page->index, LOOKUP_NODE);
	if (err)
		goto out;

	fio->old_blkaddr = dn.data_blkaddr;

	/* This page is already truncated */
	if (fio->old_blkaddr == NULL_ADDR) {
		ClearPageUptodate(page);
		goto out_writepage;
	}
got_it:
	/*
	 * If current allocation needs SSR,
	 * it had better in-place writes for updated data.
	 */
	if (ipu_force || (valid_ipu_blkaddr(fio) && need_inplace_update(fio))) {
		err = encrypt_one_page(fio);
		if (err)
			goto out_writepage;

		set_page_writeback(page);
		ClearPageError(page);
		f2fs_put_dnode(&dn);
		if (fio->need_lock == LOCK_REQ)
			f2fs_unlock_op(fio->sbi);
		err = rewrite_data_page(fio);
		trace_f2fs_do_write_data_page(fio->page, IPU);
		set_inode_flag(inode, FI_UPDATE_WRITE);
		return err;
	}

	if (fio->need_lock == LOCK_RETRY) {
		if (!f2fs_trylock_op(fio->sbi)) {
			err = -EAGAIN;
			goto out_writepage;
		}
		fio->need_lock = LOCK_REQ;
	}

	err = encrypt_one_page(fio);
	if (err)
		goto out_writepage;

	set_page_writeback(page);
	ClearPageError(page);

	/* LFS mode write path */
	write_data_page(&dn, fio);
	trace_f2fs_do_write_data_page(page, OPU);
	set_inode_flag(inode, FI_APPEND_WRITE);
	if (page->index == 0)
		set_inode_flag(inode, FI_FIRST_BLOCK_WRITTEN);
out_writepage:
	f2fs_put_dnode(&dn);
out:
	if (fio->need_lock == LOCK_REQ)
		f2fs_unlock_op(fio->sbi);
	return err;
}

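/* Write back one dirty data page; shared by ->writepage and writepages. */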
static int __write_data_page(struct page *page, bool *submitted,
				struct writeback_control *wbc,
				enum iostat_type io_type)
{
	struct inode *inode = page->mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	loff_t i_size = i_size_read(inode);
	const pgoff_t end_index = ((unsigned long long) i_size)
							>> PAGE_SHIFT;
	loff_t psize = (page->index + 1) << PAGE_SHIFT;
	unsigned offset = 0;
	bool need_balance_fs = false;
	int err = 0;
	struct f2fs_io_info fio = {
		.sbi = sbi,
		.ino = inode->i_ino,
		.type = DATA,
		.op = REQ_OP_WRITE,
		.op_flags = wbc_to_write_flags(wbc),
		.old_blkaddr = NULL_ADDR,
		.page = page,
		.encrypted_page = NULL,
		.submitted = false,
		.need_lock = LOCK_RETRY,
		.io_type = io_type,
		.io_wbc = wbc,
	};

	trace_f2fs_writepage(page, DATA);

	/* we should bypass data pages to proceed the kworker jobs */
	if (unlikely(f2fs_cp_error(sbi))) {
		mapping_set_error(page->mapping, -EIO);
		goto out;
	}

	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
		goto redirty_out;

	if (page->index < end_index)
		goto write;

	/*
	 * If the offset is out-of-range of file size,
	 * this page does not have to be written to disk.
	 */
	offset = i_size & (PAGE_SIZE - 1);
	if ((page->index >= end_index + 1) || !offset)
		goto out;

	zero_user_segment(page, offset, PAGE_SIZE);
write:
	if (f2fs_is_drop_cache(inode))
		goto out;
	/* we should not write 0'th page having journal header */
	if (f2fs_is_volatile_file(inode) && (!page->index ||
			(!wbc->for_reclaim &&
			available_free_memory(sbi, BASE_CHECK))))
		goto redirty_out;

	/* Dentry blocks are controlled by checkpoint */
	if (S_ISDIR(inode->i_mode)) {
		fio.need_lock = LOCK_DONE;
		err = do_write_data_page(&fio);
		goto done;
	}

	if (!wbc->for_reclaim)
		need_balance_fs = true;
	else if (has_not_enough_free_secs(sbi, 0, 0))
		goto redirty_out;
	else
		set_inode_flag(inode, FI_HOT_DATA);

	err = -EAGAIN;
	if (f2fs_has_inline_data(inode)) {
		err = f2fs_write_inline_data(inode, page);
		if (!err)
			goto out;
	}

	if (err == -EAGAIN) {
		err = do_write_data_page(&fio);
		if (err == -EAGAIN) {
			fio.need_lock = LOCK_REQ;
			err = do_write_data_page(&fio);
		}
	}

	if (err) {
		file_set_keep_isize(inode);
	} else {
		down_write(&F2FS_I(inode)->i_sem);
		if (F2FS_I(inode)->last_disk_size < psize)
			F2FS_I(inode)->last_disk_size = psize;
		up_write(&F2FS_I(inode)->i_sem);
	}

done:
	if (err && err != -ENOENT)
		goto redirty_out;

out:
	inode_dec_dirty_pages(inode);
	if (err)
		ClearPageUptodate(page);

	if (wbc->for_reclaim) {
		f2fs_submit_merged_write_cond(sbi, inode, 0, page->index, DATA);
		clear_inode_flag(inode, FI_HOT_DATA);
		remove_dirty_inode(inode);
		submitted = NULL;
	}

	unlock_page(page);
	if (!S_ISDIR(inode->i_mode))
		f2fs_balance_fs(sbi, need_balance_fs);

	if (unlikely(f2fs_cp_error(sbi))) {
		f2fs_submit_merged_write(sbi, DATA);
		submitted = NULL;
	}

	if (submitted)
		*submitted = fio.submitted;

	return 0;

redirty_out:
	redirty_page_for_writepage(wbc, page);
	/*
	 * pageout() in MM translates EAGAIN, so calls handle_write_error()
	 * -> mapping_set_error() -> set_bit(AS_EIO, ...).
	 * file_write_and_wait_range() will see EIO error, which is critical
	 * to return value of fsync() followed by atomic_write failure to user.
	 */
	if (!err || wbc->for_reclaim)
		return AOP_WRITEPAGE_ACTIVATE;
	unlock_page(page);
	return err;
}

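/* ->writepage entry point; accounts the I/O as FS_DATA_IO */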
static int f2fs_write_data_page(struct page *page,
					struct writeback_control *wbc)
{
	return __write_data_page(page, NULL, wbc, FS_DATA_IO);
}

/*
 * This function was copied from write_cache_pages in mm/page-writeback.c.
 * The major change is making the write step of cold data pages separate
 * from warm/hot data pages.
 */
static int f2fs_write_cache_pages(struct address_space *mapping,
					struct writeback_control *wbc,
					enum iostat_type io_type)
{
	int ret = 0;
	int done = 0;
	struct pagevec pvec;
	int nr_pages;
	pgoff_t uninitialized_var(writeback_index);
	pgoff_t index;
	pgoff_t end;		/* Inclusive */
	pgoff_t done_index;
	pgoff_t last_idx = ULONG_MAX;
	int cycled;
	int range_whole = 0;
	int tag;

	pagevec_init(&pvec);

	if (get_dirty_pages(mapping->host) <=
				SM_I(F2FS_M_SB(mapping))->min_hot_blocks)
		set_inode_flag(mapping->host, FI_HOT_DATA);
	else
		clear_inode_flag(mapping->host, FI_HOT_DATA);

	if (wbc->range_cyclic) {
		writeback_index = mapping->writeback_index; /* prev offset */
		index = writeback_index;
		if (index == 0)
			cycled = 1;
		else
			cycled = 0;
		end = -1;
	} else {
		index = wbc->range_start >> PAGE_SHIFT;
		end = wbc->range_end >> PAGE_SHIFT;
		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
			range_whole = 1;
		cycled = 1; /* ignore range_cyclic tests */
	}
	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
		tag = PAGECACHE_TAG_TOWRITE;
	else
		tag = PAGECACHE_TAG_DIRTY;
retry:
	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
		tag_pages_for_writeback(mapping, index, end);
	done_index = index;
	while (!done && (index <= end)) {
		int i;

		nr_pages = pagevec_lookup_range_tag(&pvec, mapping, &index, end,
				tag);
		if (nr_pages == 0)
			break;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];
			bool submitted = false;

			done_index = page->index;
retry_write:
			lock_page(page);

			if (unlikely(page->mapping != mapping)) {
continue_unlock:
				unlock_page(page);
				continue;
			}

			if (!PageDirty(page)) {
				/* someone wrote it for us */
				goto continue_unlock;
			}

			if (PageWriteback(page)) {
				if (wbc->sync_mode != WB_SYNC_NONE)
					f2fs_wait_on_page_writeback(page,
								DATA, true);
				else
					goto continue_unlock;
			}

			BUG_ON(PageWriteback(page));
			if (!clear_page_dirty_for_io(page))
				goto continue_unlock;

			ret = __write_data_page(page, &submitted, wbc, io_type);
			if (unlikely(ret)) {
				/*
				 * keep nr_to_write, since vfs uses this to
				 * get # of written pages.
				 */
				if (ret == AOP_WRITEPAGE_ACTIVATE) {
					unlock_page(page);
					ret = 0;
					continue;
				} else if (ret == -EAGAIN) {
					ret = 0;
					if (wbc->sync_mode == WB_SYNC_ALL) {
						cond_resched();
						congestion_wait(BLK_RW_ASYNC,
									HZ/50);
						goto retry_write;
					}
					continue;
				}
				done_index = page->index + 1;
				done = 1;
				break;
			} else if (submitted) {
				last_idx = page->index;
			}

			/* give a priority to WB_SYNC threads */
			if ((atomic_read(&F2FS_M_SB(mapping)->wb_sync_req) ||
					--wbc->nr_to_write <= 0) &&
					wbc->sync_mode == WB_SYNC_NONE) {
				done = 1;
				break;
			}
		}
		pagevec_release(&pvec);
		cond_resched();
	}

	if (!cycled && !done) {
		cycled = 1;
		index = 0;
		end = writeback_index - 1;
		goto retry;
	}
	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
		mapping->writeback_index = done_index;

	if (last_idx != ULONG_MAX)
		f2fs_submit_merged_write_cond(F2FS_M_SB(mapping), mapping->host,
						0, last_idx, DATA);

	return ret;
}

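/*
 * Write back an inode's dirty data pages under a blk_plug. The skip_write
 * checks below keep background writeback cheap; WB_SYNC_ALL callers bump
 * wb_sync_req so that concurrent WB_SYNC_NONE writeback backs off.
 */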
int __f2fs_write_data_pages(struct address_space *mapping,
						struct writeback_control *wbc,
						enum iostat_type io_type)
{
	struct inode *inode = mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct blk_plug plug;
	int ret;

	/* deal with chardevs and other special files */
	if (!mapping->a_ops->writepage)
		return 0;

	/* skip writing if there is no dirty page in this inode */
	if (!get_dirty_pages(inode) && wbc->sync_mode == WB_SYNC_NONE)
		return 0;

	/* during POR, we don't need to trigger writepage at all. */
	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
		goto skip_write;

	if (S_ISDIR(inode->i_mode) && wbc->sync_mode == WB_SYNC_NONE &&
			get_dirty_pages(inode) < nr_pages_to_skip(sbi, DATA) &&
			available_free_memory(sbi, DIRTY_DENTS))
		goto skip_write;

	/* skip writing during file defragment */
	if (is_inode_flag_set(inode, FI_DO_DEFRAG))
		goto skip_write;

	trace_f2fs_writepages(mapping->host, wbc, DATA);

	/* to avoid splitting IOs due to mixed WB_SYNC_ALL and WB_SYNC_NONE */
	if (wbc->sync_mode == WB_SYNC_ALL)
		atomic_inc(&sbi->wb_sync_req);
	else if (atomic_read(&sbi->wb_sync_req))
		goto skip_write;

	blk_start_plug(&plug);
	ret = f2fs_write_cache_pages(mapping, wbc, io_type);
	blk_finish_plug(&plug);

	if (wbc->sync_mode == WB_SYNC_ALL)
		atomic_dec(&sbi->wb_sync_req);
	/*
	 * if some pages were truncated, we cannot guarantee that mapping->host
	 * will detect pending bios.
	 */

	remove_dirty_inode(inode);
	return ret;

skip_write:
	wbc->pages_skipped += get_dirty_pages(inode);
	trace_f2fs_writepages(mapping->host, wbc, DATA);
	return 0;
}

static int f2fs_write_data_pages(struct address_space *mapping,
			    struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;

	return __f2fs_write_data_pages(mapping, wbc,
			F2FS_I(inode)->cp_task == current ?
			FS_CP_DATA_IO : FS_DATA_IO);
}

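/*
 * A write failed partway: throw away any pagecache and blocks that were
 * instantiated beyond the current i_size, under i_mmap_sem.
 */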
static void f2fs_write_failed(struct address_space *mapping, loff_t to)
{
	struct inode *inode = mapping->host;
	loff_t i_size = i_size_read(inode);

	if (to > i_size) {
		down_write(&F2FS_I(inode)->i_mmap_sem);
		truncate_pagecache(inode, i_size);
		truncate_blocks(inode, i_size, true);
		up_write(&F2FS_I(inode)->i_mmap_sem);
	}
}

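/*
 * Resolve or reserve the on-disk block backing the page about to be
 * written; *node_changed reports whether node pages were dirtied along the
 * way (e.g. by inline conversion), so the caller knows to rebalance.
 */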
static int prepare_write_begin(struct f2fs_sb_info *sbi,
			struct page *page, loff_t pos, unsigned len,
			block_t *blk_addr, bool *node_changed)
{
	struct inode *inode = page->mapping->host;
	pgoff_t index = page->index;
	struct dnode_of_data dn;
	struct page *ipage;
	bool locked = false;
	struct extent_info ei = {0,0,0};
	int err = 0;

	/*
	 * we already allocated all the blocks, so we don't need to get
	 * the block addresses when there is no need to fill the page.
	 */
	if (!f2fs_has_inline_data(inode) && len == PAGE_SIZE &&
			!is_inode_flag_set(inode, FI_NO_PREALLOC))
		return 0;

	if (f2fs_has_inline_data(inode) ||
			(pos & PAGE_MASK) >= i_size_read(inode)) {
		__do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, true);
		locked = true;
	}
restart:
	/* check inline_data */
	ipage = get_node_page(sbi, inode->i_ino);
	if (IS_ERR(ipage)) {
		err = PTR_ERR(ipage);
		goto unlock_out;
	}

	set_new_dnode(&dn, inode, ipage, ipage, 0);

	if (f2fs_has_inline_data(inode)) {
		if (pos + len <= MAX_INLINE_DATA(inode)) {
			read_inline_data(page, ipage);
			set_inode_flag(inode, FI_DATA_EXIST);
			if (inode->i_nlink)
				set_inline_node(ipage);
		} else {
			err = f2fs_convert_inline_page(&dn, page);
			if (err)
				goto out;
			if (dn.data_blkaddr == NULL_ADDR)
				err = f2fs_get_block(&dn, index);
		}
	} else if (locked) {
		err = f2fs_get_block(&dn, index);
	} else {
		if (f2fs_lookup_extent_cache(inode, index, &ei)) {
			dn.data_blkaddr = ei.blk + index - ei.fofs;
		} else {
			/* hole case */
			err = get_dnode_of_data(&dn, index, LOOKUP_NODE);
			if (err || dn.data_blkaddr == NULL_ADDR) {
				f2fs_put_dnode(&dn);
				__do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO,
								true);
				locked = true;
				goto restart;
			}
		}
	}

	/* convert_inline_page can make node_changed */
	*blk_addr = dn.data_blkaddr;
	*node_changed = dn.node_changed;
out:
	f2fs_put_dnode(&dn);
unlock_out:
	if (locked)
		__do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, false);
	return err;
}

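/* ->write_begin: lock and prepare the target page before data is copied in */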
static int f2fs_write_begin(struct file *file, struct address_space *mapping,
		loff_t pos, unsigned len, unsigned flags,
		struct page **pagep, void **fsdata)
{
	struct inode *inode = mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct page *page = NULL;
	pgoff_t index = ((unsigned long long) pos) >> PAGE_SHIFT;
	bool need_balance = false, drop_atomic = false;
	block_t blkaddr = NULL_ADDR;
	int err = 0;

	trace_f2fs_write_begin(inode, pos, len, flags);

	if (f2fs_is_atomic_file(inode) &&
			!available_free_memory(sbi, INMEM_PAGES)) {
		err = -ENOMEM;
		drop_atomic = true;
		goto fail;
	}

	/*
	 * We should check this at this moment to avoid deadlock on inode page
	 * and #0 page. The locking rule for inline_data conversion should be:
	 * lock_page(page #0) -> lock_page(inode_page)
	 */
	if (index != 0) {
		err = f2fs_convert_inline_inode(inode);
		if (err)
			goto fail;
	}
repeat:
	/*
	 * Do not use grab_cache_page_write_begin() to avoid deadlock due to
	 * wait_for_stable_page. We will wait for that below under our own
	 * I/O control.
	 */
	page = f2fs_pagecache_get_page(mapping, index,
				FGP_LOCK | FGP_WRITE | FGP_CREAT, GFP_NOFS);
	if (!page) {
		err = -ENOMEM;
		goto fail;
	}

	*pagep = page;

	err = prepare_write_begin(sbi, page, pos, len,
					&blkaddr, &need_balance);
	if (err)
		goto fail;

	if (need_balance && has_not_enough_free_secs(sbi, 0, 0)) {
		unlock_page(page);
		f2fs_balance_fs(sbi, true);
		lock_page(page);
		if (page->mapping != mapping) {
			/* The page got truncated from under us */
			f2fs_put_page(page, 1);
			goto repeat;
		}
	}

	f2fs_wait_on_page_writeback(page, DATA, false);

	/* wait for GCed page writeback via META_MAPPING */
	if (f2fs_post_read_required(inode))
		f2fs_wait_on_block_writeback(sbi, blkaddr);

	if (len == PAGE_SIZE || PageUptodate(page))
		return 0;

	if (!(pos & (PAGE_SIZE - 1)) && (pos + len) >= i_size_read(inode)) {
		zero_user_segment(page, len, PAGE_SIZE);
		return 0;
	}

	if (blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_SIZE);
		SetPageUptodate(page);
	} else {
		err = f2fs_submit_page_read(inode, page, blkaddr);
		if (err)
			goto fail;

		lock_page(page);
		if (unlikely(page->mapping != mapping)) {
			f2fs_put_page(page, 1);
			goto repeat;
		}
		if (unlikely(!PageUptodate(page))) {
			err = -EIO;
			goto fail;
		}
	}
	return 0;

fail:
	f2fs_put_page(page, 1);
	f2fs_write_failed(mapping, pos + len);
	if (drop_atomic)
		drop_inmem_pages_all(sbi);
	return err;
}

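/* ->write_end: dirty the copied page and push i_size forward if needed */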
static int f2fs_write_end(struct file *file,
			struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	struct inode *inode = page->mapping->host;

	trace_f2fs_write_end(inode, pos, len, copied);

	/*
	 * This should come from len == PAGE_SIZE, and we expect copied to be
	 * PAGE_SIZE. Otherwise, we treat it as zero copied and let
	 * generic_perform_write() try to copy data again through copied=0.
	 */
	if (!PageUptodate(page)) {
		if (unlikely(copied != len))
			copied = 0;
		else
			SetPageUptodate(page);
	}
	if (!copied)
		goto unlock_out;

	set_page_dirty(page);

	if (pos + copied > i_size_read(inode))
		f2fs_i_size_write(inode, pos + copied);
unlock_out:
	f2fs_put_page(page, 1);
	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
	return copied;
}

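/* direct I/O needs the file offset and user buffer to be block-aligned */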
static int check_direct_IO(struct inode *inode, struct iov_iter *iter,
			   loff_t offset)
{
	unsigned blocksize_mask = inode->i_sb->s_blocksize - 1;

	if (offset & blocksize_mask)
		return -EINVAL;

	if (iov_iter_alignment(iter) & blocksize_mask)
		return -EINVAL;

	return 0;
}

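/*
 * Issue direct I/O via blockdev_direct_IO() while holding i_gc_rwsem, so
 * that GC cannot move blocks under an in-flight DIO; returns 0 to fall
 * back to buffered I/O when f2fs_force_buffered_io() demands it.
 */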
static ssize_t f2fs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
{
	struct address_space *mapping = iocb->ki_filp->f_mapping;
	struct inode *inode = mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	size_t count = iov_iter_count(iter);
	loff_t offset = iocb->ki_pos;
	int rw = iov_iter_rw(iter);
	int err;
	enum rw_hint hint = iocb->ki_hint;
	int whint_mode = F2FS_OPTION(sbi).whint_mode;

	err = check_direct_IO(inode, iter, offset);
	if (err)
		return err;

	if (f2fs_force_buffered_io(inode, rw))
		return 0;

	trace_f2fs_direct_IO_enter(inode, offset, count, rw);

	if (rw == WRITE && whint_mode == WHINT_MODE_OFF)
		iocb->ki_hint = WRITE_LIFE_NOT_SET;

	if (!down_read_trylock(&F2FS_I(inode)->i_gc_rwsem[rw])) {
		if (iocb->ki_flags & IOCB_NOWAIT) {
			iocb->ki_hint = hint;
			err = -EAGAIN;
			goto out;
		}
		down_read(&F2FS_I(inode)->i_gc_rwsem[rw]);
	}

	err = blockdev_direct_IO(iocb, inode, iter, get_data_block_dio);
	up_read(&F2FS_I(inode)->i_gc_rwsem[rw]);

	if (rw == WRITE) {
		if (whint_mode == WHINT_MODE_OFF)
			iocb->ki_hint = hint;
		if (err > 0) {
			f2fs_update_iostat(F2FS_I_SB(inode), APP_DIRECT_IO,
									err);
			set_inode_flag(inode, FI_UPDATE_WRITE);
		} else if (err < 0) {
			f2fs_write_failed(mapping, offset + count);
		}
	}

out:
	trace_f2fs_direct_IO_exit(inode, offset, count, rw, err);

	return err;
}

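/*
 * ->invalidatepage: fix up the dirty-page accounting for the owning inode
 * and, for whole-page invalidation, drop the page's private state.
 */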
void f2fs_invalidate_page(struct page *page, unsigned int offset,
							unsigned int length)
{
	struct inode *inode = page->mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

	if (inode->i_ino >= F2FS_ROOT_INO(sbi) &&
		(offset % PAGE_SIZE || length != PAGE_SIZE))
		return;

	if (PageDirty(page)) {
		if (inode->i_ino == F2FS_META_INO(sbi)) {
			dec_page_count(sbi, F2FS_DIRTY_META);
		} else if (inode->i_ino == F2FS_NODE_INO(sbi)) {
			dec_page_count(sbi, F2FS_DIRTY_NODES);
		} else {
			inode_dec_dirty_pages(inode);
			remove_dirty_inode(inode);
		}
	}

	/* This is an atomic written page, keep Private */
	if (IS_ATOMIC_WRITTEN_PAGE(page))
		return drop_inmem_page(inode, page);

	set_page_private(page, 0);
	ClearPagePrivate(page);
}

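/* ->releasepage: only clean, non-atomic pages may drop their private data */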
int f2fs_release_page(struct page *page, gfp_t wait)
{
	/* If this is a dirty page, keep PagePrivate */
	if (PageDirty(page))
		return 0;

	/* This is an atomic written page, keep Private */
	if (IS_ATOMIC_WRITTEN_PAGE(page))
		return 0;

	set_page_private(page, 0);
	ClearPagePrivate(page);
	return 1;
}

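/*
 * ->set_page_dirty: pages of an in-progress atomic write are registered in
 * the inmem list instead of being dirtied directly, so they can later be
 * committed or dropped as one unit.
 */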
static int f2fs_set_data_page_dirty(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct inode *inode = mapping->host;

	trace_f2fs_set_page_dirty(page, DATA);

	if (!PageUptodate(page))
		SetPageUptodate(page);

	if (f2fs_is_atomic_file(inode) && !f2fs_is_commit_atomic_write(inode)) {
		if (!IS_ATOMIC_WRITTEN_PAGE(page)) {
			register_inmem_page(inode, page);
			return 1;
		}
		/*
		 * Previously, this page has been registered, we just
		 * return here.
		 */
		return 0;
	}

	if (!PageDirty(page)) {
		__set_page_dirty_nobuffers(page);
		update_dirty_page(inode, page);
		return 1;
	}
	return 0;
}

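/* ->bmap: flush dirty pages first so the queried blocks are fully allocated */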
static sector_t f2fs_bmap(struct address_space *mapping, sector_t block)
{
	struct inode *inode = mapping->host;

	if (f2fs_has_inline_data(inode))
		return 0;

	/* make sure allocating whole blocks */
	if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
		filemap_write_and_wait(mapping);

	return generic_block_bmap(mapping, block, get_data_block_bmap);
}

#ifdef CONFIG_MIGRATION
#include <linux/migrate.h>

int f2fs_migrate_page(struct address_space *mapping,
		struct page *newpage, struct page *page, enum migrate_mode mode)
{
	int rc, extra_count;
	struct f2fs_inode_info *fi = F2FS_I(mapping->host);
	bool atomic_written = IS_ATOMIC_WRITTEN_PAGE(page);

	BUG_ON(PageWriteback(page));

	/* migrating an atomic written page is safe with the inmem_lock held */
	if (atomic_written) {
		if (mode != MIGRATE_SYNC)
			return -EBUSY;
		if (!mutex_trylock(&fi->inmem_lock))
			return -EAGAIN;
	}

	/*
	 * A reference is expected if PagePrivate is set when moving a mapping,
	 * however F2FS breaks this for maintaining dirty page counts when
	 * truncating pages. So here adjusting the 'extra_count' makes it work.
	 */
	extra_count = (atomic_written ? 1 : 0) - page_has_private(page);
	rc = migrate_page_move_mapping(mapping, newpage,
				page, NULL, mode, extra_count);
	if (rc != MIGRATEPAGE_SUCCESS) {
		if (atomic_written)
			mutex_unlock(&fi->inmem_lock);
		return rc;
	}

	if (atomic_written) {
		struct inmem_pages *cur;
		list_for_each_entry(cur, &fi->inmem_pages, list)
			if (cur->page == page) {
				cur->page = newpage;
				break;
			}
		mutex_unlock(&fi->inmem_lock);
		put_page(page);
		get_page(newpage);
	}

	if (PagePrivate(page))
		SetPagePrivate(newpage);
	set_page_private(newpage, page_private(page));

	if (mode != MIGRATE_SYNC_NO_COPY)
		migrate_page_copy(newpage, page);
	else
		migrate_page_states(newpage, page);

	return MIGRATEPAGE_SUCCESS;
}
#endif

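/* address_space operations for regular f2fs data pages */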
const struct address_space_operations f2fs_dblock_aops = {
	.readpage	= f2fs_read_data_page,
	.readpages	= f2fs_read_data_pages,
	.writepage	= f2fs_write_data_page,
	.writepages	= f2fs_write_data_pages,
	.write_begin	= f2fs_write_begin,
	.write_end	= f2fs_write_end,
	.set_page_dirty	= f2fs_set_data_page_dirty,
	.invalidatepage	= f2fs_invalidate_page,
	.releasepage	= f2fs_release_page,
	.direct_IO	= f2fs_direct_IO,
	.bmap		= f2fs_bmap,
#ifdef CONFIG_MIGRATION
	.migratepage    = f2fs_migrate_page,
#endif
};

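/*
 * Preallocate the slab cache and mempool for bio_post_read_ctx objects so
 * that read completion paths have a guaranteed minimum of contexts.
 */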
int __init f2fs_init_post_read_processing(void)
{
	bio_post_read_ctx_cache = KMEM_CACHE(bio_post_read_ctx, 0);
	if (!bio_post_read_ctx_cache)
		goto fail;
	bio_post_read_ctx_pool =
		mempool_create_slab_pool(NUM_PREALLOC_POST_READ_CTXS,
					 bio_post_read_ctx_cache);
	if (!bio_post_read_ctx_pool)
		goto fail_free_cache;
	return 0;

fail_free_cache:
	kmem_cache_destroy(bio_post_read_ctx_cache);
fail:
	return -ENOMEM;
}

void __exit f2fs_destroy_post_read_processing(void)
{
	mempool_destroy(bio_post_read_ctx_pool);
	kmem_cache_destroy(bio_post_read_ctx_cache);
}