inline.c 15.6 KB
Newer Older
1 2 3 4 5 6 7 8 9 10 11 12 13 14
/*
 * fs/f2fs/inline.c
 * Copyright (c) 2013, Intel Corporation
 * Authors: Huajun Li <huajun.li@intel.com>
 *          Haicheng Li <haicheng.li@intel.com>
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/fs.h>
#include <linux/f2fs_fs.h>

#include "f2fs.h"
J
Jaegeuk Kim 已提交
15
#include "node.h"
16

17
bool f2fs_may_inline_data(struct inode *inode)
18
{
J
Jaegeuk Kim 已提交
19 20 21
	if (f2fs_is_atomic_file(inode))
		return false;

22
	if (!S_ISREG(inode->i_mode) && !S_ISLNK(inode->i_mode))
23 24
		return false;

25 26 27
	if (i_size_read(inode) > MAX_INLINE_DATA)
		return false;

28 29 30
	if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode))
		return false;

31 32 33
	return true;
}

34 35 36 37 38 39 40 41 42 43 44
/*
 * An inode may use inline dentries only when the mount option allows it
 * and the inode is a directory.
 */
bool f2fs_may_inline_dentry(struct inode *inode)
{
	return test_opt(F2FS_I_SB(inode), INLINE_DENTRY) &&
						S_ISDIR(inode->i_mode);
}

45
void read_inline_data(struct page *page, struct page *ipage)
46 47 48
{
	void *src_addr, *dst_addr;

49 50
	if (PageUptodate(page))
		return;
51

52
	f2fs_bug_on(F2FS_P_SB(page), page->index);
53

54
	zero_user_segment(page, MAX_INLINE_DATA, PAGE_SIZE);
55 56 57

	/* Copy the whole inline data block */
	src_addr = inline_data_addr(ipage);
58
	dst_addr = kmap_atomic(page);
59
	memcpy(dst_addr, src_addr, MAX_INLINE_DATA);
60
	flush_dcache_page(page);
61
	kunmap_atomic(dst_addr);
62 63
	if (!PageUptodate(page))
		SetPageUptodate(page);
64 65
}

66
/* Zero the inline data area of @inode from byte offset @from onwards. */
void truncate_inline_inode(struct inode *inode, struct page *ipage, u64 from)
{
	void *inline_addr;

	/* nothing to wipe beyond the inline area */
	if (from >= MAX_INLINE_DATA)
		return;

	inline_addr = inline_data_addr(ipage);

	f2fs_wait_on_page_writeback(ipage, NODE, true);
	memset(inline_addr + from, 0, MAX_INLINE_DATA - from);
	set_page_dirty(ipage);

	/* a full wipe means the inode no longer holds any data */
	if (from == 0)
		clear_inode_flag(inode, FI_DATA_EXIST);
}

83 84 85 86 87 88 89 90 91
int f2fs_read_inline_data(struct inode *inode, struct page *page)
{
	struct page *ipage;

	ipage = get_node_page(F2FS_I_SB(inode), inode->i_ino);
	if (IS_ERR(ipage)) {
		unlock_page(page);
		return PTR_ERR(ipage);
	}
92

93 94 95 96 97 98
	if (!f2fs_has_inline_data(inode)) {
		f2fs_put_page(ipage, 1);
		return -EAGAIN;
	}

	if (page->index)
99
		zero_user_segment(page, 0, PAGE_SIZE);
100 101 102
	else
		read_inline_data(page, ipage);

103 104
	if (!PageUptodate(page))
		SetPageUptodate(page);
105 106
	f2fs_put_page(ipage, 1);
	unlock_page(page);
107 108 109
	return 0;
}

110
/*
 * Convert the inline data of dn->inode into a regular data block backed
 * by @page: reserve block 0, copy the inline bytes into @page, write the
 * page back synchronously, then wipe the inline area and clear
 * FI_INLINE_DATA.  Releases the dnode on the success/clear paths.
 */
int f2fs_convert_inline_page(struct dnode_of_data *dn, struct page *page)
{
	struct f2fs_io_info fio = {
		.sbi = F2FS_I_SB(dn->inode),
		.type = DATA,
		.op = REQ_OP_WRITE,
		.op_flags = REQ_SYNC | REQ_PRIO,
		.page = page,
		.encrypted_page = NULL,
	};
	int dirty, err;

	/* no inline payload to move: just drop the inline state */
	if (!f2fs_exist_data(dn->inode))
		goto clear_out;

	err = f2fs_reserve_block(dn, 0);
	if (err)
		return err;

	f2fs_bug_on(F2FS_P_SB(page), PageWriteback(page));

	/* copy the inline bytes into the page cache page */
	read_inline_data(page, dn->inode_page);
	set_page_dirty(page);

	/* clear dirty state */
	dirty = clear_page_dirty_for_io(page);

	/* write data page to try to make data consistent */
	set_page_writeback(page);
	fio.old_blkaddr = dn->data_blkaddr;
	set_inode_flag(dn->inode, FI_HOT_DATA);
	write_data_page(dn, &fio);
	f2fs_wait_on_page_writeback(page, DATA, true);
	if (dirty) {
		inode_dec_dirty_pages(dn->inode);
		remove_dirty_inode(dn->inode);
	}

	/* this converted inline_data should be recovered. */
	set_inode_flag(dn->inode, FI_APPEND_WRITE);

	/* clear inline data and flag after data writeback */
	truncate_inline_inode(dn->inode, dn->inode_page, 0);
	clear_inline_node(dn->inode_page);
clear_out:
	stat_dec_inline_inode(dn->inode);
	clear_inode_flag(dn->inode, FI_INLINE_DATA);
	f2fs_put_dnode(dn);
	return 0;
}

161
int f2fs_convert_inline_inode(struct inode *inode)
162
{
163 164 165 166
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn;
	struct page *ipage, *page;
	int err = 0;
167

168 169 170
	if (!f2fs_has_inline_data(inode))
		return 0;

171
	page = f2fs_grab_cache_page(inode->i_mapping, 0, false);
172 173
	if (!page)
		return -ENOMEM;
174

175 176 177 178
	f2fs_lock_op(sbi);

	ipage = get_node_page(sbi, inode->i_ino);
	if (IS_ERR(ipage)) {
179 180
		err = PTR_ERR(ipage);
		goto out;
181
	}
182

183 184 185 186 187 188
	set_new_dnode(&dn, inode, ipage, ipage, 0);

	if (f2fs_has_inline_data(inode))
		err = f2fs_convert_inline_page(&dn, page);

	f2fs_put_dnode(&dn);
189
out:
190 191 192
	f2fs_unlock_op(sbi);

	f2fs_put_page(page, 1);
193

J
Jaegeuk Kim 已提交
194
	f2fs_balance_fs(sbi, dn.node_changed);
195

196 197 198
	return err;
}

199
/*
 * Copy the prepared contents of page 0 back into the inode's inline data
 * area.  Returns -EAGAIN when the inode no longer has inline data so the
 * caller can fall back to the regular write path.
 */
int f2fs_write_inline_data(struct inode *inode, struct page *page)
{
	void *src_addr, *dst_addr;
	struct dnode_of_data dn;
	int err;

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, 0, LOOKUP_NODE);
	if (err)
		return err;

	if (!f2fs_has_inline_data(inode)) {
		f2fs_put_dnode(&dn);
		return -EAGAIN;
	}

	/* inline data only ever backs page index 0 */
	f2fs_bug_on(F2FS_I_SB(inode), page->index);

	f2fs_wait_on_page_writeback(dn.inode_page, NODE, true);
	src_addr = kmap_atomic(page);
	dst_addr = inline_data_addr(dn.inode_page);
	memcpy(dst_addr, src_addr, MAX_INLINE_DATA);
	kunmap_atomic(src_addr);
	set_page_dirty(dn.inode_page);

	set_inode_flag(inode, FI_APPEND_WRITE);
	set_inode_flag(inode, FI_DATA_EXIST);

	clear_inline_node(dn.inode_page);
	f2fs_put_dnode(&dn);
	return 0;
}
231

232
/*
 * Roll-forward recovery of inline data from the logged node page @npage.
 * Returns true when inline data was recovered here and no further
 * data-block recovery is required, false otherwise.
 */
bool recover_inline_data(struct inode *inode, struct page *npage)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_inode *ri = NULL;
	void *src_addr, *dst_addr;
	struct page *ipage;

	/*
	 * The inline_data recovery policy is as follows.
	 * [prev.] [next] of inline_data flag
	 *    o       o  -> recover inline_data
	 *    o       x  -> remove inline_data, and then recover data blocks
	 *    x       o  -> remove inline_data, and then recover inline_data
	 *    x       x  -> recover data blocks
	 */
	if (IS_INODE(npage))
		ri = F2FS_INODE(npage);

	if (f2fs_has_inline_data(inode) &&
			ri && (ri->i_inline & F2FS_INLINE_DATA)) {
process_inline:
		ipage = get_node_page(sbi, inode->i_ino);
		f2fs_bug_on(sbi, IS_ERR(ipage));

		f2fs_wait_on_page_writeback(ipage, NODE, true);

		/* copy the logged inline payload over the current one */
		src_addr = inline_data_addr(npage);
		dst_addr = inline_data_addr(ipage);
		memcpy(dst_addr, src_addr, MAX_INLINE_DATA);

		set_inode_flag(inode, FI_INLINE_DATA);
		set_inode_flag(inode, FI_DATA_EXIST);

		set_page_dirty(ipage);
		f2fs_put_page(ipage, 1);
		return true;
	}

	if (f2fs_has_inline_data(inode)) {
		/* o -> x: drop the stale inline image */
		ipage = get_node_page(sbi, inode->i_ino);
		f2fs_bug_on(sbi, IS_ERR(ipage));
		truncate_inline_inode(inode, ipage, 0);
		clear_inode_flag(inode, FI_INLINE_DATA);
		f2fs_put_page(ipage, 1);
	} else if (ri && (ri->i_inline & F2FS_INLINE_DATA)) {
		/* x -> o: free data blocks first, then recover inline data */
		if (truncate_blocks(inode, 0, false))
			return false;
		goto process_inline;
	}
	return false;
}
283 284

struct f2fs_dir_entry *find_in_inline_dir(struct inode *dir,
285
			struct fscrypt_name *fname, struct page **res_page)
286 287
{
	struct f2fs_sb_info *sbi = F2FS_SB(dir->i_sb);
288
	struct f2fs_inline_dentry *inline_dentry;
289
	struct qstr name = FSTR_TO_QSTR(&fname->disk_name);
290
	struct f2fs_dir_entry *de;
291
	struct f2fs_dentry_ptr d;
292
	struct page *ipage;
293
	f2fs_hash_t namehash;
294 295

	ipage = get_node_page(sbi, dir->i_ino);
296 297
	if (IS_ERR(ipage)) {
		*res_page = ipage;
298
		return NULL;
299
	}
300

301
	namehash = f2fs_dentry_hash(&name, fname);
302

303
	inline_dentry = inline_data_addr(ipage);
304

305
	make_dentry_ptr_inline(NULL, &d, inline_dentry);
306
	de = find_target_dentry(fname, namehash, NULL, &d);
307
	unlock_page(ipage);
308 309 310 311 312
	if (de)
		*res_page = ipage;
	else
		f2fs_put_page(ipage, 0);

313 314 315 316 317 318 319
	return de;
}

int make_empty_inline_dir(struct inode *inode, struct inode *parent,
							struct page *ipage)
{
	struct f2fs_inline_dentry *dentry_blk;
320
	struct f2fs_dentry_ptr d;
321 322 323

	dentry_blk = inline_data_addr(ipage);

324
	make_dentry_ptr_inline(NULL, &d, dentry_blk);
325
	do_make_empty_dir(inode, parent, &d);
326 327 328 329

	set_page_dirty(ipage);

	/* update i_size to MAX_INLINE_DATA */
330
	if (i_size_read(inode) < MAX_INLINE_DATA)
331
		f2fs_i_size_write(inode, MAX_INLINE_DATA);
332 333 334
	return 0;
}

335 336 337 338
/*
 * Move the inline dentries of @dir into a regular dentry block at page 0.
 * NOTE: ipage is grabbed by caller, but if any error occurs, we should
 * release ipage in this function.
 */
static int f2fs_move_inline_dirents(struct inode *dir, struct page *ipage,
				struct f2fs_inline_dentry *inline_dentry)
{
	struct page *page;
	struct dnode_of_data dn;
	struct f2fs_dentry_block *dentry_blk;
	int err;

	/* page 0 of the dir will hold the converted dentry block */
	page = f2fs_grab_cache_page(dir->i_mapping, 0, false);
	if (!page) {
		f2fs_put_page(ipage, 1);
		return -ENOMEM;
	}

	set_new_dnode(&dn, dir, ipage, NULL, 0);
	err = f2fs_reserve_block(&dn, 0);
	if (err)
		goto out;

	f2fs_wait_on_page_writeback(page, DATA, true);
	zero_user_segment(page, MAX_INLINE_DATA, PAGE_SIZE);

	dentry_blk = kmap_atomic(page);

	/* copy data from inline dentry block to new dentry block */
	memcpy(dentry_blk->dentry_bitmap, inline_dentry->dentry_bitmap,
					INLINE_DENTRY_BITMAP_SIZE);
	memset(dentry_blk->dentry_bitmap + INLINE_DENTRY_BITMAP_SIZE, 0,
			SIZE_OF_DENTRY_BITMAP - INLINE_DENTRY_BITMAP_SIZE);
	/*
	 * we do not need to zero out remainder part of dentry and filename
	 * field, since we have used bitmap for marking the usage status of
	 * them, besides, we can also ignore copying/zeroing reserved space
	 * of dentry block, because they haven't been used so far.
	 */
	memcpy(dentry_blk->dentry, inline_dentry->dentry,
			sizeof(struct f2fs_dir_entry) * NR_INLINE_DENTRY);
	memcpy(dentry_blk->filename, inline_dentry->filename,
					NR_INLINE_DENTRY * F2FS_SLOT_LEN);

	kunmap_atomic(dentry_blk);
	if (!PageUptodate(page))
		SetPageUptodate(page);
	set_page_dirty(page);

	/* clear inline dir and flag after data writeback */
	truncate_inline_inode(dir, ipage, 0);

	stat_dec_inline_dir(dir);
	clear_inode_flag(dir, FI_INLINE_DENTRY);

	/* the dir now has depth 1 and spans at least one full page */
	f2fs_i_depth_write(dir, 1);
	if (i_size_read(dir) < PAGE_SIZE)
		f2fs_i_size_write(dir, PAGE_SIZE);
out:
	f2fs_put_page(page, 1);
	return err;
}

398 399 400 401 402 403 404
/*
 * Replay every in-use dentry of @inline_dentry into the (now regular)
 * directory via f2fs_add_regular_entry().  On failure all directory
 * blocks built so far are punched out again.
 */
static int f2fs_add_inline_entries(struct inode *dir,
			struct f2fs_inline_dentry *inline_dentry)
{
	struct f2fs_dentry_ptr d;
	unsigned long bit_pos = 0;
	int err = 0;

	make_dentry_ptr_inline(NULL, &d, inline_dentry);

	while (bit_pos < d.max) {
		struct f2fs_dir_entry *de;
		struct qstr new_name;
		nid_t ino;
		umode_t fake_mode;

		/* skip free slots */
		if (!test_bit_le(bit_pos, d.bitmap)) {
			bit_pos++;
			continue;
		}

		de = &d.dentry[bit_pos];

		/* defensively skip zero-length names */
		if (unlikely(!de->name_len)) {
			bit_pos++;
			continue;
		}

		new_name.name = d.filename[bit_pos];
		new_name.len = le16_to_cpu(de->name_len);

		ino = le32_to_cpu(de->ino);
		/* synthesize a mode carrying only the file-type bits */
		fake_mode = get_de_type(de) << S_SHIFT;

		err = f2fs_add_regular_entry(dir, &new_name, NULL, NULL,
							ino, fake_mode);
		if (err)
			goto punch_dentry_pages;

		/* long names occupy several consecutive slots */
		bit_pos += GET_DENTRY_SLOTS(le16_to_cpu(de->name_len));
	}
	return 0;
punch_dentry_pages:
	/* undo the partially built directory blocks */
	truncate_inode_pages(&dir->i_data, 0);
	truncate_blocks(dir, 0, false);
	remove_dirty_inode(dir);
	return err;
}

/*
 * Convert an inline dir by re-adding every entry through the normal
 * insert path.  The inline payload is backed up first so it can be
 * restored if replaying the entries fails.
 * NOTE(review): ipage is released here on the error paths only; on
 * success the caller still holds it — mirrors f2fs_move_inline_dirents().
 */
static int f2fs_move_rehashed_dirents(struct inode *dir, struct page *ipage,
				struct f2fs_inline_dentry *inline_dentry)
{
	struct f2fs_inline_dentry *backup_dentry;
	int err;

	backup_dentry = f2fs_kmalloc(F2FS_I_SB(dir),
			sizeof(struct f2fs_inline_dentry), GFP_F2FS_ZERO);
	if (!backup_dentry) {
		f2fs_put_page(ipage, 1);
		return -ENOMEM;
	}

	/* snapshot the inline dentries, then wipe the inline area */
	memcpy(backup_dentry, inline_dentry, MAX_INLINE_DATA);
	truncate_inline_inode(dir, ipage, 0);

	unlock_page(ipage);

	err = f2fs_add_inline_entries(dir, backup_dentry);
	if (err)
		goto recover;

	lock_page(ipage);

	stat_dec_inline_dir(dir);
	clear_inode_flag(dir, FI_INLINE_DENTRY);
	kfree(backup_dentry);
	return 0;
recover:
	/* restore the inline image exactly as it was */
	lock_page(ipage);
	memcpy(inline_dentry, backup_dentry, MAX_INLINE_DATA);
	f2fs_i_depth_write(dir, 0);
	f2fs_i_size_write(dir, MAX_INLINE_DATA);
	set_page_dirty(ipage);
	f2fs_put_page(ipage, 1);

	kfree(backup_dentry);
	return err;
}

/* Pick the conversion strategy based on the directory's hash level. */
static int f2fs_convert_inline_dir(struct inode *dir, struct page *ipage,
				struct f2fs_inline_dentry *inline_dentry)
{
	if (F2FS_I(dir)->i_dir_level)
		return f2fs_move_rehashed_dirents(dir, ipage, inline_dentry);

	return f2fs_move_inline_dirents(dir, ipage, inline_dentry);
}

495 496 497
/*
 * Add one entry (@new_name -> @ino) into the inline dentry area of @dir.
 * When the inline area is full, the directory is converted to the
 * regular layout and -EAGAIN is returned so the caller retries via the
 * normal insert path.  @inode may be NULL; when set, its metadata is
 * initialized and linked under its i_sem.
 */
int f2fs_add_inline_entry(struct inode *dir, const struct qstr *new_name,
				const struct qstr *orig_name,
				struct inode *inode, nid_t ino, umode_t mode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dir);
	struct page *ipage;
	unsigned int bit_pos;
	f2fs_hash_t name_hash;
	struct f2fs_inline_dentry *dentry_blk = NULL;
	struct f2fs_dentry_ptr d;
	int slots = GET_DENTRY_SLOTS(new_name->len);
	struct page *page = NULL;
	int err = 0;

	ipage = get_node_page(sbi, dir->i_ino);
	if (IS_ERR(ipage))
		return PTR_ERR(ipage);

	dentry_blk = inline_data_addr(ipage);
	bit_pos = room_for_filename(&dentry_blk->dentry_bitmap,
						slots, NR_INLINE_DENTRY);
	if (bit_pos >= NR_INLINE_DENTRY) {
		/*
		 * No room left: convert to a regular dir.  On error the
		 * convert helpers release ipage themselves (see the NOTE
		 * above f2fs_move_inline_dirents()), so return directly;
		 * on success fall through to put ipage and report -EAGAIN.
		 */
		err = f2fs_convert_inline_dir(dir, ipage, dentry_blk);
		if (err)
			return err;
		err = -EAGAIN;
		goto out;
	}

	if (inode) {
		down_write(&F2FS_I(inode)->i_sem);
		page = init_inode_metadata(inode, dir, new_name,
						orig_name, ipage);
		if (IS_ERR(page)) {
			err = PTR_ERR(page);
			goto fail;
		}
	}

	f2fs_wait_on_page_writeback(ipage, NODE, true);

	name_hash = f2fs_dentry_hash(new_name, NULL);
	make_dentry_ptr_inline(NULL, &d, dentry_blk);
	f2fs_update_dentry(ino, mode, &d, new_name, name_hash, bit_pos);

	set_page_dirty(ipage);

	/* we don't need to mark_inode_dirty now */
	if (inode) {
		f2fs_i_pino_write(inode, dir->i_ino);
		f2fs_put_page(page, 1);
	}

	update_parent_metadata(dir, inode, 0);
fail:
	if (inode)
		up_write(&F2FS_I(inode)->i_sem);
out:
	f2fs_put_page(ipage, 1);
	return err;
}

/*
 * Delete @dentry from the inline dentry area held in node page @page.
 * Takes the page lock itself and drops the caller's page reference.
 */
void f2fs_delete_inline_entry(struct f2fs_dir_entry *dentry, struct page *page,
					struct inode *dir, struct inode *inode)
{
	struct f2fs_inline_dentry *inline_dentry;
	int slots = GET_DENTRY_SLOTS(le16_to_cpu(dentry->name_len));
	unsigned int bit_pos;
	int i;

	lock_page(page);
	f2fs_wait_on_page_writeback(page, NODE, true);

	/* clear every slot the (possibly multi-slot) name occupied */
	inline_dentry = inline_data_addr(page);
	bit_pos = dentry - inline_dentry->dentry;
	for (i = 0; i < slots; i++)
		__clear_bit_le(bit_pos + i,
				&inline_dentry->dentry_bitmap);

	set_page_dirty(page);
	f2fs_put_page(page, 1);

	dir->i_ctime = dir->i_mtime = current_time(dir);
	f2fs_mark_inode_dirty_sync(dir, false);

	if (inode)
		f2fs_drop_nlink(dir, inode);
}

/* Report whether an inline directory holds no entry past the first two slots. */
bool f2fs_empty_inline_dir(struct inode *dir)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dir);
	struct f2fs_inline_dentry *inline_dentry;
	struct page *ipage;
	unsigned int pos;

	ipage = get_node_page(sbi, dir->i_ino);
	if (IS_ERR(ipage))
		return false;

	/* search for any in-use slot past the first two */
	inline_dentry = inline_data_addr(ipage);
	pos = find_next_bit_le(&inline_dentry->dentry_bitmap,
					NR_INLINE_DENTRY, 2);

	f2fs_put_page(ipage, 1);

	return pos >= NR_INLINE_DENTRY;
}

608
/*
 * readdir() over an inline directory.  ctx->pos is advanced to
 * NR_INLINE_DENTRY once all entries have been emitted, so a subsequent
 * call returns immediately.
 */
int f2fs_read_inline_dir(struct file *file, struct dir_context *ctx,
				struct fscrypt_str *fstr)
{
	struct inode *inode = file_inode(file);
	struct f2fs_inline_dentry *inline_dentry = NULL;
	struct page *ipage = NULL;
	struct f2fs_dentry_ptr d;
	int err;

	if (ctx->pos == NR_INLINE_DENTRY)
		return 0;

	ipage = get_node_page(F2FS_I_SB(inode), inode->i_ino);
	if (IS_ERR(ipage))
		return PTR_ERR(ipage);

	inline_dentry = inline_data_addr(ipage);

	make_dentry_ptr_inline(inode, &d, inline_dentry);

	err = f2fs_fill_dentries(ctx, &d, 0, fstr);
	if (!err)
		ctx->pos = NR_INLINE_DENTRY;

	f2fs_put_page(ipage, 1);
	/* NOTE(review): positive err appears to be non-fatal — mapped to 0 */
	return err < 0 ? err : 0;
}
J
Jaegeuk Kim 已提交
635 636 637 638 639 640 641 642 643 644 645 646 647 648 649 650 651 652 653 654 655 656 657 658 659 660 661 662 663 664 665 666 667 668 669

/*
 * Report the inline data region for the FIEMAP ioctl as a single
 * DATA_INLINE | NOT_ALIGNED | LAST extent whose physical byte address
 * points into the on-disk inode block.
 */
int f2fs_inline_data_fiemap(struct inode *inode,
		struct fiemap_extent_info *fieinfo, __u64 start, __u64 len)
{
	__u64 byteaddr, ilen;
	__u32 flags = FIEMAP_EXTENT_DATA_INLINE | FIEMAP_EXTENT_NOT_ALIGNED |
		FIEMAP_EXTENT_LAST;
	struct node_info ni;
	struct page *ipage;
	int err = 0;

	ipage = get_node_page(F2FS_I_SB(inode), inode->i_ino);
	if (IS_ERR(ipage))
		return PTR_ERR(ipage);

	/* the inode no longer has inline data: let the caller retry */
	if (!f2fs_has_inline_data(inode)) {
		err = -EAGAIN;
		goto out;
	}

	/* clamp the reported length to i_size and the requested range */
	ilen = min_t(size_t, MAX_INLINE_DATA, i_size_read(inode));
	if (start >= ilen)
		goto out;
	if (start + len < ilen)
		ilen = start + len;
	ilen -= start;

	/* byte address of the inline area inside the inode's node block */
	get_node_info(F2FS_I_SB(inode), inode->i_ino, &ni);
	byteaddr = (__u64)ni.blk_addr << inode->i_sb->s_blocksize_bits;
	byteaddr += (char *)inline_data_addr(ipage) - (char *)F2FS_INODE(ipage);
	err = fiemap_fill_next_extent(fieinfo, start, byteaddr, ilen, flags);
out:
	f2fs_put_page(ipage, 1);
	return err;
}