/*
 * fs/f2fs/inline.c
 * Copyright (c) 2013, Intel Corporation
 * Authors: Huajun Li <huajun.li@intel.com>
 *          Haicheng Li <haicheng.li@intel.com>
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/fs.h>
#include <linux/f2fs_fs.h>

#include "f2fs.h"
#include "node.h"
bool f2fs_may_inline_data(struct inode *inode)
18
{
J
Jaegeuk Kim 已提交
19 20 21
	if (f2fs_is_atomic_file(inode))
		return false;

22
	if (!S_ISREG(inode->i_mode) && !S_ISLNK(inode->i_mode))
23 24
		return false;

25 26 27
	if (i_size_read(inode) > MAX_INLINE_DATA)
		return false;

28 29 30
	if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode))
		return false;

31 32 33
	return true;
}

34 35 36 37 38 39 40 41 42 43 44
bool f2fs_may_inline_dentry(struct inode *inode)
{
	if (!test_opt(F2FS_I_SB(inode), INLINE_DENTRY))
		return false;

	if (!S_ISDIR(inode->i_mode))
		return false;

	return true;
}

/*
 * Fill the data page @page from the inline area of the inode page
 * @ipage, zeroing the tail of @page beyond MAX_INLINE_DATA, and mark it
 * uptodate.  No-op when @page is already uptodate.
 */
void read_inline_data(struct page *page, struct page *ipage)
{
	void *src_addr, *dst_addr;

	if (PageUptodate(page))
		return;

	/* inline data can only back page index 0 */
	f2fs_bug_on(F2FS_P_SB(page), page->index);

	zero_user_segment(page, MAX_INLINE_DATA, PAGE_CACHE_SIZE);

	/* Copy the whole inline data block */
	src_addr = inline_data_addr(ipage);
	dst_addr = kmap_atomic(page);
	memcpy(dst_addr, src_addr, MAX_INLINE_DATA);
	flush_dcache_page(page);
	kunmap_atomic(dst_addr);
	SetPageUptodate(page);
}

65
bool truncate_inline_inode(struct page *ipage, u64 from)
C
Chao Yu 已提交
66
{
67 68 69 70 71 72 73
	void *addr;

	if (from >= MAX_INLINE_DATA)
		return false;

	addr = inline_data_addr(ipage);

C
Chao Yu 已提交
74
	f2fs_wait_on_page_writeback(ipage, NODE);
75 76 77
	memset(addr + from, 0, MAX_INLINE_DATA - from);

	return true;
C
Chao Yu 已提交
78 79
}

/*
 * Read path for an inline-data inode: fill the locked page @page from
 * the inline area of the inode's node page.
 *
 * Returns 0 on success (@page unlocked), the PTR_ERR of the node-page
 * lookup (@page unlocked), or -EAGAIN when the inode lost its inline
 * data in the meantime — in that case @page stays LOCKED so the caller
 * can retry the regular read path.
 */
int f2fs_read_inline_data(struct inode *inode, struct page *page)
{
	struct page *ipage;

	ipage = get_node_page(F2FS_I_SB(inode), inode->i_ino);
	if (IS_ERR(ipage)) {
		unlock_page(page);
		return PTR_ERR(ipage);
	}

	/* raced with a conversion: fall back to the normal read path */
	if (!f2fs_has_inline_data(inode)) {
		f2fs_put_page(ipage, 1);
		return -EAGAIN;
	}

	/* inline data only backs page index 0; any other index is a hole */
	if (page->index)
		zero_user_segment(page, 0, PAGE_CACHE_SIZE);
	else
		read_inline_data(page, ipage);

	SetPageUptodate(page);
	f2fs_put_page(ipage, 1);
	unlock_page(page);
	return 0;
}

/*
 * Move the inline data of dn->inode out into the regular data page
 * @page (which must be index 0), write that page back synchronously,
 * then clear the inode's inline state.  On success (return 0) the dnode
 * has been put.  On a block-reservation failure the error is returned;
 * NOTE(review): dnode cleanup on that path is assumed to be handled by
 * f2fs_reserve_block()/the caller — confirm against node.c.
 */
int f2fs_convert_inline_page(struct dnode_of_data *dn, struct page *page)
{
	void *src_addr, *dst_addr;
	struct f2fs_io_info fio = {
		.sbi = F2FS_I_SB(dn->inode),
		.type = DATA,
		.rw = WRITE_SYNC | REQ_PRIO,
		.page = page,
		.encrypted_page = NULL,
	};
	int dirty, err;

	f2fs_bug_on(F2FS_I_SB(dn->inode), page->index);

	/* no data actually stored inline: just drop the inline flags */
	if (!f2fs_exist_data(dn->inode))
		goto clear_out;

	err = f2fs_reserve_block(dn, 0);
	if (err)
		return err;

	f2fs_wait_on_page_writeback(page, DATA);

	if (PageUptodate(page))
		goto no_update;

	zero_user_segment(page, MAX_INLINE_DATA, PAGE_CACHE_SIZE);

	/* Copy the whole inline data block */
	src_addr = inline_data_addr(dn->inode_page);
	dst_addr = kmap_atomic(page);
	memcpy(dst_addr, src_addr, MAX_INLINE_DATA);
	flush_dcache_page(page);
	kunmap_atomic(dst_addr);
	SetPageUptodate(page);
no_update:
	set_page_dirty(page);

	/* clear dirty state */
	dirty = clear_page_dirty_for_io(page);

	/* write data page to try to make data consistent */
	set_page_writeback(page);
	fio.blk_addr = dn->data_blkaddr;
	write_data_page(dn, &fio);
	set_data_blkaddr(dn);
	f2fs_update_extent_cache(dn);
	f2fs_wait_on_page_writeback(page, DATA);
	if (dirty)
		inode_dec_dirty_pages(dn->inode);

	/* this converted inline_data should be recovered. */
	set_inode_flag(F2FS_I(dn->inode), FI_APPEND_WRITE);

	/* clear inline data and flag after data writeback */
	truncate_inline_inode(dn->inode_page, 0);
clear_out:
	stat_dec_inline_inode(dn->inode);
	f2fs_clear_inline_inode(dn->inode);
	sync_inode_page(dn);
	f2fs_put_dnode(dn);
	return 0;
}

170
int f2fs_convert_inline_inode(struct inode *inode)
171
{
172 173 174 175
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn;
	struct page *ipage, *page;
	int err = 0;
176

177 178 179
	if (!f2fs_has_inline_data(inode))
		return 0;

180 181 182
	page = grab_cache_page(inode->i_mapping, 0);
	if (!page)
		return -ENOMEM;
183

184 185 186 187
	f2fs_lock_op(sbi);

	ipage = get_node_page(sbi, inode->i_ino);
	if (IS_ERR(ipage)) {
188 189
		err = PTR_ERR(ipage);
		goto out;
190
	}
191

192 193 194 195 196 197
	set_new_dnode(&dn, inode, ipage, ipage, 0);

	if (f2fs_has_inline_data(inode))
		err = f2fs_convert_inline_page(&dn, page);

	f2fs_put_dnode(&dn);
198
out:
199 200 201
	f2fs_unlock_op(sbi);

	f2fs_put_page(page, 1);
202 203 204 205

	if (dn.node_changed)
		f2fs_balance_fs(sbi);

206 207 208
	return err;
}

/*
 * Write-back path for an inline-data inode: copy the contents of data
 * page @page (index 0) back into the inline area of the inode page.
 * Returns -EAGAIN when the inode no longer has inline data, in which
 * case the caller must use the regular write path.
 */
int f2fs_write_inline_data(struct inode *inode, struct page *page)
{
	void *src_addr, *dst_addr;
	struct dnode_of_data dn;
	int err;

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, 0, LOOKUP_NODE);
	if (err)
		return err;

	/* raced with a conversion: fall back to the regular write path */
	if (!f2fs_has_inline_data(inode)) {
		f2fs_put_dnode(&dn);
		return -EAGAIN;
	}

	/* inline data only backs page index 0 */
	f2fs_bug_on(F2FS_I_SB(inode), page->index);

	f2fs_wait_on_page_writeback(dn.inode_page, NODE);
	src_addr = kmap_atomic(page);
	dst_addr = inline_data_addr(dn.inode_page);
	memcpy(dst_addr, src_addr, MAX_INLINE_DATA);
	kunmap_atomic(src_addr);

	/* mark for roll-forward recovery; inline area now holds data */
	set_inode_flag(F2FS_I(inode), FI_APPEND_WRITE);
	set_inode_flag(F2FS_I(inode), FI_DATA_EXIST);

	sync_inode_page(&dn);
	f2fs_put_dnode(&dn);
	return 0;
}
240

241
bool recover_inline_data(struct inode *inode, struct page *npage)
242
{
243
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259
	struct f2fs_inode *ri = NULL;
	void *src_addr, *dst_addr;
	struct page *ipage;

	/*
	 * The inline_data recovery policy is as follows.
	 * [prev.] [next] of inline_data flag
	 *    o       o  -> recover inline_data
	 *    o       x  -> remove inline_data, and then recover data blocks
	 *    x       o  -> remove inline_data, and then recover inline_data
	 *    x       x  -> recover data blocks
	 */
	if (IS_INODE(npage))
		ri = F2FS_INODE(npage);

	if (f2fs_has_inline_data(inode) &&
260
			ri && (ri->i_inline & F2FS_INLINE_DATA)) {
261 262
process_inline:
		ipage = get_node_page(sbi, inode->i_ino);
263
		f2fs_bug_on(sbi, IS_ERR(ipage));
264

265 266
		f2fs_wait_on_page_writeback(ipage, NODE);

267 268 269
		src_addr = inline_data_addr(npage);
		dst_addr = inline_data_addr(ipage);
		memcpy(dst_addr, src_addr, MAX_INLINE_DATA);
270 271 272 273

		set_inode_flag(F2FS_I(inode), FI_INLINE_DATA);
		set_inode_flag(F2FS_I(inode), FI_DATA_EXIST);

274 275
		update_inode(inode, ipage);
		f2fs_put_page(ipage, 1);
276
		return true;
277 278 279 280
	}

	if (f2fs_has_inline_data(inode)) {
		ipage = get_node_page(sbi, inode->i_ino);
281
		f2fs_bug_on(sbi, IS_ERR(ipage));
282 283
		if (!truncate_inline_inode(ipage, 0))
			return false;
284
		f2fs_clear_inline_inode(inode);
285 286
		update_inode(inode, ipage);
		f2fs_put_page(ipage, 1);
287
	} else if (ri && (ri->i_inline & F2FS_INLINE_DATA)) {
288 289
		if (truncate_blocks(inode, 0, false))
			return false;
290 291
		goto process_inline;
	}
292
	return false;
293
}

/*
 * Look up @fname in the inline dentry area of directory @dir.
 * On a hit, returns the dentry and stores the (unlocked but still
 * referenced) node page in *res_page; the caller owns that reference.
 * On a miss or node-page lookup failure, returns NULL and no page
 * reference is held.
 */
struct f2fs_dir_entry *find_in_inline_dir(struct inode *dir,
			struct f2fs_filename *fname, struct page **res_page)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dir->i_sb);
	struct f2fs_inline_dentry *inline_dentry;
	struct qstr name = FSTR_TO_QSTR(&fname->disk_name);
	struct f2fs_dir_entry *de;
	struct f2fs_dentry_ptr d;
	struct page *ipage;
	f2fs_hash_t namehash;

	ipage = get_node_page(sbi, dir->i_ino);
	if (IS_ERR(ipage))
		return NULL;

	namehash = f2fs_dentry_hash(&name);

	inline_dentry = inline_data_addr(ipage);

	make_dentry_ptr(NULL, &d, (void *)inline_dentry, 2);
	de = find_target_dentry(fname, namehash, NULL, &d);
	unlock_page(ipage);
	if (de)
		*res_page = ipage;
	else
		f2fs_put_page(ipage, 0);

	/*
	 * For the most part, it should be a bug when name_len is zero.
	 * We stop here for figuring out where the bugs has occurred.
	 */
	f2fs_bug_on(sbi, d.max < 0);
	return de;
}

/*
 * Return the parent (slot 1, presumably "..") entry of an inline
 * directory.  The referenced but unlocked node page is handed back via
 * *p; the caller must put it.  Returns NULL if the node page cannot be
 * read.
 */
struct f2fs_dir_entry *f2fs_parent_inline_dir(struct inode *dir,
							struct page **p)
{
	struct page *ipage;
	struct f2fs_inline_dentry *inline_dentry;
	struct f2fs_dir_entry *de;

	ipage = get_node_page(F2FS_I_SB(dir), dir->i_ino);
	if (IS_ERR(ipage))
		return NULL;

	inline_dentry = inline_data_addr(ipage);
	de = &inline_dentry->dentry[1];
	*p = ipage;
	unlock_page(ipage);
	return de;
}

int make_empty_inline_dir(struct inode *inode, struct inode *parent,
							struct page *ipage)
{
	struct f2fs_inline_dentry *dentry_blk;
353
	struct f2fs_dentry_ptr d;
354 355 356

	dentry_blk = inline_data_addr(ipage);

357
	make_dentry_ptr(NULL, &d, (void *)dentry_blk, 2);
358
	do_make_empty_dir(inode, parent, &d);
359 360 361 362 363 364 365 366 367 368 369

	set_page_dirty(ipage);

	/* update i_size to MAX_INLINE_DATA */
	if (i_size_read(inode) < MAX_INLINE_DATA) {
		i_size_write(inode, MAX_INLINE_DATA);
		set_inode_flag(F2FS_I(inode), FI_UPDATE_DIR);
	}
	return 0;
}

/*
 * Convert an inline directory to the regular dentry-block format by
 * copying the inline dentries into data page 0.
 *
 * NOTE: ipage is grabbed by caller, but if any error occurs, we should
 * release ipage in this function.
 */
static int f2fs_convert_inline_dir(struct inode *dir, struct page *ipage,
				struct f2fs_inline_dentry *inline_dentry)
{
	struct page *page;
	struct dnode_of_data dn;
	struct f2fs_dentry_block *dentry_blk;
	int err;

	page = grab_cache_page(dir->i_mapping, 0);
	if (!page) {
		f2fs_put_page(ipage, 1);
		return -ENOMEM;
	}

	set_new_dnode(&dn, dir, ipage, NULL, 0);
	err = f2fs_reserve_block(&dn, 0);
	if (err)
		goto out;

	f2fs_wait_on_page_writeback(page, DATA);
	zero_user_segment(page, MAX_INLINE_DATA, PAGE_CACHE_SIZE);

	dentry_blk = kmap_atomic(page);

	/* copy data from inline dentry block to new dentry block */
	memcpy(dentry_blk->dentry_bitmap, inline_dentry->dentry_bitmap,
					INLINE_DENTRY_BITMAP_SIZE);
	memset(dentry_blk->dentry_bitmap + INLINE_DENTRY_BITMAP_SIZE, 0,
			SIZE_OF_DENTRY_BITMAP - INLINE_DENTRY_BITMAP_SIZE);
	/*
	 * we do not need to zero out remainder part of dentry and filename
	 * field, since we have used bitmap for marking the usage status of
	 * them, besides, we can also ignore copying/zeroing reserved space
	 * of dentry block, because them haven't been used so far.
	 */
	memcpy(dentry_blk->dentry, inline_dentry->dentry,
			sizeof(struct f2fs_dir_entry) * NR_INLINE_DENTRY);
	memcpy(dentry_blk->filename, inline_dentry->filename,
					NR_INLINE_DENTRY * F2FS_SLOT_LEN);

	kunmap_atomic(dentry_blk);
	SetPageUptodate(page);
	set_page_dirty(page);

	/* clear inline dir and flag after data writeback */
	truncate_inline_inode(ipage, 0);

	stat_dec_inline_dir(dir);
	clear_inode_flag(F2FS_I(dir), FI_INLINE_DENTRY);

	/* directory now spans a full block */
	if (i_size_read(dir) < PAGE_CACHE_SIZE) {
		i_size_write(dir, PAGE_CACHE_SIZE);
		set_inode_flag(F2FS_I(dir), FI_UPDATE_DIR);
	}

	sync_inode_page(&dn);
out:
	f2fs_put_page(page, 1);
	return err;
}

/*
 * Add a directory entry for @name to the inline dentry area of @dir.
 * When the inline area is full, the directory is first converted to the
 * regular dentry-block format and -EAGAIN is returned so the caller
 * retries via the normal add path.  @inode may be NULL (e.g. for a
 * plain link update path — NOTE(review): confirm against dir.c callers).
 */
int f2fs_add_inline_entry(struct inode *dir, const struct qstr *name,
			struct inode *inode, nid_t ino, umode_t mode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dir);
	struct page *ipage;
	unsigned int bit_pos;
	f2fs_hash_t name_hash;
	size_t namelen = name->len;
	struct f2fs_inline_dentry *dentry_blk = NULL;
	struct f2fs_dentry_ptr d;
	int slots = GET_DENTRY_SLOTS(namelen);
	struct page *page = NULL;
	int err = 0;

	ipage = get_node_page(sbi, dir->i_ino);
	if (IS_ERR(ipage))
		return PTR_ERR(ipage);

	dentry_blk = inline_data_addr(ipage);
	bit_pos = room_for_filename(&dentry_blk->dentry_bitmap,
						slots, NR_INLINE_DENTRY);
	if (bit_pos >= NR_INLINE_DENTRY) {
		/*
		 * No room: convert to a regular dentry block.  Per its
		 * NOTE, the convert helper releases ipage on error; on
		 * success we put it ourselves at "out".
		 */
		err = f2fs_convert_inline_dir(dir, ipage, dentry_blk);
		if (err)
			return err;
		err = -EAGAIN;
		goto out;
	}

	if (inode) {
		down_write(&F2FS_I(inode)->i_sem);
		page = init_inode_metadata(inode, dir, name, ipage);
		if (IS_ERR(page)) {
			err = PTR_ERR(page);
			goto fail;
		}
	}

	f2fs_wait_on_page_writeback(ipage, NODE);

	name_hash = f2fs_dentry_hash(name);
	make_dentry_ptr(NULL, &d, (void *)dentry_blk, 2);
	f2fs_update_dentry(ino, mode, &d, name, name_hash, bit_pos);

	set_page_dirty(ipage);

	/* we don't need to mark_inode_dirty now */
	if (inode) {
		F2FS_I(inode)->i_pino = dir->i_ino;
		update_inode(inode, page);
		f2fs_put_page(page, 1);
	}

	update_parent_metadata(dir, inode, 0);
fail:
	if (inode)
		up_write(&F2FS_I(inode)->i_sem);

	/* flush deferred directory metadata updates into ipage */
	if (is_inode_flag_set(F2FS_I(dir), FI_UPDATE_DIR)) {
		update_inode(dir, ipage);
		clear_inode_flag(F2FS_I(dir), FI_UPDATE_DIR);
	}
out:
	f2fs_put_page(ipage, 1);
	return err;
}

/*
 * Remove @dentry from the inline dentry area held in the (referenced,
 * unlocked) node page @page of directory @dir, clearing all bitmap
 * slots the name occupied.  Consumes the caller's reference on @page.
 * @inode, when non-NULL, is the inode being unlinked.
 */
void f2fs_delete_inline_entry(struct f2fs_dir_entry *dentry, struct page *page,
					struct inode *dir, struct inode *inode)
{
	struct f2fs_inline_dentry *inline_dentry;
	int slots = GET_DENTRY_SLOTS(le16_to_cpu(dentry->name_len));
	unsigned int bit_pos;
	int i;

	lock_page(page);
	f2fs_wait_on_page_writeback(page, NODE);

	inline_dentry = inline_data_addr(page);
	/* dentry points into inline_dentry->dentry[]; derive its slot */
	bit_pos = dentry - inline_dentry->dentry;
	for (i = 0; i < slots; i++)
		test_and_clear_bit_le(bit_pos + i,
				&inline_dentry->dentry_bitmap);

	set_page_dirty(page);

	dir->i_ctime = dir->i_mtime = CURRENT_TIME;

	if (inode)
		f2fs_drop_nlink(dir, inode, page);

	f2fs_put_page(page, 1);
}

/*
 * Return true when the inline directory @dir contains no entries beyond
 * the first two slots (presumably "." and ".." — see the fixed start
 * offset of 2).  A failed node-page lookup reports "not empty".
 */
bool f2fs_empty_inline_dir(struct inode *dir)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dir);
	struct f2fs_inline_dentry *inline_dentry;
	struct page *ipage;
	unsigned int bit_pos;

	ipage = get_node_page(sbi, dir->i_ino);
	if (IS_ERR(ipage))
		return false;

	inline_dentry = inline_data_addr(ipage);
	bit_pos = find_next_bit_le(&inline_dentry->dentry_bitmap,
					NR_INLINE_DENTRY, 2);

	f2fs_put_page(ipage, 1);

	return bit_pos >= NR_INLINE_DENTRY;
}

/*
 * readdir() over an inline directory: emit every inline dentry through
 * f2fs_fill_dentries().  Once everything has been emitted, ctx->pos is
 * advanced to NR_INLINE_DENTRY so subsequent calls return immediately.
 */
int f2fs_read_inline_dir(struct file *file, struct dir_context *ctx,
				struct f2fs_str *fstr)
{
	struct inode *inode = file_inode(file);
	struct f2fs_inline_dentry *inline_dentry = NULL;
	struct page *ipage = NULL;
	struct f2fs_dentry_ptr d;

	/* already fully emitted by a previous call */
	if (ctx->pos == NR_INLINE_DENTRY)
		return 0;

	ipage = get_node_page(F2FS_I_SB(inode), inode->i_ino);
	if (IS_ERR(ipage))
		return PTR_ERR(ipage);

	inline_dentry = inline_data_addr(ipage);

	make_dentry_ptr(inode, &d, (void *)inline_dentry, 2);

	/* a zero return means iteration completed without interruption */
	if (!f2fs_fill_dentries(ctx, &d, 0, fstr))
		ctx->pos = NR_INLINE_DENTRY;

	f2fs_put_page(ipage, 1);
	return 0;
}

/*
 * fiemap for an inline-data inode: report the inline area as a single
 * DATA_INLINE | NOT_ALIGNED | LAST extent whose byte address points
 * inside the on-disk inode block.  Returns -EAGAIN when the inode no
 * longer has inline data (caller falls back to the regular fiemap).
 */
int f2fs_inline_data_fiemap(struct inode *inode,
		struct fiemap_extent_info *fieinfo, __u64 start, __u64 len)
{
	__u64 byteaddr, ilen;
	__u32 flags = FIEMAP_EXTENT_DATA_INLINE | FIEMAP_EXTENT_NOT_ALIGNED |
		FIEMAP_EXTENT_LAST;
	struct node_info ni;
	struct page *ipage;
	int err = 0;

	ipage = get_node_page(F2FS_I_SB(inode), inode->i_ino);
	if (IS_ERR(ipage))
		return PTR_ERR(ipage);

	if (!f2fs_has_inline_data(inode)) {
		err = -EAGAIN;
		goto out;
	}

	/* clamp the reported range to the valid inline bytes */
	ilen = min_t(size_t, MAX_INLINE_DATA, i_size_read(inode));
	if (start >= ilen)
		goto out;
	if (start + len < ilen)
		ilen = start + len;
	ilen -= start;

	/* byte address = node block address + offset of the inline area */
	get_node_info(F2FS_I_SB(inode), inode->i_ino, &ni);
	byteaddr = (__u64)ni.blk_addr << inode->i_sb->s_blocksize_bits;
	byteaddr += (char *)inline_data_addr(ipage) - (char *)F2FS_INODE(ipage);
	err = fiemap_fill_next_extent(fieinfo, start, byteaddr, ilen, flags);
out:
	f2fs_put_page(ipage, 1);
	return err;
}