/*
 * fs/f2fs/inline.c
 * Copyright (c) 2013, Intel Corporation
 * Authors: Huajun Li <huajun.li@intel.com>
 *          Haicheng Li <haicheng.li@intel.com>
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/fs.h>
#include <linux/f2fs_fs.h>

#include "f2fs.h"

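/*
 * Check whether a regular or symlink inode is allowed to keep its data
 * inline in the inode block.
 */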
bool f2fs_may_inline_data(struct inode *inode)
{
	if (!test_opt(F2FS_I_SB(inode), INLINE_DATA))
		return false;

	if (f2fs_is_atomic_file(inode))
		return false;

	if (!S_ISREG(inode->i_mode) && !S_ISLNK(inode->i_mode))
		return false;

	if (i_size_read(inode) > MAX_INLINE_DATA)
		return false;

	if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode))
		return false;

	return true;
}

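/* Check whether a directory is allowed to keep its dentries inline. */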
bool f2fs_may_inline_dentry(struct inode *inode)
{
	if (!test_opt(F2FS_I_SB(inode), INLINE_DENTRY))
		return false;

	if (!S_ISDIR(inode->i_mode))
		return false;

	return true;
}

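/*
 * Fill a data page from the inline area of the inode page (ipage), zeroing
 * the part of the page beyond MAX_INLINE_DATA.
 */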
void read_inline_data(struct page *page, struct page *ipage)
{
	void *src_addr, *dst_addr;

	if (PageUptodate(page))
		return;

	f2fs_bug_on(F2FS_P_SB(page), page->index);

	zero_user_segment(page, MAX_INLINE_DATA, PAGE_CACHE_SIZE);

	/* Copy the whole inline data block */
	src_addr = inline_data_addr(ipage);
	dst_addr = kmap_atomic(page);
	memcpy(dst_addr, src_addr, MAX_INLINE_DATA);
	flush_dcache_page(page);
	kunmap_atomic(dst_addr);
	SetPageUptodate(page);
}

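/*
 * Zero the inline data area from @from to its end.  Returns false when
 * @from lies beyond the inline region and nothing needs to be cleared.
 */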
bool truncate_inline_inode(struct page *ipage, u64 from)
{
	void *addr;

	if (from >= MAX_INLINE_DATA)
		return false;

	addr = inline_data_addr(ipage);

	f2fs_wait_on_page_writeback(ipage, NODE);
	memset(addr + from, 0, MAX_INLINE_DATA - from);

	return true;
}

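/*
 * Serve a buffered read of an inline inode: copy the inline data into page 0
 * and zero any other page.  Returns -EAGAIN when the inline flag has been
 * cleared meanwhile, so the caller falls back to the regular read path.
 */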
int f2fs_read_inline_data(struct inode *inode, struct page *page)
{
	struct page *ipage;

	ipage = get_node_page(F2FS_I_SB(inode), inode->i_ino);
	if (IS_ERR(ipage)) {
		unlock_page(page);
		return PTR_ERR(ipage);
	}

	if (!f2fs_has_inline_data(inode)) {
		f2fs_put_page(ipage, 1);
		return -EAGAIN;
	}

	if (page->index)
		zero_user_segment(page, 0, PAGE_CACHE_SIZE);
	else
		read_inline_data(page, ipage);

	SetPageUptodate(page);
	f2fs_put_page(ipage, 1);
	unlock_page(page);
	return 0;
}

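/*
 * Move the inline data out into a real data block: reserve block 0, copy the
 * inline bytes into @page, write the block back synchronously, and finally
 * clear the inline area and flags of the inode.
 */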
int f2fs_convert_inline_page(struct dnode_of_data *dn, struct page *page)
{
	void *src_addr, *dst_addr;
	struct f2fs_io_info fio = {
		.sbi = F2FS_I_SB(dn->inode),
		.type = DATA,
		.rw = WRITE_SYNC | REQ_PRIO,
		.page = page,
		.encrypted_page = NULL,
	};
	int dirty, err;

	f2fs_bug_on(F2FS_I_SB(dn->inode), page->index);

	if (!f2fs_exist_data(dn->inode))
		goto clear_out;

	err = f2fs_reserve_block(dn, 0);
	if (err)
		return err;

	f2fs_wait_on_page_writeback(page, DATA);

	if (PageUptodate(page))
		goto no_update;

	zero_user_segment(page, MAX_INLINE_DATA, PAGE_CACHE_SIZE);

	/* Copy the whole inline data block */
	src_addr = inline_data_addr(dn->inode_page);
	dst_addr = kmap_atomic(page);
	memcpy(dst_addr, src_addr, MAX_INLINE_DATA);
	flush_dcache_page(page);
	kunmap_atomic(dst_addr);
	SetPageUptodate(page);
no_update:
	set_page_dirty(page);

	/* clear dirty state */
	dirty = clear_page_dirty_for_io(page);

	/* write data page to try to make data consistent */
	set_page_writeback(page);
	fio.blk_addr = dn->data_blkaddr;
	write_data_page(dn, &fio);
	set_data_blkaddr(dn);
	f2fs_update_extent_cache(dn);
	f2fs_wait_on_page_writeback(page, DATA);
	if (dirty)
		inode_dec_dirty_pages(dn->inode);

	/* this converted inline_data should be recovered. */
	set_inode_flag(F2FS_I(dn->inode), FI_APPEND_WRITE);

	/* clear inline data and flag after data writeback */
	truncate_inline_inode(dn->inode_page, 0);
clear_out:
	stat_dec_inline_inode(dn->inode);
	f2fs_clear_inline_inode(dn->inode);
	sync_inode_page(dn);
	f2fs_put_dnode(dn);
	return 0;
}

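/*
 * Entry point for converting an inline-data inode: grab page index 0 of the
 * inode mapping and do the conversion under f2fs_lock_op() protection.
 */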
int f2fs_convert_inline_inode(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn;
	struct page *ipage, *page;
	int err = 0;

	page = grab_cache_page(inode->i_mapping, 0);
	if (!page)
		return -ENOMEM;

	f2fs_lock_op(sbi);

	ipage = get_node_page(sbi, inode->i_ino);
	if (IS_ERR(ipage)) {
		err = PTR_ERR(ipage);
		goto out;
	}

	set_new_dnode(&dn, inode, ipage, ipage, 0);

	if (f2fs_has_inline_data(inode))
		err = f2fs_convert_inline_page(&dn, page);

	f2fs_put_dnode(&dn);
out:
	f2fs_unlock_op(sbi);

	f2fs_put_page(page, 1);
	return err;
}

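/*
 * Copy a dirty page back into the inline area of the inode page.  Returns
 * -EAGAIN if the inode no longer carries inline data.
 */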
int f2fs_write_inline_data(struct inode *inode, struct page *page)
{
	void *src_addr, *dst_addr;
	struct dnode_of_data dn;
	int err;

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, 0, LOOKUP_NODE);
	if (err)
		return err;

	if (!f2fs_has_inline_data(inode)) {
		f2fs_put_dnode(&dn);
		return -EAGAIN;
	}

	f2fs_bug_on(F2FS_I_SB(inode), page->index);

	f2fs_wait_on_page_writeback(dn.inode_page, NODE);
	src_addr = kmap_atomic(page);
	dst_addr = inline_data_addr(dn.inode_page);
	memcpy(dst_addr, src_addr, MAX_INLINE_DATA);
	kunmap_atomic(src_addr);

	set_inode_flag(F2FS_I(inode), FI_APPEND_WRITE);
	set_inode_flag(F2FS_I(inode), FI_DATA_EXIST);

	sync_inode_page(&dn);
	f2fs_put_dnode(&dn);
	return 0;
}

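/*
 * Roll-forward recovery of inline data.  Returns true when the inline data
 * has been handled here and the caller need not recover data blocks.
 */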
bool recover_inline_data(struct inode *inode, struct page *npage)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_inode *ri = NULL;
	void *src_addr, *dst_addr;
	struct page *ipage;

	/*
	 * The inline_data recovery policy is as follows.
	 * [prev.] [next] of inline_data flag
	 *    o       o  -> recover inline_data
	 *    o       x  -> remove inline_data, and then recover data blocks
	 *    x       o  -> remove inline_data, and then recover inline_data
	 *    x       x  -> recover data blocks
	 */
	if (IS_INODE(npage))
		ri = F2FS_INODE(npage);

	if (f2fs_has_inline_data(inode) &&
			ri && (ri->i_inline & F2FS_INLINE_DATA)) {
process_inline:
		ipage = get_node_page(sbi, inode->i_ino);
		f2fs_bug_on(sbi, IS_ERR(ipage));

		f2fs_wait_on_page_writeback(ipage, NODE);

		src_addr = inline_data_addr(npage);
		dst_addr = inline_data_addr(ipage);
		memcpy(dst_addr, src_addr, MAX_INLINE_DATA);

		set_inode_flag(F2FS_I(inode), FI_INLINE_DATA);
		set_inode_flag(F2FS_I(inode), FI_DATA_EXIST);

		update_inode(inode, ipage);
		f2fs_put_page(ipage, 1);
		return true;
	}

	if (f2fs_has_inline_data(inode)) {
		ipage = get_node_page(sbi, inode->i_ino);
		f2fs_bug_on(sbi, IS_ERR(ipage));
		truncate_inline_inode(ipage, 0);
		f2fs_clear_inline_inode(inode);
		update_inode(inode, ipage);
		f2fs_put_page(ipage, 1);
	} else if (ri && (ri->i_inline & F2FS_INLINE_DATA)) {
		truncate_blocks(inode, 0, false);
		goto process_inline;
	}
	return false;
}

struct f2fs_dir_entry *find_in_inline_dir(struct inode *dir,
			struct f2fs_filename *fname, struct page **res_page)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dir->i_sb);
	struct f2fs_inline_dentry *inline_dentry;
	struct qstr name = FSTR_TO_QSTR(&fname->disk_name);
	struct f2fs_dir_entry *de;
	struct f2fs_dentry_ptr d;
	struct page *ipage;
	f2fs_hash_t namehash;

	ipage = get_node_page(sbi, dir->i_ino);
	if (IS_ERR(ipage))
		return NULL;

	namehash = f2fs_dentry_hash(&name);

	inline_dentry = inline_data_addr(ipage);

	make_dentry_ptr(NULL, &d, (void *)inline_dentry, 2);
	de = find_target_dentry(fname, namehash, NULL, &d);
	unlock_page(ipage);
	if (de)
		*res_page = ipage;
	else
		f2fs_put_page(ipage, 0);

	/*
	 * For the most part, it should be a bug when name_len is zero.
	 * We stop here to figure out where the bug has occurred.
	 */
	f2fs_bug_on(sbi, d.max < 0);
	return de;
}

struct f2fs_dir_entry *f2fs_parent_inline_dir(struct inode *dir,
							struct page **p)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dir);
	struct page *ipage;
	struct f2fs_dir_entry *de;
	struct f2fs_inline_dentry *dentry_blk;

	ipage = get_node_page(sbi, dir->i_ino);
	if (IS_ERR(ipage))
		return NULL;

	dentry_blk = inline_data_addr(ipage);
	de = &dentry_blk->dentry[1];
	*p = ipage;
	unlock_page(ipage);
	return de;
}

int make_empty_inline_dir(struct inode *inode, struct inode *parent,
							struct page *ipage)
{
	struct f2fs_inline_dentry *dentry_blk;
	struct f2fs_dentry_ptr d;

	dentry_blk = inline_data_addr(ipage);

	make_dentry_ptr(NULL, &d, (void *)dentry_blk, 2);
	do_make_empty_dir(inode, parent, &d);

	set_page_dirty(ipage);

	/* update i_size to MAX_INLINE_DATA */
	if (i_size_read(inode) < MAX_INLINE_DATA) {
		i_size_write(inode, MAX_INLINE_DATA);
		set_inode_flag(F2FS_I(inode), FI_UPDATE_DIR);
	}
	return 0;
}

/*
 * NOTE: ipage is grabbed by caller, but if any error occurs, we should
 * release ipage in this function.
 */
static int f2fs_convert_inline_dir(struct inode *dir, struct page *ipage,
				struct f2fs_inline_dentry *inline_dentry)
{
	struct page *page;
	struct dnode_of_data dn;
	struct f2fs_dentry_block *dentry_blk;
	int err;

	page = grab_cache_page(dir->i_mapping, 0);
	if (!page) {
		f2fs_put_page(ipage, 1);
		return -ENOMEM;
	}

	set_new_dnode(&dn, dir, ipage, NULL, 0);
	err = f2fs_reserve_block(&dn, 0);
	if (err)
		goto out;

	f2fs_wait_on_page_writeback(page, DATA);
	zero_user_segment(page, 0, PAGE_CACHE_SIZE);

	dentry_blk = kmap_atomic(page);

	/* copy data from inline dentry block to new dentry block */
	memcpy(dentry_blk->dentry_bitmap, inline_dentry->dentry_bitmap,
					INLINE_DENTRY_BITMAP_SIZE);
	memcpy(dentry_blk->dentry, inline_dentry->dentry,
			sizeof(struct f2fs_dir_entry) * NR_INLINE_DENTRY);
	memcpy(dentry_blk->filename, inline_dentry->filename,
					NR_INLINE_DENTRY * F2FS_SLOT_LEN);

	kunmap_atomic(dentry_blk);
	SetPageUptodate(page);
	set_page_dirty(page);

	/* clear inline dir and flag after data writeback */
	truncate_inline_inode(ipage, 0);

	stat_dec_inline_dir(dir);
	clear_inode_flag(F2FS_I(dir), FI_INLINE_DENTRY);

	if (i_size_read(dir) < PAGE_CACHE_SIZE) {
		i_size_write(dir, PAGE_CACHE_SIZE);
		set_inode_flag(F2FS_I(dir), FI_UPDATE_DIR);
	}

	sync_inode_page(&dn);
out:
	f2fs_put_page(page, 1);
	return err;
}

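/*
 * Add an entry to an inline directory.  When there is no room left, the
 * directory is converted to a regular dentry block and -EAGAIN is returned
 * so the caller can retry through the non-inline path.
 */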
int f2fs_add_inline_entry(struct inode *dir, const struct qstr *name,
			struct inode *inode, nid_t ino, umode_t mode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dir);
	struct page *ipage;
	unsigned int bit_pos;
	f2fs_hash_t name_hash;
	size_t namelen = name->len;
	struct f2fs_inline_dentry *dentry_blk = NULL;
	struct f2fs_dentry_ptr d;
	int slots = GET_DENTRY_SLOTS(namelen);
	struct page *page = NULL;
	int err = 0;

	ipage = get_node_page(sbi, dir->i_ino);
	if (IS_ERR(ipage))
		return PTR_ERR(ipage);

	dentry_blk = inline_data_addr(ipage);
	bit_pos = room_for_filename(&dentry_blk->dentry_bitmap,
						slots, NR_INLINE_DENTRY);
	if (bit_pos >= NR_INLINE_DENTRY) {
		err = f2fs_convert_inline_dir(dir, ipage, dentry_blk);
		if (err)
			return err;
		err = -EAGAIN;
		goto out;
	}

	if (inode) {
		down_write(&F2FS_I(inode)->i_sem);
		page = init_inode_metadata(inode, dir, name, ipage);
		if (IS_ERR(page)) {
			err = PTR_ERR(page);
			goto fail;
		}
	}

	f2fs_wait_on_page_writeback(ipage, NODE);

	name_hash = f2fs_dentry_hash(name);
	make_dentry_ptr(NULL, &d, (void *)dentry_blk, 2);
	f2fs_update_dentry(ino, mode, &d, name, name_hash, bit_pos);

	set_page_dirty(ipage);

	/* we don't need to mark_inode_dirty now */
	if (inode) {
		F2FS_I(inode)->i_pino = dir->i_ino;
		update_inode(inode, page);
		f2fs_put_page(page, 1);
	}

	update_parent_metadata(dir, inode, 0);
fail:
	if (inode)
		up_write(&F2FS_I(inode)->i_sem);

	if (is_inode_flag_set(F2FS_I(dir), FI_UPDATE_DIR)) {
		update_inode(dir, ipage);
		clear_inode_flag(F2FS_I(dir), FI_UPDATE_DIR);
	}
out:
	f2fs_put_page(ipage, 1);
	return err;
}

void f2fs_delete_inline_entry(struct f2fs_dir_entry *dentry, struct page *page,
					struct inode *dir, struct inode *inode)
{
	struct f2fs_inline_dentry *inline_dentry;
	int slots = GET_DENTRY_SLOTS(le16_to_cpu(dentry->name_len));
	unsigned int bit_pos;
	int i;

	lock_page(page);
	f2fs_wait_on_page_writeback(page, NODE);

	inline_dentry = inline_data_addr(page);
	bit_pos = dentry - inline_dentry->dentry;
	for (i = 0; i < slots; i++)
		test_and_clear_bit_le(bit_pos + i,
				&inline_dentry->dentry_bitmap);

	set_page_dirty(page);

	dir->i_ctime = dir->i_mtime = CURRENT_TIME;

	if (inode)
		f2fs_drop_nlink(dir, inode, page);

	f2fs_put_page(page, 1);
}

bool f2fs_empty_inline_dir(struct inode *dir)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dir);
	struct page *ipage;
	unsigned int bit_pos = 2;
	struct f2fs_inline_dentry *dentry_blk;

	ipage = get_node_page(sbi, dir->i_ino);
	if (IS_ERR(ipage))
		return false;

	dentry_blk = inline_data_addr(ipage);
	bit_pos = find_next_bit_le(&dentry_blk->dentry_bitmap,
					NR_INLINE_DENTRY,
					bit_pos);

	f2fs_put_page(ipage, 1);

	if (bit_pos < NR_INLINE_DENTRY)
		return false;

	return true;
}

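/* readdir() over an inline directory: emit all inline dentries in one pass. */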
int f2fs_read_inline_dir(struct file *file, struct dir_context *ctx,
				struct f2fs_str *fstr)
{
	struct inode *inode = file_inode(file);
	struct f2fs_inline_dentry *inline_dentry = NULL;
	struct page *ipage = NULL;
	struct f2fs_dentry_ptr d;

	if (ctx->pos == NR_INLINE_DENTRY)
		return 0;

	ipage = get_node_page(F2FS_I_SB(inode), inode->i_ino);
	if (IS_ERR(ipage))
		return PTR_ERR(ipage);

	inline_dentry = inline_data_addr(ipage);

	make_dentry_ptr(inode, &d, (void *)inline_dentry, 2);

	if (!f2fs_fill_dentries(ctx, &d, 0, fstr))
		ctx->pos = NR_INLINE_DENTRY;

	f2fs_put_page(ipage, 1);
	return 0;
}