/*
 * fs/f2fs/recovery.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include "f2fs.h"
#include "node.h"
#include "segment.h"

/*
 * Roll forward recovery scenarios.
 *
 * [Term] F: fsync_mark, D: dentry_mark
 *
 * 1. inode(x) | CP | inode(x) | dnode(F)
 * -> Update the latest inode(x).
 *
 * 2. inode(x) | CP | inode(F) | dnode(F)
 * -> No problem.
 *
 * 3. inode(x) | CP | dnode(F) | inode(x)
 * -> Recover to the latest dnode(F), and drop the last inode(x)
 *
 * 4. inode(x) | CP | dnode(F) | inode(F)
 * -> No problem.
 *
 * 5. CP | inode(x) | dnode(F)
 * -> The inode(DF) was missing. Should drop this dnode(F).
 *
 * 6. CP | inode(DF) | dnode(F)
 * -> No problem.
 *
 * 7. CP | dnode(F) | inode(DF)
 * -> If f2fs_iget fails, then goto next to find inode(DF).
 *
 * 8. CP | dnode(F) | inode(x)
 * -> If f2fs_iget fails, then goto next to find inode(DF).
 *    But it will fail due to no inode(DF).
 */

static struct kmem_cache *fsync_entry_slab;

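/*
 * Roll forward recovery has to re-allocate every block it replays, so
 * report whether the blocks valid at the last checkpoint plus the blocks
 * allocated since then still fit within the user block count.
 */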
bool space_for_roll_forward(struct f2fs_sb_info *sbi)
{
	if (sbi->last_valid_block_count + sbi->alloc_valid_block_count
			> sbi->user_block_count)
		return false;
	return true;
}

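/* Find the entry collected for @ino during the log scan, if any. */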
static struct fsync_inode_entry *get_fsync_inode(struct list_head *head,
								nid_t ino)
{
	struct fsync_inode_entry *entry;

	list_for_each_entry(entry, head, list)
		if (entry->inode->i_ino == ino)
			return entry;

	return NULL;
}

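/*
 * Track @inode in the recovery list.  The entry keeps the caller's inode
 * reference; del_fsync_inode() drops it again with iput().
 */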
static struct fsync_inode_entry *add_fsync_inode(struct list_head *head,
							struct inode *inode)
{
	struct fsync_inode_entry *entry;

	entry = kmem_cache_alloc(fsync_entry_slab, GFP_F2FS_ZERO);
	if (!entry)
		return NULL;

	entry->inode = inode;
	list_add_tail(&entry->list, head);

	return entry;
}

static void del_fsync_inode(struct fsync_inode_entry *entry)
{
	iput(entry->inode);
	list_del(&entry->list);
	kmem_cache_free(fsync_entry_slab, entry);
}

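/*
 * Re-link a fsynced inode into its parent directory, which is found via
 * i_pino in the raw inode and cached in @dir_list.  A stale dentry that
 * carries the same name but a different ino is unlinked first.
 */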
static int recover_dentry(struct inode *inode, struct page *ipage,
						struct list_head *dir_list)
{
	struct f2fs_inode *raw_inode = F2FS_INODE(ipage);
	nid_t pino = le32_to_cpu(raw_inode->i_pino);
	struct f2fs_dir_entry *de;
	struct qstr name;
	struct page *page;
	struct inode *dir, *einode;
	struct fsync_inode_entry *entry;
	int err = 0;

	entry = get_fsync_inode(dir_list, pino);
	if (!entry) {
		dir = f2fs_iget(inode->i_sb, pino);
		if (IS_ERR(dir)) {
			err = PTR_ERR(dir);
			goto out;
		}

		entry = add_fsync_inode(dir_list, dir);
		if (!entry) {
			err = -ENOMEM;
			iput(dir);
			goto out;
		}
	}

	dir = entry->inode;

	if (file_enc_name(inode))
		return 0;

	name.len = le32_to_cpu(raw_inode->i_namelen);
	name.name = raw_inode->i_name;

	if (unlikely(name.len > F2FS_NAME_LEN)) {
		WARN_ON(1);
		err = -ENAMETOOLONG;
		goto out;
	}
retry:
	de = f2fs_find_entry(dir, &name, &page);
	if (de && inode->i_ino == le32_to_cpu(de->ino))
		goto out_unmap_put;

	if (de) {
		einode = f2fs_iget(inode->i_sb, le32_to_cpu(de->ino));
		if (IS_ERR(einode)) {
			WARN_ON(1);
			err = PTR_ERR(einode);
			if (err == -ENOENT)
				err = -EEXIST;
			goto out_unmap_put;
		}
		err = acquire_orphan_inode(F2FS_I_SB(inode));
		if (err) {
			iput(einode);
			goto out_unmap_put;
		}
		f2fs_delete_entry(de, page, dir, einode);
		iput(einode);
		goto retry;
	}
	err = __f2fs_add_link(dir, &name, inode, inode->i_ino, inode->i_mode);

	goto out;

out_unmap_put:
	f2fs_dentry_kunmap(dir, page);
	f2fs_put_page(page, 0);
out:
	f2fs_msg(inode->i_sb, KERN_NOTICE,
			"%s: ino = %x, name = %s, dir = %lx, err = %d",
			__func__, ino_of_node(ipage), raw_inode->i_name,
			IS_ERR(dir) ? 0 : dir->i_ino, err);
	return err;
}

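/*
 * Restore the basic metadata (mode, size and timestamps) of the VFS
 * inode from the raw inode image in the logged node page.
 */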
static void recover_inode(struct inode *inode, struct page *page)
{
	struct f2fs_inode *raw = F2FS_INODE(page);
	char *name;

	inode->i_mode = le16_to_cpu(raw->i_mode);
	i_size_write(inode, le64_to_cpu(raw->i_size));
	inode->i_atime.tv_sec = le64_to_cpu(raw->i_atime);
	inode->i_ctime.tv_sec = le64_to_cpu(raw->i_ctime);
	inode->i_mtime.tv_sec = le64_to_cpu(raw->i_mtime);
	inode->i_atime.tv_nsec = le32_to_cpu(raw->i_atime_nsec);
	inode->i_ctime.tv_nsec = le32_to_cpu(raw->i_ctime_nsec);
	inode->i_mtime.tv_nsec = le32_to_cpu(raw->i_mtime_nsec);

	if (file_enc_name(inode))
		name = "<encrypted>";
	else
		name = F2FS_INODE(page)->i_name;

	f2fs_msg(inode->i_sb, KERN_NOTICE, "recover_inode: ino = %x, name = %s",
			ino_of_node(page), name);
}

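/*
 * If the cached inode already carries timestamps newer than the raw
 * inode in @ipage, the page presumably belongs to an older incarnation
 * of this inode number, so the caller should skip it.
 */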
static bool is_same_inode(struct inode *inode, struct page *ipage)
{
	struct f2fs_inode *ri = F2FS_INODE(ipage);
	struct timespec disk;

	if (!IS_INODE(ipage))
		return true;

	disk.tv_sec = le64_to_cpu(ri->i_ctime);
	disk.tv_nsec = le32_to_cpu(ri->i_ctime_nsec);
	if (timespec_compare(&inode->i_ctime, &disk) > 0)
		return false;

	disk.tv_sec = le64_to_cpu(ri->i_atime);
	disk.tv_nsec = le32_to_cpu(ri->i_atime_nsec);
	if (timespec_compare(&inode->i_atime, &disk) > 0)
		return false;

	disk.tv_sec = le64_to_cpu(ri->i_mtime);
	disk.tv_nsec = le32_to_cpu(ri->i_mtime_nsec);
	if (timespec_compare(&inode->i_mtime, &disk) > 0)
		return false;

	return true;
}

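/*
 * Step #1 of recovery: walk the warm node chain written after the last
 * checkpoint and collect every inode that owns a fsync-marked dnode.
 */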
static int find_fsync_dnodes(struct f2fs_sb_info *sbi, struct list_head *head)
{
	unsigned long long cp_ver = cur_cp_version(F2FS_CKPT(sbi));
	struct curseg_info *curseg;
	struct inode *inode;
	struct page *page = NULL;
	block_t blkaddr;
	int err = 0;

	/* get node pages in the current segment */
	curseg = CURSEG_I(sbi, CURSEG_WARM_NODE);
	blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);

	while (1) {
		struct fsync_inode_entry *entry;

		if (!is_valid_blkaddr(sbi, blkaddr, META_POR))
			return 0;

		page = get_tmp_page(sbi, blkaddr);

		if (cp_ver != cpver_of_node(page))
			break;

		if (!is_fsync_dnode(page))
			goto next;

		entry = get_fsync_inode(head, ino_of_node(page));
		if (entry) {
			if (!is_same_inode(entry->inode, page))
				goto next;
		} else {
			if (IS_INODE(page) && is_dent_dnode(page)) {
				err = recover_inode_page(sbi, page);
				if (err)
					break;
			}

			/*
			 * CP | dnode(F) | inode(DF)
			 * For this case, we should not give up now.
			 */
			inode = f2fs_iget(sbi->sb, ino_of_node(page));
			if (IS_ERR(inode)) {
				err = PTR_ERR(inode);
				if (err == -ENOENT) {
					err = 0;
					goto next;
				}
				break;
			}

			/* add this fsync inode to the list */
			entry = add_fsync_inode(head, inode);
			if (!entry) {
				err = -ENOMEM;
				iput(inode);
				break;
			}
		}
		entry->blkaddr = blkaddr;

		if (IS_INODE(page) && is_dent_dnode(page))
			entry->last_dentry = blkaddr;
next:
		/* check next segment */
		blkaddr = next_blkaddr_of_node(page);
		f2fs_put_page(page, 1);

		ra_meta_pages_cond(sbi, blkaddr);
	}
	f2fs_put_page(page, 1);
	return err;
}

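/* Drop all remaining entries together with their inode references. */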
static void destroy_fsync_dnodes(struct list_head *head)
{
	struct fsync_inode_entry *entry, *tmp;

	list_for_each_entry_safe(entry, tmp, head, list)
		del_fsync_inode(entry);
}

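/*
 * The block at @blkaddr may still be referenced by an index in an older
 * node page.  Locate that previous owner through the segment summary and
 * truncate its stale block pointer before the block is reused.
 */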
static int check_index_in_prev_nodes(struct f2fs_sb_info *sbi,
			block_t blkaddr, struct dnode_of_data *dn)
{
	struct seg_entry *sentry;
	unsigned int segno = GET_SEGNO(sbi, blkaddr);
	unsigned short blkoff = GET_BLKOFF_FROM_SEG0(sbi, blkaddr);
	struct f2fs_summary_block *sum_node;
	struct f2fs_summary sum;
	struct page *sum_page, *node_page;
	struct dnode_of_data tdn = *dn;
	nid_t ino, nid;
	struct inode *inode;
	unsigned int offset;
	block_t bidx;
	int i;

	sentry = get_seg_entry(sbi, segno);
	if (!f2fs_test_bit(blkoff, sentry->cur_valid_map))
		return 0;

	/* Get the previous summary */
	for (i = CURSEG_WARM_DATA; i <= CURSEG_COLD_DATA; i++) {
		struct curseg_info *curseg = CURSEG_I(sbi, i);
		if (curseg->segno == segno) {
			sum = curseg->sum_blk->entries[blkoff];
			goto got_it;
		}
	}

	sum_page = get_sum_page(sbi, segno);
	sum_node = (struct f2fs_summary_block *)page_address(sum_page);
	sum = sum_node->entries[blkoff];
	f2fs_put_page(sum_page, 1);
got_it:
	/* Use the locked dnode page and inode */
	nid = le32_to_cpu(sum.nid);
	if (dn->inode->i_ino == nid) {
		tdn.nid = nid;
		if (!dn->inode_page_locked)
			lock_page(dn->inode_page);
		tdn.node_page = dn->inode_page;
		tdn.ofs_in_node = le16_to_cpu(sum.ofs_in_node);
		goto truncate_out;
	} else if (dn->nid == nid) {
		tdn.ofs_in_node = le16_to_cpu(sum.ofs_in_node);
		goto truncate_out;
	}

	/* Get the node page */
	node_page = get_node_page(sbi, nid);
	if (IS_ERR(node_page))
		return PTR_ERR(node_page);

	offset = ofs_of_node(node_page);
	ino = ino_of_node(node_page);
	f2fs_put_page(node_page, 1);

	if (ino != dn->inode->i_ino) {
		/* Deallocate previous index in the node page */
		inode = f2fs_iget(sbi->sb, ino);
		if (IS_ERR(inode))
			return PTR_ERR(inode);
	} else {
		inode = dn->inode;
	}

	bidx = start_bidx_of_node(offset, inode) + le16_to_cpu(sum.ofs_in_node);

	/*
	 * If the inode page is locked, unlock it temporarily; its
	 * reference count stays held.
	 */
	if (ino == dn->inode->i_ino && dn->inode_page_locked)
		unlock_page(dn->inode_page);

	set_new_dnode(&tdn, inode, NULL, NULL, 0);
	if (get_dnode_of_data(&tdn, bidx, LOOKUP_NODE))
		goto out;

	if (tdn.data_blkaddr == blkaddr)
		truncate_data_blocks_range(&tdn, 1);

	f2fs_put_dnode(&tdn);
out:
	if (ino != dn->inode->i_ino)
		iput(inode);
	else if (dn->inode_page_locked)
		lock_page(dn->inode_page);
	return 0;

truncate_out:
	if (datablock_addr(tdn.node_page, tdn.ofs_in_node) == blkaddr)
		truncate_data_blocks_range(&tdn, 1);
	if (dn->inode->i_ino == nid && !dn->inode_page_locked)
		unlock_page(dn->inode_page);
	return 0;
}

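/*
 * Replay a single fsynced dnode: recover xattrs and inline data first,
 * then walk the block address table and bring each on-disk index in line
 * with the logged node page.
 */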
static int do_recover_data(struct f2fs_sb_info *sbi, struct inode *inode,
					struct page *page, block_t blkaddr)
{
	struct dnode_of_data dn;
	struct node_info ni;
	unsigned int start, end;
	int err = 0, recovered = 0;

	/* step 1: recover xattr */
	if (IS_INODE(page)) {
		recover_inline_xattr(inode, page);
	} else if (f2fs_has_xattr_block(ofs_of_node(page))) {
		/*
		 * Deprecated; xattr blocks should be found in the cold log.
		 * But we keep this path for backward compatibility.
		 */
		recover_xattr_data(inode, page, blkaddr);
		goto out;
	}

	/* step 2: recover inline data */
	if (recover_inline_data(inode, page))
		goto out;

	/* step 3: recover data indices */
	start = start_bidx_of_node(ofs_of_node(page), inode);
	end = start + ADDRS_PER_PAGE(page, inode);

	set_new_dnode(&dn, inode, NULL, NULL, 0);

	err = get_dnode_of_data(&dn, start, ALLOC_NODE);
	if (err)
		goto out;

	f2fs_wait_on_page_writeback(dn.node_page, NODE, true);

	get_node_info(sbi, dn.nid, &ni);
	f2fs_bug_on(sbi, ni.ino != ino_of_node(page));
	f2fs_bug_on(sbi, ofs_of_node(dn.node_page) != ofs_of_node(page));

	for (; start < end; start++, dn.ofs_in_node++) {
		block_t src, dest;

		src = datablock_addr(dn.node_page, dn.ofs_in_node);
		dest = datablock_addr(page, dn.ofs_in_node);

		/* skip recovering if dest is the same as src */
		if (src == dest)
			continue;

		/* dest is invalid, just invalidate src block */
		if (dest == NULL_ADDR) {
			truncate_data_blocks_range(&dn, 1);
			continue;
		}

		/*
		 * dest is a reserved block, invalidate src block
		 * and then reserve one new block in the dnode page.
		 */
		if (dest == NEW_ADDR) {
			truncate_data_blocks_range(&dn, 1);
			err = reserve_new_block(&dn);
			f2fs_bug_on(sbi, err);
			continue;
		}

		/* dest is a valid block, try to recover from src to dest */
		if (is_valid_blkaddr(sbi, dest, META_POR)) {

			if (src == NULL_ADDR) {
				err = reserve_new_block(&dn);
				/* We should not get -ENOSPC */
				f2fs_bug_on(sbi, err);
			}

			/* Check the previous node page having this index */
			err = check_index_in_prev_nodes(sbi, dest, &dn);
			if (err)
				goto err;

			/* write dummy data page */
			f2fs_replace_block(sbi, &dn, src, dest,
						ni.version, false, false);
			recovered++;
		}
	}

	if (IS_INODE(dn.node_page))
		sync_inode_page(&dn);

	copy_node_footer(dn.node_page, page);
	fill_node_footer(dn.node_page, dn.nid, ni.ino,
					ofs_of_node(page), false);
	set_page_dirty(dn.node_page);
err:
	f2fs_put_dnode(&dn);
out:
	f2fs_msg(sbi->sb, KERN_NOTICE,
		"recover_data: ino = %lx, recovered = %d blocks, err = %d",
		inode->i_ino, recovered, err);
	return err;
}

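/*
 * Step #2 of recovery: walk the warm node chain once more and replay
 * inode updates, dentries and data indices for the inodes collected in
 * step #1.
 */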
static int recover_data(struct f2fs_sb_info *sbi, struct list_head *inode_list,
						struct list_head *dir_list)
{
	unsigned long long cp_ver = cur_cp_version(F2FS_CKPT(sbi));
	struct curseg_info *curseg;
	struct page *page = NULL;
	int err = 0;
	block_t blkaddr;

	/* get node pages in the current segment */
	curseg = CURSEG_I(sbi, CURSEG_WARM_NODE);
	blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);

	while (1) {
		struct fsync_inode_entry *entry;

		if (!is_valid_blkaddr(sbi, blkaddr, META_POR))
			break;

		ra_meta_pages_cond(sbi, blkaddr);

		page = get_tmp_page(sbi, blkaddr);

		if (cp_ver != cpver_of_node(page)) {
			f2fs_put_page(page, 1);
			break;
		}

		entry = get_fsync_inode(inode_list, ino_of_node(page));
		if (!entry)
			goto next;
		/*
		 * inode(x) | CP | inode(x) | dnode(F)
		 * In this case, we can lose the latest inode(x).
		 * So, call recover_inode for the inode update.
		 */
		if (IS_INODE(page))
			recover_inode(entry->inode, page);
		if (entry->last_dentry == blkaddr) {
			err = recover_dentry(entry->inode, page, dir_list);
			if (err) {
				f2fs_put_page(page, 1);
				break;
			}
		}
		err = do_recover_data(sbi, entry->inode, page, blkaddr);
		if (err) {
			f2fs_put_page(page, 1);
			break;
		}

		if (entry->blkaddr == blkaddr)
			del_fsync_inode(entry);
next:
		/* check next segment */
		blkaddr = next_blkaddr_of_node(page);
		f2fs_put_page(page, 1);
	}
	if (!err)
		allocate_new_segments(sbi);
	return err;
}

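/*
 * Entry point of roll forward recovery, called at mount time.  With
 * @check_only set, just report whether there is anything to recover
 * (returning 1) without touching the filesystem.
 */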
int recover_fsync_data(struct f2fs_sb_info *sbi, bool check_only)
{
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_WARM_NODE);
	struct list_head inode_list;
	struct list_head dir_list;
	block_t blkaddr;
	int err;
	int ret = 0;
	bool need_writecp = false;

	fsync_entry_slab = f2fs_kmem_cache_create("f2fs_fsync_inode_entry",
			sizeof(struct fsync_inode_entry));
	if (!fsync_entry_slab)
		return -ENOMEM;

	INIT_LIST_HEAD(&inode_list);
	INIT_LIST_HEAD(&dir_list);

	/* prevent checkpoint */
	mutex_lock(&sbi->cp_mutex);

	blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);

	/* step #1: find fsynced inode numbers */
	err = find_fsync_dnodes(sbi, &inode_list);
	if (err || list_empty(&inode_list))
		goto out;

	if (check_only) {
		ret = 1;
		goto out;
	}

	need_writecp = true;

	/* step #2: recover data */
	err = recover_data(sbi, &inode_list, &dir_list);
	if (!err)
		f2fs_bug_on(sbi, !list_empty(&inode_list));
out:
	destroy_fsync_dnodes(&inode_list);

	/* truncate meta pages used by the recovery */
	truncate_inode_pages_range(META_MAPPING(sbi),
			(loff_t)MAIN_BLKADDR(sbi) << PAGE_SHIFT, -1);

	if (err) {
		truncate_inode_pages_final(NODE_MAPPING(sbi));
		truncate_inode_pages_final(META_MAPPING(sbi));
	}

	clear_sbi_flag(sbi, SBI_POR_DOING);
	if (err) {
		bool invalidate = false;

		if (discard_next_dnode(sbi, blkaddr))
			invalidate = true;

		/* Flush all the NAT/SIT pages */
		while (get_pages(sbi, F2FS_DIRTY_META))
			sync_meta_pages(sbi, META, LONG_MAX);

		/* invalidate temporary meta page */
		if (invalidate)
			invalidate_mapping_pages(META_MAPPING(sbi),
							blkaddr, blkaddr);

		set_ckpt_flags(sbi->ckpt, CP_ERROR_FLAG);
		mutex_unlock(&sbi->cp_mutex);
	} else if (need_writecp) {
		struct cp_control cpc = {
			.reason = CP_RECOVERY,
		};
		mutex_unlock(&sbi->cp_mutex);
		err = write_checkpoint(sbi, &cpc);
	} else {
		mutex_unlock(&sbi->cp_mutex);
	}

	destroy_fsync_dnodes(&dir_list);
	kmem_cache_destroy(fsync_entry_slab);
	return ret ? ret : err;
}