/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_inode.h"
#include "xfs_errortag.h"
#include "xfs_error.h"
#include "xfs_cksum.h"
#include "xfs_icache.h"
#include "xfs_trans.h"
#include "xfs_ialloc.h"
#include "xfs_dir2.h"

/*
 * Check that none of the inodes in the buffer have a next
 * unlinked field of 0.
 */
#if defined(DEBUG)
void
xfs_inobp_check(
	xfs_mount_t	*mp,
	xfs_buf_t	*bp)
{
	int		i;
	int		j;
	xfs_dinode_t	*dip;

	j = mp->m_inode_cluster_size >> mp->m_sb.sb_inodelog;

	for (i = 0; i < j; i++) {
		dip = xfs_buf_offset(bp, i * mp->m_sb.sb_inodesize);
		if (!dip->di_next_unlinked)  {
			xfs_alert(mp,
	"Detected bogus zero next_unlinked field in inode %d buffer 0x%llx.",
				i, (long long)bp->b_bn);
		}
	}
}
#endif

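/*
 * Return true if the on-disk inode version is valid for this filesystem:
 * v5 (CRC-enabled) filesystems require v3 inodes, while older filesystems
 * accept v1 or v2 inodes.
 */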
bool
xfs_dinode_good_version(
	struct xfs_mount *mp,
	__u8		version)
{
	if (xfs_sb_version_hascrc(&mp->m_sb))
		return version == 3;

	return version == 1 || version == 2;
}

/*
 * If we are doing readahead on an inode buffer, we might be in log recovery
 * reading an inode allocation buffer that hasn't yet been replayed, and hence
 * has not had the inode cores stamped into it. Hence, for readahead, the
 * buffer may be invalid.
 *
 * If the readahead buffer is invalid, we need to mark it with an error and
 * clear the DONE status of the buffer so that a followup read will re-read it
 * from disk. We don't report the error otherwise to avoid warnings during log
 * recovery and unnecessary panics on debug kernels. We use EIO here
 * because all we want to do is say readahead failed; there is no-one to report
 * the error to, so this will distinguish it from a non-ra verifier failure.
 * Changes to this readahead error behaviour also need to be reflected in
 * xfs_dquot_buf_readahead_verify().
 */
static void
xfs_inode_buf_verify(
	struct xfs_buf	*bp,
	bool		readahead)
{
	struct xfs_mount *mp = bp->b_target->bt_mount;
	int		i;
	int		ni;

	/*
	 * Validate the magic number and version of every inode in the buffer
	 */
	ni = XFS_BB_TO_FSB(mp, bp->b_length) * mp->m_sb.sb_inopblock;
	for (i = 0; i < ni; i++) {
		int		di_ok;
		xfs_dinode_t	*dip;

		dip = xfs_buf_offset(bp, (i << mp->m_sb.sb_inodelog));
		di_ok = dip->di_magic == cpu_to_be16(XFS_DINODE_MAGIC) &&
			xfs_dinode_good_version(mp, dip->di_version);
		if (unlikely(XFS_TEST_ERROR(!di_ok, mp,
						XFS_ERRTAG_ITOBP_INOTOBP))) {
			if (readahead) {
				bp->b_flags &= ~XBF_DONE;
				xfs_buf_ioerror(bp, -EIO);
				return;
			}

			xfs_verifier_error(bp, -EFSCORRUPTED, __this_address);
#ifdef DEBUG
			xfs_alert(mp,
				"bad inode magic/vsn daddr %lld #%d (magic=%x)",
				(unsigned long long)bp->b_bn, i,
				be16_to_cpu(dip->di_magic));
#endif
		}
	}
	xfs_inobp_check(mp, bp);
}


static void
xfs_inode_buf_read_verify(
	struct xfs_buf	*bp)
{
	xfs_inode_buf_verify(bp, false);
}

static void
xfs_inode_buf_readahead_verify(
	struct xfs_buf	*bp)
{
	xfs_inode_buf_verify(bp, true);
}

static void
xfs_inode_buf_write_verify(
	struct xfs_buf	*bp)
{
	xfs_inode_buf_verify(bp, false);
}

const struct xfs_buf_ops xfs_inode_buf_ops = {
	.name = "xfs_inode",
	.verify_read = xfs_inode_buf_read_verify,
	.verify_write = xfs_inode_buf_write_verify,
};

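/*
 * Readahead variant of the above. It differs from xfs_inode_buf_ops only in
 * that read verification failures are not reported; the buffer is marked
 * failed and XBF_DONE is cleared so that a later read retries it from disk.
 */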
const struct xfs_buf_ops xfs_inode_buf_ra_ops = {
	.name = "xxfs_inode_ra",
	.verify_read = xfs_inode_buf_readahead_verify,
	.verify_write = xfs_inode_buf_write_verify,
};


/*
 * This routine is called to map an inode to the buffer containing the on-disk
 * version of the inode.  It returns a pointer to the buffer containing the
 * on-disk inode in the bpp parameter, and in the dipp parameter it returns a
 * pointer to the on-disk inode within that buffer.
 *
 * If a non-zero error is returned, then the contents of bpp and dipp are
 * undefined.
 */
int
xfs_imap_to_bp(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	struct xfs_imap		*imap,
	struct xfs_dinode       **dipp,
	struct xfs_buf		**bpp,
	uint			buf_flags,
	uint			iget_flags)
{
	struct xfs_buf		*bp;
	int			error;

	buf_flags |= XBF_UNMAPPED;
	error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, imap->im_blkno,
				   (int)imap->im_len, buf_flags, &bp,
				   &xfs_inode_buf_ops);
	if (error) {
		if (error == -EAGAIN) {
			ASSERT(buf_flags & XBF_TRYLOCK);
			return error;
		}

		if (error == -EFSCORRUPTED &&
		    (iget_flags & XFS_IGET_UNTRUSTED))
			return -EINVAL;

		xfs_warn(mp, "%s: xfs_trans_read_buf() returned error %d.",
			__func__, error);
		return error;
	}

	*bpp = bp;
	*dipp = xfs_buf_offset(bp, imap->im_boffset);
	return 0;
}

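/*
 * Copy the on-disk inode core into the in-core inode and the VFS inode,
 * converting fields from big-endian disk order as we go.
 */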
void
xfs_inode_from_disk(
	struct xfs_inode	*ip,
	struct xfs_dinode	*from)
{
	struct xfs_icdinode	*to = &ip->i_d;
	struct inode		*inode = VFS_I(ip);


	/*
	 * Convert v1 inodes immediately to v2 inode format as this is the
	 * minimum inode version format we support in the rest of the code.
	 */
	to->di_version = from->di_version;
	if (to->di_version == 1) {
		set_nlink(inode, be16_to_cpu(from->di_onlink));
		to->di_projid_lo = 0;
		to->di_projid_hi = 0;
		to->di_version = 2;
	} else {
		set_nlink(inode, be32_to_cpu(from->di_nlink));
		to->di_projid_lo = be16_to_cpu(from->di_projid_lo);
		to->di_projid_hi = be16_to_cpu(from->di_projid_hi);
	}

	to->di_format = from->di_format;
	to->di_uid = be32_to_cpu(from->di_uid);
	to->di_gid = be32_to_cpu(from->di_gid);
	to->di_flushiter = be16_to_cpu(from->di_flushiter);

	/*
	 * Time is signed, so we need to convert to signed 32 bit before
	 * storing it in the inode timestamp, which may be 64 bit. Otherwise
	 * a time before the epoch is converted to a time long after the
	 * epoch on 64 bit systems.
	 */
	inode->i_atime.tv_sec = (int)be32_to_cpu(from->di_atime.t_sec);
	inode->i_atime.tv_nsec = (int)be32_to_cpu(from->di_atime.t_nsec);
	inode->i_mtime.tv_sec = (int)be32_to_cpu(from->di_mtime.t_sec);
	inode->i_mtime.tv_nsec = (int)be32_to_cpu(from->di_mtime.t_nsec);
	inode->i_ctime.tv_sec = (int)be32_to_cpu(from->di_ctime.t_sec);
	inode->i_ctime.tv_nsec = (int)be32_to_cpu(from->di_ctime.t_nsec);
	inode->i_generation = be32_to_cpu(from->di_gen);
	inode->i_mode = be16_to_cpu(from->di_mode);

	to->di_size = be64_to_cpu(from->di_size);
	to->di_nblocks = be64_to_cpu(from->di_nblocks);
	to->di_extsize = be32_to_cpu(from->di_extsize);
	to->di_nextents = be32_to_cpu(from->di_nextents);
	to->di_anextents = be16_to_cpu(from->di_anextents);
	to->di_forkoff = from->di_forkoff;
	to->di_aformat	= from->di_aformat;
	to->di_dmevmask	= be32_to_cpu(from->di_dmevmask);
	to->di_dmstate	= be16_to_cpu(from->di_dmstate);
	to->di_flags	= be16_to_cpu(from->di_flags);

	if (to->di_version == 3) {
		inode->i_version = be64_to_cpu(from->di_changecount);
		to->di_crtime.t_sec = be32_to_cpu(from->di_crtime.t_sec);
		to->di_crtime.t_nsec = be32_to_cpu(from->di_crtime.t_nsec);
		to->di_flags2 = be64_to_cpu(from->di_flags2);
		to->di_cowextsize = be32_to_cpu(from->di_cowextsize);
	}
}

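/*
 * Copy the in-core inode and VFS inode state back into the on-disk inode
 * core, converting to big-endian disk order. The lsn is only stamped into
 * v3 inodes.
 */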
void
xfs_inode_to_disk(
	struct xfs_inode	*ip,
	struct xfs_dinode	*to,
	xfs_lsn_t		lsn)
{
	struct xfs_icdinode	*from = &ip->i_d;
	struct inode		*inode = VFS_I(ip);

	to->di_magic = cpu_to_be16(XFS_DINODE_MAGIC);
	to->di_onlink = 0;

	to->di_version = from->di_version;
	to->di_format = from->di_format;
	to->di_uid = cpu_to_be32(from->di_uid);
	to->di_gid = cpu_to_be32(from->di_gid);
	to->di_projid_lo = cpu_to_be16(from->di_projid_lo);
	to->di_projid_hi = cpu_to_be16(from->di_projid_hi);

	memset(to->di_pad, 0, sizeof(to->di_pad));
	to->di_atime.t_sec = cpu_to_be32(inode->i_atime.tv_sec);
	to->di_atime.t_nsec = cpu_to_be32(inode->i_atime.tv_nsec);
	to->di_mtime.t_sec = cpu_to_be32(inode->i_mtime.tv_sec);
	to->di_mtime.t_nsec = cpu_to_be32(inode->i_mtime.tv_nsec);
	to->di_ctime.t_sec = cpu_to_be32(inode->i_ctime.tv_sec);
	to->di_ctime.t_nsec = cpu_to_be32(inode->i_ctime.tv_nsec);
	to->di_nlink = cpu_to_be32(inode->i_nlink);
	to->di_gen = cpu_to_be32(inode->i_generation);
	to->di_mode = cpu_to_be16(inode->i_mode);

	to->di_size = cpu_to_be64(from->di_size);
	to->di_nblocks = cpu_to_be64(from->di_nblocks);
	to->di_extsize = cpu_to_be32(from->di_extsize);
	to->di_nextents = cpu_to_be32(from->di_nextents);
	to->di_anextents = cpu_to_be16(from->di_anextents);
	to->di_forkoff = from->di_forkoff;
	to->di_aformat = from->di_aformat;
	to->di_dmevmask = cpu_to_be32(from->di_dmevmask);
	to->di_dmstate = cpu_to_be16(from->di_dmstate);
	to->di_flags = cpu_to_be16(from->di_flags);

	if (from->di_version == 3) {
		to->di_changecount = cpu_to_be64(inode->i_version);
		to->di_crtime.t_sec = cpu_to_be32(from->di_crtime.t_sec);
		to->di_crtime.t_nsec = cpu_to_be32(from->di_crtime.t_nsec);
		to->di_flags2 = cpu_to_be64(from->di_flags2);
		to->di_cowextsize = cpu_to_be32(from->di_cowextsize);
		to->di_ino = cpu_to_be64(ip->i_ino);
		to->di_lsn = cpu_to_be64(lsn);
		memset(to->di_pad2, 0, sizeof(to->di_pad2));
		uuid_copy(&to->di_uuid, &ip->i_mount->m_sb.sb_meta_uuid);
		to->di_flushiter = 0;
	} else {
		to->di_flushiter = cpu_to_be16(from->di_flushiter);
	}
}

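/*
 * Convert a logged inode core (struct xfs_log_dinode) into the on-disk
 * dinode format, as done when replaying an inode log item onto an inode
 * buffer during log recovery.
 */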
void
xfs_log_dinode_to_disk(
	struct xfs_log_dinode	*from,
	struct xfs_dinode	*to)
{
	to->di_magic = cpu_to_be16(from->di_magic);
	to->di_mode = cpu_to_be16(from->di_mode);
	to->di_version = from->di_version;
	to->di_format = from->di_format;
	to->di_onlink = 0;
	to->di_uid = cpu_to_be32(from->di_uid);
	to->di_gid = cpu_to_be32(from->di_gid);
	to->di_nlink = cpu_to_be32(from->di_nlink);
	to->di_projid_lo = cpu_to_be16(from->di_projid_lo);
	to->di_projid_hi = cpu_to_be16(from->di_projid_hi);
	memcpy(to->di_pad, from->di_pad, sizeof(to->di_pad));

	to->di_atime.t_sec = cpu_to_be32(from->di_atime.t_sec);
	to->di_atime.t_nsec = cpu_to_be32(from->di_atime.t_nsec);
	to->di_mtime.t_sec = cpu_to_be32(from->di_mtime.t_sec);
	to->di_mtime.t_nsec = cpu_to_be32(from->di_mtime.t_nsec);
	to->di_ctime.t_sec = cpu_to_be32(from->di_ctime.t_sec);
	to->di_ctime.t_nsec = cpu_to_be32(from->di_ctime.t_nsec);

	to->di_size = cpu_to_be64(from->di_size);
	to->di_nblocks = cpu_to_be64(from->di_nblocks);
	to->di_extsize = cpu_to_be32(from->di_extsize);
	to->di_nextents = cpu_to_be32(from->di_nextents);
	to->di_anextents = cpu_to_be16(from->di_anextents);
	to->di_forkoff = from->di_forkoff;
	to->di_aformat = from->di_aformat;
	to->di_dmevmask = cpu_to_be32(from->di_dmevmask);
	to->di_dmstate = cpu_to_be16(from->di_dmstate);
	to->di_flags = cpu_to_be16(from->di_flags);
	to->di_gen = cpu_to_be32(from->di_gen);

	if (from->di_version == 3) {
		to->di_changecount = cpu_to_be64(from->di_changecount);
		to->di_crtime.t_sec = cpu_to_be32(from->di_crtime.t_sec);
		to->di_crtime.t_nsec = cpu_to_be32(from->di_crtime.t_nsec);
		to->di_flags2 = cpu_to_be64(from->di_flags2);
		to->di_cowextsize = cpu_to_be32(from->di_cowextsize);
		to->di_ino = cpu_to_be64(from->di_ino);
		to->di_lsn = cpu_to_be64(from->di_lsn);
		memcpy(to->di_pad2, from->di_pad2, sizeof(to->di_pad2));
		uuid_copy(&to->di_uuid, &from->di_uuid);
		to->di_flushiter = 0;
	} else {
		to->di_flushiter = cpu_to_be16(from->di_flushiter);
	}
}

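/*
 * Verify an on-disk inode. Returns NULL if the inode passes all checks, or
 * the address of the failing check for corruption reporting.
 */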
xfs_failaddr_t
xfs_dinode_verify(
	struct xfs_mount	*mp,
	xfs_ino_t		ino,
	struct xfs_dinode	*dip)
{
	uint16_t		mode;
	uint16_t		flags;
	uint64_t		flags2;

	if (dip->di_magic != cpu_to_be16(XFS_DINODE_MAGIC))
		return __this_address;

	/* Verify v3 integrity information first */
	if (dip->di_version >= 3) {
		if (!xfs_sb_version_hascrc(&mp->m_sb))
			return __this_address;
		if (!xfs_verify_cksum((char *)dip, mp->m_sb.sb_inodesize,
				      XFS_DINODE_CRC_OFF))
			return __this_address;
		if (be64_to_cpu(dip->di_ino) != ino)
			return __this_address;
		if (!uuid_equal(&dip->di_uuid, &mp->m_sb.sb_meta_uuid))
			return __this_address;
	}

	/* don't allow invalid i_size */
	if (be64_to_cpu(dip->di_size) & (1ULL << 63))
		return __this_address;

	mode = be16_to_cpu(dip->di_mode);
	if (mode && xfs_mode_to_ftype(mode) == XFS_DIR3_FT_UNKNOWN)
		return __this_address;

	/* No zero-length symlinks/dirs. */
	if ((S_ISLNK(mode) || S_ISDIR(mode)) && dip->di_size == 0)
		return __this_address;

	/* only version 3 or greater inodes are extensively verified here */
	if (dip->di_version < 3)
		return NULL;

	flags = be16_to_cpu(dip->di_flags);
	flags2 = be64_to_cpu(dip->di_flags2);

	/* don't allow reflink/cowextsize if we don't have reflink */
	if ((flags2 & (XFS_DIFLAG2_REFLINK | XFS_DIFLAG2_COWEXTSIZE)) &&
	    !xfs_sb_version_hasreflink(&mp->m_sb))
		return __this_address;

	/* don't let reflink and realtime mix */
	if ((flags2 & XFS_DIFLAG2_REFLINK) && (flags & XFS_DIFLAG_REALTIME))
		return __this_address;

	/* don't let reflink and dax mix */
	if ((flags2 & XFS_DIFLAG2_REFLINK) && (flags2 & XFS_DIFLAG2_DAX))
		return __this_address;

	return NULL;
}

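/*
 * Calculate and stamp the CRC for a v3 on-disk inode. v1/v2 inodes have no
 * CRC field, so this is a no-op for them.
 */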
void
xfs_dinode_calc_crc(
	struct xfs_mount	*mp,
	struct xfs_dinode	*dip)
{
	uint32_t		crc;

	if (dip->di_version < 3)
		return;

	ASSERT(xfs_sb_version_hascrc(&mp->m_sb));
	crc = xfs_start_cksum_update((char *)dip, mp->m_sb.sb_inodesize,
			      XFS_DINODE_CRC_OFF);
	dip->di_crc = xfs_end_cksum(crc);
}

/*
 * Read the disk inode attributes into the in-core inode structure.
 *
 * For version 5 superblocks, if we are initialising a new inode and we are not
 * utilising the XFS_MOUNT_IKEEP inode cluster mode, we can simply build the new
 * inode core with a random generation number. If we are keeping inodes around,
 * we need to read the inode cluster to get the existing generation number off
 * disk. Further, if we are using version 4 superblocks (i.e. v1/v2 inode
 * format) then log recovery is dependent on the di_flushiter field being
 * initialised from the current on-disk value and hence we must also read the
 * inode off disk.
 */
int
xfs_iread(
	xfs_mount_t	*mp,
	xfs_trans_t	*tp,
	xfs_inode_t	*ip,
	uint		iget_flags)
{
	xfs_buf_t	*bp;
	xfs_dinode_t	*dip;
	xfs_failaddr_t	fa;
	int		error;

	/*
	 * Fill in the location information in the in-core inode.
	 */
	error = xfs_imap(mp, tp, ip->i_ino, &ip->i_imap, iget_flags);
	if (error)
		return error;

	/* shortcut IO on inode allocation if possible */
	if ((iget_flags & XFS_IGET_CREATE) &&
	    xfs_sb_version_hascrc(&mp->m_sb) &&
	    !(mp->m_flags & XFS_MOUNT_IKEEP)) {
		/* initialise the on-disk inode core */
		memset(&ip->i_d, 0, sizeof(ip->i_d));
		VFS_I(ip)->i_generation = prandom_u32();
		if (xfs_sb_version_hascrc(&mp->m_sb))
			ip->i_d.di_version = 3;
		else
			ip->i_d.di_version = 2;
		return 0;
	}

	/*
	 * Get pointers to the on-disk inode and the buffer containing it.
	 */
	error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &dip, &bp, 0, iget_flags);
	if (error)
		return error;

	/* even unallocated inodes are verified */
	fa = xfs_dinode_verify(mp, ip->i_ino, dip);
	if (fa) {
		xfs_alert(mp, "%s: validation failed for inode %lld at %pS",
				__func__, ip->i_ino, fa);

		XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp, dip);
		error = -EFSCORRUPTED;
		goto out_brelse;
	}

	/*
	 * If the on-disk inode is already linked to a directory
	 * entry, copy all of the inode into the in-core inode.
	 * xfs_iformat_fork() handles copying in the inode format
	 * specific information.
	 * Otherwise, just get the truly permanent information.
	 */
	if (dip->di_mode) {
		xfs_inode_from_disk(ip, dip);
		error = xfs_iformat_fork(ip, dip);
		if (error)  {
#ifdef DEBUG
			xfs_alert(mp, "%s: xfs_iformat() returned error %d",
				__func__, error);
#endif /* DEBUG */
			goto out_brelse;
		}
	} else {
		/*
		 * Partial initialisation of the in-core inode. Just the bits
		 * that xfs_ialloc won't overwrite or relies on being correct.
		 */
		ip->i_d.di_version = dip->di_version;
		VFS_I(ip)->i_generation = be32_to_cpu(dip->di_gen);
		ip->i_d.di_flushiter = be16_to_cpu(dip->di_flushiter);

		/*
		 * Make sure to pull in the mode here as well in
		 * case the inode is released without being used.
		 * This ensures that xfs_inactive() will see that
		 * the inode is already free and not try to mess
		 * with the uninitialized part of it.
		 */
		VFS_I(ip)->i_mode = 0;
	}

	ASSERT(ip->i_d.di_version >= 2);
	ip->i_delayed_blks = 0;

	/*
	 * Mark the buffer containing the inode as something to keep
	 * around for a while.  This helps to keep recently accessed
	 * meta-data in-core longer.
	 */
	xfs_buf_set_ref(bp, XFS_INO_REF);

	/*
	 * Use xfs_trans_brelse() to release the buffer containing the on-disk
	 * inode, because it was acquired with xfs_trans_read_buf() in
	 * xfs_imap_to_bp() above.  If tp is NULL, this is just a normal
	 * brelse().  If we're within a transaction, then xfs_trans_brelse()
	 * will only release the buffer if it is not dirty within the
	 * transaction.  It will be OK to release the buffer in this case,
	 * because inodes on disk are never destroyed and we will be locking the
	 * new in-core inode before putting it in the cache where other
	 * processes can find it.  Thus we don't have to worry about the inode
	 * being changed just because we released the buffer.
	 */
 out_brelse:
	xfs_trans_brelse(tp, bp);
	return error;
}