/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_types.h"
#include "xfs_acl.h"
#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_dir2.h"
#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_dir2_sf.h"
#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_ialloc.h"
#include "xfs_quota.h"
#include "xfs_utils.h"
#include "xfs_trans_priv.h"
#include "xfs_inode_item.h"
#include "xfs_bmap.h"
#include "xfs_btree_trace.h"
#include "xfs_dir2_trace.h"


/*
 * Allocate and initialise an xfs_inode.
 */
STATIC struct xfs_inode *
xfs_inode_alloc(
	struct xfs_mount	*mp,
	xfs_ino_t		ino)
{
	struct xfs_inode	*ip;

	/*
	 * if this didn't occur in transactions, we could use
	 * KM_MAYFAIL and return NULL here on ENOMEM. Set the
	 * code up to do this anyway.
	 */
	ip = kmem_zone_alloc(xfs_inode_zone, KM_SLEEP);
	if (!ip)
		return NULL;

	ASSERT(atomic_read(&ip->i_iocount) == 0);
	ASSERT(atomic_read(&ip->i_pincount) == 0);
	ASSERT(!spin_is_locked(&ip->i_flags_lock));
	ASSERT(completion_done(&ip->i_flush));

	/* initialise the xfs inode */
	ip->i_ino = ino;
	ip->i_mount = mp;
	memset(&ip->i_imap, 0, sizeof(struct xfs_imap));
	ip->i_afp = NULL;
	memset(&ip->i_df, 0, sizeof(xfs_ifork_t));
	ip->i_flags = 0;
	ip->i_update_core = 0;
	ip->i_update_size = 0;
	ip->i_delayed_blks = 0;
	memset(&ip->i_d, 0, sizeof(xfs_icdinode_t));
	ip->i_size = 0;
	ip->i_new_size = 0;
	xfs_inode_init_acls(ip);

	/*
	 * Initialize inode's trace buffers.
	 */
#ifdef	XFS_INODE_TRACE
	ip->i_trace = ktrace_alloc(INODE_TRACE_SIZE, KM_NOFS);
#endif
#ifdef XFS_BMAP_TRACE
	ip->i_xtrace = ktrace_alloc(XFS_BMAP_KTRACE_SIZE, KM_NOFS);
#endif
#ifdef XFS_BTREE_TRACE
	ip->i_btrace = ktrace_alloc(XFS_BMBT_KTRACE_SIZE, KM_NOFS);
#endif
#ifdef XFS_RW_TRACE
	ip->i_rwtrace = ktrace_alloc(XFS_RW_KTRACE_SIZE, KM_NOFS);
#endif
#ifdef XFS_ILOCK_TRACE
	ip->i_lock_trace = ktrace_alloc(XFS_ILOCK_KTRACE_SIZE, KM_NOFS);
#endif
#ifdef XFS_DIR2_TRACE
	ip->i_dir_trace = ktrace_alloc(XFS_DIR2_KTRACE_SIZE, KM_NOFS);
#endif
	/*
	 * Now initialise the VFS inode. We do this after the xfs_inode
	 * initialisation as internal failures will result in ->destroy_inode
	 * being called and that will pass down through the reclaim path and
	 * free the XFS inode. This path requires the XFS inode to already be
	 * initialised. Hence if this call fails, the xfs_inode has already
	 * been freed and we should not reference it at all in the error
	 * handling.
	 */
	if (!inode_init_always(mp->m_super, VFS_I(ip)))
		return NULL;

	/* prevent anyone from using this yet */
	VFS_I(ip)->i_state = I_NEW|I_LOCK;

	return ip;
}

/*
 * Check the validity of the inode we just found in the cache
 */
static int
xfs_iget_cache_hit(
	struct xfs_perag	*pag,
	struct xfs_inode	*ip,
	int			flags,
	int			lock_flags) __releases(pag->pag_ici_lock)
{
	struct xfs_mount	*mp = ip->i_mount;
	int			error = EAGAIN;

	/*
	 * If INEW is set this inode is being set up
	 * If IRECLAIM is set this inode is being torn down
	 * Pause and try again.
	 */
	if (xfs_iflags_test(ip, (XFS_INEW|XFS_IRECLAIM))) {
		XFS_STATS_INC(xs_ig_frecycle);
		goto out_error;
	}

	/* If IRECLAIMABLE is set, we've torn down the vfs inode part */
	if (xfs_iflags_test(ip, XFS_IRECLAIMABLE)) {

		/*
		 * If lookup is racing with unlink, then we should return an
		 * error immediately so we don't remove it from the reclaim
		 * list and potentially leak the inode.
		 */
		if ((ip->i_d.di_mode == 0) && !(flags & XFS_IGET_CREATE)) {
			error = ENOENT;
			goto out_error;
		}

		xfs_itrace_exit_tag(ip, "xfs_iget.alloc");

		/*
		 * We need to re-initialise the VFS inode as it has been
		 * 'freed' by the VFS. Do this here so we can deal with
		 * errors cleanly, then tag it so it can be set up correctly
		 * later.
		 */
		if (!inode_init_always(mp->m_super, VFS_I(ip))) {
			error = ENOMEM;
			goto out_error;
		}

		/*
		 * We must set the XFS_INEW flag before clearing the
		 * XFS_IRECLAIMABLE flag so that if a racing lookup does
		 * not find the XFS_IRECLAIMABLE above but has the igrab()
		 * below succeed we can safely check XFS_INEW to detect
		 * that this inode is still being initialised.
		 */
		xfs_iflags_set(ip, XFS_INEW);
		xfs_iflags_clear(ip, XFS_IRECLAIMABLE);

		/* clear the radix tree reclaim flag as well. */
		__xfs_inode_clear_reclaim_tag(mp, pag, ip);
	} else if (!igrab(VFS_I(ip))) {
		/* If the VFS inode is being torn down, pause and try again. */
		XFS_STATS_INC(xs_ig_frecycle);
		goto out_error;
	} else if (xfs_iflags_test(ip, XFS_INEW)) {
		/*
		 * We are racing with another cache hit that is
		 * currently recycling this inode out of the XFS_IRECLAIMABLE
		 * state. Wait for the initialisation to complete before
		 * continuing.
		 */
		wait_on_inode(VFS_I(ip));
	}

	if (ip->i_d.di_mode == 0 && !(flags & XFS_IGET_CREATE)) {
		error = ENOENT;
		iput(VFS_I(ip));
		goto out_error;
	}

	/* We've got a live one. */
	read_unlock(&pag->pag_ici_lock);

	if (lock_flags != 0)
		xfs_ilock(ip, lock_flags);

	xfs_iflags_clear(ip, XFS_ISTALE);
	xfs_itrace_exit_tag(ip, "xfs_iget.found");
	XFS_STATS_INC(xs_ig_found);
	return 0;

out_error:
	read_unlock(&pag->pag_ici_lock);
	return error;
}


static int
xfs_iget_cache_miss(
	struct xfs_mount	*mp,
	struct xfs_perag	*pag,
	xfs_trans_t		*tp,
	xfs_ino_t		ino,
	struct xfs_inode	**ipp,
	xfs_daddr_t		bno,
	int			flags,
	int			lock_flags) __releases(pag->pag_ici_lock)
{
	struct xfs_inode	*ip;
	int			error;
	unsigned long		first_index, mask;
	xfs_agino_t		agino = XFS_INO_TO_AGINO(mp, ino);

	ip = xfs_inode_alloc(mp, ino);
	if (!ip)
		return ENOMEM;

	error = xfs_iread(mp, tp, ip, bno, flags);
	if (error)
		goto out_destroy;

	xfs_itrace_exit_tag(ip, "xfs_iget.alloc");

	if ((ip->i_d.di_mode == 0) && !(flags & XFS_IGET_CREATE)) {
		error = ENOENT;
		goto out_destroy;
	}

	/*
	 * Preload the radix tree so we can insert safely under the
	 * write spinlock. Note that we cannot sleep inside the preload
	 * region.
	 */
	if (radix_tree_preload(GFP_KERNEL)) {
		error = EAGAIN;
		goto out_destroy;
	}

	/*
	 * Because the inode hasn't been added to the radix-tree yet it can't
	 * be found by another thread, so we can do the non-sleeping lock here.
	 */
	if (lock_flags) {
		if (!xfs_ilock_nowait(ip, lock_flags))
			BUG();
	}

	mask = ~(((XFS_INODE_CLUSTER_SIZE(mp) >> mp->m_sb.sb_inodelog)) - 1);
	first_index = agino & mask;
	write_lock(&pag->pag_ici_lock);

	/* insert the new inode */
	error = radix_tree_insert(&pag->pag_ici_root, agino, ip);
	if (unlikely(error)) {
		WARN_ON(error != -EEXIST);
		XFS_STATS_INC(xs_ig_dup);
		error = EAGAIN;
		goto out_preload_end;
	}

	/* These values _must_ be set before releasing the radix tree lock! */
	ip->i_udquot = ip->i_gdquot = NULL;
	xfs_iflags_set(ip, XFS_INEW);

	write_unlock(&pag->pag_ici_lock);
	radix_tree_preload_end();
	*ipp = ip;
	return 0;

out_preload_end:
	write_unlock(&pag->pag_ici_lock);
	radix_tree_preload_end();
	if (lock_flags)
		xfs_iunlock(ip, lock_flags);
out_destroy:
	xfs_destroy_inode(ip);
	return error;
}

/*
 * Look up an inode by number in the given file system.
 * The inode is looked up in the cache held in each AG.
 * If the inode is found in the cache, initialise the vfs inode
 * if necessary.
 *
 * If it is not in core, read it in from the file system's device,
 * add it to the cache and initialise the vfs inode.
 *
 * The inode is locked according to the value of the lock_flags parameter.
 * This flag parameter indicates how and if the inode's IO lock and inode lock
 * should be taken.
 *
 * mp -- the mount point structure for the current file system.  It points
 *       to the inode hash table.
 * tp -- a pointer to the current transaction if there is one.  This is
 *       simply passed through to the xfs_iread() call.
 * ino -- the number of the inode desired.  This is the unique identifier
 *        within the file system for the inode being requested.
 * lock_flags -- flags indicating how to lock the inode.  See the comment
 *		 for xfs_ilock() for a list of valid values.
 * bno -- the block number starting the buffer containing the inode,
 *	  if known (as by bulkstat), else 0.
 */
int
xfs_iget(
	xfs_mount_t	*mp,
	xfs_trans_t	*tp,
	xfs_ino_t	ino,
	uint		flags,
	uint		lock_flags,
	xfs_inode_t	**ipp,
	xfs_daddr_t	bno)
{
	xfs_inode_t	*ip;
	int		error;
	xfs_perag_t	*pag;
	xfs_agino_t	agino;

	/* the radix tree exists only in inode capable AGs */
	if (XFS_INO_TO_AGNO(mp, ino) >= mp->m_maxagi)
		return EINVAL;

	/* get the perag structure and ensure that it's inode capable */
	pag = xfs_get_perag(mp, ino);
	if (!pag->pagi_inodeok)
		return EINVAL;
	ASSERT(pag->pag_ici_init);
	agino = XFS_INO_TO_AGINO(mp, ino);

again:
	error = 0;
	read_lock(&pag->pag_ici_lock);
	ip = radix_tree_lookup(&pag->pag_ici_root, agino);

	if (ip) {
		error = xfs_iget_cache_hit(pag, ip, flags, lock_flags);
		if (error)
			goto out_error_or_again;
	} else {
		read_unlock(&pag->pag_ici_lock);
		XFS_STATS_INC(xs_ig_missed);

		error = xfs_iget_cache_miss(mp, pag, tp, ino, &ip, bno,
							flags, lock_flags);
		if (error)
			goto out_error_or_again;
	}
	xfs_put_perag(mp, pag);

	*ipp = ip;

	ASSERT(ip->i_df.if_ext_max ==
	       XFS_IFORK_DSIZE(ip) / sizeof(xfs_bmbt_rec_t));

	/*
	 * If we have a real type for an on-disk inode, we can set ops(&unlock)
	 * now.  If it's a new inode being created, xfs_ialloc will handle it.
	 */
	if (xfs_iflags_test(ip, XFS_INEW) && ip->i_d.di_mode != 0)
		xfs_setup_inode(ip);
	return 0;

out_error_or_again:
	if (error == EAGAIN) {
		delay(1);
		goto again;
	}
	xfs_put_perag(mp, pag);
	return error;
}
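
/*
 * Example (illustrative sketch only; never compiled or called): the typical
 * lookup/release pattern for xfs_iget().  The caller gets back a referenced
 * inode locked with the requested lock_flags and gives both the lock and the
 * reference back with xfs_iput().  The function name below and the use of a
 * NULL transaction are made up for illustration.
 */
#if 0
STATIC int
xfs_iget_example(
	xfs_mount_t	*mp,
	xfs_ino_t	ino)
{
	xfs_inode_t	*ip;
	int		error;

	/* no transaction, no known inode buffer block (bno == 0) */
	error = xfs_iget(mp, NULL, ino, 0, XFS_ILOCK_SHARED, &ip, 0);
	if (error)
		return error;

	/* ... examine the inode under the shared inode lock ... */

	/* drops XFS_ILOCK_SHARED and the reference taken by xfs_iget() */
	xfs_iput(ip, XFS_ILOCK_SHARED);
	return 0;
}
#endif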


/*
 * Look for the inode corresponding to the given ino in the hash table.
 * If it is there and its i_transp pointer matches tp, return it.
 * Otherwise, return NULL.
 */
xfs_inode_t *
xfs_inode_incore(xfs_mount_t	*mp,
		 xfs_ino_t	ino,
		 xfs_trans_t	*tp)
{
	xfs_inode_t	*ip;
	xfs_perag_t	*pag;

	pag = xfs_get_perag(mp, ino);
	read_lock(&pag->pag_ici_lock);
	ip = radix_tree_lookup(&pag->pag_ici_root, XFS_INO_TO_AGINO(mp, ino));
	read_unlock(&pag->pag_ici_lock);
	xfs_put_perag(mp, pag);

	/* the returned inode must match the transaction */
	if (ip && (ip->i_transp != tp))
		return NULL;
	return ip;
}

/*
 * Decrement reference count of an inode structure and unlock it.
 *
 * ip -- the inode being released
 * lock_flags -- this parameter indicates the inode's locks to be
 *       released.  See the comment on xfs_iunlock() for a list
 *	 of valid values.
 */
void
xfs_iput(xfs_inode_t	*ip,
	 uint		lock_flags)
{
	xfs_itrace_entry(ip);
	xfs_iunlock(ip, lock_flags);
	IRELE(ip);
}

/*
 * Special iput for brand-new inodes that are still locked
 */
void
xfs_iput_new(
	xfs_inode_t	*ip,
	uint		lock_flags)
{
	struct inode	*inode = VFS_I(ip);

	xfs_itrace_entry(ip);

	if ((ip->i_d.di_mode == 0)) {
		ASSERT(!xfs_iflags_test(ip, XFS_IRECLAIMABLE));
		make_bad_inode(inode);
	}
	if (inode->i_state & I_NEW)
		unlock_new_inode(inode);
	if (lock_flags)
		xfs_iunlock(ip, lock_flags);
	IRELE(ip);
}

/*
 * This is called to free all the memory associated with an inode.
 * It must free the inode itself and any buffers allocated for
 * if_extents/if_data and if_broot.  It must also free the lock
 * associated with the inode.
 *
 * Note: because we don't initialise everything on reallocation out
 * of the zone, we must ensure we nullify everything correctly before
 * freeing the structure.
 */
void
xfs_ireclaim(
	struct xfs_inode	*ip)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_perag	*pag;

	XFS_STATS_INC(xs_ig_reclaims);

	/*
	 * Remove the inode from the per-AG radix tree.  It doesn't matter
	 * if it was never added to it because radix_tree_delete can deal
	 * with that case just fine.
	 */
	pag = xfs_get_perag(mp, ip->i_ino);
	write_lock(&pag->pag_ici_lock);
	radix_tree_delete(&pag->pag_ici_root, XFS_INO_TO_AGINO(mp, ip->i_ino));
	write_unlock(&pag->pag_ici_lock);
	xfs_put_perag(mp, pag);

	/*
	 * Here we do an (almost) spurious inode lock in order to coordinate
	 * with inode cache radix tree lookups.  This is because the lookup
	 * can reference the inodes in the cache without taking references.
	 *
	 * We make that OK here by ensuring that we wait until the inode is
	 * unlocked after the lookup before we go ahead and free it.  We get
	 * both the ilock and the iolock because the code may need to drop the
	 * ilock one but will still hold the iolock.
	 */
	xfs_ilock(ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
	xfs_qm_dqdetach(ip);
	xfs_iunlock(ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);

	switch (ip->i_d.di_mode & S_IFMT) {
	case S_IFREG:
	case S_IFDIR:
	case S_IFLNK:
		xfs_idestroy_fork(ip, XFS_DATA_FORK);
		break;
	}

	if (ip->i_afp)
		xfs_idestroy_fork(ip, XFS_ATTR_FORK);

#ifdef XFS_INODE_TRACE
	ktrace_free(ip->i_trace);
#endif
#ifdef XFS_BMAP_TRACE
	ktrace_free(ip->i_xtrace);
#endif
#ifdef XFS_BTREE_TRACE
	ktrace_free(ip->i_btrace);
#endif
#ifdef XFS_RW_TRACE
	ktrace_free(ip->i_rwtrace);
#endif
#ifdef XFS_ILOCK_TRACE
	ktrace_free(ip->i_lock_trace);
#endif
#ifdef XFS_DIR2_TRACE
	ktrace_free(ip->i_dir_trace);
#endif
	if (ip->i_itemp) {
		/*
		 * Only if we are shutting down the fs will we see an
		 * inode still in the AIL. If it is there, we should remove
		 * it to prevent a use-after-free from occurring.
		 */
		xfs_log_item_t	*lip = &ip->i_itemp->ili_item;
		struct xfs_ail	*ailp = lip->li_ailp;

		ASSERT(((lip->li_flags & XFS_LI_IN_AIL) == 0) ||
				       XFS_FORCED_SHUTDOWN(ip->i_mount));
		if (lip->li_flags & XFS_LI_IN_AIL) {
			spin_lock(&ailp->xa_lock);
			if (lip->li_flags & XFS_LI_IN_AIL)
				xfs_trans_ail_delete(ailp, lip);
			else
				spin_unlock(&ailp->xa_lock);
		}
		xfs_inode_item_destroy(ip);
		ip->i_itemp = NULL;
	}
	/* asserts to verify all state is correct here */
	ASSERT(atomic_read(&ip->i_iocount) == 0);
	ASSERT(atomic_read(&ip->i_pincount) == 0);
	ASSERT(!spin_is_locked(&ip->i_flags_lock));
	ASSERT(completion_done(&ip->i_flush));
	xfs_inode_clear_acls(ip);
	kmem_zone_free(xfs_inode_zone, ip);
}

/*
 * This is a wrapper routine around the xfs_ilock() routine
 * used to centralize some grungy code.  It is used in places
 * that wish to lock the inode solely for reading the extents.
 * The reason these places can't just call xfs_ilock(SHARED)
 * is that the inode lock also guards the bringing in of the
 * extents from disk for a file in b-tree format.  If the inode
 * is in b-tree format, then we need to lock the inode exclusively
 * until the extents are read in.  Locking it exclusively all
 * the time would limit our parallelism unnecessarily, though.
 * What we do instead is check to see if the extents have been
 * read in yet, and only lock the inode exclusively if they
 * have not.
 *
 * The function returns a value which should be given to the
 * corresponding xfs_iunlock_map_shared().  This value is
 * the mode in which the lock was actually taken.
 */
uint
xfs_ilock_map_shared(
	xfs_inode_t	*ip)
{
	uint	lock_mode;

	if ((ip->i_d.di_format == XFS_DINODE_FMT_BTREE) &&
	    ((ip->i_df.if_flags & XFS_IFEXTENTS) == 0)) {
		lock_mode = XFS_ILOCK_EXCL;
	} else {
		lock_mode = XFS_ILOCK_SHARED;
	}

	xfs_ilock(ip, lock_mode);

	return lock_mode;
}

/*
 * This is simply the unlock routine to go with xfs_ilock_map_shared().
 * All it does is call xfs_iunlock() with the given lock_mode.
 */
void
xfs_iunlock_map_shared(
	xfs_inode_t	*ip,
	unsigned int	lock_mode)
{
	xfs_iunlock(ip, lock_mode);
}
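
/*
 * Example (illustrative sketch only; never compiled or called): how the two
 * map-shared helpers above are meant to be paired.  The lock mode returned
 * by xfs_ilock_map_shared() must be handed back to xfs_iunlock_map_shared()
 * because the lock may have been taken exclusively if the extents had not
 * been read in yet.  The function name below is made up for illustration.
 */
#if 0
STATIC void
xfs_ilock_map_shared_example(
	xfs_inode_t	*ip)
{
	uint		lock_mode;

	lock_mode = xfs_ilock_map_shared(ip);
	/* ... walk the in-core extent list of ip here ... */
	xfs_iunlock_map_shared(ip, lock_mode);
}
#endif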

/*
 * The xfs inode contains 2 locks: a multi-reader lock called the
 * i_iolock and a multi-reader lock called the i_lock.  This routine
 * allows either or both of the locks to be obtained.
 *
 * The 2 locks should always be ordered so that the IO lock is
 * obtained first in order to prevent deadlock.
 *
 * ip -- the inode being locked
 * lock_flags -- this parameter indicates the inode's locks
 *       to be locked.  It can be:
 *		XFS_IOLOCK_SHARED,
 *		XFS_IOLOCK_EXCL,
 *		XFS_ILOCK_SHARED,
 *		XFS_ILOCK_EXCL,
 *		XFS_IOLOCK_SHARED | XFS_ILOCK_SHARED,
 *		XFS_IOLOCK_SHARED | XFS_ILOCK_EXCL,
 *		XFS_IOLOCK_EXCL | XFS_ILOCK_SHARED,
 *		XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL
 */
void
xfs_ilock(
	xfs_inode_t		*ip,
	uint			lock_flags)
{
	/*
	 * You can't set both SHARED and EXCL for the same lock,
	 * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_ILOCK_SHARED,
	 * and XFS_ILOCK_EXCL are valid values to set in lock_flags.
	 */
	ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
	       (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
	ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
	       (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
	ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_DEP_MASK)) == 0);

	if (lock_flags & XFS_IOLOCK_EXCL)
		mrupdate_nested(&ip->i_iolock, XFS_IOLOCK_DEP(lock_flags));
	else if (lock_flags & XFS_IOLOCK_SHARED)
		mraccess_nested(&ip->i_iolock, XFS_IOLOCK_DEP(lock_flags));

	if (lock_flags & XFS_ILOCK_EXCL)
		mrupdate_nested(&ip->i_lock, XFS_ILOCK_DEP(lock_flags));
	else if (lock_flags & XFS_ILOCK_SHARED)
		mraccess_nested(&ip->i_lock, XFS_ILOCK_DEP(lock_flags));

	xfs_ilock_trace(ip, 1, lock_flags, (inst_t *)__return_address);
}
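
/*
 * Example (illustrative sketch only; never compiled or called): taking and
 * dropping both inode locks with one of the flag combinations listed above.
 * The IO lock is always acquired before the inode lock, and xfs_iunlock()
 * takes the same flag combination.  The function name below is made up for
 * illustration.
 */
#if 0
STATIC void
xfs_ilock_example(
	xfs_inode_t	*ip)
{
	xfs_ilock(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);
	/* ... modify the inode and its data here ... */
	xfs_iunlock(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);
}
#endif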

/*
 * This is just like xfs_ilock(), except that the caller
 * is guaranteed not to sleep.  It returns 1 if it gets
 * the requested locks and 0 otherwise.  If the IO lock is
 * obtained but the inode lock cannot be, then the IO lock
 * is dropped before returning.
 *
 * ip -- the inode being locked
 * lock_flags -- this parameter indicates the inode's locks to be
 *       locked.  See the comment for xfs_ilock() for a list
 *	 of valid values.
 */
int
xfs_ilock_nowait(
	xfs_inode_t		*ip,
	uint			lock_flags)
{
	/*
	 * You can't set both SHARED and EXCL for the same lock,
	 * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_ILOCK_SHARED,
	 * and XFS_ILOCK_EXCL are valid values to set in lock_flags.
	 */
	ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
	       (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
	ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
	       (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
	ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_DEP_MASK)) == 0);

	if (lock_flags & XFS_IOLOCK_EXCL) {
		if (!mrtryupdate(&ip->i_iolock))
			goto out;
	} else if (lock_flags & XFS_IOLOCK_SHARED) {
		if (!mrtryaccess(&ip->i_iolock))
			goto out;
	}
	if (lock_flags & XFS_ILOCK_EXCL) {
		if (!mrtryupdate(&ip->i_lock))
			goto out_undo_iolock;
	} else if (lock_flags & XFS_ILOCK_SHARED) {
		if (!mrtryaccess(&ip->i_lock))
			goto out_undo_iolock;
	}
	xfs_ilock_trace(ip, 2, lock_flags, (inst_t *)__return_address);
	return 1;

 out_undo_iolock:
	if (lock_flags & XFS_IOLOCK_EXCL)
		mrunlock_excl(&ip->i_iolock);
	else if (lock_flags & XFS_IOLOCK_SHARED)
		mrunlock_shared(&ip->i_iolock);
 out:
	return 0;
}
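
/*
 * Example (illustrative sketch only; never compiled or called): the trylock
 * pattern served by xfs_ilock_nowait() for callers that must not sleep.  On
 * failure the caller is expected to back off and retry later.  The function
 * name and the EAGAIN return are made up for illustration.
 */
#if 0
STATIC int
xfs_ilock_nowait_example(
	xfs_inode_t	*ip)
{
	if (!xfs_ilock_nowait(ip, XFS_ILOCK_EXCL))
		return EAGAIN;
	/* ... do the work that needed the inode lock ... */
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return 0;
}
#endif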

/*
 * xfs_iunlock() is used to drop the inode locks acquired with
 * xfs_ilock() and xfs_ilock_nowait().  The caller must pass
 * in the flags given to xfs_ilock() or xfs_ilock_nowait() so
 * that we know which locks to drop.
 *
 * ip -- the inode being unlocked
 * lock_flags -- this parameter indicates the inode's locks to be
 *       unlocked.  See the comment for xfs_ilock() for a list
 *	 of valid values for this parameter.
 *
 */
void
xfs_iunlock(
	xfs_inode_t		*ip,
	uint			lock_flags)
{
	/*
	 * You can't set both SHARED and EXCL for the same lock,
	 * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_ILOCK_SHARED,
	 * and XFS_ILOCK_EXCL are valid values to set in lock_flags.
	 */
	ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
	       (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
	ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
	       (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
	ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_IUNLOCK_NONOTIFY |
			XFS_LOCK_DEP_MASK)) == 0);
	ASSERT(lock_flags != 0);

	if (lock_flags & XFS_IOLOCK_EXCL)
		mrunlock_excl(&ip->i_iolock);
	else if (lock_flags & XFS_IOLOCK_SHARED)
		mrunlock_shared(&ip->i_iolock);

	if (lock_flags & XFS_ILOCK_EXCL)
		mrunlock_excl(&ip->i_lock);
	else if (lock_flags & XFS_ILOCK_SHARED)
		mrunlock_shared(&ip->i_lock);

	if ((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) &&
	    !(lock_flags & XFS_IUNLOCK_NONOTIFY) && ip->i_itemp) {
		/*
		 * Let the AIL know that this item has been unlocked in case
		 * it is in the AIL and anyone is waiting on it.  Don't do
		 * this if the caller has asked us not to.
		 */
		xfs_trans_unlocked_item(ip->i_itemp->ili_item.li_ailp,
					(xfs_log_item_t*)(ip->i_itemp));
	}
	xfs_ilock_trace(ip, 3, lock_flags, (inst_t *)__return_address);
}

/*
 * give up write locks.  the i/o lock cannot be held nested
 * if it is being demoted.
 */
void
xfs_ilock_demote(
	xfs_inode_t		*ip,
	uint			lock_flags)
{
	ASSERT(lock_flags & (XFS_IOLOCK_EXCL|XFS_ILOCK_EXCL));
	ASSERT((lock_flags & ~(XFS_IOLOCK_EXCL|XFS_ILOCK_EXCL)) == 0);

	if (lock_flags & XFS_ILOCK_EXCL)
		mrdemote(&ip->i_lock);
	if (lock_flags & XFS_IOLOCK_EXCL)
		mrdemote(&ip->i_iolock);
}

#ifdef DEBUG
/*
 * Debug-only routine: without additional rw_semaphore APIs, we can
 * now only answer requests regarding whether we hold the lock for write
 * (reader state is outside our visibility, we only track writer state).
 *
 * Note: this means !xfs_isilocked would give false positives, so don't do that.
 */
int
xfs_isilocked(
	xfs_inode_t		*ip,
	uint			lock_flags)
{
	if ((lock_flags & (XFS_ILOCK_EXCL|XFS_ILOCK_SHARED)) ==
			XFS_ILOCK_EXCL) {
		if (!ip->i_lock.mr_writer)
			return 0;
	}

	if ((lock_flags & (XFS_IOLOCK_EXCL|XFS_IOLOCK_SHARED)) ==
			XFS_IOLOCK_EXCL) {
		if (!ip->i_iolock.mr_writer)
			return 0;
	}

	return 1;
L
}
#endif
818 819 820 821 822 823 824 825 826 827 828 829 830 831 832 833 834 835 836 837 838 839 840 841 842 843 844 845 846 847 848 849 850 851 852 853 854 855 856 857 858 859 860 861 862 863 864 865
#ifdef	XFS_INODE_TRACE

#define KTRACE_ENTER(ip, vk, s, line, ra)			\
	ktrace_enter((ip)->i_trace,				\
/*  0 */		(void *)(__psint_t)(vk),		\
/*  1 */		(void *)(s),				\
/*  2 */		(void *)(__psint_t) line,		\
/*  3 */		(void *)(__psint_t)atomic_read(&VFS_I(ip)->i_count), \
/*  4 */		(void *)(ra),				\
/*  5 */		NULL,					\
/*  6 */		(void *)(__psint_t)current_cpu(),	\
/*  7 */		(void *)(__psint_t)current_pid(),	\
/*  8 */		(void *)__return_address,		\
/*  9 */		NULL, NULL, NULL, NULL, NULL, NULL, NULL)

/*
 * Vnode tracing code.
 */
void
_xfs_itrace_entry(xfs_inode_t *ip, const char *func, inst_t *ra)
{
	KTRACE_ENTER(ip, INODE_KTRACE_ENTRY, func, 0, ra);
}

void
_xfs_itrace_exit(xfs_inode_t *ip, const char *func, inst_t *ra)
{
	KTRACE_ENTER(ip, INODE_KTRACE_EXIT, func, 0, ra);
}

void
xfs_itrace_hold(xfs_inode_t *ip, char *file, int line, inst_t *ra)
{
	KTRACE_ENTER(ip, INODE_KTRACE_HOLD, file, line, ra);
}

void
_xfs_itrace_ref(xfs_inode_t *ip, char *file, int line, inst_t *ra)
{
	KTRACE_ENTER(ip, INODE_KTRACE_REF, file, line, ra);
}

void
xfs_itrace_rele(xfs_inode_t *ip, char *file, int line, inst_t *ra)
{
	KTRACE_ENTER(ip, INODE_KTRACE_RELE, file, line, ra);
}
#endif	/* XFS_INODE_TRACE */