// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include <linux/iversion.h>

#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_inode.h"
#include "xfs_dir2.h"
#include "xfs_attr.h"
#include "xfs_trans_space.h"
#include "xfs_trans.h"
#include "xfs_buf_item.h"
#include "xfs_inode_item.h"
#include "xfs_ialloc.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_errortag.h"
#include "xfs_error.h"
#include "xfs_quota.h"
#include "xfs_filestream.h"
#include "xfs_trace.h"
#include "xfs_icache.h"
#include "xfs_symlink.h"
#include "xfs_trans_priv.h"
#include "xfs_log.h"
#include "xfs_bmap_btree.h"
#include "xfs_reflink.h"
#include "xfs_ag.h"

kmem_zone_t *xfs_inode_zone;

/*
 * Used in xfs_itruncate_extents().  This is the maximum number of extents
 * freed from a file in a single transaction.
 */
#define	XFS_ITRUNC_MAX_EXTENTS	2

STATIC int xfs_iunlink(struct xfs_trans *, struct xfs_inode *);
STATIC int xfs_iunlink_remove(struct xfs_trans *, struct xfs_inode *);

/*
 * Helper function to extract the extent size hint from an inode.
 */
xfs_extlen_t
xfs_get_extsz_hint(
	struct xfs_inode	*ip)
{
	/*
	 * No point in aligning allocations if we need to COW to actually
	 * write to them.
	 */
	if (xfs_is_always_cow_inode(ip))
		return 0;
	if ((ip->i_diflags & XFS_DIFLAG_EXTSIZE) && ip->i_extsize)
		return ip->i_extsize;
	if (XFS_IS_REALTIME_INODE(ip))
		return ip->i_mount->m_sb.sb_rextsize;
	return 0;
}

/*
 * Helper function to extract the CoW extent size hint from an inode.
 * Between the extent size hint and the CoW extent size hint, we
 * return the greater of the two.  If the value is zero (automatic),
 * use the default size.
 */
xfs_extlen_t
xfs_get_cowextsz_hint(
	struct xfs_inode	*ip)
{
	xfs_extlen_t		a, b;

	a = 0;
	if (ip->i_diflags2 & XFS_DIFLAG2_COWEXTSIZE)
		a = ip->i_cowextsize;
	b = xfs_get_extsz_hint(ip);

	a = max(a, b);
	if (a == 0)
		return XFS_DEFAULT_COWEXTSZ_HINT;
	return a;
}
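
/*
 * For example (illustrative note, not from the original source): with no
 * XFS_DIFLAG2_COWEXTSIZE hint set and no regular extent size hint, both
 * a and b above are zero, so the hint falls back to
 * XFS_DEFAULT_COWEXTSZ_HINT and CoW allocations are still batched into
 * reasonably sized chunks rather than done block by block.
 */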

/*
 * These two are wrapper routines around the xfs_ilock() routine used to
 * centralize some grungy code.  They are used in places that wish to lock the
 * inode solely for reading the extents.  The reason these places can't just
 * call xfs_ilock(ip, XFS_ILOCK_SHARED) is that the inode lock also guards the
 * bringing in of the extents from disk for a file in b-tree format.  If the
 * inode is in b-tree format, then we need to lock the inode exclusively until
 * the extents are read in.  Locking it exclusively all the time would limit
 * our parallelism unnecessarily, though.  What we do instead is check to see
 * if the extents have been read in yet, and only lock the inode exclusively
 * if they have not.
 *
 * The functions return a value which should be given to the corresponding
 * xfs_iunlock() call.
 */
uint
xfs_ilock_data_map_shared(
	struct xfs_inode	*ip)
{
	uint			lock_mode = XFS_ILOCK_SHARED;

	if (xfs_need_iread_extents(&ip->i_df))
		lock_mode = XFS_ILOCK_EXCL;
	xfs_ilock(ip, lock_mode);
	return lock_mode;
}

uint
xfs_ilock_attr_map_shared(
	struct xfs_inode	*ip)
{
	uint			lock_mode = XFS_ILOCK_SHARED;

	if (ip->i_afp && xfs_need_iread_extents(ip->i_afp))
		lock_mode = XFS_ILOCK_EXCL;
	xfs_ilock(ip, lock_mode);
	return lock_mode;
}
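
/*
 * Illustrative pairing (hypothetical caller, not part of the original
 * file): the lock mode these helpers return must be handed back to
 * xfs_iunlock() so that the correct lock is dropped:
 *
 *	uint lock_mode = xfs_ilock_data_map_shared(ip);
 *	... walk the data fork extent list ...
 *	xfs_iunlock(ip, lock_mode);
 */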

/*
 * In addition to i_rwsem in the VFS inode, the xfs inode contains 2
 * multi-reader locks: i_mmap_lock and the i_lock.  This routine allows
 * various combinations of the locks to be obtained.
 *
 * The 3 locks should always be ordered so that the IO lock is obtained first,
 * the mmap lock second and the ilock last in order to prevent deadlock.
 *
 * Basic locking order:
 *
 * i_rwsem -> i_mmap_lock -> page_lock -> i_ilock
 *
 * mmap_lock locking order:
 *
 * i_rwsem -> page lock -> mmap_lock
 * mmap_lock -> i_mmap_lock -> page_lock
 *
 * The difference in mmap_lock locking order means that we cannot hold the
 * i_mmap_lock over syscall based read(2)/write(2) based IO. These IO paths can
 * fault in pages during copy in/out (for buffered IO) or require the mmap_lock
 * in get_user_pages() to map the user pages into the kernel address space for
 * direct IO. Similarly the i_rwsem cannot be taken inside a page fault because
 * page faults already hold the mmap_lock.
 *
 * Hence to serialise fully against both syscall and mmap based IO, we need to
 * take both the i_rwsem and the i_mmap_lock. These locks should *only* be both
 * taken in places where we need to invalidate the page cache in a
 * race-free manner (e.g. truncate, hole punch and other extent
 * manipulation functions).
 */
void
xfs_ilock(
	xfs_inode_t		*ip,
	uint			lock_flags)
{
	trace_xfs_ilock(ip, lock_flags, _RET_IP_);

	/*
	 * You can't set both SHARED and EXCL for the same lock,
	 * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_ILOCK_SHARED,
	 * and XFS_ILOCK_EXCL are valid values to set in lock_flags.
	 */
	ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
	       (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
	ASSERT((lock_flags & (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL)) !=
	       (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL));
	ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
	       (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
	ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_SUBCLASS_MASK)) == 0);

	if (lock_flags & XFS_IOLOCK_EXCL) {
		down_write_nested(&VFS_I(ip)->i_rwsem,
				  XFS_IOLOCK_DEP(lock_flags));
	} else if (lock_flags & XFS_IOLOCK_SHARED) {
		down_read_nested(&VFS_I(ip)->i_rwsem,
				 XFS_IOLOCK_DEP(lock_flags));
	}

	if (lock_flags & XFS_MMAPLOCK_EXCL)
		mrupdate_nested(&ip->i_mmaplock, XFS_MMAPLOCK_DEP(lock_flags));
	else if (lock_flags & XFS_MMAPLOCK_SHARED)
		mraccess_nested(&ip->i_mmaplock, XFS_MMAPLOCK_DEP(lock_flags));

	if (lock_flags & XFS_ILOCK_EXCL)
		mrupdate_nested(&ip->i_lock, XFS_ILOCK_DEP(lock_flags));
	else if (lock_flags & XFS_ILOCK_SHARED)
		mraccess_nested(&ip->i_lock, XFS_ILOCK_DEP(lock_flags));
}
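
/*
 * Example (illustrative only, assuming a typical truncate-style caller):
 * serialising fully against both syscall and mmap based IO means taking
 * both the IO lock and the mmap lock, in that order, before the ilock:
 *
 *	xfs_ilock(ip, XFS_IOLOCK_EXCL);
 *	xfs_ilock(ip, XFS_MMAPLOCK_EXCL);
 *	... invalidate the page cache and manipulate extents ...
 *	xfs_iunlock(ip, XFS_MMAPLOCK_EXCL);
 *	xfs_iunlock(ip, XFS_IOLOCK_EXCL);
 */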

/*
 * This is just like xfs_ilock(), except that the caller
 * is guaranteed not to sleep.  It returns 1 if it gets
 * the requested locks and 0 otherwise.  If the IO lock is
 * obtained but the inode lock cannot be, then the IO lock
 * is dropped before returning.
 *
 * ip -- the inode being locked
 * lock_flags -- this parameter indicates the inode's locks to be
 *	 locked.  See the comment for xfs_ilock() for a list
 *	 of valid values.
 */
int
xfs_ilock_nowait(
	xfs_inode_t		*ip,
	uint			lock_flags)
{
	trace_xfs_ilock_nowait(ip, lock_flags, _RET_IP_);

	/*
	 * You can't set both SHARED and EXCL for the same lock,
	 * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_ILOCK_SHARED,
	 * and XFS_ILOCK_EXCL are valid values to set in lock_flags.
	 */
	ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
	       (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
	ASSERT((lock_flags & (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL)) !=
	       (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL));
	ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
	       (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
	ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_SUBCLASS_MASK)) == 0);

	if (lock_flags & XFS_IOLOCK_EXCL) {
		if (!down_write_trylock(&VFS_I(ip)->i_rwsem))
			goto out;
	} else if (lock_flags & XFS_IOLOCK_SHARED) {
		if (!down_read_trylock(&VFS_I(ip)->i_rwsem))
			goto out;
	}

	if (lock_flags & XFS_MMAPLOCK_EXCL) {
		if (!mrtryupdate(&ip->i_mmaplock))
			goto out_undo_iolock;
	} else if (lock_flags & XFS_MMAPLOCK_SHARED) {
		if (!mrtryaccess(&ip->i_mmaplock))
			goto out_undo_iolock;
	}

	if (lock_flags & XFS_ILOCK_EXCL) {
		if (!mrtryupdate(&ip->i_lock))
			goto out_undo_mmaplock;
	} else if (lock_flags & XFS_ILOCK_SHARED) {
		if (!mrtryaccess(&ip->i_lock))
			goto out_undo_mmaplock;
	}
	return 1;

out_undo_mmaplock:
	if (lock_flags & XFS_MMAPLOCK_EXCL)
		mrunlock_excl(&ip->i_mmaplock);
	else if (lock_flags & XFS_MMAPLOCK_SHARED)
		mrunlock_shared(&ip->i_mmaplock);
out_undo_iolock:
	if (lock_flags & XFS_IOLOCK_EXCL)
		up_write(&VFS_I(ip)->i_rwsem);
	else if (lock_flags & XFS_IOLOCK_SHARED)
		up_read(&VFS_I(ip)->i_rwsem);
out:
	return 0;
}

/*
 * xfs_iunlock() is used to drop the inode locks acquired with
 * xfs_ilock() and xfs_ilock_nowait().  The caller must pass
 * in the flags given to xfs_ilock() or xfs_ilock_nowait() so
 * that we know which locks to drop.
 *
 * ip -- the inode being unlocked
 * lock_flags -- this parameter indicates the inode's locks to be
 *	 unlocked.  See the comment for xfs_ilock() for a list
 *	 of valid values for this parameter.
 */
void
xfs_iunlock(
	xfs_inode_t		*ip,
	uint			lock_flags)
{
	/*
	 * You can't set both SHARED and EXCL for the same lock,
	 * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_ILOCK_SHARED,
	 * and XFS_ILOCK_EXCL are valid values to set in lock_flags.
	 */
	ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
	       (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
	ASSERT((lock_flags & (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL)) !=
	       (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL));
	ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
	       (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
	ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_SUBCLASS_MASK)) == 0);
	ASSERT(lock_flags != 0);

	if (lock_flags & XFS_IOLOCK_EXCL)
		up_write(&VFS_I(ip)->i_rwsem);
	else if (lock_flags & XFS_IOLOCK_SHARED)
		up_read(&VFS_I(ip)->i_rwsem);

	if (lock_flags & XFS_MMAPLOCK_EXCL)
		mrunlock_excl(&ip->i_mmaplock);
	else if (lock_flags & XFS_MMAPLOCK_SHARED)
		mrunlock_shared(&ip->i_mmaplock);

	if (lock_flags & XFS_ILOCK_EXCL)
		mrunlock_excl(&ip->i_lock);
	else if (lock_flags & XFS_ILOCK_SHARED)
		mrunlock_shared(&ip->i_lock);

	trace_xfs_iunlock(ip, lock_flags, _RET_IP_);
}

/*
 * Give up write locks.  The i/o lock cannot be held nested
 * if it is being demoted.
 */
void
xfs_ilock_demote(
	xfs_inode_t		*ip,
	uint			lock_flags)
{
	ASSERT(lock_flags & (XFS_IOLOCK_EXCL|XFS_MMAPLOCK_EXCL|XFS_ILOCK_EXCL));
	ASSERT((lock_flags &
		~(XFS_IOLOCK_EXCL|XFS_MMAPLOCK_EXCL|XFS_ILOCK_EXCL)) == 0);

	if (lock_flags & XFS_ILOCK_EXCL)
		mrdemote(&ip->i_lock);
	if (lock_flags & XFS_MMAPLOCK_EXCL)
		mrdemote(&ip->i_mmaplock);
	if (lock_flags & XFS_IOLOCK_EXCL)
		downgrade_write(&VFS_I(ip)->i_rwsem);

	trace_xfs_ilock_demote(ip, lock_flags, _RET_IP_);
}

#if defined(DEBUG) || defined(XFS_WARN)
int
xfs_isilocked(
	xfs_inode_t		*ip,
	uint			lock_flags)
{
	if (lock_flags & (XFS_ILOCK_EXCL|XFS_ILOCK_SHARED)) {
		if (!(lock_flags & XFS_ILOCK_SHARED))
			return !!ip->i_lock.mr_writer;
		return rwsem_is_locked(&ip->i_lock.mr_lock);
	}

	if (lock_flags & (XFS_MMAPLOCK_EXCL|XFS_MMAPLOCK_SHARED)) {
		if (!(lock_flags & XFS_MMAPLOCK_SHARED))
			return !!ip->i_mmaplock.mr_writer;
		return rwsem_is_locked(&ip->i_mmaplock.mr_lock);
	}

	if (lock_flags & (XFS_IOLOCK_EXCL|XFS_IOLOCK_SHARED)) {
		if (!(lock_flags & XFS_IOLOCK_SHARED))
			return !debug_locks ||
				lockdep_is_held_type(&VFS_I(ip)->i_rwsem, 0);
		return rwsem_is_locked(&VFS_I(ip)->i_rwsem);
	}

	ASSERT(0);
	return 0;
}
#endif

/*
 * xfs_lockdep_subclass_ok() is only used in an ASSERT, so is only called when
 * DEBUG or XFS_WARN is set. And MAX_LOCKDEP_SUBCLASSES is then only defined
 * when CONFIG_LOCKDEP is set. Hence the complex define below to avoid build
 * errors and warnings.
 */
#if (defined(DEBUG) || defined(XFS_WARN)) && defined(CONFIG_LOCKDEP)
static bool
xfs_lockdep_subclass_ok(
	int subclass)
{
	return subclass < MAX_LOCKDEP_SUBCLASSES;
}
#else
#define xfs_lockdep_subclass_ok(subclass)	(true)
#endif

/*
 * Bump the subclass so xfs_lock_inodes() acquires each lock with a different
 * value. This can be called for any type of inode lock combination, including
 * parent locking. Care must be taken to ensure we don't overrun the subclass
 * storage fields in the class mask we build.
 */
static inline int
xfs_lock_inumorder(int lock_mode, int subclass)
{
	int	class = 0;

	ASSERT(!(lock_mode & (XFS_ILOCK_PARENT | XFS_ILOCK_RTBITMAP |
			      XFS_ILOCK_RTSUM)));
	ASSERT(xfs_lockdep_subclass_ok(subclass));

	if (lock_mode & (XFS_IOLOCK_SHARED|XFS_IOLOCK_EXCL)) {
		ASSERT(subclass <= XFS_IOLOCK_MAX_SUBCLASS);
		class += subclass << XFS_IOLOCK_SHIFT;
	}

	if (lock_mode & (XFS_MMAPLOCK_SHARED|XFS_MMAPLOCK_EXCL)) {
		ASSERT(subclass <= XFS_MMAPLOCK_MAX_SUBCLASS);
		class += subclass << XFS_MMAPLOCK_SHIFT;
	}

	if (lock_mode & (XFS_ILOCK_SHARED|XFS_ILOCK_EXCL)) {
		ASSERT(subclass <= XFS_ILOCK_MAX_SUBCLASS);
		class += subclass << XFS_ILOCK_SHIFT;
	}

	return (lock_mode & ~XFS_LOCK_SUBCLASS_MASK) | class;
}
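
/*
 * For illustration (not part of the original file): locking three inodes
 * with XFS_ILOCK_EXCL yields subclasses 0, 1 and 2 shifted into the
 * XFS_ILOCK_SHIFT bits of the lock mode, so lockdep sees each ilock in
 * the chain as a distinct class and does not flag the nesting:
 *
 *	xfs_ilock(ips[0], xfs_lock_inumorder(XFS_ILOCK_EXCL, 0));
 *	xfs_ilock(ips[1], xfs_lock_inumorder(XFS_ILOCK_EXCL, 1));
 *	xfs_ilock(ips[2], xfs_lock_inumorder(XFS_ILOCK_EXCL, 2));
 */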

/*
 * The following routine will lock n inodes in exclusive mode.  We assume the
 * caller calls us with the inodes in i_ino order.
 *
 * We need to detect deadlock where an inode that we lock is in the AIL and we
 * start waiting for another inode that is locked by a thread in a long running
 * transaction (such as truncate). This can result in deadlock since the long
 * running trans might need to wait for the inode we just locked in order to
 * push the tail and free space in the log.
 *
 * xfs_lock_inodes() can only be used to lock one type of lock at a time -
 * the iolock, the mmaplock or the ilock, but not more than one at a time. If we
 * lock more than one at a time, lockdep will report false positives saying we
 * have violated locking orders.
 */
static void
xfs_lock_inodes(
	struct xfs_inode	**ips,
	int			inodes,
	uint			lock_mode)
{
	int			attempts = 0, i, j, try_lock;
	struct xfs_log_item	*lp;

	/*
	 * Currently supports between 2 and 5 inodes with exclusive locking.  We
	 * support an arbitrary depth of locking here, but absolute limits on
	 * inodes depend on the type of locking and the limits placed by
	 * lockdep annotations in xfs_lock_inumorder.  These are all checked by
	 * the asserts.
	 */
	ASSERT(ips && inodes >= 2 && inodes <= 5);
	ASSERT(lock_mode & (XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL |
			    XFS_ILOCK_EXCL));
	ASSERT(!(lock_mode & (XFS_IOLOCK_SHARED | XFS_MMAPLOCK_SHARED |
			      XFS_ILOCK_SHARED)));
	ASSERT(!(lock_mode & XFS_MMAPLOCK_EXCL) ||
		inodes <= XFS_MMAPLOCK_MAX_SUBCLASS + 1);
	ASSERT(!(lock_mode & XFS_ILOCK_EXCL) ||
		inodes <= XFS_ILOCK_MAX_SUBCLASS + 1);

	if (lock_mode & XFS_IOLOCK_EXCL) {
		ASSERT(!(lock_mode & (XFS_MMAPLOCK_EXCL | XFS_ILOCK_EXCL)));
	} else if (lock_mode & XFS_MMAPLOCK_EXCL)
		ASSERT(!(lock_mode & XFS_ILOCK_EXCL));

	try_lock = 0;
	i = 0;
again:
	for (; i < inodes; i++) {
		ASSERT(ips[i]);

		if (i && (ips[i] == ips[i - 1]))	/* Already locked */
			continue;

		/*
		 * If try_lock is not set yet, make sure all locked inodes are
		 * not in the AIL.  If any are, set try_lock to be used later.
		 */
		if (!try_lock) {
			for (j = (i - 1); j >= 0 && !try_lock; j--) {
				lp = &ips[j]->i_itemp->ili_item;
				if (lp && test_bit(XFS_LI_IN_AIL, &lp->li_flags))
					try_lock++;
			}
		}

		/*
		 * If any of the previous locks we have locked is in the AIL,
		 * we must TRY to get the second and subsequent locks. If
		 * we can't get any, we must release all we have
		 * and try again.
		 */
		if (!try_lock) {
			xfs_ilock(ips[i], xfs_lock_inumorder(lock_mode, i));
			continue;
		}

		/* try_lock means we have an inode locked that is in the AIL. */
		ASSERT(i != 0);
		if (xfs_ilock_nowait(ips[i], xfs_lock_inumorder(lock_mode, i)))
			continue;

		/*
		 * Unlock all previous guys and try again.  xfs_iunlock will try
		 * to push the tail if the inode is in the AIL.
		 */
		attempts++;
		for (j = i - 1; j >= 0; j--) {
			/*
			 * Check to see if we've already unlocked this one.  Not
			 * the first one going back, and the inode ptr is the
			 * same.
			 */
			if (j != (i - 1) && ips[j] == ips[j + 1])
				continue;

			xfs_iunlock(ips[j], lock_mode);
		}

		if ((attempts % 5) == 0) {
			delay(1); /* Don't just spin the CPU */
		}
		i = 0;
		try_lock = 0;
		goto again;
	}
}

/*
 * xfs_lock_two_inodes() can only be used to lock one type of lock at a time -
 * the mmaplock or the ilock, but not more than one type at a time. If we lock
 * more than one at a time, lockdep will report false positives saying we have
 * violated locking orders.  The iolock must be double-locked separately since
 * we use i_rwsem for that.  We now support taking one lock EXCL and the other
 * SHARED.
 */
void
xfs_lock_two_inodes(
	struct xfs_inode	*ip0,
	uint			ip0_mode,
	struct xfs_inode	*ip1,
	uint			ip1_mode)
{
	struct xfs_inode	*temp;
	uint			mode_temp;
	int			attempts = 0;
	struct xfs_log_item	*lp;

	ASSERT(hweight32(ip0_mode) == 1);
	ASSERT(hweight32(ip1_mode) == 1);
	ASSERT(!(ip0_mode & (XFS_IOLOCK_SHARED|XFS_IOLOCK_EXCL)));
	ASSERT(!(ip1_mode & (XFS_IOLOCK_SHARED|XFS_IOLOCK_EXCL)));
	ASSERT(!(ip0_mode & (XFS_MMAPLOCK_SHARED|XFS_MMAPLOCK_EXCL)) ||
	       !(ip0_mode & (XFS_ILOCK_SHARED|XFS_ILOCK_EXCL)));
	ASSERT(!(ip1_mode & (XFS_MMAPLOCK_SHARED|XFS_MMAPLOCK_EXCL)) ||
	       !(ip1_mode & (XFS_ILOCK_SHARED|XFS_ILOCK_EXCL)));
	ASSERT(!(ip1_mode & (XFS_MMAPLOCK_SHARED|XFS_MMAPLOCK_EXCL)) ||
	       !(ip0_mode & (XFS_ILOCK_SHARED|XFS_ILOCK_EXCL)));
	ASSERT(!(ip0_mode & (XFS_MMAPLOCK_SHARED|XFS_MMAPLOCK_EXCL)) ||
	       !(ip1_mode & (XFS_ILOCK_SHARED|XFS_ILOCK_EXCL)));

	ASSERT(ip0->i_ino != ip1->i_ino);

	if (ip0->i_ino > ip1->i_ino) {
		temp = ip0;
		ip0 = ip1;
		ip1 = temp;
		mode_temp = ip0_mode;
		ip0_mode = ip1_mode;
		ip1_mode = mode_temp;
	}

 again:
	xfs_ilock(ip0, xfs_lock_inumorder(ip0_mode, 0));

	/*
	 * If the first lock we have locked is in the AIL, we must TRY to get
	 * the second lock. If we can't get it, we must release the first one
	 * and try again.
	 */
	lp = &ip0->i_itemp->ili_item;
	if (lp && test_bit(XFS_LI_IN_AIL, &lp->li_flags)) {
		if (!xfs_ilock_nowait(ip1, xfs_lock_inumorder(ip1_mode, 1))) {
			xfs_iunlock(ip0, ip0_mode);
			if ((++attempts % 5) == 0)
				delay(1); /* Don't just spin the CPU */
			goto again;
		}
	} else {
		xfs_ilock(ip1, xfs_lock_inumorder(ip1_mode, 1));
	}
}
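
/*
 * Typical use (illustrative): callers such as link and rename style
 * operations lock both inodes exclusively before joining them to a
 * transaction, as xfs_link() below does:
 *
 *	xfs_lock_two_inodes(sip, XFS_ILOCK_EXCL, tdp, XFS_ILOCK_EXCL);
 */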

uint
xfs_ip2xflags(
	struct xfs_inode	*ip)
{
	uint			flags = 0;

	if (ip->i_diflags & XFS_DIFLAG_ANY) {
		if (ip->i_diflags & XFS_DIFLAG_REALTIME)
			flags |= FS_XFLAG_REALTIME;
		if (ip->i_diflags & XFS_DIFLAG_PREALLOC)
			flags |= FS_XFLAG_PREALLOC;
		if (ip->i_diflags & XFS_DIFLAG_IMMUTABLE)
			flags |= FS_XFLAG_IMMUTABLE;
		if (ip->i_diflags & XFS_DIFLAG_APPEND)
			flags |= FS_XFLAG_APPEND;
		if (ip->i_diflags & XFS_DIFLAG_SYNC)
			flags |= FS_XFLAG_SYNC;
		if (ip->i_diflags & XFS_DIFLAG_NOATIME)
			flags |= FS_XFLAG_NOATIME;
		if (ip->i_diflags & XFS_DIFLAG_NODUMP)
			flags |= FS_XFLAG_NODUMP;
		if (ip->i_diflags & XFS_DIFLAG_RTINHERIT)
			flags |= FS_XFLAG_RTINHERIT;
		if (ip->i_diflags & XFS_DIFLAG_PROJINHERIT)
			flags |= FS_XFLAG_PROJINHERIT;
		if (ip->i_diflags & XFS_DIFLAG_NOSYMLINKS)
			flags |= FS_XFLAG_NOSYMLINKS;
		if (ip->i_diflags & XFS_DIFLAG_EXTSIZE)
			flags |= FS_XFLAG_EXTSIZE;
		if (ip->i_diflags & XFS_DIFLAG_EXTSZINHERIT)
			flags |= FS_XFLAG_EXTSZINHERIT;
		if (ip->i_diflags & XFS_DIFLAG_NODEFRAG)
			flags |= FS_XFLAG_NODEFRAG;
		if (ip->i_diflags & XFS_DIFLAG_FILESTREAM)
			flags |= FS_XFLAG_FILESTREAM;
	}

	if (ip->i_diflags2 & XFS_DIFLAG2_ANY) {
		if (ip->i_diflags2 & XFS_DIFLAG2_DAX)
			flags |= FS_XFLAG_DAX;
		if (ip->i_diflags2 & XFS_DIFLAG2_COWEXTSIZE)
			flags |= FS_XFLAG_COWEXTSIZE;
	}

	if (XFS_IFORK_Q(ip))
		flags |= FS_XFLAG_HASATTR;
	return flags;
}

/*
 * Looks up an inode from "name". If ci_name is not NULL, then a CI match
 * is allowed, otherwise it has to be an exact match. If a CI match is found,
 * ci_name->name will point to the actual name (caller must free) or
 * will be set to NULL if an exact match is found.
 */
int
xfs_lookup(
	xfs_inode_t		*dp,
	struct xfs_name		*name,
	xfs_inode_t		**ipp,
	struct xfs_name		*ci_name)
{
	xfs_ino_t		inum;
	int			error;

	trace_xfs_lookup(dp, name);

	if (XFS_FORCED_SHUTDOWN(dp->i_mount))
		return -EIO;

	error = xfs_dir_lookup(NULL, dp, name, &inum, ci_name);
	if (error)
		goto out_unlock;

	error = xfs_iget(dp->i_mount, NULL, inum, 0, 0, ipp);
	if (error)
		goto out_free_name;

	return 0;

out_free_name:
	if (ci_name)
		kmem_free(ci_name->name);
out_unlock:
	*ipp = NULL;
	return error;
}

/* Propagate di_flags from a parent inode to a child inode. */
static void
xfs_inode_inherit_flags(
	struct xfs_inode	*ip,
	const struct xfs_inode	*pip)
{
	unsigned int		di_flags = 0;
	umode_t			mode = VFS_I(ip)->i_mode;

	if (S_ISDIR(mode)) {
		if (pip->i_diflags & XFS_DIFLAG_RTINHERIT)
			di_flags |= XFS_DIFLAG_RTINHERIT;
		if (pip->i_diflags & XFS_DIFLAG_EXTSZINHERIT) {
			di_flags |= XFS_DIFLAG_EXTSZINHERIT;
			ip->i_extsize = pip->i_extsize;
		}
		if (pip->i_diflags & XFS_DIFLAG_PROJINHERIT)
			di_flags |= XFS_DIFLAG_PROJINHERIT;
	} else if (S_ISREG(mode)) {
		if ((pip->i_diflags & XFS_DIFLAG_RTINHERIT) &&
		    xfs_sb_version_hasrealtime(&ip->i_mount->m_sb))
			di_flags |= XFS_DIFLAG_REALTIME;
		if (pip->i_diflags & XFS_DIFLAG_EXTSZINHERIT) {
			di_flags |= XFS_DIFLAG_EXTSIZE;
			ip->i_extsize = pip->i_extsize;
		}
	}
	if ((pip->i_diflags & XFS_DIFLAG_NOATIME) &&
	    xfs_inherit_noatime)
		di_flags |= XFS_DIFLAG_NOATIME;
	if ((pip->i_diflags & XFS_DIFLAG_NODUMP) &&
	    xfs_inherit_nodump)
		di_flags |= XFS_DIFLAG_NODUMP;
	if ((pip->i_diflags & XFS_DIFLAG_SYNC) &&
	    xfs_inherit_sync)
		di_flags |= XFS_DIFLAG_SYNC;
	if ((pip->i_diflags & XFS_DIFLAG_NOSYMLINKS) &&
	    xfs_inherit_nosymlinks)
		di_flags |= XFS_DIFLAG_NOSYMLINKS;
	if ((pip->i_diflags & XFS_DIFLAG_NODEFRAG) &&
	    xfs_inherit_nodefrag)
		di_flags |= XFS_DIFLAG_NODEFRAG;
	if (pip->i_diflags & XFS_DIFLAG_FILESTREAM)
		di_flags |= XFS_DIFLAG_FILESTREAM;

	ip->i_diflags |= di_flags;
}

/* Propagate di_flags2 from a parent inode to a child inode. */
static void
xfs_inode_inherit_flags2(
	struct xfs_inode	*ip,
	const struct xfs_inode	*pip)
{
	if (pip->i_diflags2 & XFS_DIFLAG2_COWEXTSIZE) {
		ip->i_diflags2 |= XFS_DIFLAG2_COWEXTSIZE;
		ip->i_cowextsize = pip->i_cowextsize;
	}
	if (pip->i_diflags2 & XFS_DIFLAG2_DAX)
		ip->i_diflags2 |= XFS_DIFLAG2_DAX;
}

/*
 * Initialise a newly allocated inode and return the in-core inode to the
 * caller locked exclusively.
 */
static int
xfs_init_new_inode(
	struct user_namespace	*mnt_userns,
	struct xfs_trans	*tp,
	struct xfs_inode	*pip,
	xfs_ino_t		ino,
	umode_t			mode,
	xfs_nlink_t		nlink,
	dev_t			rdev,
	prid_t			prid,
	bool			init_xattrs,
	struct xfs_inode	**ipp)
{
	struct inode		*dir = pip ? VFS_I(pip) : NULL;
	struct xfs_mount	*mp = tp->t_mountp;
	struct xfs_inode	*ip;
	unsigned int		flags;
	int			error;
	struct timespec64	tv;
	struct inode		*inode;

	/*
	 * Protect against obviously corrupt allocation btree records. Later
	 * xfs_iget checks will catch re-allocation of other active in-memory
	 * and on-disk inodes. If we don't catch reallocating the parent inode
	 * here we will deadlock in xfs_iget() so we have to do these checks
	 * first.
	 */
	if ((pip && ino == pip->i_ino) || !xfs_verify_dir_ino(mp, ino)) {
		xfs_alert(mp, "Allocated a known in-use inode 0x%llx!", ino);
		return -EFSCORRUPTED;
	}

	/*
	 * Get the in-core inode with the lock held exclusively to prevent
	 * others from looking at it until we're done.
	 */
	error = xfs_iget(mp, tp, ino, XFS_IGET_CREATE, XFS_ILOCK_EXCL, &ip);
	if (error)
		return error;

	ASSERT(ip != NULL);
	inode = VFS_I(ip);
	set_nlink(inode, nlink);
	inode->i_rdev = rdev;
	ip->i_projid = prid;

	if (dir && !(dir->i_mode & S_ISGID) &&
	    (mp->m_flags & XFS_MOUNT_GRPID)) {
		inode_fsuid_set(inode, mnt_userns);
		inode->i_gid = dir->i_gid;
		inode->i_mode = mode;
	} else {
		inode_init_owner(mnt_userns, inode, dir, mode);
	}

	/*
	 * If the group ID of the new file does not match the effective group
	 * ID or one of the supplementary group IDs, the S_ISGID bit is cleared
	 * (and only if the irix_sgid_inherit compatibility variable is set).
	 */
	if (irix_sgid_inherit &&
	    (inode->i_mode & S_ISGID) &&
	    !in_group_p(i_gid_into_mnt(mnt_userns, inode)))
		inode->i_mode &= ~S_ISGID;

	ip->i_disk_size = 0;
	ip->i_df.if_nextents = 0;
	ASSERT(ip->i_nblocks == 0);

	tv = current_time(inode);
	inode->i_mtime = tv;
	inode->i_atime = tv;
	inode->i_ctime = tv;

	ip->i_extsize = 0;
	ip->i_diflags = 0;

	if (xfs_sb_version_has_v3inode(&mp->m_sb)) {
		inode_set_iversion(inode, 1);
		ip->i_cowextsize = 0;
		ip->i_crtime = tv;
	}

	flags = XFS_ILOG_CORE;
	switch (mode & S_IFMT) {
	case S_IFIFO:
	case S_IFCHR:
	case S_IFBLK:
	case S_IFSOCK:
		ip->i_df.if_format = XFS_DINODE_FMT_DEV;
		flags |= XFS_ILOG_DEV;
		break;
	case S_IFREG:
	case S_IFDIR:
		if (pip && (pip->i_diflags & XFS_DIFLAG_ANY))
			xfs_inode_inherit_flags(ip, pip);
		if (pip && (pip->i_diflags2 & XFS_DIFLAG2_ANY))
			xfs_inode_inherit_flags2(ip, pip);
		/* FALLTHROUGH */
	case S_IFLNK:
		ip->i_df.if_format = XFS_DINODE_FMT_EXTENTS;
		ip->i_df.if_bytes = 0;
		ip->i_df.if_u1.if_root = NULL;
		break;
	default:
		ASSERT(0);
	}

	/*
	 * If we need to create attributes immediately after allocating the
	 * inode, initialise an empty attribute fork right now. We use the
	 * default fork offset for attributes here as we don't know exactly what
	 * size or how many attributes we might be adding. We can do this
	 * safely here because we know the data fork is completely empty and
	 * this saves us from needing to run a separate transaction to set the
	 * fork offset in the immediate future.
	 */
	if (init_xattrs && xfs_sb_version_hasattr(&mp->m_sb)) {
		ip->i_forkoff = xfs_default_attroffset(ip) >> 3;
		ip->i_afp = xfs_ifork_alloc(XFS_DINODE_FMT_EXTENTS, 0);
	}

	/*
	 * Log the new values stuffed into the inode.
	 */
	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
	xfs_trans_log_inode(tp, ip, flags);

	/* now that we have an i_mode we can setup the inode structure */
	xfs_setup_inode(ip);

	*ipp = ip;
	return 0;
}

/*
 * Allocates a new inode from disk and returns a pointer to the incore copy.
 * This routine will internally commit the current transaction and allocate a
 * new one if we needed to allocate more on-disk free inodes to perform the
 * requested operation.
 *
 * If we are allocating quota inodes, we do not have a parent inode to attach
 * to or associate with (i.e. dp == NULL) because they are not linked into the
 * directory structure - they are attached directly to the superblock - and so
 * have no parent.
 */
int
xfs_dir_ialloc(
	struct user_namespace	*mnt_userns,
	struct xfs_trans	**tpp,
	struct xfs_inode	*dp,
	umode_t			mode,
	xfs_nlink_t		nlink,
	dev_t			rdev,
	prid_t			prid,
	bool			init_xattrs,
	struct xfs_inode	**ipp)
{
	struct xfs_buf		*agibp;
	xfs_ino_t		parent_ino = dp ? dp->i_ino : 0;
	xfs_ino_t		ino;
	int			error;

	ASSERT((*tpp)->t_flags & XFS_TRANS_PERM_LOG_RES);

	/*
	 * Call the space management code to pick the on-disk inode to be
	 * allocated.
	 */
	error = xfs_dialloc_select_ag(tpp, parent_ino, mode, &agibp);
	if (error)
		return error;

	/* Allocate an inode from the selected AG */
	error = xfs_dialloc_ag(*tpp, agibp, parent_ino, &ino);
	if (error)
		return error;
	ASSERT(ino != NULLFSINO);

	return xfs_init_new_inode(mnt_userns, *tpp, dp, ino, mode, nlink, rdev,
				  prid, init_xattrs, ipp);
}

/*
 * Decrement the link count on an inode & log the change.  If this causes the
 * link count to go to zero, move the inode to the AGI unlinked list so that
 * it can be freed when the last active reference goes away via xfs_inactive().
 */
static int			/* error */
xfs_droplink(
	xfs_trans_t *tp,
	xfs_inode_t *ip)
{
	xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_CHG);

	drop_nlink(VFS_I(ip));
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);

	if (VFS_I(ip)->i_nlink)
		return 0;

	return xfs_iunlink(tp, ip);
}

/*
 * Increment the link count on an inode & log the change.
 */
static void
xfs_bumplink(
	xfs_trans_t *tp,
	xfs_inode_t *ip)
{
	xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_CHG);

	inc_nlink(VFS_I(ip));
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
}

int
xfs_create(
	struct user_namespace	*mnt_userns,
	xfs_inode_t		*dp,
	struct xfs_name		*name,
	umode_t			mode,
	dev_t			rdev,
	bool			init_xattrs,
	xfs_inode_t		**ipp)
{
	int			is_dir = S_ISDIR(mode);
	struct xfs_mount	*mp = dp->i_mount;
	struct xfs_inode	*ip = NULL;
	struct xfs_trans	*tp = NULL;
	int			error;
	bool                    unlock_dp_on_error = false;
	prid_t			prid;
	struct xfs_dquot	*udqp = NULL;
	struct xfs_dquot	*gdqp = NULL;
	struct xfs_dquot	*pdqp = NULL;
	struct xfs_trans_res	*tres;
	uint			resblks;

	trace_xfs_create(dp, name);

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	prid = xfs_get_initial_prid(dp);

	/*
	 * Make sure that we have allocated dquot(s) on disk.
	 */
	error = xfs_qm_vop_dqalloc(dp, mapped_fsuid(mnt_userns),
			mapped_fsgid(mnt_userns), prid,
			XFS_QMOPT_QUOTALL | XFS_QMOPT_INHERIT,
			&udqp, &gdqp, &pdqp);
	if (error)
		return error;

	if (is_dir) {
		resblks = XFS_MKDIR_SPACE_RES(mp, name->len);
		tres = &M_RES(mp)->tr_mkdir;
	} else {
		resblks = XFS_CREATE_SPACE_RES(mp, name->len);
		tres = &M_RES(mp)->tr_create;
	}

	/*
	 * Initially assume that the file does not exist and
	 * reserve the resources for that case.  If that is not
	 * the case we'll drop the one we have and get a more
	 * appropriate transaction later.
	 */
	error = xfs_trans_alloc_icreate(mp, tres, udqp, gdqp, pdqp, resblks,
			&tp);
	if (error == -ENOSPC) {
		/* flush outstanding delalloc blocks and retry */
		xfs_flush_inodes(mp);
		error = xfs_trans_alloc_icreate(mp, tres, udqp, gdqp, pdqp,
				resblks, &tp);
	}
	if (error)
		goto out_release_dquots;

	xfs_ilock(dp, XFS_ILOCK_EXCL | XFS_ILOCK_PARENT);
	unlock_dp_on_error = true;

	error = xfs_iext_count_may_overflow(dp, XFS_DATA_FORK,
			XFS_IEXT_DIR_MANIP_CNT(mp));
	if (error)
		goto out_trans_cancel;

	/*
	 * A newly created regular or special file just has one directory
	 * entry pointing to it, but a directory also has the "." entry
	 * pointing to itself.
	 */
	error = xfs_dir_ialloc(mnt_userns, &tp, dp, mode, is_dir ? 2 : 1, rdev,
			       prid, init_xattrs, &ip);
	if (error)
		goto out_trans_cancel;

	/*
	 * Now we join the directory inode to the transaction.  We do not do it
	 * earlier because xfs_dir_ialloc might commit the previous transaction
	 * (and release all the locks).  An error from here on will result in
	 * the transaction cancel unlocking dp so don't do it explicitly in the
	 * error path.
	 */
	xfs_trans_ijoin(tp, dp, XFS_ILOCK_EXCL);
	unlock_dp_on_error = false;

	error = xfs_dir_createname(tp, dp, name, ip->i_ino,
					resblks - XFS_IALLOC_SPACE_RES(mp));
	if (error) {
		ASSERT(error != -ENOSPC);
		goto out_trans_cancel;
	}
	xfs_trans_ichgtime(tp, dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
	xfs_trans_log_inode(tp, dp, XFS_ILOG_CORE);

	if (is_dir) {
		error = xfs_dir_init(tp, ip, dp);
		if (error)
			goto out_trans_cancel;

		xfs_bumplink(tp, dp);
	}

	/*
	 * If this is a synchronous mount, make sure that the
	 * create transaction goes to disk before returning to
	 * the user.
	 */
	if (mp->m_flags & (XFS_MOUNT_WSYNC|XFS_MOUNT_DIRSYNC))
		xfs_trans_set_sync(tp);

	/*
	 * Attach the dquot(s) to the inodes and modify them incore.
	 * These ids of the inode couldn't have changed since the new
	 * inode has been locked ever since it was created.
	 */
	xfs_qm_vop_create_dqattach(tp, ip, udqp, gdqp, pdqp);

	error = xfs_trans_commit(tp);
	if (error)
		goto out_release_inode;

	xfs_qm_dqrele(udqp);
	xfs_qm_dqrele(gdqp);
	xfs_qm_dqrele(pdqp);

	*ipp = ip;
	return 0;

 out_trans_cancel:
	xfs_trans_cancel(tp);
 out_release_inode:
	/*
	 * Wait until after the current transaction is aborted to finish the
	 * setup of the inode and release the inode.  This prevents recursive
	 * transactions and deadlocks from xfs_inactive.
	 */
	if (ip) {
		xfs_finish_inode_setup(ip);
		xfs_irele(ip);
	}
 out_release_dquots:
	xfs_qm_dqrele(udqp);
	xfs_qm_dqrele(gdqp);
	xfs_qm_dqrele(pdqp);

	if (unlock_dp_on_error)
		xfs_iunlock(dp, XFS_ILOCK_EXCL);
	return error;
}

int
xfs_create_tmpfile(
	struct user_namespace	*mnt_userns,
	struct xfs_inode	*dp,
	umode_t			mode,
	struct xfs_inode	**ipp)
{
	struct xfs_mount	*mp = dp->i_mount;
	struct xfs_inode	*ip = NULL;
	struct xfs_trans	*tp = NULL;
	int			error;
	prid_t                  prid;
	struct xfs_dquot	*udqp = NULL;
	struct xfs_dquot	*gdqp = NULL;
	struct xfs_dquot	*pdqp = NULL;
	struct xfs_trans_res	*tres;
	uint			resblks;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	prid = xfs_get_initial_prid(dp);

	/*
	 * Make sure that we have allocated dquot(s) on disk.
	 */
	error = xfs_qm_vop_dqalloc(dp, mapped_fsuid(mnt_userns),
			mapped_fsgid(mnt_userns), prid,
			XFS_QMOPT_QUOTALL | XFS_QMOPT_INHERIT,
			&udqp, &gdqp, &pdqp);
	if (error)
		return error;

	resblks = XFS_IALLOC_SPACE_RES(mp);
	tres = &M_RES(mp)->tr_create_tmpfile;

	error = xfs_trans_alloc_icreate(mp, tres, udqp, gdqp, pdqp, resblks,
			&tp);
	if (error)
		goto out_release_dquots;

	error = xfs_dir_ialloc(mnt_userns, &tp, dp, mode, 0, 0, prid,
				false, &ip);
	if (error)
		goto out_trans_cancel;

	if (mp->m_flags & XFS_MOUNT_WSYNC)
		xfs_trans_set_sync(tp);

	/*
	 * Attach the dquot(s) to the inodes and modify them incore.
	 * These ids of the inode couldn't have changed since the new
	 * inode has been locked ever since it was created.
	 */
	xfs_qm_vop_create_dqattach(tp, ip, udqp, gdqp, pdqp);

	error = xfs_iunlink(tp, ip);
	if (error)
		goto out_trans_cancel;

	error = xfs_trans_commit(tp);
	if (error)
		goto out_release_inode;

	xfs_qm_dqrele(udqp);
	xfs_qm_dqrele(gdqp);
	xfs_qm_dqrele(pdqp);

	*ipp = ip;
	return 0;

 out_trans_cancel:
	xfs_trans_cancel(tp);
 out_release_inode:
	/*
	 * Wait until after the current transaction is aborted to finish the
	 * setup of the inode and release the inode.  This prevents recursive
	 * transactions and deadlocks from xfs_inactive.
	 */
	if (ip) {
		xfs_finish_inode_setup(ip);
		xfs_irele(ip);
	}
 out_release_dquots:
	xfs_qm_dqrele(udqp);
	xfs_qm_dqrele(gdqp);
	xfs_qm_dqrele(pdqp);

	return error;
}

int
xfs_link(
	xfs_inode_t		*tdp,
	xfs_inode_t		*sip,
	struct xfs_name		*target_name)
{
	xfs_mount_t		*mp = tdp->i_mount;
	xfs_trans_t		*tp;
	int			error;
	int			resblks;

	trace_xfs_link(tdp, target_name);

	ASSERT(!S_ISDIR(VFS_I(sip)->i_mode));

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	error = xfs_qm_dqattach(sip);
	if (error)
		goto std_return;

	error = xfs_qm_dqattach(tdp);
	if (error)
		goto std_return;

	resblks = XFS_LINK_SPACE_RES(mp, target_name->len);
	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_link, resblks, 0, 0, &tp);
	if (error == -ENOSPC) {
		resblks = 0;
		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_link, 0, 0, 0, &tp);
	}
	if (error)
		goto std_return;

	xfs_lock_two_inodes(sip, XFS_ILOCK_EXCL, tdp, XFS_ILOCK_EXCL);

	xfs_trans_ijoin(tp, sip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, tdp, XFS_ILOCK_EXCL);

	error = xfs_iext_count_may_overflow(tdp, XFS_DATA_FORK,
			XFS_IEXT_DIR_MANIP_CNT(mp));
	if (error)
		goto error_return;

	/*
	 * If we are using project inheritance, we only allow hard link
	 * creation in our tree when the project IDs are the same; else
	 * the tree quota mechanism could be circumvented.
	 */
	if (unlikely((tdp->i_diflags & XFS_DIFLAG_PROJINHERIT) &&
		     tdp->i_projid != sip->i_projid)) {
		error = -EXDEV;
		goto error_return;
	}

	if (!resblks) {
		error = xfs_dir_canenter(tp, tdp, target_name);
		if (error)
			goto error_return;
	}

	/*
	 * Handle initial link state of O_TMPFILE inode
	 */
	if (VFS_I(sip)->i_nlink == 0) {
		error = xfs_iunlink_remove(tp, sip);
		if (error)
			goto error_return;
	}

	error = xfs_dir_createname(tp, tdp, target_name, sip->i_ino,
				   resblks);
	if (error)
		goto error_return;
	xfs_trans_ichgtime(tp, tdp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
	xfs_trans_log_inode(tp, tdp, XFS_ILOG_CORE);

	xfs_bumplink(tp, sip);

	/*
	 * If this is a synchronous mount, make sure that the
	 * link transaction goes to disk before returning to
	 * the user.
	 */
	if (mp->m_flags & (XFS_MOUNT_WSYNC|XFS_MOUNT_DIRSYNC))
		xfs_trans_set_sync(tp);

	return xfs_trans_commit(tp);

 error_return:
	xfs_trans_cancel(tp);
 std_return:
	return error;
}

/* Clear the reflink flag and the cowblocks tag if possible. */
static void
xfs_itruncate_clear_reflink_flags(
	struct xfs_inode	*ip)
{
	struct xfs_ifork	*dfork;
	struct xfs_ifork	*cfork;

	if (!xfs_is_reflink_inode(ip))
		return;
	dfork = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
	cfork = XFS_IFORK_PTR(ip, XFS_COW_FORK);
	if (dfork->if_bytes == 0 && cfork->if_bytes == 0)
		ip->i_diflags2 &= ~XFS_DIFLAG2_REFLINK;
	if (cfork->if_bytes == 0)
		xfs_inode_clear_cowblocks_tag(ip);
}

/*
 * Free up the underlying blocks past new_size.  The new size must be smaller
 * than the current size.  This routine can be used both for the attribute and
 * data fork, and does not modify the inode size, which is left to the caller.
 *
 * The transaction passed to this routine must have made a permanent log
 * reservation of at least XFS_ITRUNCATE_LOG_RES.  This routine may commit the
 * given transaction and start new ones, so make sure everything involved in
 * the transaction is tidy before calling here.  Some transaction will be
 * returned to the caller to be committed.  The incoming transaction must
 * already include the inode, and both inode locks must be held exclusively.
 * The inode must also be "held" within the transaction.  On return the inode
 * will be "held" within the returned transaction.  This routine does NOT
 * require any disk space to be reserved for it within the transaction.
 *
 * If we get an error, we must return with the inode locked and linked into the
 * current transaction. This keeps things simple for the higher level code,
 * because it always knows that the inode is locked and held in the transaction
 * that returns to it whether errors occur or not.  We don't mark the inode
 * dirty on error so that transactions can be easily aborted if possible.
 */
int
xfs_itruncate_extents_flags(
	struct xfs_trans	**tpp,
	struct xfs_inode	*ip,
	int			whichfork,
	xfs_fsize_t		new_size,
	int			flags)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp = *tpp;
	xfs_fileoff_t		first_unmap_block;
	xfs_filblks_t		unmap_len;
	int			error = 0;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
	ASSERT(!atomic_read(&VFS_I(ip)->i_count) ||
	       xfs_isilocked(ip, XFS_IOLOCK_EXCL));
	ASSERT(new_size <= XFS_ISIZE(ip));
	ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);
	ASSERT(ip->i_itemp != NULL);
	ASSERT(ip->i_itemp->ili_lock_flags == 0);
	ASSERT(!XFS_NOT_DQATTACHED(mp, ip));

	trace_xfs_itruncate_extents_start(ip, new_size);

	flags |= xfs_bmapi_aflag(whichfork);

	/*
	 * Since it is possible for space to become allocated beyond
	 * the end of the file (in a crash where the space is allocated
	 * but the inode size is not yet updated), simply remove any
	 * blocks which show up between the new EOF and the maximum
	 * possible file size.
	 *
	 * We have to free all the blocks to the bmbt maximum offset, even if
	 * the page cache can't scale that far.
	 */
	first_unmap_block = XFS_B_TO_FSB(mp, (xfs_ufsize_t)new_size);
	if (!xfs_verify_fileoff(mp, first_unmap_block)) {
		WARN_ON_ONCE(first_unmap_block > XFS_MAX_FILEOFF);
		return 0;
	}

	unmap_len = XFS_MAX_FILEOFF - first_unmap_block + 1;
	while (unmap_len > 0) {
		ASSERT(tp->t_firstblock == NULLFSBLOCK);
		error = __xfs_bunmapi(tp, ip, first_unmap_block, &unmap_len,
				flags, XFS_ITRUNC_MAX_EXTENTS);
		if (error)
			goto out;

		/* free the just unmapped extents */
		error = xfs_defer_finish(&tp);
		if (error)
			goto out;
	}

	if (whichfork == XFS_DATA_FORK) {
		/* Remove all pending CoW reservations. */
		error = xfs_reflink_cancel_cow_blocks(ip, &tp,
				first_unmap_block, XFS_MAX_FILEOFF, true);
		if (error)
			goto out;

		xfs_itruncate_clear_reflink_flags(ip);
	}

	/*
	 * Always re-log the inode so that our permanent transaction can keep
	 * on rolling it forward in the log.
	 */
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);

	trace_xfs_itruncate_extents_end(ip, new_size);

out:
	*tpp = tp;
	return error;
}
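
/*
 * Note (editorial, assuming the usual xfs_inode.h layout for this kernel
 * version): most callers go through an xfs_itruncate_extents() wrapper
 * that simply passes a zero flags argument to
 * xfs_itruncate_extents_flags(), as xfs_inactive_truncate() below does.
 */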

int
xfs_release(
	xfs_inode_t	*ip)
{
	xfs_mount_t	*mp = ip->i_mount;
	int		error = 0;

	if (!S_ISREG(VFS_I(ip)->i_mode) || (VFS_I(ip)->i_mode == 0))
		return 0;

	/* If this is a read-only mount, don't do this (would generate I/O) */
	if (mp->m_flags & XFS_MOUNT_RDONLY)
		return 0;

	if (!XFS_FORCED_SHUTDOWN(mp)) {
		int truncated;

		/*
		 * If we previously truncated this file and removed old data
		 * in the process, we want to initiate "early" writeout on
		 * the last close.  This is an attempt to combat the notorious
		 * NULL files problem which is particularly noticeable from a
		 * truncate down, buffered (re-)write (delalloc), followed by
		 * a crash.  What we are effectively doing here is
		 * significantly reducing the time window where we'd otherwise
		 * be exposed to that problem.
		 */
		truncated = xfs_iflags_test_and_clear(ip, XFS_ITRUNCATED);
		if (truncated) {
			xfs_iflags_clear(ip, XFS_IDIRTY_RELEASE);
			if (ip->i_delayed_blks > 0) {
				error = filemap_flush(VFS_I(ip)->i_mapping);
				if (error)
					return error;
			}
		}
	}

	if (VFS_I(ip)->i_nlink == 0)
		return 0;

	/*
	 * If we can't get the iolock just skip truncating the blocks past EOF
	 * because we could deadlock with the mmap_lock otherwise. We'll get
	 * another chance to drop them once the last reference to the inode is
	 * dropped, so we'll never leak blocks permanently.
	 */
	if (!xfs_ilock_nowait(ip, XFS_IOLOCK_EXCL))
		return 0;

	if (xfs_can_free_eofblocks(ip, false)) {
		/*
		 * Check if the inode is being opened, written and closed
		 * frequently and we have delayed allocation blocks outstanding
		 * (e.g. streaming writes from the NFS server), truncating the
		 * blocks past EOF will cause fragmentation to occur.
		 *
		 * In this case don't do the truncation, but we have to be
		 * careful how we detect this case. Blocks beyond EOF show up as
		 * i_delayed_blks even when the inode is clean, so we need to
		 * truncate them away first before checking for a dirty release.
		 * Hence on the first dirty close we will still remove the
		 * speculative allocation, but after that we will leave it in
		 * place.
		 */
		if (xfs_iflags_test(ip, XFS_IDIRTY_RELEASE))
			goto out_unlock;

		error = xfs_free_eofblocks(ip);
		if (error)
			goto out_unlock;

		/* delalloc blocks after truncation means it really is dirty */
		if (ip->i_delayed_blks)
			xfs_iflags_set(ip, XFS_IDIRTY_RELEASE);
	}

out_unlock:
	xfs_iunlock(ip, XFS_IOLOCK_EXCL);
	return error;
}

/*
 * xfs_inactive_truncate
 *
 * Called to perform a truncate when an inode becomes unlinked.
 */
STATIC int
xfs_inactive_truncate(
	struct xfs_inode *ip)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp;
	int			error;

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate, 0, 0, 0, &tp);
	if (error) {
		ASSERT(XFS_FORCED_SHUTDOWN(mp));
		return error;
	}
	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, 0);

	/*
	 * Log the inode size first to prevent stale data exposure in the event
	 * of a system crash before the truncate completes. See the related
	 * comment in xfs_vn_setattr_size() for details.
	 */
	ip->i_disk_size = 0;
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);

	error = xfs_itruncate_extents(&tp, ip, XFS_DATA_FORK, 0);
	if (error)
		goto error_trans_cancel;

	ASSERT(ip->i_df.if_nextents == 0);

	error = xfs_trans_commit(tp);
	if (error)
		goto error_unlock;

	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return 0;

error_trans_cancel:
	xfs_trans_cancel(tp);
error_unlock:
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;
}

1566 1567 1568 1569 1570 1571 1572 1573 1574 1575 1576 1577 1578
/*
 * xfs_inactive_ifree()
 *
 * Perform the inode free when an inode is unlinked.
 */
STATIC int
xfs_inactive_ifree(
	struct xfs_inode *ip)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp;
	int			error;

1579
	/*
1580 1581 1582 1583 1584
	 * We try to use a per-AG reservation for any block needed by the finobt
	 * tree, but as the finobt feature predates the per-AG reservation
	 * support a degraded file system might not have enough space for the
	 * reservation at mount time.  In that case try to dip into the reserved
	 * pool and pray.
1585 1586 1587 1588 1589
	 *
	 * Send a warning if the reservation does happen to fail, as the inode
	 * now remains allocated and sits on the unlinked list until the fs is
	 * repaired.
	 */
1590
	if (unlikely(mp->m_finobt_nores)) {
1591 1592 1593 1594 1595 1596
		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ifree,
				XFS_IFREE_SPACE_RES(mp), 0, XFS_TRANS_RESERVE,
				&tp);
	} else {
		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ifree, 0, 0, 0, &tp);
	}
1597
	if (error) {
D
Dave Chinner 已提交
1598
		if (error == -ENOSPC) {
1599 1600 1601 1602 1603 1604
			xfs_warn_ratelimited(mp,
			"Failed to remove inode(s) from unlinked list. "
			"Please free space, unmount and run xfs_repair.");
		} else {
			ASSERT(XFS_FORCED_SHUTDOWN(mp));
		}
1605 1606 1607
		return error;
	}

1608 1609 1610 1611 1612 1613 1614 1615 1616 1617 1618 1619 1620 1621 1622 1623 1624 1625 1626 1627
	/*
	 * We do not hold the inode locked across the entire rolling transaction
	 * here. We only need to hold it for the first transaction that
	 * xfs_ifree() builds, which may mark the inode XFS_ISTALE if the
	 * underlying cluster buffer is freed. Relogging an XFS_ISTALE inode
	 * here breaks the relationship between cluster buffer invalidation and
	 * stale inode invalidation on cluster buffer item journal commit
	 * completion, and can result in leaving dirty stale inodes hanging
	 * around in memory.
	 *
	 * We have no need for serialising this inode operation against other
	 * operations - we freed the inode and hence reallocation is required
	 * and that will serialise on reallocating the space the deferops need
	 * to free. Hence we can unlock the inode on the first commit of
	 * the transaction rather than roll it right through the deferops. This
	 * avoids relogging the XFS_ISTALE inode.
	 *
	 * We check that xfs_ifree() hasn't grown an internal transaction roll
	 * by asserting that the inode is still locked when it returns.
	 */
	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);

	error = xfs_ifree(tp, ip);
	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
	if (error) {
		/*
		 * If we fail to free the inode, shut down.  The cancel
		 * might do that, we need to make sure.  Otherwise the
		 * inode might be lost for a long time or forever.
		 */
		if (!XFS_FORCED_SHUTDOWN(mp)) {
			xfs_notice(mp, "%s: xfs_ifree returned error %d",
				__func__, error);
			xfs_force_shutdown(mp, SHUTDOWN_META_IO_ERROR);
		}
		xfs_trans_cancel(tp);
		return error;
	}

	/*
	 * Credit the quota account(s). The inode is gone.
	 */
	xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_ICOUNT, -1);

	/*
	 * Just ignore errors at this point.  There is nothing we can do except
	 * to try to keep going. Make sure it's not a silent error.
	 */
	error = xfs_trans_commit(tp);
	if (error)
		xfs_notice(mp, "%s: xfs_trans_commit returned error %d",
			__func__, error);

	return 0;
}

/*
 * xfs_inactive
 *
 * This is called when the reference count for the vnode
 * goes to zero.  If the file has been unlinked, then it must
 * now be truncated.  Also, we clear all of the read-ahead state
 * kept for the inode here since the file is now closed.
 */
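
/*
 * A rough sketch of the teardown order implemented below (illustrative
 * only, not a separate code path):
 *
 *	if (S_ISLNK(mode))	xfs_inactive_symlink(ip);
 *	else if (truncate)	xfs_inactive_truncate(ip);
 *	if (XFS_IFORK_Q(ip))	xfs_attr_inactive(ip);
 *	xfs_inactive_ifree(ip);
 *	xfs_qm_dqdetach(ip);
 */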
void
xfs_inactive(
	xfs_inode_t	*ip)
{
	struct xfs_mount	*mp;
	int			error;
	int			truncate = 0;

	/*
	 * If the inode is already free, then there can be nothing
	 * to clean up here.
	 */
	if (VFS_I(ip)->i_mode == 0) {
		ASSERT(ip->i_df.if_broot_bytes == 0);
		return;
	}

	mp = ip->i_mount;
	ASSERT(!xfs_iflags_test(ip, XFS_IRECOVERY));

	/* If this is a read-only mount, don't do this (would generate I/O) */
	if (mp->m_flags & XFS_MOUNT_RDONLY)
		return;

	/* Metadata inodes require explicit resource cleanup. */
	if (xfs_is_metadata_inode(ip))
		return;

	/* Try to clean out the cow blocks if there are any. */
	if (xfs_inode_has_cow_data(ip))
		xfs_reflink_cancel_cow_range(ip, 0, NULLFILEOFF, true);

	if (VFS_I(ip)->i_nlink != 0) {
		/*
		 * force is true because we are evicting an inode from the
		 * cache. Post-eof blocks must be freed, lest we end up with
		 * broken free space accounting.
		 *
		 * Note: don't bother with iolock here since lockdep complains
		 * about acquiring it in reclaim context. We have the only
		 * reference to the inode at this point anyways.
		 */
		if (xfs_can_free_eofblocks(ip, true))
			xfs_free_eofblocks(ip);

		return;
	}

	if (S_ISREG(VFS_I(ip)->i_mode) &&
	    (ip->i_disk_size != 0 || XFS_ISIZE(ip) != 0 ||
	     ip->i_df.if_nextents > 0 || ip->i_delayed_blks > 0))
		truncate = 1;

	error = xfs_qm_dqattach(ip);
	if (error)
		return;

	if (S_ISLNK(VFS_I(ip)->i_mode))
		error = xfs_inactive_symlink(ip);
	else if (truncate)
		error = xfs_inactive_truncate(ip);
	if (error)
		return;

	/*
	 * If there are attributes associated with the file then blow them away
	 * now.  The code calls a routine that recursively deconstructs the
	 * attribute fork.  It also blows away the in-core attribute fork.
	 */
	if (XFS_IFORK_Q(ip)) {
		error = xfs_attr_inactive(ip);
		if (error)
			return;
	}

	ASSERT(!ip->i_afp);
	ASSERT(ip->i_forkoff == 0);

	/*
	 * Free the inode.
	 */
	error = xfs_inactive_ifree(ip);
	if (error)
		return;

	/*
	 * Release the dquots held by inode, if any.
	 */
	xfs_qm_dqdetach(ip);
}

/*
 * In-Core Unlinked List Lookups
 * =============================
 *
 * Every inode is supposed to be reachable from some other piece of metadata
 * with the exception of the root directory.  Inodes with a connection to a
 * file descriptor but not linked from anywhere in the on-disk directory tree
 * are collectively known as unlinked inodes, though the filesystem itself
 * maintains links to these inodes so that on-disk metadata are consistent.
 *
 * XFS implements a per-AG on-disk hash table of unlinked inodes.  The AGI
 * header contains a number of buckets that point to an inode, and each inode
 * record has a pointer to the next inode in the hash chain.  This
 * singly-linked list causes scaling problems in the iunlink remove function
 * because we must walk that list to find the inode that points to the inode
 * being removed from the unlinked hash bucket list.
 *
 * What if we modelled the unlinked list as a collection of records capturing
 * "X.next_unlinked = Y" relations?  If we indexed those records on Y, we'd
 * have a fast way to look up unlinked list predecessors, which avoids the
 * slow list walk.  That's exactly what we do here (in-core) with a per-AG
 * rhashtable.
 *
 * Because this is a backref cache, we ignore operational failures since the
 * iunlink code can fall back to the slow bucket walk.  The only errors that
 * should bubble out are for obviously incorrect situations.
 *
 * All users of the backref cache MUST hold the AGI buffer lock to serialize
 * access or have otherwise provided for concurrency control.
 */
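
/*
 * Worked example (illustrative values only): suppose a bucket holds the
 * chain A -> B -> C, i.e. A.next_unlinked = B and B.next_unlinked = C.
 * The cache then contains two records, each indexed by its Y value:
 *
 *	{ .iu_agino = A, .iu_next_unlinked = B }	(looked up by key B)
 *	{ .iu_agino = B, .iu_next_unlinked = C }	(looked up by key C)
 *
 * To remove C from the middle of the chain, a lookup with key C returns
 * its predecessor B directly instead of walking the bucket from A.
 */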

/* Capture a "X.next_unlinked = Y" relationship. */
struct xfs_iunlink {
	struct rhash_head	iu_rhash_head;
	xfs_agino_t		iu_agino;		/* X */
	xfs_agino_t		iu_next_unlinked;	/* Y */
};

/* Unlinked list predecessor lookup hashtable construction */
static int
xfs_iunlink_obj_cmpfn(
	struct rhashtable_compare_arg	*arg,
	const void			*obj)
{
	const xfs_agino_t		*key = arg->key;
	const struct xfs_iunlink	*iu = obj;

	if (iu->iu_next_unlinked != *key)
		return 1;
	return 0;
}

static const struct rhashtable_params xfs_iunlink_hash_params = {
	.min_size		= XFS_AGI_UNLINKED_BUCKETS,
	.key_len		= sizeof(xfs_agino_t),
	.key_offset		= offsetof(struct xfs_iunlink,
					   iu_next_unlinked),
	.head_offset		= offsetof(struct xfs_iunlink, iu_rhash_head),
	.automatic_shrinking	= true,
	.obj_cmpfn		= xfs_iunlink_obj_cmpfn,
};

/*
 * Return X, where X.next_unlinked == @agino.  Returns NULLAGINO if no such
 * relation is found.
 */
static xfs_agino_t
xfs_iunlink_lookup_backref(
	struct xfs_perag	*pag,
	xfs_agino_t		agino)
{
	struct xfs_iunlink	*iu;

	iu = rhashtable_lookup_fast(&pag->pagi_unlinked_hash, &agino,
			xfs_iunlink_hash_params);
	return iu ? iu->iu_agino : NULLAGINO;
}

/*
 * Take ownership of an iunlink cache entry and insert it into the hash table.
 * If successful, the entry will be owned by the cache; if not, it is freed.
 * Either way, the caller does not own @iu after this call.
 */
static int
xfs_iunlink_insert_backref(
	struct xfs_perag	*pag,
	struct xfs_iunlink	*iu)
{
	int			error;

	error = rhashtable_insert_fast(&pag->pagi_unlinked_hash,
			&iu->iu_rhash_head, xfs_iunlink_hash_params);
	/*
	 * Fail loudly if there already was an entry because that's a sign of
	 * corruption of in-memory data.  Also fail loudly if we see an error
	 * code we didn't anticipate from the rhashtable code.  Currently we
	 * only anticipate ENOMEM.
	 */
	if (error) {
		WARN(error != -ENOMEM, "iunlink cache insert error %d", error);
		kmem_free(iu);
	}
	/*
	 * Absorb any runtime errors that aren't a result of corruption because
	 * this is a cache and we can always fall back to bucket list scanning.
	 */
	if (error != 0 && error != -EEXIST)
		error = 0;
	return error;
}

/* Remember that @prev_agino.next_unlinked = @this_agino. */
static int
xfs_iunlink_add_backref(
	struct xfs_perag	*pag,
	xfs_agino_t		prev_agino,
	xfs_agino_t		this_agino)
{
	struct xfs_iunlink	*iu;

	if (XFS_TEST_ERROR(false, pag->pag_mount, XFS_ERRTAG_IUNLINK_FALLBACK))
		return 0;

	iu = kmem_zalloc(sizeof(*iu), KM_NOFS);
	iu->iu_agino = prev_agino;
	iu->iu_next_unlinked = this_agino;

	return xfs_iunlink_insert_backref(pag, iu);
}

/*
 * Replace X.next_unlinked = @agino with X.next_unlinked = @next_unlinked.
 * If @next_unlinked is NULLAGINO, we drop the backref and exit.  If there
 * wasn't any such entry then we don't bother.
 */
static int
xfs_iunlink_change_backref(
	struct xfs_perag	*pag,
	xfs_agino_t		agino,
	xfs_agino_t		next_unlinked)
{
	struct xfs_iunlink	*iu;
	int			error;

	/* Look up the old entry; if there wasn't one then exit. */
	iu = rhashtable_lookup_fast(&pag->pagi_unlinked_hash, &agino,
			xfs_iunlink_hash_params);
	if (!iu)
		return 0;

	/*
	 * Remove the entry.  This shouldn't ever return an error, but if we
	 * couldn't remove the old entry we don't want to add it again to the
	 * hash table, and if the entry disappeared on us then someone's
	 * violated the locking rules and we need to fail loudly.  Either way
	 * we cannot remove the inode because internal state is or would have
	 * been corrupt.
	 */
	error = rhashtable_remove_fast(&pag->pagi_unlinked_hash,
			&iu->iu_rhash_head, xfs_iunlink_hash_params);
	if (error)
		return error;

	/* If there is no new next entry just free our item and return. */
	if (next_unlinked == NULLAGINO) {
		kmem_free(iu);
		return 0;
	}

	/* Update the entry and re-add it to the hash table. */
	iu->iu_next_unlinked = next_unlinked;
	return xfs_iunlink_insert_backref(pag, iu);
}

/* Set up the in-core predecessor structures. */
int
xfs_iunlink_init(
	struct xfs_perag	*pag)
{
	return rhashtable_init(&pag->pagi_unlinked_hash,
			&xfs_iunlink_hash_params);
}

/* Free the in-core predecessor structures. */
static void
xfs_iunlink_free_item(
	void			*ptr,
	void			*arg)
{
	struct xfs_iunlink	*iu = ptr;
	bool			*freed_anything = arg;

	*freed_anything = true;
	kmem_free(iu);
}

void
xfs_iunlink_destroy(
	struct xfs_perag	*pag)
{
	bool			freed_anything = false;

	rhashtable_free_and_destroy(&pag->pagi_unlinked_hash,
			xfs_iunlink_free_item, &freed_anything);

	ASSERT(freed_anything == false || XFS_FORCED_SHUTDOWN(pag->pag_mount));
}

/*
 * Point the AGI unlinked bucket at an inode and log the results.  The caller
 * is responsible for validating the old value.
 */
STATIC int
xfs_iunlink_update_bucket(
	struct xfs_trans	*tp,
	xfs_agnumber_t		agno,
	struct xfs_buf		*agibp,
	unsigned int		bucket_index,
	xfs_agino_t		new_agino)
{
	struct xfs_agi		*agi = agibp->b_addr;
	xfs_agino_t		old_value;
	int			offset;

	ASSERT(xfs_verify_agino_or_null(tp->t_mountp, agno, new_agino));

	old_value = be32_to_cpu(agi->agi_unlinked[bucket_index]);
	trace_xfs_iunlink_update_bucket(tp->t_mountp, agno, bucket_index,
			old_value, new_agino);

	/*
	 * We should never find the head of the list already set to the value
	 * passed in because either we're adding or removing ourselves from the
	 * head of the list.
	 */
	if (old_value == new_agino) {
		xfs_buf_mark_corrupt(agibp);
		return -EFSCORRUPTED;
	}

	agi->agi_unlinked[bucket_index] = cpu_to_be32(new_agino);
	offset = offsetof(struct xfs_agi, agi_unlinked) +
			(sizeof(xfs_agino_t) * bucket_index);
	xfs_trans_log_buf(tp, agibp, offset, offset + sizeof(xfs_agino_t) - 1);
	return 0;
}
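
/*
 * The range logged above covers a single bucket slot.  As a worked
 * example (illustrative numbers only), with bucket_index = 5:
 *
 *	offset = offsetof(struct xfs_agi, agi_unlinked) + 4 * 5
 *
 * and the dirty region is [offset, offset + 3], i.e. one 4-byte
 * xfs_agino_t, rather than the whole AGI buffer.
 */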

/* Set an on-disk inode's next_unlinked pointer. */
STATIC void
xfs_iunlink_update_dinode(
	struct xfs_trans	*tp,
	xfs_agnumber_t		agno,
	xfs_agino_t		agino,
	struct xfs_buf		*ibp,
	struct xfs_dinode	*dip,
	struct xfs_imap		*imap,
	xfs_agino_t		next_agino)
{
	struct xfs_mount	*mp = tp->t_mountp;
	int			offset;

	ASSERT(xfs_verify_agino_or_null(mp, agno, next_agino));

	trace_xfs_iunlink_update_dinode(mp, agno, agino,
			be32_to_cpu(dip->di_next_unlinked), next_agino);

	dip->di_next_unlinked = cpu_to_be32(next_agino);
	offset = imap->im_boffset +
			offsetof(struct xfs_dinode, di_next_unlinked);

	/* need to recalc the inode CRC if appropriate */
	xfs_dinode_calc_crc(mp, dip);
	xfs_trans_inode_buf(tp, ibp);
	xfs_trans_log_buf(tp, ibp, offset, offset + sizeof(xfs_agino_t) - 1);
}

/* Set an in-core inode's unlinked pointer and return the old value. */
STATIC int
xfs_iunlink_update_inode(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	xfs_agnumber_t		agno,
	xfs_agino_t		next_agino,
	xfs_agino_t		*old_next_agino)
{
	struct xfs_mount	*mp = tp->t_mountp;
	struct xfs_dinode	*dip;
	struct xfs_buf		*ibp;
	xfs_agino_t		old_value;
	int			error;

	ASSERT(xfs_verify_agino_or_null(mp, agno, next_agino));

	error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &ibp);
	if (error)
		return error;
	dip = xfs_buf_offset(ibp, ip->i_imap.im_boffset);

	/* Make sure the old pointer isn't garbage. */
	old_value = be32_to_cpu(dip->di_next_unlinked);
	if (!xfs_verify_agino_or_null(mp, agno, old_value)) {
		xfs_inode_verifier_error(ip, -EFSCORRUPTED, __func__, dip,
				sizeof(*dip), __this_address);
		error = -EFSCORRUPTED;
		goto out;
	}

	/*
	 * Since we're updating a linked list, we should never find that the
	 * current pointer is the same as the new value, unless we're
	 * terminating the list.
	 */
	*old_next_agino = old_value;
	if (old_value == next_agino) {
		if (next_agino != NULLAGINO) {
			xfs_inode_verifier_error(ip, -EFSCORRUPTED, __func__,
					dip, sizeof(*dip), __this_address);
			error = -EFSCORRUPTED;
		}
		goto out;
	}

	/* Ok, update the new pointer. */
	xfs_iunlink_update_dinode(tp, agno, XFS_INO_TO_AGINO(mp, ip->i_ino),
			ibp, dip, &ip->i_imap, next_agino);
	return 0;
out:
	xfs_trans_brelse(tp, ibp);
	return error;
}

/*
 * This is called when the inode's link count has gone to 0 or we are creating
 * a tmpfile via O_TMPFILE.  The inode @ip must have nlink == 0.
 *
 * We place the on-disk inode on a list in the AGI.  It will be pulled from this
 * list when the inode is freed.
 */
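
/*
 * A minimal sketch of the insert below (illustrative only): the inode is
 * pushed onto the front of one of the XFS_AGI_UNLINKED_BUCKETS (64) hash
 * chains, selected by agino % XFS_AGI_UNLINKED_BUCKETS, so e.g. agino 137
 * lands in bucket 9:
 *
 *	inode.next_unlinked = old bucket head	(xfs_iunlink_update_inode)
 *	bucket head = agino			(xfs_iunlink_update_bucket)
 */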
STATIC int
xfs_iunlink(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip)
{
	struct xfs_mount	*mp = tp->t_mountp;
	struct xfs_agi		*agi;
	struct xfs_buf		*agibp;
	xfs_agino_t		next_agino;
	xfs_agnumber_t		agno = XFS_INO_TO_AGNO(mp, ip->i_ino);
	xfs_agino_t		agino = XFS_INO_TO_AGINO(mp, ip->i_ino);
	short			bucket_index = agino % XFS_AGI_UNLINKED_BUCKETS;
	int			error;

	ASSERT(VFS_I(ip)->i_nlink == 0);
	ASSERT(VFS_I(ip)->i_mode != 0);
	trace_xfs_iunlink(ip);

	/* Get the agi buffer first.  It ensures lock ordering on the list. */
	error = xfs_read_agi(mp, tp, agno, &agibp);
	if (error)
		return error;
	agi = agibp->b_addr;

	/*
	 * Get the index into the agi hash table for the list this inode will
	 * go on.  Make sure the pointer isn't garbage and that this inode
	 * isn't already on the list.
	 */
	next_agino = be32_to_cpu(agi->agi_unlinked[bucket_index]);
	if (next_agino == agino ||
	    !xfs_verify_agino_or_null(mp, agno, next_agino)) {
		xfs_buf_mark_corrupt(agibp);
		return -EFSCORRUPTED;
	}

	if (next_agino != NULLAGINO) {
		xfs_agino_t		old_agino;

		/*
		 * There is already another inode in the bucket, so point this
		 * inode to the current head of the list.
		 */
		error = xfs_iunlink_update_inode(tp, ip, agno, next_agino,
				&old_agino);
		if (error)
			return error;
		ASSERT(old_agino == NULLAGINO);

		/*
		 * agino has been unlinked, add a backref from the next inode
		 * back to agino.
		 */
		error = xfs_iunlink_add_backref(agibp->b_pag, agino, next_agino);
		if (error)
			return error;
	}

	/* Point the head of the list to point to this inode. */
	return xfs_iunlink_update_bucket(tp, agno, agibp, bucket_index, agino);
}

/* Return the imap, dinode pointer, and buffer for an inode. */
STATIC int
xfs_iunlink_map_ino(
	struct xfs_trans	*tp,
	xfs_agnumber_t		agno,
	xfs_agino_t		agino,
	struct xfs_imap		*imap,
	struct xfs_dinode	**dipp,
	struct xfs_buf		**bpp)
{
	struct xfs_mount	*mp = tp->t_mountp;
	int			error;

	imap->im_blkno = 0;
	error = xfs_imap(mp, tp, XFS_AGINO_TO_INO(mp, agno, agino), imap, 0);
	if (error) {
		xfs_warn(mp, "%s: xfs_imap returned error %d.",
				__func__, error);
		return error;
	}

	error = xfs_imap_to_bp(mp, tp, imap, bpp);
	if (error) {
		xfs_warn(mp, "%s: xfs_imap_to_bp returned error %d.",
				__func__, error);
		return error;
	}

	*dipp = xfs_buf_offset(*bpp, imap->im_boffset);
	return 0;
}

/*
 * Walk the unlinked chain from @head_agino until we find the inode that
 * points to @target_agino.  Return the inode number, map, dinode pointer,
 * and inode cluster buffer of that inode as @agino, @imap, @dipp, and @bpp.
 *
 * @tp, @pag, @head_agino, and @target_agino are input parameters.
 * @agino, @imap, @dipp, and @bpp are all output parameters.
 *
 * Do not call this function if @target_agino is the head of the list.
 */
STATIC int
xfs_iunlink_map_prev(
	struct xfs_trans	*tp,
	xfs_agnumber_t		agno,
	xfs_agino_t		head_agino,
	xfs_agino_t		target_agino,
	xfs_agino_t		*agino,
	struct xfs_imap		*imap,
	struct xfs_dinode	**dipp,
	struct xfs_buf		**bpp,
	struct xfs_perag	*pag)
{
	struct xfs_mount	*mp = tp->t_mountp;
	xfs_agino_t		next_agino;
	int			error;

	ASSERT(head_agino != target_agino);
	*bpp = NULL;

	/* See if our backref cache can find it faster. */
	*agino = xfs_iunlink_lookup_backref(pag, target_agino);
	if (*agino != NULLAGINO) {
		error = xfs_iunlink_map_ino(tp, agno, *agino, imap, dipp, bpp);
		if (error)
			return error;

		if (be32_to_cpu((*dipp)->di_next_unlinked) == target_agino)
			return 0;

		/*
		 * If we get here the cache contents were corrupt, so drop the
		 * buffer and fall back to walking the bucket list.
		 */
		xfs_trans_brelse(tp, *bpp);
		*bpp = NULL;
		WARN_ON_ONCE(1);
	}

	trace_xfs_iunlink_map_prev_fallback(mp, agno);

	/* Otherwise, walk the entire bucket until we find it. */
	next_agino = head_agino;
	while (next_agino != target_agino) {
		xfs_agino_t	unlinked_agino;

		if (*bpp)
			xfs_trans_brelse(tp, *bpp);

		*agino = next_agino;
		error = xfs_iunlink_map_ino(tp, agno, next_agino, imap, dipp,
				bpp);
		if (error)
			return error;

		unlinked_agino = be32_to_cpu((*dipp)->di_next_unlinked);
		/*
		 * Make sure this pointer is valid and isn't an obvious
		 * infinite loop.
		 */
		if (!xfs_verify_agino(mp, agno, unlinked_agino) ||
		    next_agino == unlinked_agino) {
			XFS_CORRUPTION_ERROR(__func__,
					XFS_ERRLEVEL_LOW, mp,
					*dipp, sizeof(**dipp));
			error = -EFSCORRUPTED;
			return error;
		}
		next_agino = unlinked_agino;
	}

	return 0;
}

/*
 * Pull the on-disk inode from the AGI unlinked list.
 */
STATIC int
xfs_iunlink_remove(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip)
{
	struct xfs_mount	*mp = tp->t_mountp;
	struct xfs_agi		*agi;
	struct xfs_buf		*agibp;
	struct xfs_buf		*last_ibp;
	struct xfs_dinode	*last_dip = NULL;
	xfs_agnumber_t		agno = XFS_INO_TO_AGNO(mp, ip->i_ino);
	xfs_agino_t		agino = XFS_INO_TO_AGINO(mp, ip->i_ino);
	xfs_agino_t		next_agino;
	xfs_agino_t		head_agino;
	short			bucket_index = agino % XFS_AGI_UNLINKED_BUCKETS;
	int			error;

	trace_xfs_iunlink_remove(ip);

	/* Get the agi buffer first.  It ensures lock ordering on the list. */
	error = xfs_read_agi(mp, tp, agno, &agibp);
	if (error)
		return error;
	agi = agibp->b_addr;

	/*
	 * Get the index into the agi hash table for the list this inode will
	 * go on.  Make sure the head pointer isn't garbage.
	 */
	head_agino = be32_to_cpu(agi->agi_unlinked[bucket_index]);
	if (!xfs_verify_agino(mp, agno, head_agino)) {
		XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp,
				agi, sizeof(*agi));
		return -EFSCORRUPTED;
	}

	/*
	 * Set our inode's next_unlinked pointer to NULLAGINO and then return
	 * the old pointer value so that we can update whatever was previous
	 * to us in the list to point to whatever was next in the list.
	 */
	error = xfs_iunlink_update_inode(tp, ip, agno, NULLAGINO, &next_agino);
	if (error)
		return error;

	/*
	 * If there was a backref pointing from the next inode back to this
	 * one, remove it because we've removed this inode from the list.
	 *
	 * Later, if this inode was in the middle of the list we'll update
	 * this inode's backref to point from the next inode.
	 */
	if (next_agino != NULLAGINO) {
		error = xfs_iunlink_change_backref(agibp->b_pag, next_agino,
				NULLAGINO);
		if (error)
			return error;
	}

	if (head_agino != agino) {
		struct xfs_imap	imap;
		xfs_agino_t	prev_agino;

		/* We need to search the list for the inode being freed. */
		error = xfs_iunlink_map_prev(tp, agno, head_agino, agino,
				&prev_agino, &imap, &last_dip, &last_ibp,
				agibp->b_pag);
		if (error)
			return error;

		/* Point the previous inode on the list to the next inode. */
		xfs_iunlink_update_dinode(tp, agno, prev_agino, last_ibp,
				last_dip, &imap, next_agino);

		/*
		 * Now we deal with the backref for this inode.  If this inode
		 * pointed at a real inode, change the backref that pointed to
		 * us to point to our old next.  If this inode was the end of
		 * the list, delete the backref that pointed to us.  Note that
		 * change_backref takes care of deleting the backref if
		 * next_agino is NULLAGINO.
		 */
		return xfs_iunlink_change_backref(agibp->b_pag, agino,
				next_agino);
	}

	/* Point the head of the list to the next unlinked inode. */
	return xfs_iunlink_update_bucket(tp, agno, agibp, bucket_index,
			next_agino);
}

/*
 * Look up the specified inode number and, if it is not already marked
 * XFS_ISTALE, mark it stale.  We should only find clean inodes in this
 * lookup that aren't already stale.
 */
static void
xfs_ifree_mark_inode_stale(
	struct xfs_buf		*bp,
	struct xfs_inode	*free_ip,
	xfs_ino_t		inum)
{
	struct xfs_mount	*mp = bp->b_mount;
	struct xfs_perag	*pag = bp->b_pag;
	struct xfs_inode_log_item *iip;
	struct xfs_inode	*ip;

retry:
	rcu_read_lock();
	ip = radix_tree_lookup(&pag->pag_ici_root, XFS_INO_TO_AGINO(mp, inum));

	/* Inode not in memory, nothing to do */
	if (!ip) {
		rcu_read_unlock();
		return;
	}

	/*
	 * because this is an RCU protected lookup, we could find a recently
	 * freed or even reallocated inode during the lookup. We need to check
	 * under the i_flags_lock for a valid inode here. Skip it if it is not
	 * valid, the wrong inode or stale.
	 */
	spin_lock(&ip->i_flags_lock);
	if (ip->i_ino != inum || __xfs_iflags_test(ip, XFS_ISTALE))
		goto out_iflags_unlock;

	/*
	 * Don't try to lock/unlock the current inode, but we _cannot_ skip the
	 * other inodes that we did not find in the list attached to the buffer
	 * and are not already marked stale. If we can't lock it, back off and
	 * retry.
	 */
	if (ip != free_ip) {
		if (!xfs_ilock_nowait(ip, XFS_ILOCK_EXCL)) {
			spin_unlock(&ip->i_flags_lock);
			rcu_read_unlock();
			delay(1);
			goto retry;
		}
	}
	ip->i_flags |= XFS_ISTALE;

	/*
	 * If the inode is flushing, it is already attached to the buffer.  All
	 * we need to do here is mark the inode stale so buffer IO completion
	 * will remove it from the AIL.
	 */
	iip = ip->i_itemp;
	if (__xfs_iflags_test(ip, XFS_IFLUSHING)) {
		ASSERT(!list_empty(&iip->ili_item.li_bio_list));
		ASSERT(iip->ili_last_fields);
		goto out_iunlock;
	}

	/*
	 * Inodes not attached to the buffer can be released immediately.
	 * Everything else has to go through xfs_iflush_abort() on journal
	 * commit as the flock synchronises removal of the inode from the
	 * cluster buffer against inode reclaim.
	 */
	if (!iip || list_empty(&iip->ili_item.li_bio_list))
		goto out_iunlock;

	__xfs_iflags_set(ip, XFS_IFLUSHING);
	spin_unlock(&ip->i_flags_lock);
	rcu_read_unlock();

	/* we have a dirty inode in memory that has not yet been flushed. */
	spin_lock(&iip->ili_lock);
	iip->ili_last_fields = iip->ili_fields;
	iip->ili_fields = 0;
	iip->ili_fsync_fields = 0;
	spin_unlock(&iip->ili_lock);
	ASSERT(iip->ili_last_fields);

	if (ip != free_ip)
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return;

out_iunlock:
	if (ip != free_ip)
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
out_iflags_unlock:
	spin_unlock(&ip->i_flags_lock);
	rcu_read_unlock();
}

/*
 * A big issue when freeing the inode cluster is that we _cannot_ skip any
 * inodes that are in memory - they all must be marked stale and attached to
 * the cluster buffer.
 */
STATIC int
xfs_ifree_cluster(
	struct xfs_inode	*free_ip,
	struct xfs_trans	*tp,
	struct xfs_icluster	*xic)
{
	struct xfs_mount	*mp = free_ip->i_mount;
	struct xfs_ino_geometry	*igeo = M_IGEO(mp);
	struct xfs_buf		*bp;
	xfs_daddr_t		blkno;
	xfs_ino_t		inum = xic->first_ino;
	int			nbufs;
	int			i, j;
	int			ioffset;
	int			error;

	nbufs = igeo->ialloc_blks / igeo->blocks_per_cluster;

	for (j = 0; j < nbufs; j++, inum += igeo->inodes_per_cluster) {
		/*
		 * The allocation bitmap tells us which inodes of the chunk were
		 * physically allocated. Skip the cluster if an inode falls into
		 * a sparse region.
		 */
		ioffset = inum - xic->first_ino;
		if ((xic->alloc & XFS_INOBT_MASK(ioffset)) == 0) {
			ASSERT(ioffset % igeo->inodes_per_cluster == 0);
			continue;
		}

		blkno = XFS_AGB_TO_DADDR(mp, XFS_INO_TO_AGNO(mp, inum),
					 XFS_INO_TO_AGBNO(mp, inum));

		/*
		 * We obtain and lock the backing buffer first in the process
		 * here to ensure dirty inodes attached to the buffer remain in
		 * the flushing state while we mark them stale.
		 *
		 * If we scan the in-memory inodes first, then buffer IO can
		 * complete before we get a lock on it, and hence we may fail
		 * to mark all the active inodes on the buffer stale.
		 */
		error = xfs_trans_get_buf(tp, mp->m_ddev_targp, blkno,
				mp->m_bsize * igeo->blocks_per_cluster,
				XBF_UNMAPPED, &bp);
		if (error)
			return error;

		/*
		 * This buffer may not have been correctly initialised as we
		 * didn't read it from disk. That's not important because we are
		 * only using it to mark the buffer as stale in the log, and to
		 * attach stale cached inodes on it. That means it will never be
		 * dispatched for IO. If it is, we want to know about it, and we
		 * want it to fail. We can achieve this by adding a write
		 * verifier to the buffer.
		 */
		bp->b_ops = &xfs_inode_buf_ops;

		/*
		 * Now we need to set all the cached clean inodes as XFS_ISTALE,
		 * too. This requires lookups, and will skip inodes that we've
		 * already marked XFS_ISTALE.
		 */
		for (i = 0; i < igeo->inodes_per_cluster; i++)
			xfs_ifree_mark_inode_stale(bp, free_ip, inum + i);

		xfs_trans_stale_inode_buf(tp, bp);
		xfs_trans_binval(tp, bp);
	}
	return 0;
}

/*
 * This is called to return an inode to the inode free list.
 * The inode should already be truncated to 0 length and have
 * no pages associated with it.  This routine also assumes that
 * the inode is already a part of the transaction.
 *
 * The on-disk copy of the inode will have been added to the list
 * of unlinked inodes in the AGI. We need to remove the inode from
 * that list atomically with respect to freeing it here.
 */
int
xfs_ifree(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip)
{
	int			error;
	struct xfs_icluster	xic = { 0 };
	struct xfs_inode_log_item *iip = ip->i_itemp;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
	ASSERT(VFS_I(ip)->i_nlink == 0);
	ASSERT(ip->i_df.if_nextents == 0);
	ASSERT(ip->i_disk_size == 0 || !S_ISREG(VFS_I(ip)->i_mode));
	ASSERT(ip->i_nblocks == 0);

	/*
	 * Pull the on-disk inode from the AGI unlinked list.
	 */
	error = xfs_iunlink_remove(tp, ip);
	if (error)
		return error;

	error = xfs_difree(tp, ip->i_ino, &xic);
	if (error)
		return error;

	/*
	 * Free any local-format data sitting around before we reset the
	 * data fork to extents format.  Note that the attr fork data has
	 * already been freed by xfs_attr_inactive.
	 */
	if (ip->i_df.if_format == XFS_DINODE_FMT_LOCAL) {
		kmem_free(ip->i_df.if_u1.if_data);
		ip->i_df.if_u1.if_data = NULL;
		ip->i_df.if_bytes = 0;
	}

	VFS_I(ip)->i_mode = 0;		/* mark incore inode as free */
	ip->i_diflags = 0;
	ip->i_diflags2 = ip->i_mount->m_ino_geo.new_diflags2;
	ip->i_forkoff = 0;		/* mark the attr fork not in use */
	ip->i_df.if_format = XFS_DINODE_FMT_EXTENTS;
	if (xfs_iflags_test(ip, XFS_IPRESERVE_DM_FIELDS))
		xfs_iflags_clear(ip, XFS_IPRESERVE_DM_FIELDS);

	/* Don't attempt to replay owner changes for a deleted inode */
	spin_lock(&iip->ili_lock);
	iip->ili_fields &= ~(XFS_ILOG_AOWNER | XFS_ILOG_DOWNER);
	spin_unlock(&iip->ili_lock);

	/*
	 * Bump the generation count so no one will be confused
	 * by reincarnations of this inode.
	 */
	VFS_I(ip)->i_generation++;
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);

	if (xic.deleted)
		error = xfs_ifree_cluster(ip, tp, &xic);

	return error;
}

/*
 * This is called to unpin an inode.  The caller must have the inode locked
 * in at least shared mode so that the buffer cannot be subsequently pinned
 * once someone is waiting for it to be unpinned.
 */
static void
xfs_iunpin(
	struct xfs_inode	*ip)
{
	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));

	trace_xfs_inode_unpin_nowait(ip, _RET_IP_);

	/* Give the log a push to start the unpinning I/O */
	xfs_log_force_lsn(ip->i_mount, ip->i_itemp->ili_last_lsn, 0, NULL);

}

static void
__xfs_iunpin_wait(
	struct xfs_inode	*ip)
{
	wait_queue_head_t *wq = bit_waitqueue(&ip->i_flags, __XFS_IPINNED_BIT);
	DEFINE_WAIT_BIT(wait, &ip->i_flags, __XFS_IPINNED_BIT);

	xfs_iunpin(ip);

	do {
		prepare_to_wait(wq, &wait.wq_entry, TASK_UNINTERRUPTIBLE);
		if (xfs_ipincount(ip))
			io_schedule();
	} while (xfs_ipincount(ip));
	finish_wait(wq, &wait.wq_entry);
}

void
xfs_iunpin_wait(
	struct xfs_inode	*ip)
{
	if (xfs_ipincount(ip))
		__xfs_iunpin_wait(ip);
}

/*
 * Removing an inode from the namespace involves removing the directory entry
 * and dropping the link count on the inode. Removing the directory entry can
 * result in locking an AGF (directory blocks were freed) and removing a link
 * count can result in placing the inode on an unlinked list which results in
 * locking an AGI.
 *
 * The big problem here is that we have an ordering constraint on AGF and AGI
 * locking - inode allocation locks the AGI, then can allocate a new extent for
 * new inodes, locking the AGF after the AGI. Similarly, freeing the inode
 * removes the inode from the unlinked list, requiring that we lock the AGI
 * first, and then freeing the inode can result in an inode chunk being freed
 * and hence freeing disk space requiring that we lock an AGF.
 *
 * Hence the ordering that is imposed by other parts of the code is AGI before
 * AGF. This means we cannot remove the directory entry before we drop the inode
 * reference count and put it on the unlinked list as this results in a lock
 * order of AGF then AGI, and this can deadlock against inode allocation and
 * freeing. Therefore we must drop the link counts before we remove the
 * directory entry.
 *
 * This is still safe from a transactional point of view - it is not until we
 * get to xfs_defer_finish() that we have the possibility of multiple
 * transactions in this operation. Hence as long as we remove the directory
 * entry and drop the link count in the first transaction of the remove
 * operation, there are no transactional constraints on the ordering here.
 */
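
/*
 * In short, the order the code below preserves is (sketch only):
 *
 *	xfs_droplink(tp, ip);			(may lock the AGI)
 *	xfs_dir_removename(tp, dp, ...);	(may lock an AGF)
 *
 * i.e. AGI before AGF, the same order used by inode allocation and
 * freeing.
 */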
int
xfs_remove(
	xfs_inode_t             *dp,
	struct xfs_name		*name,
	xfs_inode_t		*ip)
{
	xfs_mount_t		*mp = dp->i_mount;
	xfs_trans_t             *tp = NULL;
	int			is_dir = S_ISDIR(VFS_I(ip)->i_mode);
	int                     error = 0;
	uint			resblks;

	trace_xfs_remove(dp, name);

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	error = xfs_qm_dqattach(dp);
	if (error)
		goto std_return;

	error = xfs_qm_dqattach(ip);
	if (error)
		goto std_return;

	/*
	 * We try to get the real space reservation first,
	 * allowing for directory btree deletion(s) implying
	 * possible bmap insert(s).  If we can't get the space
	 * reservation then we use 0 instead, and avoid the bmap
	 * btree insert(s) in the directory code by, if the bmap
	 * insert tries to happen, instead trimming the LAST
	 * block from the directory.
	 */
	resblks = XFS_REMOVE_SPACE_RES(mp);
	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_remove, resblks, 0, 0, &tp);
	if (error == -ENOSPC) {
		resblks = 0;
		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_remove, 0, 0, 0,
				&tp);
	}
	if (error) {
		ASSERT(error != -ENOSPC);
		goto std_return;
	}

	xfs_lock_two_inodes(dp, XFS_ILOCK_EXCL, ip, XFS_ILOCK_EXCL);

	xfs_trans_ijoin(tp, dp, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);

	/*
	 * If we're removing a directory perform some additional validation.
	 */
	if (is_dir) {
		ASSERT(VFS_I(ip)->i_nlink >= 2);
		if (VFS_I(ip)->i_nlink != 2) {
			error = -ENOTEMPTY;
			goto out_trans_cancel;
		}
		if (!xfs_dir_isempty(ip)) {
			error = -ENOTEMPTY;
			goto out_trans_cancel;
		}

		/* Drop the link from ip's "..".  */
		error = xfs_droplink(tp, dp);
		if (error)
			goto out_trans_cancel;

		/* Drop the "." link from ip to self.  */
		error = xfs_droplink(tp, ip);
		if (error)
			goto out_trans_cancel;
	} else {
		/*
		 * When removing a non-directory we need to log the parent
		 * inode here.  For a directory this is done implicitly
		 * by the xfs_droplink call for the ".." entry.
		 */
		xfs_trans_log_inode(tp, dp, XFS_ILOG_CORE);
	}
	xfs_trans_ichgtime(tp, dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);

	/* Drop the link from dp to ip. */
	error = xfs_droplink(tp, ip);
	if (error)
		goto out_trans_cancel;

	error = xfs_dir_removename(tp, dp, name, ip->i_ino, resblks);
	if (error) {
		ASSERT(error != -ENOENT);
		goto out_trans_cancel;
	}

	/*
	 * If this is a synchronous mount, make sure that the
	 * remove transaction goes to disk before returning to
	 * the user.
	 */
	if (mp->m_flags & (XFS_MOUNT_WSYNC|XFS_MOUNT_DIRSYNC))
		xfs_trans_set_sync(tp);

	error = xfs_trans_commit(tp);
	if (error)
		goto std_return;

	if (is_dir && xfs_inode_is_filestream(ip))
		xfs_filestream_deassociate(ip);

	return 0;

 out_trans_cancel:
	xfs_trans_cancel(tp);
 std_return:
	return error;
}

/*
 * Enter all inodes for a rename transaction into a sorted array.
 */
#define __XFS_SORT_INODES	5
STATIC void
xfs_sort_for_rename(
	struct xfs_inode	*dp1,	/* in: old (source) directory inode */
	struct xfs_inode	*dp2,	/* in: new (target) directory inode */
	struct xfs_inode	*ip1,	/* in: inode of old entry */
	struct xfs_inode	*ip2,	/* in: inode of new entry */
	struct xfs_inode	*wip,	/* in: whiteout inode */
	struct xfs_inode	**i_tab,/* out: sorted array of inodes */
	int			*num_inodes)  /* in/out: inodes in array */
{
	int			i, j;

	ASSERT(*num_inodes == __XFS_SORT_INODES);
	memset(i_tab, 0, *num_inodes * sizeof(struct xfs_inode *));

	/*
	 * i_tab contains a list of pointers to inodes.  We initialize
	 * the table here & we'll sort it.  We will then use it to
	 * order the acquisition of the inode locks.
	 *
	 * Note that the table may contain duplicates.  e.g., dp1 == dp2.
	 */
	i = 0;
	i_tab[i++] = dp1;
	i_tab[i++] = dp2;
	i_tab[i++] = ip1;
	if (ip2)
		i_tab[i++] = ip2;
	if (wip)
		i_tab[i++] = wip;
	*num_inodes = i;

	/*
	 * Sort the elements via bubble sort.  (Remember, there are at
	 * most 5 elements to sort, so this is adequate.)
	 */
	for (i = 0; i < *num_inodes; i++) {
		for (j = 1; j < *num_inodes; j++) {
			if (i_tab[j]->i_ino < i_tab[j-1]->i_ino) {
				struct xfs_inode *temp = i_tab[j];
				i_tab[j] = i_tab[j-1];
				i_tab[j-1] = temp;
			}
		}
	}
}
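
/*
 * Worked example (illustrative inode numbers only): a rename with
 * dp1->i_ino = 17, dp2->i_ino = 9, ip1->i_ino = 23 and no target or
 * whiteout inode yields num_inodes = 3 and i_tab = { 9, 17, 23 }, which
 * is the order xfs_lock_inodes() will take the inode locks in.
 */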

static int
xfs_finish_rename(
	struct xfs_trans	*tp)
{
	/*
	 * If this is a synchronous mount, make sure that the rename transaction
	 * goes to disk before returning to the user.
	 */
	if (tp->t_mountp->m_flags & (XFS_MOUNT_WSYNC|XFS_MOUNT_DIRSYNC))
		xfs_trans_set_sync(tp);

	return xfs_trans_commit(tp);
}

/*
 * xfs_cross_rename()
 *
 * Responsible for handling the RENAME_EXCHANGE flag in the renameat2() syscall.
 */
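
/*
 * Usage sketch (illustrative only): userspace reaches this path with
 * something like
 *
 *	renameat2(AT_FDCWD, "a", AT_FDCWD, "b", RENAME_EXCHANGE);
 *
 * which atomically swaps the two directory entries; neither inode loses
 * its last link, so no whiteout or target removal is involved.
 */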
STATIC int
xfs_cross_rename(
	struct xfs_trans	*tp,
	struct xfs_inode	*dp1,
	struct xfs_name		*name1,
	struct xfs_inode	*ip1,
	struct xfs_inode	*dp2,
	struct xfs_name		*name2,
	struct xfs_inode	*ip2,
	int			spaceres)
{
	int		error = 0;
	int		ip1_flags = 0;
	int		ip2_flags = 0;
	int		dp2_flags = 0;

	/* Swap inode number for dirent in first parent */
	error = xfs_dir_replace(tp, dp1, name1, ip2->i_ino, spaceres);
	if (error)
		goto out_trans_abort;

	/* Swap inode number for dirent in second parent */
	error = xfs_dir_replace(tp, dp2, name2, ip1->i_ino, spaceres);
	if (error)
		goto out_trans_abort;

	/*
	 * If we're renaming one or more directories across different parents,
	 * update the respective ".." entries (and link counts) to match the new
	 * parents.
	 */
	if (dp1 != dp2) {
		dp2_flags = XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG;

		if (S_ISDIR(VFS_I(ip2)->i_mode)) {
			error = xfs_dir_replace(tp, ip2, &xfs_name_dotdot,
						dp1->i_ino, spaceres);
			if (error)
				goto out_trans_abort;

			/* transfer ip2 ".." reference to dp1 */
			if (!S_ISDIR(VFS_I(ip1)->i_mode)) {
				error = xfs_droplink(tp, dp2);
				if (error)
					goto out_trans_abort;
				xfs_bumplink(tp, dp1);
			}

			/*
			 * Although ip1 isn't changed here, userspace needs
			 * to be warned about the change, so that applications
			 * relying on it (like backup ones), will properly
			 * notify the change
			 */
			ip1_flags |= XFS_ICHGTIME_CHG;
			ip2_flags |= XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG;
		}

		if (S_ISDIR(VFS_I(ip1)->i_mode)) {
			error = xfs_dir_replace(tp, ip1, &xfs_name_dotdot,
						dp2->i_ino, spaceres);
			if (error)
				goto out_trans_abort;

			/* transfer ip1 ".." reference to dp2 */
			if (!S_ISDIR(VFS_I(ip2)->i_mode)) {
				error = xfs_droplink(tp, dp1);
				if (error)
					goto out_trans_abort;
				xfs_bumplink(tp, dp2);
			}

			/*
			 * Although ip2 isn't changed here, userspace needs
			 * to be warned about the change, so that applications
			 * relying on it (like backup ones), will properly
			 * notify the change
			 */
			ip1_flags |= XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG;
			ip2_flags |= XFS_ICHGTIME_CHG;
		}
	}

	if (ip1_flags) {
		xfs_trans_ichgtime(tp, ip1, ip1_flags);
		xfs_trans_log_inode(tp, ip1, XFS_ILOG_CORE);
	}
	if (ip2_flags) {
		xfs_trans_ichgtime(tp, ip2, ip2_flags);
		xfs_trans_log_inode(tp, ip2, XFS_ILOG_CORE);
	}
	if (dp2_flags) {
		xfs_trans_ichgtime(tp, dp2, dp2_flags);
		xfs_trans_log_inode(tp, dp2, XFS_ILOG_CORE);
	}
	xfs_trans_ichgtime(tp, dp1, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
	xfs_trans_log_inode(tp, dp1, XFS_ILOG_CORE);
	return xfs_finish_rename(tp);

out_trans_abort:
	xfs_trans_cancel(tp);
	return error;
}

/*
 * xfs_rename_alloc_whiteout()
 *
 * Return a referenced, unlinked, unlocked inode that can be used as a
 * whiteout in a rename transaction. We use a tmpfile inode here so that if we
 * crash between allocating the inode and linking it into the rename
 * transaction, recovery will free the inode and we won't leak it.
 */
static int
xfs_rename_alloc_whiteout(
	struct user_namespace	*mnt_userns,
	struct xfs_inode	*dp,
	struct xfs_inode	**wip)
{
	struct xfs_inode	*tmpfile;
	int			error;

	error = xfs_create_tmpfile(mnt_userns, dp, S_IFCHR | WHITEOUT_MODE,
				   &tmpfile);
	if (error)
		return error;

	/*
	 * Prepare the tmpfile inode as if it were created through the VFS.
	 * Complete the inode setup and flag it as linkable.  nlink is already
	 * zero, so we can skip the drop_nlink.
	 */
	xfs_setup_iops(tmpfile);
	xfs_finish_inode_setup(tmpfile);
	VFS_I(tmpfile)->i_state |= I_LINKABLE;

	*wip = tmpfile;
	return 0;
}
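
/*
 * Usage note (an assumption about callers, not something enforced here):
 * RENAME_WHITEOUT is requested via renameat2(), e.g. by overlayfs, and
 * the tmpfile allocated above becomes the whiteout character device node
 * that xfs_rename() leaves at the source name.
 */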

/*
 * xfs_rename
 */
int
xfs_rename(
	struct user_namespace	*mnt_userns,
	struct xfs_inode	*src_dp,
	struct xfs_name		*src_name,
	struct xfs_inode	*src_ip,
	struct xfs_inode	*target_dp,
	struct xfs_name		*target_name,
	struct xfs_inode	*target_ip,
	unsigned int		flags)
{
	struct xfs_mount	*mp = src_dp->i_mount;
	struct xfs_trans	*tp;
	struct xfs_inode	*wip = NULL;		/* whiteout inode */
	struct xfs_inode	*inodes[__XFS_SORT_INODES];
	int			i;
	int			num_inodes = __XFS_SORT_INODES;
	bool			new_parent = (src_dp != target_dp);
	bool			src_is_directory = S_ISDIR(VFS_I(src_ip)->i_mode);
	int			spaceres;
	int			error;

	trace_xfs_rename(src_dp, target_dp, src_name, target_name);

	if ((flags & RENAME_EXCHANGE) && !target_ip)
		return -EINVAL;

	/*
	 * If we are doing a whiteout operation, allocate the whiteout inode
	 * we will be placing at the target and ensure the type is set
	 * appropriately.
	 */
	if (flags & RENAME_WHITEOUT) {
		ASSERT(!(flags & (RENAME_NOREPLACE | RENAME_EXCHANGE)));
		error = xfs_rename_alloc_whiteout(mnt_userns, target_dp, &wip);
		if (error)
			return error;

		/* setup target dirent info as whiteout */
		src_name->type = XFS_DIR3_FT_CHRDEV;
	}

	xfs_sort_for_rename(src_dp, target_dp, src_ip, target_ip, wip,
				inodes, &num_inodes);

	spaceres = XFS_RENAME_SPACE_RES(mp, target_name->len);
	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_rename, spaceres, 0, 0, &tp);
	if (error == -ENOSPC) {
		spaceres = 0;
		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_rename, 0, 0, 0,
				&tp);
	}
	if (error)
		goto out_release_wip;

	/*
	 * Attach the dquots to the inodes
	 */
	error = xfs_qm_vop_rename_dqattach(inodes);
	if (error)
		goto out_trans_cancel;

	/*
	 * Lock all the participating inodes. Depending upon whether
	 * the target_name exists in the target directory, and
	 * whether the target directory is the same as the source
	 * directory, we can lock from 2 to 4 inodes.
	 */
	xfs_lock_inodes(inodes, num_inodes, XFS_ILOCK_EXCL);

	/*
	 * Join all the inodes to the transaction. From this point on,
	 * we can rely on either trans_commit or trans_cancel to unlock
	 * them.
	 */
	xfs_trans_ijoin(tp, src_dp, XFS_ILOCK_EXCL);
	if (new_parent)
		xfs_trans_ijoin(tp, target_dp, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, src_ip, XFS_ILOCK_EXCL);
	if (target_ip)
		xfs_trans_ijoin(tp, target_ip, XFS_ILOCK_EXCL);
	if (wip)
		xfs_trans_ijoin(tp, wip, XFS_ILOCK_EXCL);

	/*
	 * If we are using project inheritance, we only allow renames
	 * into our tree when the project IDs are the same; else the
	 * tree quota mechanism would be circumvented.
	 */
	if (unlikely((target_dp->i_diflags & XFS_DIFLAG_PROJINHERIT) &&
		     target_dp->i_projid != src_ip->i_projid)) {
		error = -EXDEV;
		goto out_trans_cancel;
	}

	/* RENAME_EXCHANGE is unique from here on. */
	if (flags & RENAME_EXCHANGE)
		return xfs_cross_rename(tp, src_dp, src_name, src_ip,
					target_dp, target_name, target_ip,
					spaceres);

	/*
	 * Check for expected errors before we dirty the transaction
	 * so we can return an error without a transaction abort.
	 *
	 * Extent count overflow check:
	 *
	 * From the perspective of src_dp, a rename operation is essentially a
	 * directory entry remove operation. Hence the only place where we check
	 * for extent count overflow for src_dp is in
	 * xfs_bmap_del_extent_real(). xfs_bmap_del_extent_real() returns
	 * -ENOSPC when it detects a possible extent count overflow and in
	 * response, the higher layers of directory handling code do the
	 * following:
	 * 1. Data/Free blocks: XFS lets these blocks linger until a
	 *    future remove operation removes them.
	 * 2. Dabtree blocks: XFS swaps the blocks with the last block in the
	 *    Leaf space and unmaps the last block.
	 *
	 * For target_dp, there are two cases depending on whether the
	 * destination directory entry exists or not.
	 *
	 * When destination directory entry does not exist (i.e. target_ip ==
	 * NULL), extent count overflow check is performed only when transaction
	 * has a non-zero sized space reservation associated with it.  With a
	 * zero-sized space reservation, XFS allows a rename operation to
	 * continue only when the directory has sufficient free space in its
	 * data/leaf/free space blocks to hold the new entry.
	 *
	 * When destination directory entry exists (i.e. target_ip != NULL), all
	 * we need to do is change the inode number associated with the already
	 * existing entry. Hence there is no need to perform an extent count
	 * overflow check.
	 */
	if (target_ip == NULL) {
		/*
		 * If there's no space reservation, check the entry will
		 * fit before actually inserting it.
		 */
		if (!spaceres) {
			error = xfs_dir_canenter(tp, target_dp, target_name);
			if (error)
				goto out_trans_cancel;
		} else {
			error = xfs_iext_count_may_overflow(target_dp,
					XFS_DATA_FORK,
					XFS_IEXT_DIR_MANIP_CNT(mp));
			if (error)
				goto out_trans_cancel;
		}
	} else {
		/*
		 * If target exists and it's a directory, check that whether
		 * it can be destroyed.
		 */
		if (S_ISDIR(VFS_I(target_ip)->i_mode) &&
		    (!xfs_dir_isempty(target_ip) ||
		     (VFS_I(target_ip)->i_nlink > 2))) {
			error = -EEXIST;
			goto out_trans_cancel;
		}
	}

	/*
	 * Lock the AGI buffers we need to handle bumping the nlink of the
	 * whiteout inode off the unlinked list and to handle dropping the
	 * nlink of the target inode.  Per locking order rules, do this in
	 * increasing AG order and before directory block allocation tries to
	 * grab AGFs because we grab AGIs before AGFs.
	 *
	 * The (vfs) caller must ensure that if src is a directory then
	 * target_ip is either null or an empty directory.
	 */
	for (i = 0; i < num_inodes && inodes[i] != NULL; i++) {
		if (inodes[i] == wip ||
		    (inodes[i] == target_ip &&
		     (VFS_I(target_ip)->i_nlink == 1 || src_is_directory))) {
			struct xfs_buf	*bp;
			xfs_agnumber_t	agno;

			agno = XFS_INO_TO_AGNO(mp, inodes[i]->i_ino);
			error = xfs_read_agi(mp, tp, agno, &bp);
			if (error)
				goto out_trans_cancel;
		}
	}

	/*
	 * Directory entry creation below may acquire the AGF. Remove
	 * the whiteout from the unlinked list first to preserve correct
	 * AGI/AGF locking order. This dirties the transaction so failures
	 * after this point will abort and log recovery will clean up the
	 * mess.
	 *
	 * For whiteouts, we need to bump the link count on the whiteout
	 * inode. After this point, we have a real link, clear the tmpfile
	 * state flag from the inode so it doesn't accidentally get misused
	 * in future.
	 */
	if (wip) {
		ASSERT(VFS_I(wip)->i_nlink == 0);
		error = xfs_iunlink_remove(tp, wip);
		if (error)
			goto out_trans_cancel;

		xfs_bumplink(tp, wip);
		VFS_I(wip)->i_state &= ~I_LINKABLE;
	}

	/*
	 * Set up the target.
	 */
	if (target_ip == NULL) {
		/*
		 * If target does not exist and the rename crosses
		 * directories, adjust the target directory link count
		 * to account for the ".." reference from the new entry.
		 */
		error = xfs_dir_createname(tp, target_dp, target_name,
3240
					   src_ip->i_ino, spaceres);
D
Dave Chinner 已提交
3241
		if (error)
3242
			goto out_trans_cancel;
D
Dave Chinner 已提交
3243 3244 3245 3246 3247

		xfs_trans_ichgtime(tp, target_dp,
					XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);

		if (new_parent && src_is_directory) {
3248
			xfs_bumplink(tp, target_dp);
D
Dave Chinner 已提交
3249 3250 3251 3252 3253 3254 3255 3256 3257 3258 3259 3260
		}
	} else { /* target_ip != NULL */
		/*
		 * Link the source inode under the target name.
		 * If the source inode is a directory and we are moving
		 * it across directories, its ".." entry will be
		 * inconsistent until we replace that down below.
		 *
		 * In case there is already an entry with the same
		 * name at the destination directory, remove it first.
		 */
		error = xfs_dir_replace(tp, target_dp, target_name,
					src_ip->i_ino, spaceres);
		if (error)
			goto out_trans_cancel;

		xfs_trans_ichgtime(tp, target_dp,
					XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);

		/*
		 * Decrement the link count on the target since the target
		 * dir no longer points to it.
		 */
		error = xfs_droplink(tp, target_ip);
		if (error)
			goto out_trans_cancel;

		if (src_is_directory) {
			/*
			 * Drop the link from the old "." entry.
			 */
			error = xfs_droplink(tp, target_ip);
			if (error)
				goto out_trans_cancel;
		}
	} /* target_ip != NULL */

	/*
	 * Remove the source.
	 */
	if (new_parent && src_is_directory) {
		/*
		 * Rewrite the ".." entry to point to the new
		 * directory.
		 */
		error = xfs_dir_replace(tp, src_ip, &xfs_name_dotdot,
					target_dp->i_ino, spaceres);
		ASSERT(error != -EEXIST);
		if (error)
			goto out_trans_cancel;
	}

	/*
	 * We always want to hit the ctime on the source inode.
	 *
	 * This isn't strictly required by the standards since the source
	 * inode isn't really being changed, but old unix file systems did
	 * it and some incremental backup programs won't work without it.
	 */
	xfs_trans_ichgtime(tp, src_ip, XFS_ICHGTIME_CHG);
	xfs_trans_log_inode(tp, src_ip, XFS_ILOG_CORE);

	/*
	 * Adjust the link count on src_dp.  This is necessary when
	 * renaming a directory, either within one parent when
	 * the target existed, or across two parent directories.
	 */
	if (src_is_directory && (new_parent || target_ip != NULL)) {

		/*
		 * Decrement link count on src_directory since the
		 * entry that's moved no longer points to it.
		 */
		error = xfs_droplink(tp, src_dp);
		if (error)
			goto out_trans_cancel;
	}

	/*
	 * For whiteouts, we only need to update the source dirent with the
	 * inode number of the whiteout inode rather than removing it
	 * altogether.
	 */
	if (wip) {
		error = xfs_dir_replace(tp, src_dp, src_name, wip->i_ino,
					spaceres);
	} else {
		/*
		 * NOTE: We don't need to check for extent count overflow here
		 * because the dir remove name code will leave the dir block in
		 * place if the extent count would overflow.
		 */
		error = xfs_dir_removename(tp, src_dp, src_name, src_ip->i_ino,
					   spaceres);
	}

	if (error)
		goto out_trans_cancel;

	xfs_trans_ichgtime(tp, src_dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
	xfs_trans_log_inode(tp, src_dp, XFS_ILOG_CORE);
	if (new_parent)
		xfs_trans_log_inode(tp, target_dp, XFS_ILOG_CORE);

	error = xfs_finish_rename(tp);
	if (wip)
		xfs_irele(wip);
	return error;

out_trans_cancel:
	xfs_trans_cancel(tp);
out_release_wip:
	if (wip)
		xfs_irele(wip);
	return error;
}
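
/*
 * Illustrative sketch (not code from this file): the VFS ->rename()
 * entry point in xfs_iops.c drives xfs_rename() roughly as below,
 * building struct xfs_name from the dentries and passing the RENAME_*
 * flags straight through.  The helper names are assumptions here and
 * may differ between kernel versions.
 *
 *	struct xfs_name oname, nname;
 *
 *	xfs_dentry_to_name(&oname, odentry);
 *	xfs_dentry_to_name(&nname, ndentry);
 *	error = xfs_rename(XFS_I(odir), &oname, XFS_I(d_inode(odentry)),
 *			   XFS_I(ndir), &nname,
 *			   d_inode(ndentry) ? XFS_I(d_inode(ndentry)) : NULL,
 *			   flags);
 */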

static int
xfs_iflush(
	struct xfs_inode	*ip,
	struct xfs_buf		*bp)
{
	struct xfs_inode_log_item *iip = ip->i_itemp;
	struct xfs_dinode	*dip;
	struct xfs_mount	*mp = ip->i_mount;
	int			error;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
	ASSERT(xfs_iflags_test(ip, XFS_IFLUSHING));
	ASSERT(ip->i_df.if_format != XFS_DINODE_FMT_BTREE ||
	       ip->i_df.if_nextents > XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK));
	ASSERT(iip->ili_item.li_buf == bp);

	dip = xfs_buf_offset(bp, ip->i_imap.im_boffset);

	/*
	 * We don't flush the inode if any of the following checks fail, but we
	 * do still update the log item and attach to the backing buffer as if
	 * the flush happened. This is a formality to facilitate predictable
	 * error handling as the caller will shutdown and fail the buffer.
	 */
	error = -EFSCORRUPTED;
	if (XFS_TEST_ERROR(dip->di_magic != cpu_to_be16(XFS_DINODE_MAGIC),
			       mp, XFS_ERRTAG_IFLUSH_1)) {
		xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
			"%s: Bad inode %Lu magic number 0x%x, ptr "PTR_FMT,
			__func__, ip->i_ino, be16_to_cpu(dip->di_magic), dip);
		goto flush_out;
	}
	if (S_ISREG(VFS_I(ip)->i_mode)) {
		if (XFS_TEST_ERROR(
		    ip->i_df.if_format != XFS_DINODE_FMT_EXTENTS &&
		    ip->i_df.if_format != XFS_DINODE_FMT_BTREE,
		    mp, XFS_ERRTAG_IFLUSH_3)) {
			xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
				"%s: Bad regular inode %Lu, ptr "PTR_FMT,
				__func__, ip->i_ino, ip);
			goto flush_out;
		}
	} else if (S_ISDIR(VFS_I(ip)->i_mode)) {
		if (XFS_TEST_ERROR(
		    ip->i_df.if_format != XFS_DINODE_FMT_EXTENTS &&
		    ip->i_df.if_format != XFS_DINODE_FMT_BTREE &&
		    ip->i_df.if_format != XFS_DINODE_FMT_LOCAL,
		    mp, XFS_ERRTAG_IFLUSH_4)) {
			xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
				"%s: Bad directory inode %Lu, ptr "PTR_FMT,
				__func__, ip->i_ino, ip);
			goto flush_out;
		}
	}
	if (XFS_TEST_ERROR(ip->i_df.if_nextents + xfs_ifork_nextents(ip->i_afp) >
				ip->i_nblocks, mp, XFS_ERRTAG_IFLUSH_5)) {
		xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
			"%s: detected corrupt incore inode %Lu, "
			"total extents = %d, nblocks = %Ld, ptr "PTR_FMT,
			__func__, ip->i_ino,
			ip->i_df.if_nextents + xfs_ifork_nextents(ip->i_afp),
			ip->i_nblocks, ip);
		goto flush_out;
	}
	if (XFS_TEST_ERROR(ip->i_forkoff > mp->m_sb.sb_inodesize,
				mp, XFS_ERRTAG_IFLUSH_6)) {
		xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
			"%s: bad inode %Lu, forkoff 0x%x, ptr "PTR_FMT,
			__func__, ip->i_ino, ip->i_forkoff, ip);
		goto flush_out;
	}

	/*
	 * Inode item log recovery for v2 inodes is dependent on the flushiter
	 * count for correct sequencing.  We bump the flush iteration count so
	 * we can detect flushes which postdate a log record during recovery.
	 * This is redundant as we now log every change and hence this can't
	 * happen, but we still need to do it to ensure backwards compatibility
	 * with old kernels that predate logging all inode changes.
	 */
	if (!xfs_sb_version_has_v3inode(&mp->m_sb))
		ip->i_flushiter++;

	/*
	 * If there are inline format data / attr forks attached to this inode,
	 * make sure they are not corrupt.
	 */
	if (ip->i_df.if_format == XFS_DINODE_FMT_LOCAL &&
	    xfs_ifork_verify_local_data(ip))
		goto flush_out;
	if (ip->i_afp && ip->i_afp->if_format == XFS_DINODE_FMT_LOCAL &&
	    xfs_ifork_verify_local_attr(ip))
		goto flush_out;

	/*
	 * Copy the dirty parts of the inode into the on-disk inode.  We always
	 * copy out the core of the inode, because if the inode is dirty at all
	 * the core must be.
	 */
	xfs_inode_to_disk(ip, dip, iip->ili_item.li_lsn);

	/* Wrap, we never let the log put out DI_MAX_FLUSH */
	if (!xfs_sb_version_has_v3inode(&mp->m_sb)) {
		if (ip->i_flushiter == DI_MAX_FLUSH)
			ip->i_flushiter = 0;
	}

	xfs_iflush_fork(ip, dip, iip, XFS_DATA_FORK);
	if (XFS_IFORK_Q(ip))
		xfs_iflush_fork(ip, dip, iip, XFS_ATTR_FORK);

	/*
	 * We've recorded everything logged in the inode, so we'd like to clear
	 * the ili_fields bits so we don't log and flush things unnecessarily.
	 * However, we can't stop logging all this information until the data
	 * we've copied into the disk buffer is written to disk.  If we did we
	 * might overwrite the copy of the inode in the log with all the data
	 * after re-logging only part of it, and in the face of a crash we
	 * wouldn't have all the data we need to recover.
	 *
	 * What we do is move the bits to the ili_last_fields field.  When
	 * logging the inode, these bits are moved back to the ili_fields field.
	 * In the xfs_buf_inode_iodone() routine we clear ili_last_fields, since
	 * we know that the information those bits represent is permanently on
	 * disk.  As long as the flush completes before the inode is logged
	 * again, then both ili_fields and ili_last_fields will be cleared.
	 */
	error = 0;
flush_out:
	spin_lock(&iip->ili_lock);
	iip->ili_last_fields = iip->ili_fields;
	iip->ili_fields = 0;
	iip->ili_fsync_fields = 0;
	spin_unlock(&iip->ili_lock);

	/*
	 * Store the current LSN of the inode so that we can tell whether the
	 * item has moved in the AIL from xfs_buf_inode_iodone().
	 */
	xfs_trans_ail_copy_lsn(mp->m_ail, &iip->ili_flush_lsn,
				&iip->ili_item.li_lsn);

	/* generate the checksum. */
	xfs_dinode_calc_crc(mp, dip);
	return error;
}

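/*
 * Illustrative timeline of the dirty-state handoff performed by
 * xfs_iflush() above (a sketch of the mechanism described in the
 * comment, not code from this file):
 *
 *	xfs_trans_log_inode()		ili_fields |= <flags being logged>
 *	xfs_iflush()			ili_last_fields = ili_fields;
 *					ili_fields = 0;
 *	<cluster buffer I/O completes>
 *	xfs_buf_inode_iodone()		ili_last_fields = 0;
 *
 * If the inode is logged again between the flush and the I/O
 * completion, ili_fields becomes non-zero again, so the inode remains
 * dirty and a later flush will write out the re-logged changes.
 */
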
/*
 * Non-blocking flush of dirty inode metadata into the backing buffer.
 *
 * The caller must have a reference to the inode and hold the cluster buffer
 * locked. The function will walk across all the inodes on the cluster buffer
 * it can find and lock without blocking, and flush them to the cluster buffer.
 *
 * On successful flushing of at least one inode, the caller must write out the
 * buffer and release it. If no inodes are flushed, -EAGAIN will be returned
 * and the caller needs to release the buffer. On failure, the filesystem will
 * be shut down, the buffer will have been unlocked and released, and
 * EFSCORRUPTED will be returned.
 */
int
xfs_iflush_cluster(
	struct xfs_buf		*bp)
{
	struct xfs_mount	*mp = bp->b_mount;
	struct xfs_log_item	*lip, *n;
	struct xfs_inode	*ip;
	struct xfs_inode_log_item *iip;
	int			clcount = 0;
	int			error = 0;

	/*
	 * We must use the safe variant here as on shutdown xfs_iflush_abort()
	 * can remove itself from the list.
	 */
	list_for_each_entry_safe(lip, n, &bp->b_li_list, li_bio_list) {
		iip = (struct xfs_inode_log_item *)lip;
		ip = iip->ili_inode;

		/*
		 * Quick and dirty check to avoid locks if possible.
		 */
		if (__xfs_iflags_test(ip, XFS_IRECLAIM | XFS_IFLUSHING))
			continue;
		if (xfs_ipincount(ip))
			continue;

		/*
		 * The inode is still attached to the buffer, which means it is
		 * dirty but reclaim might try to grab it. Check carefully for
		 * that, and grab the ilock while still holding the i_flags_lock
		 * to guarantee reclaim will not be able to reclaim this inode
		 * once we drop the i_flags_lock.
		 */
		spin_lock(&ip->i_flags_lock);
		ASSERT(!__xfs_iflags_test(ip, XFS_ISTALE));
		if (__xfs_iflags_test(ip, XFS_IRECLAIM | XFS_IFLUSHING)) {
			spin_unlock(&ip->i_flags_lock);
			continue;
		}

		/*
		 * ILOCK will pin the inode against reclaim and prevent
		 * concurrent transactions modifying the inode while we are
		 * flushing the inode. If we get the lock, set the flushing
		 * state before we drop the i_flags_lock.
		 */
		if (!xfs_ilock_nowait(ip, XFS_ILOCK_SHARED)) {
			spin_unlock(&ip->i_flags_lock);
			continue;
		}
		__xfs_iflags_set(ip, XFS_IFLUSHING);
		spin_unlock(&ip->i_flags_lock);

		/*
		 * Abort flushing this inode if we are shut down because the
		 * inode may not currently be in the AIL. This can occur when
		 * log I/O failure unpins the inode without inserting into the
		 * AIL, leaving a dirty/unpinned inode attached to the buffer
		 * that otherwise looks like it should be flushed.
		 */
		if (XFS_FORCED_SHUTDOWN(mp)) {
			xfs_iunpin_wait(ip);
			xfs_iflush_abort(ip);
			xfs_iunlock(ip, XFS_ILOCK_SHARED);
			error = -EIO;
			continue;
		}

		/* don't block waiting on a log force to unpin dirty inodes */
		if (xfs_ipincount(ip)) {
			xfs_iflags_clear(ip, XFS_IFLUSHING);
			xfs_iunlock(ip, XFS_ILOCK_SHARED);
			continue;
		}

		if (!xfs_inode_clean(ip))
			error = xfs_iflush(ip, bp);
		else
			xfs_iflags_clear(ip, XFS_IFLUSHING);
		xfs_iunlock(ip, XFS_ILOCK_SHARED);
		if (error)
			break;
		clcount++;
	}

	if (error) {
		bp->b_flags |= XBF_ASYNC;
		xfs_buf_ioend_fail(bp);
		xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
		return error;
	}

	if (!clcount)
		return -EAGAIN;

	XFS_STATS_INC(mp, xs_icluster_flushcnt);
	XFS_STATS_ADD(mp, xs_icluster_flushinode, clcount);
	return 0;
}
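
/*
 * Sketch of the calling convention for xfs_iflush_cluster() (an
 * illustration loosely based on the AIL push path, not code from this
 * file): the caller holds the locked cluster buffer and submits it only
 * when at least one inode was flushed.
 *
 *	error = xfs_iflush_cluster(bp);
 *	if (!error) {
 *		xfs_buf_delwri_queue(bp, buffer_list);
 *		xfs_buf_relse(bp);
 *	} else if (error == -EAGAIN) {
 *		xfs_buf_relse(bp);	// nothing flushed, just unlock
 *	}
 *	// on any other error the buffer has already been released
 */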

/* Release an inode. */
void
xfs_irele(
	struct xfs_inode	*ip)
{
	trace_xfs_irele(ip, _RET_IP_);
	iput(VFS_I(ip));
}
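
/*
 * Typical usage (illustrative): xfs_irele() drops a reference obtained
 * from a lookup such as xfs_iget():
 *
 *	error = xfs_iget(mp, NULL, ino, 0, 0, &ip);
 *	if (error)
 *		return error;
 *	... operate on ip ...
 *	xfs_irele(ip);
 */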

/*
 * Ensure all committed transactions touching the inode are written to the log.
 */
int
xfs_log_force_inode(
	struct xfs_inode	*ip)
{
	xfs_lsn_t		lsn = 0;

	xfs_ilock(ip, XFS_ILOCK_SHARED);
	if (xfs_ipincount(ip))
		lsn = ip->i_itemp->ili_last_lsn;
	xfs_iunlock(ip, XFS_ILOCK_SHARED);

	if (!lsn)
		return 0;
	return xfs_log_force_lsn(ip->i_mount, lsn, XFS_LOG_SYNC, NULL);
}
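
/*
 * Usage sketch (illustrative): fsync-style callers use this to make
 * already-committed changes to an inode stable.  The directory fsync
 * path in xfs_file.c, for example, is essentially:
 *
 *	return xfs_log_force_inode(XFS_I(file->f_mapping->host));
 *
 * If the inode is not pinned, no committed transactions are awaiting
 * log I/O and the function returns 0 without forcing anything.
 */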

/*
 * Grab the exclusive iolock for a data copy from src to dest, making sure to
 * abide by the vfs locking order (lowest pointer value goes first) and break the
 * layout leases before proceeding.  The loop is needed because we cannot call
 * the blocking break_layout() with the iolocks held, and therefore have to
 * back out both locks.
 */
static int
xfs_iolock_two_inodes_and_break_layout(
	struct inode		*src,
	struct inode		*dest)
{
	int			error;

	if (src > dest)
		swap(src, dest);

retry:
	/* Wait to break both inodes' layouts before we start locking. */
	error = break_layout(src, true);
	if (error)
		return error;
	if (src != dest) {
		error = break_layout(dest, true);
		if (error)
			return error;
	}

	/* Lock one inode and make sure nobody got in and leased it. */
	inode_lock(src);
	error = break_layout(src, false);
	if (error) {
		inode_unlock(src);
		if (error == -EWOULDBLOCK)
			goto retry;
		return error;
	}

	if (src == dest)
		return 0;

	/* Lock the other inode and make sure nobody got in and leased it. */
	inode_lock_nested(dest, I_MUTEX_NONDIR2);
	error = break_layout(dest, false);
	if (error) {
		inode_unlock(src);
		inode_unlock(dest);
		if (error == -EWOULDBLOCK)
			goto retry;
		return error;
	}

	return 0;
}
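
/*
 * A minimal illustration of the ordering rule used above (not code from
 * this file): the inode with the lower address is always locked first,
 * so two tasks copying between the same pair of files in opposite
 * directions take the locks in the same global order and cannot
 * deadlock:
 *
 *	task A: copy(f1 -> f2):	inode_lock(min(i1, i2));
 *				inode_lock_nested(max(i1, i2), I_MUTEX_NONDIR2);
 *	task B: copy(f2 -> f1):	same order, so it simply blocks on the
 *				first lock instead of deadlocking
 */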

/*
 * Lock two inodes so that userspace cannot initiate I/O via file syscalls or
 * mmap activity.
 */
int
xfs_ilock2_io_mmap(
	struct xfs_inode	*ip1,
	struct xfs_inode	*ip2)
{
	int			ret;

	ret = xfs_iolock_two_inodes_and_break_layout(VFS_I(ip1), VFS_I(ip2));
	if (ret)
		return ret;
	if (ip1 == ip2)
		xfs_ilock(ip1, XFS_MMAPLOCK_EXCL);
	else
		xfs_lock_two_inodes(ip1, XFS_MMAPLOCK_EXCL,
				    ip2, XFS_MMAPLOCK_EXCL);
	return 0;
}

/* Unlock both inodes to allow IO and mmap activity. */
void
xfs_iunlock2_io_mmap(
	struct xfs_inode	*ip1,
	struct xfs_inode	*ip2)
{
	bool			same_inode = (ip1 == ip2);

	xfs_iunlock(ip2, XFS_MMAPLOCK_EXCL);
	if (!same_inode)
		xfs_iunlock(ip1, XFS_MMAPLOCK_EXCL);
	inode_unlock(VFS_I(ip2));
	if (!same_inode)
		inode_unlock(VFS_I(ip1));
}
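
/*
 * Usage sketch (illustrative, loosely modelled on the reflink remap
 * path): the two helpers above bracket an operation that must exclude
 * both file I/O and page faults on both inodes:
 *
 *	ret = xfs_ilock2_io_mmap(src_ip, dest_ip);
 *	if (ret)
 *		return ret;
 *	... remap or compare extents between src_ip and dest_ip ...
 *	xfs_iunlock2_io_mmap(src_ip, dest_ip);
 */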