/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_types.h"
#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_inode.h"
#include "xfs_dinode.h"
#include "xfs_error.h"
#include "xfs_filestream.h"
#include "xfs_vnodeops.h"
#include "xfs_inode_item.h"
#include "xfs_quota.h"
#include "xfs_trace.h"
#include "xfs_fsops.h"

#include <linux/kthread.h>
#include <linux/freezer.h>

struct workqueue_struct	*xfs_syncd_wq;	/* sync workqueue */

/*
 * The inode lookup is done in batches to keep the amount of lock traffic and
 * radix tree lookups to a minimum. The batch size is a trade off between
 * lookup reduction and stack usage. This is in the reclaim path, so we can't
 * be too greedy.
 */
#define XFS_LOOKUP_BATCH	32

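/*
 * Grab a passive reference to an inode found during an AG walk.  Inodes that
 * are newly created, queued for or already in reclaim, or freed under RCU are
 * rejected so that the walk only visits fully initialised, live inodes.
 */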
STATIC int
xfs_inode_ag_walk_grab(
	struct xfs_inode	*ip)
{
	struct inode		*inode = VFS_I(ip);

	ASSERT(rcu_read_lock_held());

	/*
	 * check for stale RCU freed inode
	 *
	 * If the inode has been reallocated, it doesn't matter if it's not in
	 * the AG we are walking - we are walking for writeback, so if it
	 * passes all the "valid inode" checks and is dirty, then we'll write
	 * it back anyway.  If it has been reallocated and is still being
	 * initialised, the XFS_INEW check below will catch it.
	 */
	spin_lock(&ip->i_flags_lock);
	if (!ip->i_ino)
		goto out_unlock_noent;

	/* avoid new or reclaimable inodes. Leave for reclaim code to flush */
	if (__xfs_iflags_test(ip, XFS_INEW | XFS_IRECLAIMABLE | XFS_IRECLAIM))
		goto out_unlock_noent;
	spin_unlock(&ip->i_flags_lock);

	/* nothing to sync during shutdown */
	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
		return EFSCORRUPTED;

	/* If we can't grab the inode, it must be on its way to reclaim. */
	if (!igrab(inode))
		return ENOENT;

	if (is_bad_inode(inode)) {
		IRELE(ip);
		return ENOENT;
	}

	/* inode is valid */
	return 0;

out_unlock_noent:
	spin_unlock(&ip->i_flags_lock);
	return ENOENT;
}

STATIC int
xfs_inode_ag_walk(
	struct xfs_mount	*mp,
	struct xfs_perag	*pag,
	int			(*execute)(struct xfs_inode *ip,
					   struct xfs_perag *pag, int flags),
	int			flags)
{
	uint32_t		first_index;
	int			last_error = 0;
	int			skipped;
	int			done;
	int			nr_found;

restart:
	done = 0;
	skipped = 0;
	first_index = 0;
	nr_found = 0;
	do {
		struct xfs_inode *batch[XFS_LOOKUP_BATCH];
		int		error = 0;
		int		i;

		rcu_read_lock();
		nr_found = radix_tree_gang_lookup(&pag->pag_ici_root,
					(void **)batch, first_index,
					XFS_LOOKUP_BATCH);
		if (!nr_found) {
			rcu_read_unlock();
			break;
		}

		/*
		 * Grab the inodes before we drop the lock. If we found
		 * nothing, nr == 0 and the loop will be skipped.
		 */
		for (i = 0; i < nr_found; i++) {
			struct xfs_inode *ip = batch[i];

			if (done || xfs_inode_ag_walk_grab(ip))
				batch[i] = NULL;

			/*
			 * Update the index for the next lookup. Catch
			 * overflows into the next AG range which can occur if
			 * we have inodes in the last block of the AG and we
			 * are currently pointing to the last inode.
			 *
			 * Because we may see inodes that are from the wrong AG
			 * due to RCU freeing and reallocation, only update the
			 * index if it lies in this AG. It was a race that led
			 * us to see this inode, so another lookup from the
			 * same index will not find it again.
			 */
			if (XFS_INO_TO_AGNO(mp, ip->i_ino) != pag->pag_agno)
				continue;
			first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1);
			if (first_index < XFS_INO_TO_AGINO(mp, ip->i_ino))
				done = 1;
		}

		/* unlock now we've grabbed the inodes. */
		rcu_read_unlock();

		for (i = 0; i < nr_found; i++) {
			if (!batch[i])
				continue;
			error = execute(batch[i], pag, flags);
			IRELE(batch[i]);
			if (error == EAGAIN) {
				skipped++;
				continue;
			}
			if (error && last_error != EFSCORRUPTED)
				last_error = error;
		}

		/* bail out if the filesystem is corrupted.  */
		if (error == EFSCORRUPTED)
			break;

		cond_resched();

	} while (nr_found && !done);

	if (skipped) {
		delay(1);
		goto restart;
	}
	return last_error;
}

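/*
 * Walk all in-core inodes in the filesystem, one allocation group at a
 * time, and call the execute callback on each inode that can be grabbed.
 */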
int
xfs_inode_ag_iterator(
	struct xfs_mount	*mp,
	int			(*execute)(struct xfs_inode *ip,
					   struct xfs_perag *pag, int flags),
	int			flags)
{
	struct xfs_perag	*pag;
	int			error = 0;
	int			last_error = 0;
	xfs_agnumber_t		ag;

	ag = 0;
	while ((pag = xfs_perag_get(mp, ag))) {
		ag = pag->pag_agno + 1;
		error = xfs_inode_ag_walk(mp, pag, execute, flags);
		xfs_perag_put(pag);
		if (error) {
			last_error = error;
			if (error == EFSCORRUPTED)
				break;
		}
	}
	return XFS_ERROR(last_error);
}

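/*
 * Write back the dirty pagecache pages of an inode, asynchronously unless
 * SYNC_WAIT is set.  With SYNC_TRYLOCK we give up rather than wait for the
 * iolock.
 */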
STATIC int
xfs_sync_inode_data(
	struct xfs_inode	*ip,
	struct xfs_perag	*pag,
	int			flags)
{
	struct inode		*inode = VFS_I(ip);
	struct address_space *mapping = inode->i_mapping;
	int			error = 0;

	if (!mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
		return 0;

	if (!xfs_ilock_nowait(ip, XFS_IOLOCK_SHARED)) {
		if (flags & SYNC_TRYLOCK)
			return 0;
		xfs_ilock(ip, XFS_IOLOCK_SHARED);
	}

	error = xfs_flush_pages(ip, 0, -1, (flags & SYNC_WAIT) ?
				0 : XBF_ASYNC, FI_NONE);
	xfs_iunlock(ip, XFS_IOLOCK_SHARED);
	return error;
}

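/*
 * Write back an inode's dirty metadata via xfs_iflush().  Without SYNC_WAIT
 * we only flush inodes whose flush lock can be taken without blocking.
 */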
STATIC int
xfs_sync_inode_attr(
	struct xfs_inode	*ip,
	struct xfs_perag	*pag,
	int			flags)
{
	int			error = 0;

	xfs_ilock(ip, XFS_ILOCK_SHARED);
	if (xfs_inode_clean(ip))
		goto out_unlock;
	if (!xfs_iflock_nowait(ip)) {
		if (!(flags & SYNC_WAIT))
			goto out_unlock;
		xfs_iflock(ip);
	}

	if (xfs_inode_clean(ip)) {
		xfs_ifunlock(ip);
		goto out_unlock;
	}

	error = xfs_iflush(ip, flags);

	/*
	 * We don't want to try again on non-blocking flushes that can't run
	 * again immediately. If an inode really must be written, then that's
	 * what the SYNC_WAIT flag is for.
	 */
	if (error == EAGAIN) {
		ASSERT(!(flags & SYNC_WAIT));
		error = 0;
	}

 out_unlock:
	xfs_iunlock(ip, XFS_ILOCK_SHARED);
	return error;
}

/*
 * Write out pagecache data for the whole filesystem.
 */
STATIC int
xfs_sync_data(
	struct xfs_mount	*mp,
	int			flags)
{
	int			error;

	ASSERT((flags & ~(SYNC_TRYLOCK|SYNC_WAIT)) == 0);

	error = xfs_inode_ag_iterator(mp, xfs_sync_inode_data, flags);
	if (error)
		return XFS_ERROR(error);

	xfs_log_force(mp, (flags & SYNC_WAIT) ? XFS_LOG_SYNC : 0);
	return 0;
}

/*
 * Write out inode metadata (attributes) for the whole filesystem.
 */
STATIC int
xfs_sync_attr(
	struct xfs_mount	*mp,
	int			flags)
{
	ASSERT((flags & ~SYNC_WAIT) == 0);

	return xfs_inode_ag_iterator(mp, xfs_sync_inode_attr, flags);
}

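/*
 * Write the in-core superblock buffer to disk.
 */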
STATIC int
xfs_sync_fsdata(
	struct xfs_mount	*mp)
{
	struct xfs_buf		*bp;
	int			error;

	/*
	 * If the buffer is pinned then push on the log so we won't get stuck
	 * waiting in the write for someone, maybe ourselves, to flush the log.
	 *
	 * Even though we just pushed the log above, we did not have the
	 * superblock buffer locked at that point so it can become pinned in
	 * between there and here.
	 */
	bp = xfs_getsb(mp, 0);
	if (xfs_buf_ispinned(bp))
		xfs_log_force(mp, 0);
	error = xfs_bwrite(bp);
	xfs_buf_relse(bp);
	return error;
}

/*
 * When remounting a filesystem read-only or freezing the filesystem, we have
 * two phases to execute. This first phase is syncing the data before we
 * quiesce the filesystem, and the second is flushing all the inodes out after
 * we've waited for all the transactions created by the first phase to
 * complete. The second phase ensures that the inodes are written to their
 * location on disk rather than just existing in transactions in the log. This
 * means after a quiesce there is no log replay required to write the inodes to
 * disk (this is the main difference between a sync and a quiesce).
 */
/*
 * First stage of freeze - no writers will make progress now we are here,
 * so we flush delwri and delalloc buffers here, then wait for all I/O to
 * complete.  Data is frozen at that point. Metadata is not frozen,
 * transactions can still occur here so don't bother flushing the buftarg
 * because it'll just get dirty again.
 */
int
xfs_quiesce_data(
	struct xfs_mount	*mp)
{
	int			error, error2 = 0;

	xfs_qm_sync(mp, SYNC_TRYLOCK);
	xfs_qm_sync(mp, SYNC_WAIT);

	/* force out the newly dirtied log buffers */
	xfs_log_force(mp, XFS_LOG_SYNC);

	/* write superblock and hoover up shutdown errors */
	error = xfs_sync_fsdata(mp);

	/* make sure all delwri buffers are written out */
	xfs_flush_buftarg(mp->m_ddev_targp, 1);

	/* mark the log as covered if needed */
	if (xfs_log_need_covered(mp))
		error2 = xfs_fs_log_dummy(mp);

	/* flush data-only devices */
	if (mp->m_rtdev_targp)
		XFS_bflush(mp->m_rtdev_targp);

	return error ? error : error2;
}

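/*
 * Flush and reclaim all inodes and push all remaining dirty metadata buffers
 * out to disk in preparation for writing the unmount record.
 */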
STATIC void
xfs_quiesce_fs(
	struct xfs_mount	*mp)
{
	int	count = 0, pincount;

	xfs_reclaim_inodes(mp, 0);
	xfs_flush_buftarg(mp->m_ddev_targp, 0);

	/*
	 * This loop must run at least twice.  The first instance of the loop
	 * will flush most meta data but that will generate more meta data
	 * (typically directory updates), which then must be flushed and
	 * logged before we can write the unmount record. We also do sync
	 * reclaim of inodes to catch any that the above delwri flush skipped.
	 */
	do {
		xfs_reclaim_inodes(mp, SYNC_WAIT);
		xfs_sync_attr(mp, SYNC_WAIT);
		pincount = xfs_flush_buftarg(mp->m_ddev_targp, 1);
		if (!pincount) {
			delay(50);
			count++;
		}
	} while (count < 2);
}

/*
 * Second stage of a quiesce. The data is already synced, now we have to take
 * care of the metadata. New transactions are already blocked, so we need to
 * wait for any remaining transactions to drain out before proceeding.
 */
void
xfs_quiesce_attr(
	struct xfs_mount	*mp)
{
	int	error = 0;

	/* wait for all modifications to complete */
	while (atomic_read(&mp->m_active_trans) > 0)
		delay(100);

	/* flush inodes and push all remaining buffers out to disk */
	xfs_quiesce_fs(mp);

	/*
	 * Just warn here till VFS can correctly support
	 * read-only remount without racing.
	 */
	WARN_ON(atomic_read(&mp->m_active_trans) != 0);

	/* Push the superblock and write an unmount record */
	error = xfs_log_sbcount(mp);
	if (error)
		xfs_warn(mp, "xfs_quiesce_attr: failed to log sb changes. "
				"Frozen image may not be consistent.");
	xfs_log_unmount_write(mp);
	xfs_unmountfs_writesb(mp);
}

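/*
 * (Re)queue the periodic sync work, which runs every xfs_syncd_centisecs
 * centiseconds.
 */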
static void
xfs_syncd_queue_sync(
	struct xfs_mount        *mp)
{
	queue_delayed_work(xfs_syncd_wq, &mp->m_sync_work,
				msecs_to_jiffies(xfs_syncd_centisecs * 10));
}

/*
 * Every sync period we need to unpin all items, reclaim inodes and sync
 * disk quotas.  We might need to cover the log to indicate that the
 * filesystem is idle and not frozen.
 */
STATIC void
xfs_sync_worker(
	struct work_struct *work)
{
	struct xfs_mount *mp = container_of(to_delayed_work(work),
					struct xfs_mount, m_sync_work);
	int		error;

	if (!(mp->m_flags & XFS_MOUNT_RDONLY)) {
		/* dgc: errors ignored here */
		if (mp->m_super->s_frozen == SB_UNFROZEN &&
		    xfs_log_need_covered(mp))
			error = xfs_fs_log_dummy(mp);
		else
			xfs_log_force(mp, 0);
		error = xfs_qm_sync(mp, SYNC_TRYLOCK);

		/* start pushing all the metadata that is currently dirty */
		xfs_ail_push_all(mp->m_ail);
	}

	/* queue us up again */
	xfs_syncd_queue_sync(mp);
}

/*
 * Queue a new inode reclaim pass if there are reclaimable inodes and there
 * isn't a reclaim pass already in progress. By default it runs every 5s based
 * on the xfs syncd work default of 30s. Perhaps this should have its own
 * tunable, but that can be done if this method proves to be ineffective or too
 * aggressive.
 */
static void
xfs_syncd_queue_reclaim(
	struct xfs_mount        *mp)
{

	/*
	 * We can have inodes enter reclaim after we've shut down the syncd
	 * workqueue during unmount, so don't allow reclaim work to be queued
	 * during unmount.
	 */
	if (!(mp->m_super->s_flags & MS_ACTIVE))
		return;

	rcu_read_lock();
	if (radix_tree_tagged(&mp->m_perag_tree, XFS_ICI_RECLAIM_TAG)) {
		queue_delayed_work(xfs_syncd_wq, &mp->m_reclaim_work,
			msecs_to_jiffies(xfs_syncd_centisecs / 6 * 10));
	}
	rcu_read_unlock();
}

/*
 * This is a fast pass over the inode cache to try to get reclaim moving on as
 * many inodes as possible in a short period of time. It kicks itself every few
 * seconds, as well as being kicked by the inode cache shrinker when memory
 * goes low. It scans as quickly as possible avoiding locked inodes or those
 * already being flushed, and once done schedules a future pass.
 */
STATIC void
xfs_reclaim_worker(
	struct work_struct *work)
{
	struct xfs_mount *mp = container_of(to_delayed_work(work),
					struct xfs_mount, m_reclaim_work);

	xfs_reclaim_inodes(mp, SYNC_TRYLOCK);
	xfs_syncd_queue_reclaim(mp);
}

/*
 * Flush delayed allocate data, attempting to free up reserved space
 * from existing allocations.  At this point a new allocation attempt
 * has failed with ENOSPC and we are in the process of scratching our
 * heads, looking about for more room.
 *
 * Queue a new data flush if there isn't one already in progress and
 * wait for completion of the flush. This means that we only ever have one
 * inode flush in progress no matter how many ENOSPC events are occurring and
 * so will prevent the system from bogging down due to every concurrent
 * ENOSPC event scanning all the active inodes in the system for writeback.
 */
void
xfs_flush_inodes(
	struct xfs_inode	*ip)
{
	struct xfs_mount	*mp = ip->i_mount;

	queue_work(xfs_syncd_wq, &mp->m_flush_work);
	flush_work_sync(&mp->m_flush_work);
}

STATIC void
xfs_flush_worker(
	struct work_struct *work)
{
	struct xfs_mount *mp = container_of(work,
					struct xfs_mount, m_flush_work);

	xfs_sync_data(mp, SYNC_TRYLOCK);
	xfs_sync_data(mp, SYNC_TRYLOCK | SYNC_WAIT);
}

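/*
 * Initialise the per-mount sync, reclaim and flush work items and kick off
 * the periodic sync and reclaim work.
 */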
int
xfs_syncd_init(
	struct xfs_mount	*mp)
{
	INIT_WORK(&mp->m_flush_work, xfs_flush_worker);
	INIT_DELAYED_WORK(&mp->m_sync_work, xfs_sync_worker);
	INIT_DELAYED_WORK(&mp->m_reclaim_work, xfs_reclaim_worker);

	xfs_syncd_queue_sync(mp);
	xfs_syncd_queue_reclaim(mp);

	return 0;
}

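/*
 * Cancel all outstanding sync, reclaim and flush work for this mount.
 */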
void
xfs_syncd_stop(
	struct xfs_mount	*mp)
{
	cancel_delayed_work_sync(&mp->m_sync_work);
	cancel_delayed_work_sync(&mp->m_reclaim_work);
	cancel_work_sync(&mp->m_flush_work);
}

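/*
 * Mark an inode as reclaimable in the per-AG inode radix tree.  The first
 * reclaimable inode in an AG also propagates the tag into the per-mount
 * perag tree and schedules background reclaim.
 */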
void
__xfs_inode_set_reclaim_tag(
	struct xfs_perag	*pag,
	struct xfs_inode	*ip)
{
	radix_tree_tag_set(&pag->pag_ici_root,
			   XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino),
			   XFS_ICI_RECLAIM_TAG);

	if (!pag->pag_ici_reclaimable) {
		/* propagate the reclaim tag up into the perag radix tree */
		spin_lock(&ip->i_mount->m_perag_lock);
		radix_tree_tag_set(&ip->i_mount->m_perag_tree,
				XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino),
				XFS_ICI_RECLAIM_TAG);
		spin_unlock(&ip->i_mount->m_perag_lock);

		/* schedule periodic background inode reclaim */
		xfs_syncd_queue_reclaim(ip->i_mount);

		trace_xfs_perag_set_reclaim(ip->i_mount, pag->pag_agno,
							-1, _RET_IP_);
	}
	pag->pag_ici_reclaimable++;
}

/*
 * We set the inode flag atomically with the radix tree tag.
 * Once we get tag lookups on the radix tree, this inode flag
 * can go away.
 */
void
xfs_inode_set_reclaim_tag(
	xfs_inode_t	*ip)
{
	struct xfs_mount *mp = ip->i_mount;
	struct xfs_perag *pag;

	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
	spin_lock(&pag->pag_ici_lock);
	spin_lock(&ip->i_flags_lock);
	__xfs_inode_set_reclaim_tag(pag, ip);
	__xfs_iflags_set(ip, XFS_IRECLAIMABLE);
	spin_unlock(&ip->i_flags_lock);
	spin_unlock(&pag->pag_ici_lock);
	xfs_perag_put(pag);
}

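/*
 * Account for an inode leaving reclaim; when the last reclaimable inode in
 * the AG goes away, clear the reclaim tag in the per-mount perag tree.
 */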
STATIC void
__xfs_inode_clear_reclaim(
	xfs_perag_t	*pag,
	xfs_inode_t	*ip)
{
	pag->pag_ici_reclaimable--;
	if (!pag->pag_ici_reclaimable) {
		/* clear the reclaim tag from the perag radix tree */
		spin_lock(&ip->i_mount->m_perag_lock);
		radix_tree_tag_clear(&ip->i_mount->m_perag_tree,
				XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino),
				XFS_ICI_RECLAIM_TAG);
		spin_unlock(&ip->i_mount->m_perag_lock);
		trace_xfs_perag_clear_reclaim(ip->i_mount, pag->pag_agno,
							-1, _RET_IP_);
	}
}

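/*
 * Clear the reclaim tag for an inode in the per-AG inode radix tree and
 * update the reclaimable inode accounting.
 */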
void
__xfs_inode_clear_reclaim_tag(
	xfs_mount_t	*mp,
	xfs_perag_t	*pag,
	xfs_inode_t	*ip)
{
	radix_tree_tag_clear(&pag->pag_ici_root,
			XFS_INO_TO_AGINO(mp, ip->i_ino), XFS_ICI_RECLAIM_TAG);
	__xfs_inode_clear_reclaim(pag, ip);
}

/*
 * Grab the inode for reclaim exclusively.
 * Return 0 if we grabbed it, non-zero otherwise.
 */
STATIC int
xfs_reclaim_inode_grab(
	struct xfs_inode	*ip,
	int			flags)
{
	ASSERT(rcu_read_lock_held());

	/* quick check for stale RCU freed inode */
	if (!ip->i_ino)
		return 1;

	/*
	 * Do some unlocked checks first to avoid unnecessary lock traffic.
	 * The first is a flush lock check, the second is an already-in-reclaim
	 * check. Only do these checks if we are not going to block on locks.
	 */
	if ((flags & SYNC_TRYLOCK) &&
	    (!ip->i_flush.done || __xfs_iflags_test(ip, XFS_IRECLAIM))) {
		return 1;
	}

	/*
	 * The radix tree lock here protects a thread in xfs_iget from racing
	 * with us starting reclaim on the inode.  Once we have the
	 * XFS_IRECLAIM flag set it will not touch us.
	 *
	 * Due to RCU lookup, we may find inodes that have been freed and only
	 * have XFS_IRECLAIM set.  Indeed, we may see reallocated inodes that
	 * aren't candidates for reclaim at all, so we must check that
	 * XFS_IRECLAIMABLE is set first before proceeding to reclaim.
	 */
	spin_lock(&ip->i_flags_lock);
	if (!__xfs_iflags_test(ip, XFS_IRECLAIMABLE) ||
	    __xfs_iflags_test(ip, XFS_IRECLAIM)) {
		/* not a reclaim candidate. */
		spin_unlock(&ip->i_flags_lock);
		return 1;
	}
	__xfs_iflags_set(ip, XFS_IRECLAIM);
	spin_unlock(&ip->i_flags_lock);
	return 0;
}

/*
 * Inodes in different states need to be treated differently, and the return
 * value of xfs_iflush is not sufficient to get this right. The following table
 * lists the inode states and the reclaim actions necessary for non-blocking
 * reclaim:
 *
 *
 *	inode state	     iflush ret		required action
 *      ---------------      ----------         ---------------
 *	bad			-		reclaim
 *	shutdown		EIO		unpin and reclaim
 *	clean, unpinned		0		reclaim
 *	stale, unpinned		0		reclaim
 *	clean, pinned(*)	0		requeue
 *	stale, pinned		EAGAIN		requeue
 *	dirty, delwri ok	0		requeue
 *	dirty, delwri blocked	EAGAIN		requeue
 *	dirty, sync flush	0		reclaim
 *
 * (*) dgc: I don't think the clean, pinned state is possible but it gets
 * handled anyway given the order of checks implemented.
 *
 * As can be seen from the table, the return value of xfs_iflush() is not
 * sufficient to correctly decide the reclaim action here. The checks in
 * xfs_iflush() might look like duplicates, but they are not.
 *
 * Also, because we get the flush lock first, we know that any inode that has
 * been flushed delwri has had the flush completed by the time we check that
 * the inode is clean. The clean inode check needs to be done before flushing
 * the inode delwri otherwise we would loop forever requeuing clean inodes as
 * we cannot tell apart a successful delwri flush and a clean inode from the
 * return value of xfs_iflush().
 *
 * Note that because the inode is flushed delayed write by background
 * writeback, the flush lock may already be held here and waiting on it can
 * result in very long latencies. Hence for sync reclaims, where we wait on the
 * flush lock, the caller should push out delayed write inodes first before
 * trying to reclaim them to minimise the amount of time spent waiting. For
 * background reclaim, we just requeue the inode for the next pass.
 *
 * Hence the order of actions after gaining the locks should be:
 *	bad		=> reclaim
 *	shutdown	=> unpin and reclaim
 *	pinned, delwri	=> requeue
 *	pinned, sync	=> unpin
 *	stale		=> reclaim
 *	clean		=> reclaim
 *	dirty, delwri	=> flush and requeue
 *	dirty, sync	=> flush, wait and reclaim
 */
STATIC int
xfs_reclaim_inode(
	struct xfs_inode	*ip,
	struct xfs_perag	*pag,
	int			sync_mode)
{
	int	error;

restart:
	error = 0;
	xfs_ilock(ip, XFS_ILOCK_EXCL);
	if (!xfs_iflock_nowait(ip)) {
		if (!(sync_mode & SYNC_WAIT))
			goto out;
		xfs_iflock(ip);
	}

	if (is_bad_inode(VFS_I(ip)))
		goto reclaim;
	if (XFS_FORCED_SHUTDOWN(ip->i_mount)) {
		xfs_iunpin_wait(ip);
		goto reclaim;
	}
	if (xfs_ipincount(ip)) {
		if (!(sync_mode & SYNC_WAIT)) {
			xfs_ifunlock(ip);
			goto out;
		}
		xfs_iunpin_wait(ip);
	}
	if (xfs_iflags_test(ip, XFS_ISTALE))
		goto reclaim;
	if (xfs_inode_clean(ip))
		goto reclaim;

	/*
	 * Now we have an inode that needs flushing.
	 *
	 * We do a nonblocking flush here even if we are doing a SYNC_WAIT
	 * reclaim as we can deadlock with inode cluster removal.
	 * xfs_ifree_cluster() can lock the inode buffer before it locks the
	 * ip->i_lock, and we are doing the exact opposite here. As a result,
	 * doing a blocking xfs_itobp() to get the cluster buffer will result
	 * in an ABBA deadlock with xfs_ifree_cluster().
	 *
	 * As xfs_ifree_cluster() must gather all inodes that are active in the
	 * cache to mark them stale, if we hit this case we don't actually want
	 * to do IO here - we want the inode marked stale so we can simply
	 * reclaim it. Hence if we get an EAGAIN error on a SYNC_WAIT flush,
	 * just unlock the inode, back off and try again. Hopefully the next
	 * pass through will see the stale flag set on the inode.
	 */
	error = xfs_iflush(ip, SYNC_TRYLOCK | sync_mode);
	if (sync_mode & SYNC_WAIT) {
		if (error == EAGAIN) {
			xfs_iunlock(ip, XFS_ILOCK_EXCL);
			/* backoff longer than in xfs_ifree_cluster */
			delay(2);
			goto restart;
		}
		xfs_iflock(ip);
		goto reclaim;
	}

	/*
	 * When we have to flush an inode but don't have SYNC_WAIT set, we
	 * flush the inode out using a delwri buffer and wait for the next
	 * call into reclaim to find it in a clean state instead of waiting for
	 * it now. We also don't return errors here - if the error is transient
	 * then the next reclaim pass will flush the inode, and if the error
	 * is permanent then the next sync reclaim will reclaim the inode and
	 * pass on the error.
	 */
	if (error && error != EAGAIN && !XFS_FORCED_SHUTDOWN(ip->i_mount)) {
		xfs_warn(ip->i_mount,
			"inode 0x%llx background reclaim flush failed with %d",
			(long long)ip->i_ino, error);
	}
out:
	xfs_iflags_clear(ip, XFS_IRECLAIM);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	/*
	 * We could return EAGAIN here to make reclaim rescan the inode tree in
	 * a short while. However, this just burns CPU time scanning the tree
	 * waiting for IO to complete and xfssyncd never goes back to the idle
	 * state. Instead, return 0 to let the next scheduled background reclaim
	 * attempt to reclaim the inode again.
	 */
	return 0;

reclaim:
	xfs_ifunlock(ip);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);

	XFS_STATS_INC(xs_ig_reclaims);
	/*
	 * Remove the inode from the per-AG radix tree.
	 *
	 * Because radix_tree_delete won't complain even if the item was never
	 * added to the tree assert that it's been there before to catch
	 * problems with the inode life time early on.
	 */
	spin_lock(&pag->pag_ici_lock);
	if (!radix_tree_delete(&pag->pag_ici_root,
				XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino)))
		ASSERT(0);
	__xfs_inode_clear_reclaim(pag, ip);
	spin_unlock(&pag->pag_ici_lock);

	/*
	 * Here we do an (almost) spurious inode lock in order to coordinate
	 * with inode cache radix tree lookups.  This is because the lookup
	 * can reference the inodes in the cache without taking references.
	 *
	 * We make that OK here by ensuring that we wait until the inode is
	 * unlocked after the lookup before we go ahead and free it.  We get
	 * both the ilock and the iolock because the code may need to drop the
	 * ilock one but will still hold the iolock.
	 */
	xfs_ilock(ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
	xfs_qm_dqdetach(ip);
	xfs_iunlock(ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);

	xfs_inode_free(ip);
	return error;

}

/*
 * Walk the AGs and reclaim the inodes in them. Even if the filesystem is
 * corrupted, we still want to try to reclaim all the inodes. If we don't,
 * then a shutdown during filesystem unmount reclaim walk will leak all the
 * unreclaimed inodes.
 */
int
xfs_reclaim_inodes_ag(
	struct xfs_mount	*mp,
	int			flags,
	int			*nr_to_scan)
{
	struct xfs_perag	*pag;
	int			error = 0;
	int			last_error = 0;
	xfs_agnumber_t		ag;
	int			trylock = flags & SYNC_TRYLOCK;
	int			skipped;

restart:
	ag = 0;
	skipped = 0;
	while ((pag = xfs_perag_get_tag(mp, ag, XFS_ICI_RECLAIM_TAG))) {
		unsigned long	first_index = 0;
		int		done = 0;
		int		nr_found = 0;

		ag = pag->pag_agno + 1;

		if (trylock) {
			if (!mutex_trylock(&pag->pag_ici_reclaim_lock)) {
				skipped++;
				xfs_perag_put(pag);
				continue;
			}
			first_index = pag->pag_ici_reclaim_cursor;
		} else
			mutex_lock(&pag->pag_ici_reclaim_lock);

		do {
			struct xfs_inode *batch[XFS_LOOKUP_BATCH];
			int	i;

			rcu_read_lock();
			nr_found = radix_tree_gang_lookup_tag(
					&pag->pag_ici_root,
					(void **)batch, first_index,
					XFS_LOOKUP_BATCH,
					XFS_ICI_RECLAIM_TAG);
			if (!nr_found) {
				done = 1;
				rcu_read_unlock();
				break;
			}

			/*
			 * Grab the inodes before we drop the lock. If we found
			 * nothing, nr == 0 and the loop will be skipped.
			 */
			for (i = 0; i < nr_found; i++) {
				struct xfs_inode *ip = batch[i];

				if (done || xfs_reclaim_inode_grab(ip, flags))
					batch[i] = NULL;

				/*
				 * Update the index for the next lookup. Catch
				 * overflows into the next AG range which can
				 * occur if we have inodes in the last block of
				 * the AG and we are currently pointing to the
				 * last inode.
				 *
				 * Because we may see inodes that are from the
				 * wrong AG due to RCU freeing and
				 * reallocation, only update the index if it
				 * lies in this AG. It was a race that led us
				 * to see this inode, so another lookup from
				 * the same index will not find it again.
				 */
				if (XFS_INO_TO_AGNO(mp, ip->i_ino) !=
								pag->pag_agno)
					continue;
				first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1);
				if (first_index < XFS_INO_TO_AGINO(mp, ip->i_ino))
					done = 1;
			}

			/* unlock now we've grabbed the inodes. */
			rcu_read_unlock();

			for (i = 0; i < nr_found; i++) {
				if (!batch[i])
					continue;
				error = xfs_reclaim_inode(batch[i], pag, flags);
				if (error && last_error != EFSCORRUPTED)
					last_error = error;
			}

			*nr_to_scan -= XFS_LOOKUP_BATCH;

			cond_resched();

		} while (nr_found && !done && *nr_to_scan > 0);

		if (trylock && !done)
			pag->pag_ici_reclaim_cursor = first_index;
		else
			pag->pag_ici_reclaim_cursor = 0;
		mutex_unlock(&pag->pag_ici_reclaim_lock);
		xfs_perag_put(pag);
	}

	/*
	 * If we skipped any AG, and we still have scan count remaining, do
	 * another pass this time using blocking reclaim semantics (i.e.
	 * waiting on the reclaim locks and ignoring the reclaim cursors). This
	 * ensures that when we get more reclaimers than AGs we block rather
	 * than spin trying to execute reclaim.
	 */
	if (skipped && (flags & SYNC_WAIT) && *nr_to_scan > 0) {
		trylock = 0;
		goto restart;
	}
	return XFS_ERROR(last_error);
}

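/*
 * Reclaim all reclaimable inodes in the filesystem, regardless of how many
 * there are.
 */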
int
xfs_reclaim_inodes(
	xfs_mount_t	*mp,
	int		mode)
{
	int		nr_to_scan = INT_MAX;

	return xfs_reclaim_inodes_ag(mp, mode, &nr_to_scan);
}

/*
 * Scan a certain number of inodes for reclaim.
 *
 * When called we make sure that there is a background (fast) inode reclaim in
 * progress, while we will throttle the speed of reclaim by doing synchronous
 * reclaim of inodes. That means if we come across dirty inodes, we wait for
 * them to be cleaned, which we hope will not be very long due to the
 * background walker having already kicked the IO off on those dirty inodes.
 */
void
xfs_reclaim_inodes_nr(
	struct xfs_mount	*mp,
	int			nr_to_scan)
{
	/* kick background reclaimer and push the AIL */
	xfs_syncd_queue_reclaim(mp);
	xfs_ail_push_all(mp->m_ail);

	xfs_reclaim_inodes_ag(mp, SYNC_TRYLOCK | SYNC_WAIT, &nr_to_scan);
}

/*
 * Return the number of reclaimable inodes in the filesystem for
 * the shrinker to determine how much to reclaim.
 */
int
xfs_reclaim_inodes_count(
	struct xfs_mount	*mp)
{
	struct xfs_perag	*pag;
	xfs_agnumber_t		ag = 0;
	int			reclaimable = 0;

	while ((pag = xfs_perag_get_tag(mp, ag, XFS_ICI_RECLAIM_TAG))) {
		ag = pag->pag_agno + 1;
		reclaimable += pag->pag_ici_reclaimable;
		xfs_perag_put(pag);
	}
	return reclaimable;
}