/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_types.h"
#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_dir2.h"
#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_btree.h"
#include "xfs_dir2_sf.h"
#include "xfs_attr_sf.h"
#include "xfs_inode.h"
#include "xfs_dinode.h"
#include "xfs_error.h"
#include "xfs_mru_cache.h"
#include "xfs_filestream.h"
#include "xfs_vnodeops.h"
#include "xfs_utils.h"
#include "xfs_buf_item.h"
#include "xfs_inode_item.h"
#include "xfs_rw.h"
#include "xfs_quota.h"
#include "xfs_trace.h"

#include <linux/kthread.h>
#include <linux/freezer.h>

STATIC xfs_inode_t *
xfs_inode_ag_lookup(
	struct xfs_mount	*mp,
	struct xfs_perag	*pag,
	uint32_t		*first_index,
	int			tag)
{
	int			nr_found;
	struct xfs_inode	*ip;

	/*
	 * Use a gang lookup to find the next inode in the tree
	 * as the tree is sparse and a gang lookup walks the tree
	 * until it finds the requested number of objects.
	 */
	if (tag == XFS_ICI_NO_TAG) {
		nr_found = radix_tree_gang_lookup(&pag->pag_ici_root,
				(void **)&ip, *first_index, 1);
	} else {
		nr_found = radix_tree_gang_lookup_tag(&pag->pag_ici_root,
				(void **)&ip, *first_index, 1, tag);
	}
	if (!nr_found)
		return NULL;

	/*
	 * Update the index for the next lookup. Catch overflows
	 * into the next AG range which can occur if we have inodes
	 * in the last block of the AG and we are currently
	 * pointing to the last inode.
	 */
	*first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1);
	if (*first_index < XFS_INO_TO_AGINO(mp, ip->i_ino))
		return NULL;
	return ip;
	return ip;
}
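
/*
 * Illustrative note, not part of the original source: the overflow check
 * in xfs_inode_ag_lookup() works because XFS_INO_TO_AGINO() masks the AG
 * number off the inode number.  If, say, the per-AG inode space were 8
 * bits wide and the lookup returned agino 255, then ip->i_ino + 1 would
 * carry into the AG number bits and map back to agino 0, which is less
 * than 255, so the walk terminates at the end of the AG instead of
 * wrapping around to its start.
 */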

STATIC int
xfs_inode_ag_walk(
	struct xfs_mount	*mp,
	struct xfs_perag	*pag,
	int			(*execute)(struct xfs_inode *ip,
					   struct xfs_perag *pag, int flags),
	int			flags,
	int			tag,
	int			exclusive)
{
	uint32_t		first_index;
	int			last_error = 0;
	int			skipped;

restart:
	skipped = 0;
	first_index = 0;
	do {
		int		error = 0;
		xfs_inode_t	*ip;

		if (exclusive)
			write_lock(&pag->pag_ici_lock);
		else
			read_lock(&pag->pag_ici_lock);
		ip = xfs_inode_ag_lookup(mp, pag, &first_index, tag);
		if (!ip) {
			if (exclusive)
				write_unlock(&pag->pag_ici_lock);
			else
				read_unlock(&pag->pag_ici_lock);
			break;
		}

		/* execute releases pag->pag_ici_lock */
		error = execute(ip, pag, flags);
		if (error == EAGAIN) {
			skipped++;
			continue;
		}
		if (error)
			last_error = error;

		/* bail out if the filesystem is corrupted.  */
		if (error == EFSCORRUPTED)
			break;

	} while (1);

	if (skipped) {
		delay(1);
		goto restart;
	}
	return last_error;
}

int
xfs_inode_ag_iterator(
	struct xfs_mount	*mp,
	int			(*execute)(struct xfs_inode *ip,
					   struct xfs_perag *pag, int flags),
	int			flags,
	int			tag,
	int			exclusive)
{
	int			error = 0;
	int			last_error = 0;
	xfs_agnumber_t		ag;

	for (ag = 0; ag < mp->m_sb.sb_agcount; ag++) {
		struct xfs_perag	*pag;

		pag = xfs_perag_get(mp, ag);
		if (!pag->pag_ici_init) {
			xfs_perag_put(pag);
			continue;
		}
		error = xfs_inode_ag_walk(mp, pag, execute, flags, tag,
						exclusive);
		xfs_perag_put(pag);
		if (error) {
			last_error = error;
			if (error == EFSCORRUPTED)
				break;
		}
	}
	return XFS_ERROR(last_error);
}
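
/*
 * Illustrative sketch only, not part of the original source: a minimal
 * "execute" callback for xfs_inode_ag_iterator().  The walker calls it
 * for each cached inode with pag->pag_ici_lock held; the callback must
 * drop that lock (xfs_sync_inode_valid() below does so), and may return
 * EAGAIN to make the walker restart the AG or EFSCORRUPTED to abort the
 * whole iteration.  The function name is made up for the example.
 */
#if 0
STATIC int
xfs_example_execute(
	struct xfs_inode	*ip,
	struct xfs_perag	*pag,
	int			flags)
{
	int			error;

	/* validate the inode, take a reference and drop pag_ici_lock */
	error = xfs_sync_inode_valid(ip, pag);
	if (error)
		return error;

	/* ...per-inode work goes here... */

	IRELE(ip);
	return 0;
}
#endif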

/* must be called with pag_ici_lock held and releases it */
int
xfs_sync_inode_valid(
	struct xfs_inode	*ip,
	struct xfs_perag	*pag)
{
	struct inode		*inode = VFS_I(ip);
	int			error = EFSCORRUPTED;

	/* nothing to sync during shutdown */
	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
		goto out_unlock;

	/* avoid new or reclaimable inodes. Leave for reclaim code to flush */
	error = ENOENT;
	if (xfs_iflags_test(ip, XFS_INEW | XFS_IRECLAIMABLE | XFS_IRECLAIM))
		goto out_unlock;

	/* If we can't grab the inode, it must be on its way to reclaim. */
	if (!igrab(inode))
		goto out_unlock;

	if (is_bad_inode(inode)) {
		IRELE(ip);
		goto out_unlock;
	}

	/* inode is valid */
	error = 0;
out_unlock:
	read_unlock(&pag->pag_ici_lock);
	return error;
}

STATIC int
xfs_sync_inode_data(
	struct xfs_inode	*ip,
	struct xfs_perag	*pag,
	int			flags)
{
	struct inode		*inode = VFS_I(ip);
	struct address_space *mapping = inode->i_mapping;
	int			error = 0;

	error = xfs_sync_inode_valid(ip, pag);
	if (error)
		return error;

	if (!mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
		goto out_wait;

	if (!xfs_ilock_nowait(ip, XFS_IOLOCK_SHARED)) {
		if (flags & SYNC_TRYLOCK)
			goto out_wait;
		xfs_ilock(ip, XFS_IOLOCK_SHARED);
	}

	error = xfs_flush_pages(ip, 0, -1, (flags & SYNC_WAIT) ?
				0 : XBF_ASYNC, FI_NONE);
	xfs_iunlock(ip, XFS_IOLOCK_SHARED);
	xfs_iunlock(ip, XFS_IOLOCK_SHARED);

 out_wait:
	if (flags & SYNC_WAIT)
		xfs_ioend_wait(ip);
	IRELE(ip);
	return error;
}

STATIC int
xfs_sync_inode_attr(
	struct xfs_inode	*ip,
	struct xfs_perag	*pag,
	int			flags)
{
	int			error = 0;

	error = xfs_sync_inode_valid(ip, pag);
	if (error)
		return error;

	xfs_ilock(ip, XFS_ILOCK_SHARED);
	if (xfs_inode_clean(ip))
		goto out_unlock;
	if (!xfs_iflock_nowait(ip)) {
		if (!(flags & SYNC_WAIT))
			goto out_unlock;
		xfs_iflock(ip);
	}

	if (xfs_inode_clean(ip)) {
		xfs_ifunlock(ip);
		goto out_unlock;
	}

	error = xfs_iflush(ip, (flags & SYNC_WAIT) ?
			   XFS_IFLUSH_SYNC : XFS_IFLUSH_DELWRI);

 out_unlock:
	xfs_iunlock(ip, XFS_ILOCK_SHARED);
	IRELE(ip);
	return error;
}

/*
 * Write out pagecache data for the whole filesystem.
 */
int
xfs_sync_data(
	struct xfs_mount	*mp,
	int			flags)
{
	int			error;

	ASSERT((flags & ~(SYNC_TRYLOCK|SYNC_WAIT)) == 0);

	error = xfs_inode_ag_iterator(mp, xfs_sync_inode_data, flags,
				      XFS_ICI_NO_TAG, 0);
	if (error)
		return XFS_ERROR(error);

	xfs_log_force(mp, (flags & SYNC_WAIT) ? XFS_LOG_SYNC : 0);
	return 0;
}

/*
 * Write out inode metadata (attributes) for the whole filesystem.
 */
int
xfs_sync_attr(
	struct xfs_mount	*mp,
	int			flags)
{
	ASSERT((flags & ~SYNC_WAIT) == 0);

	return xfs_inode_ag_iterator(mp, xfs_sync_inode_attr, flags,
				     XFS_ICI_NO_TAG, 0);
}

STATIC int
xfs_commit_dummy_trans(
	struct xfs_mount	*mp,
	uint			flags)
{
	struct xfs_inode	*ip = mp->m_rootip;
	struct xfs_trans	*tp;
	int			error;

	/*
	 * Put a dummy transaction in the log to tell recovery
	 * that all others are OK.
	 */
	tp = xfs_trans_alloc(mp, XFS_TRANS_DUMMY1);
	error = xfs_trans_reserve(tp, 0, XFS_ICHANGE_LOG_RES(mp), 0, 0, 0);
	if (error) {
		xfs_trans_cancel(tp, 0);
		return error;
	}

	xfs_ilock(ip, XFS_ILOCK_EXCL);

	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
	xfs_trans_ihold(tp, ip);
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
	error = xfs_trans_commit(tp, 0);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);

	/* the log force ensures this transaction is pushed to disk */
	xfs_log_force(mp, (flags & SYNC_WAIT) ? XFS_LOG_SYNC : 0);
	return error;
}

STATIC int
xfs_sync_fsdata(
	struct xfs_mount	*mp,
	int			flags)
{
	struct xfs_buf		*bp;
	struct xfs_buf_log_item	*bip;
	int			error = 0;

	/*
	 * If this is xfssyncd() then only sync the superblock if we can
	 * lock it without sleeping and it is not pinned.
	 */
	if (flags & SYNC_TRYLOCK) {
		ASSERT(!(flags & SYNC_WAIT));

		bp = xfs_getsb(mp, XBF_TRYLOCK);
		if (!bp)
			goto out;

		bip = XFS_BUF_FSPRIVATE(bp, struct xfs_buf_log_item *);
		if (!bip || !xfs_buf_item_dirty(bip) || XFS_BUF_ISPINNED(bp))
			goto out_brelse;
	} else {
		bp = xfs_getsb(mp, 0);

		/*
		 * If the buffer is pinned then push on the log so we won't
		 * get stuck waiting in the write for someone, maybe
		 * ourselves, to flush the log.
		 *
		 * Even though we just pushed the log above, we did not have
		 * the superblock buffer locked at that point so it can
		 * become pinned in between there and here.
		 */
		if (XFS_BUF_ISPINNED(bp))
			xfs_log_force(mp, 0);
	}

	if (flags & SYNC_WAIT)
		XFS_BUF_UNASYNC(bp);
	else
		XFS_BUF_ASYNC(bp);

	error = xfs_bwrite(mp, bp);
	if (error)
		return error;

	/*
	 * If this is a data integrity sync make sure all pending buffers
	 * are flushed out for the log coverage check below.
	 */
	if (flags & SYNC_WAIT)
		xfs_flush_buftarg(mp->m_ddev_targp, 1);

	if (xfs_log_need_covered(mp))
		error = xfs_commit_dummy_trans(mp, flags);
	return error;

 out_brelse:
	xfs_buf_relse(bp);
 out:
	return error;
}

/*
 * When remounting a filesystem read-only or freezing the filesystem, we have
 * two phases to execute. This first phase is syncing the data before we
 * quiesce the filesystem, and the second is flushing all the inodes out after
 * we've waited for all the transactions created by the first phase to
 * complete. The second phase ensures that the inodes are written to their
 * location on disk rather than just existing in transactions in the log. This
 * means after a quiesce there is no log replay required to write the inodes to
 * disk (this is the main difference between a sync and a quiesce).
 */
/*
 * First stage of freeze - no writers will make progress now we are here,
 * so we flush delwri and delalloc buffers here, then wait for all I/O to
 * complete.  Data is frozen at that point. Metadata is not frozen,
 * transactions can still occur here so don't bother flushing the buftarg
 * because it'll just get dirty again.
 */
int
xfs_quiesce_data(
	struct xfs_mount	*mp)
{
	int error;

	/* push non-blocking */
	xfs_sync_data(mp, 0);
	xfs_qm_sync(mp, SYNC_TRYLOCK);

	/* push and block till complete */
	xfs_sync_data(mp, SYNC_WAIT);
	xfs_qm_sync(mp, SYNC_WAIT);

	/* write superblock and hoover up shutdown errors */
	error = xfs_sync_fsdata(mp, SYNC_WAIT);

	/* flush data-only devices */
	if (mp->m_rtdev_targp)
		XFS_bflush(mp->m_rtdev_targp);

	return error;
}

STATIC void
xfs_quiesce_fs(
	struct xfs_mount	*mp)
{
	int	count = 0, pincount;

	xfs_flush_buftarg(mp->m_ddev_targp, 0);
	xfs_reclaim_inodes(mp, XFS_IFLUSH_DELWRI_ELSE_ASYNC);

	/*
	 * This loop must run at least twice.  The first instance of the loop
	 * will flush most metadata but that will generate more metadata
	 * (typically directory updates), which then must be flushed and
	 * logged before we can write the unmount record.
	 */
	do {
		xfs_sync_attr(mp, SYNC_WAIT);
		pincount = xfs_flush_buftarg(mp->m_ddev_targp, 1);
		if (!pincount) {
			delay(50);
			count++;
		}
	} while (count < 2);
}

/*
 * Second stage of a quiesce. The data is already synced, now we have to take
 * care of the metadata. New transactions are already blocked, so we need to
 * wait for any remaining transactions to drain out before proceeding.
 */
void
xfs_quiesce_attr(
	struct xfs_mount	*mp)
{
	int	error = 0;

	/* wait for all modifications to complete */
	while (atomic_read(&mp->m_active_trans) > 0)
		delay(100);

	/* flush inodes and push all remaining buffers out to disk */
	xfs_quiesce_fs(mp);

	/*
	 * Just warn here till VFS can correctly support
	 * read-only remount without racing.
	 */
	WARN_ON(atomic_read(&mp->m_active_trans) != 0);

	/* Push the superblock and write an unmount record */
	error = xfs_log_sbcount(mp, 1);
	if (error)
		xfs_fs_cmn_err(CE_WARN, mp,
				"xfs_attr_quiesce: failed to log sb changes. "
				"Frozen image may not be consistent.");
	xfs_log_unmount_write(mp);
	xfs_unmountfs_writesb(mp);
}

/*
 * Enqueue a work item to be picked up by the vfs xfssyncd thread.
 * Doing this has two advantages:
 * - It saves on stack space, which is tight in certain situations
 * - It can be used (with care) as a mechanism to avoid deadlocks.
 * Flushing while allocating in a full filesystem requires both.
 */
STATIC void
xfs_syncd_queue_work(
	struct xfs_mount *mp,
	void		*data,
	void		(*syncer)(struct xfs_mount *, void *),
	struct completion *completion)
{
	struct xfs_sync_work *work;

	work = kmem_alloc(sizeof(struct xfs_sync_work), KM_SLEEP);
	INIT_LIST_HEAD(&work->w_list);
	work->w_syncer = syncer;
	work->w_data = data;
	work->w_mount = mp;
	work->w_completion = completion;
	spin_lock(&mp->m_sync_lock);
	list_add_tail(&work->w_list, &mp->m_sync_list);
	spin_unlock(&mp->m_sync_lock);
	wake_up_process(mp->m_sync_task);
}

/*
 * Flush delayed allocate data, attempting to free up reserved space
 * from existing allocations.  At this point a new allocation attempt
 * has failed with ENOSPC and we are in the process of scratching our
 * heads, looking about for more room...
 */
STATIC void
xfs_flush_inodes_work(
	struct xfs_mount *mp,
	void		*arg)
{
	struct inode	*inode = arg;
	xfs_sync_data(mp, SYNC_TRYLOCK);
	xfs_sync_data(mp, SYNC_TRYLOCK | SYNC_WAIT);
	iput(inode);
}

void
xfs_flush_inodes(
	xfs_inode_t	*ip)
{
	struct inode	*inode = VFS_I(ip);
	DECLARE_COMPLETION_ONSTACK(completion);

	igrab(inode);
	xfs_syncd_queue_work(ip->i_mount, inode, xfs_flush_inodes_work, &completion);
	wait_for_completion(&completion);
	xfs_log_force(ip->i_mount, XFS_LOG_SYNC);
}
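
/*
 * Illustrative sketch only, not part of the original source: roughly how
 * an ENOSPC path could use xfs_flush_inodes().  The flush is pushed to
 * the xfssyncd thread rather than run inline because (as noted above)
 * the caller may be deep in an allocation chain with little stack left
 * and holding locks an inline flush could deadlock on.  The function
 * name and retry structure are made up for the example.
 */
#if 0
STATIC int
xfs_example_alloc_retry(
	struct xfs_inode	*ip,
	int			error)
{
	if (error == ENOSPC) {
		/* blocks until the data flush and log force complete */
		xfs_flush_inodes(ip);
		/* ...retry the failed allocation here... */
	}
	return error;
}
#endif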

/*
 * Every sync period we need to unpin all items, reclaim inodes, sync
 * quota and write out the superblock. We might need to cover the log
 * to indicate it is idle.
 */
STATIC void
xfs_sync_worker(
	struct xfs_mount *mp,
	void		*unused)
{
	int		error;

	if (!(mp->m_flags & XFS_MOUNT_RDONLY)) {
		xfs_log_force(mp, 0);
		xfs_reclaim_inodes(mp, XFS_IFLUSH_DELWRI_ELSE_ASYNC);
		/* dgc: errors ignored here */
		error = xfs_qm_sync(mp, SYNC_TRYLOCK);
		error = xfs_sync_fsdata(mp, SYNC_TRYLOCK);
	}
	mp->m_sync_seq++;
	wake_up(&mp->m_wait_single_sync_task);
}

STATIC int
xfssyncd(
	void			*arg)
{
	struct xfs_mount	*mp = arg;
	long			timeleft;
	xfs_sync_work_t		*work, *n;
	LIST_HEAD		(tmp);

	set_freezable();
	timeleft = xfs_syncd_centisecs * msecs_to_jiffies(10);
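	/*
	 * Illustrative note, not in the original source: xfs_syncd_centisecs
	 * is in hundredths of a second, so multiplying by
	 * msecs_to_jiffies(10) converts it to jiffies; with the assumed
	 * default of 3000 the worker wakes roughly every 30 seconds.
	 */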
	for (;;) {
		timeleft = schedule_timeout_interruptible(timeleft);
		/* swsusp */
		try_to_freeze();
		if (kthread_should_stop() && list_empty(&mp->m_sync_list))
			break;

		spin_lock(&mp->m_sync_lock);
		/*
		 * We can get woken by laptop mode, to do a sync -
		 * that's the (only!) case where the list would be
		 * empty with time remaining.
		 */
		if (!timeleft || list_empty(&mp->m_sync_list)) {
			if (!timeleft)
				timeleft = xfs_syncd_centisecs *
							msecs_to_jiffies(10);
			INIT_LIST_HEAD(&mp->m_sync_work.w_list);
			list_add_tail(&mp->m_sync_work.w_list,
					&mp->m_sync_list);
		}
		list_for_each_entry_safe(work, n, &mp->m_sync_list, w_list)
			list_move(&work->w_list, &tmp);
		spin_unlock(&mp->m_sync_lock);

		list_for_each_entry_safe(work, n, &tmp, w_list) {
			(*work->w_syncer)(mp, work->w_data);
			list_del(&work->w_list);
			if (work == &mp->m_sync_work)
				continue;
			if (work->w_completion)
				complete(work->w_completion);
			kmem_free(work);
		}
	}

	return 0;
}

int
xfs_syncd_init(
	struct xfs_mount	*mp)
{
	mp->m_sync_work.w_syncer = xfs_sync_worker;
	mp->m_sync_work.w_mount = mp;
	mp->m_sync_work.w_completion = NULL;
	mp->m_sync_task = kthread_run(xfssyncd, mp, "xfssyncd");
	if (IS_ERR(mp->m_sync_task))
		return -PTR_ERR(mp->m_sync_task);
	return 0;
}

void
xfs_syncd_stop(
	struct xfs_mount	*mp)
{
	kthread_stop(mp->m_sync_task);
}

void
__xfs_inode_set_reclaim_tag(
	struct xfs_perag	*pag,
	struct xfs_inode	*ip)
{
	radix_tree_tag_set(&pag->pag_ici_root,
			   XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino),
			   XFS_ICI_RECLAIM_TAG);
}

/*
 * We set the inode flag atomically with the radix tree tag.
 * Once we get tag lookups on the radix tree, this inode flag
 * can go away.
 */
void
xfs_inode_set_reclaim_tag(
	xfs_inode_t	*ip)
{
	struct xfs_mount *mp = ip->i_mount;
	struct xfs_perag *pag;

	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
	read_lock(&pag->pag_ici_lock);
	spin_lock(&ip->i_flags_lock);
	__xfs_inode_set_reclaim_tag(pag, ip);
	__xfs_iflags_set(ip, XFS_IRECLAIMABLE);
	spin_unlock(&ip->i_flags_lock);
	read_unlock(&pag->pag_ici_lock);
	xfs_perag_put(pag);
}

void
__xfs_inode_clear_reclaim_tag(
	xfs_mount_t	*mp,
	xfs_perag_t	*pag,
	xfs_inode_t	*ip)
{
	radix_tree_tag_clear(&pag->pag_ici_root,
			XFS_INO_TO_AGINO(mp, ip->i_ino), XFS_ICI_RECLAIM_TAG);
}

STATIC int
xfs_reclaim_inode(
	struct xfs_inode	*ip,
	struct xfs_perag	*pag,
	int			sync_mode)
{
	/*
	 * The radix tree lock here protects a thread in xfs_iget from racing
	 * with us starting reclaim on the inode.  Once we have the
	 * XFS_IRECLAIM flag set it will not touch us.
	 */
	spin_lock(&ip->i_flags_lock);
	ASSERT_ALWAYS(__xfs_iflags_test(ip, XFS_IRECLAIMABLE));
	if (__xfs_iflags_test(ip, XFS_IRECLAIM)) {
		/* ignore as it is already under reclaim */
		spin_unlock(&ip->i_flags_lock);
		write_unlock(&pag->pag_ici_lock);
		return 0;
	}
	__xfs_iflags_set(ip, XFS_IRECLAIM);
	spin_unlock(&ip->i_flags_lock);
	write_unlock(&pag->pag_ici_lock);

	/*
	 * If the inode is still dirty, then flush it out.  If the inode
	 * is not in the AIL, then it will be OK to flush it delwri as
	 * long as xfs_iflush() does not keep any references to the inode.
	 * We leave that decision up to xfs_iflush() since it has the
	 * knowledge of whether it's OK to simply do a delwri flush of
	 * the inode or whether we need to wait until the inode is
	 * pulled from the AIL.
	 * We get the flush lock regardless, though, just to make sure
	 * we don't free it while it is being flushed.
	 */
	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_iflock(ip);

	/*
	 * In the case of a forced shutdown we rely on xfs_iflush() to
	 * wait for the inode to be unpinned before returning an error.
	 */
	if (!is_bad_inode(VFS_I(ip)) && xfs_iflush(ip, sync_mode) == 0) {
		/* synchronize with xfs_iflush_done */
		xfs_iflock(ip);
		xfs_ifunlock(ip);
	}

	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	xfs_ireclaim(ip);
	return 0;
}

int
xfs_reclaim_inodes(
	xfs_mount_t	*mp,
	int		mode)
{
	return xfs_inode_ag_iterator(mp, xfs_reclaim_inode, mode,
					XFS_ICI_RECLAIM_TAG, 1);
}