Commit 75f3cb13, authored by Dave Chinner, committed by Christoph Hellwig

xfs: introduce a per-ag inode iterator

Given that we walk across the per-ag inode lists so often, it makes sense to
introduce an iterator for this.

Convert the sync and reclaim code to use this new iterator; the quota code will
follow in the next patch.

Also change xfs_reclaim_inode to return -EAGAIN instead of 1 for an inode
already under reclaim.  This simplifies the AG iterator and doesn't
matter for the only other caller.

[hch: merged the lookup and execute callbacks back into one to get the
 pag_ici_lock locking correct and simplify the code flow]
Signed-off-by: Dave Chinner <david@fromorbit.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Eric Sandeen <sandeen@sandeen.net>
parent abc10647
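For context, the pattern this patch introduces: a caller hands xfs_inode_ag_iterator() an "execute" callback of the form int (*execute)(struct xfs_inode *ip, struct xfs_perag *pag, int flags), and the iterator takes care of the per-AG radix-tree walk, the agino wraparound check, and retrying an AG after a short delay whenever a callback returns EAGAIN. A minimal caller-side sketch follows; xfs_frob_inode and xfs_frob_all_inodes are invented names for illustration, while every other identifier comes from the diff below:

    /* Hypothetical callback: the iterator invokes this once per cached
     * inode. xfs_inode_ag_lookup() returns with pag_ici_lock read-held,
     * so the callback must drop it -- here via xfs_sync_inode_valid(),
     * which also takes an inode reference on success. */
    STATIC int
    xfs_frob_inode(
    	struct xfs_inode	*ip,
    	struct xfs_perag	*pag,
    	int			flags)
    {
    	int			error;

    	error = xfs_sync_inode_valid(ip, pag);
    	if (error)
    		return error;

    	/* ... per-inode work goes here; returning EAGAIN makes the
    	 * walker retry this AG after a delay ... */

    	IRELE(ip);	/* drop the reference xfs_sync_inode_valid() took */
    	return 0;
    }

    STATIC int
    xfs_frob_all_inodes(
    	struct xfs_mount	*mp,
    	int			flags)
    {
    	/* untagged walk over every cached inode in each initialised AG */
    	return xfs_inode_ag_iterator(mp, xfs_frob_inode, flags,
    					XFS_ICI_NO_TAG);
    }

This is the shape the reclaim and sync conversions below take, and the shape the quota code is expected to take in the next patch.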
@@ -49,6 +49,123 @@
 #include <linux/freezer.h>
 
+
+STATIC xfs_inode_t *
+xfs_inode_ag_lookup(
+	struct xfs_mount	*mp,
+	struct xfs_perag	*pag,
+	uint32_t		*first_index,
+	int			tag)
+{
+	int			nr_found;
+	struct xfs_inode	*ip;
+
+	/*
+	 * use a gang lookup to find the next inode in the tree
+	 * as the tree is sparse and a gang lookup walks to find
+	 * the number of objects requested.
+	 */
+	read_lock(&pag->pag_ici_lock);
+	if (tag == XFS_ICI_NO_TAG) {
+		nr_found = radix_tree_gang_lookup(&pag->pag_ici_root,
+				(void **)&ip, *first_index, 1);
+	} else {
+		nr_found = radix_tree_gang_lookup_tag(&pag->pag_ici_root,
+				(void **)&ip, *first_index, 1, tag);
+	}
+	if (!nr_found)
+		goto unlock;
+
+	/*
+	 * Update the index for the next lookup. Catch overflows
+	 * into the next AG range which can occur if we have inodes
+	 * in the last block of the AG and we are currently
+	 * pointing to the last inode.
+	 */
+	*first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1);
+	if (*first_index < XFS_INO_TO_AGINO(mp, ip->i_ino))
+		goto unlock;
+
+	return ip;
+
+unlock:
+	read_unlock(&pag->pag_ici_lock);
+	return NULL;
+}
+
+STATIC int
+xfs_inode_ag_walk(
+	struct xfs_mount	*mp,
+	xfs_agnumber_t		ag,
+	int			(*execute)(struct xfs_inode *ip,
+					   struct xfs_perag *pag, int flags),
+	int			flags,
+	int			tag)
+{
+	struct xfs_perag	*pag = &mp->m_perag[ag];
+	uint32_t		first_index;
+	int			last_error = 0;
+	int			skipped;
+
+restart:
+	skipped = 0;
+	first_index = 0;
+	do {
+		int		error = 0;
+		xfs_inode_t	*ip;
+
+		ip = xfs_inode_ag_lookup(mp, pag, &first_index, tag);
+		if (!ip)
+			break;
+
+		error = execute(ip, pag, flags);
+		if (error == EAGAIN) {
+			skipped++;
+			continue;
+		}
+		if (error)
+			last_error = error;
+		/*
+		 * bail out if the filesystem is corrupted.
+		 */
+		if (error == EFSCORRUPTED)
+			break;
+
+	} while (1);
+
+	if (skipped) {
+		delay(1);
+		goto restart;
+	}
+
+	xfs_put_perag(mp, pag);
+	return last_error;
+}
+
+STATIC int
+xfs_inode_ag_iterator(
+	struct xfs_mount	*mp,
+	int			(*execute)(struct xfs_inode *ip,
+					   struct xfs_perag *pag, int flags),
+	int			flags,
+	int			tag)
+{
+	int			error = 0;
+	int			last_error = 0;
+	xfs_agnumber_t		ag;
+
+	for (ag = 0; ag < mp->m_sb.sb_agcount; ag++) {
+		if (!mp->m_perag[ag].pag_ici_init)
+			continue;
+		error = xfs_inode_ag_walk(mp, ag, execute, flags, tag);
+		if (error) {
+			last_error = error;
+			if (error == EFSCORRUPTED)
+				break;
+		}
+	}
+	return XFS_ERROR(last_error);
+}
+
 /* must be called with pag_ici_lock held and releases it */
 STATIC int
 xfs_sync_inode_valid(
@@ -85,12 +202,17 @@ xfs_sync_inode_valid(
 STATIC int
 xfs_sync_inode_data(
 	struct xfs_inode	*ip,
+	struct xfs_perag	*pag,
 	int			flags)
 {
 	struct inode		*inode = VFS_I(ip);
 	struct address_space	*mapping = inode->i_mapping;
 	int			error = 0;
 
+	error = xfs_sync_inode_valid(ip, pag);
+	if (error)
+		return error;
+
 	if (!mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
 		goto out_wait;
@@ -107,16 +229,22 @@ xfs_sync_inode_data(
  out_wait:
 	if (flags & SYNC_IOWAIT)
 		xfs_ioend_wait(ip);
+	IRELE(ip);
 	return error;
 }
 
 STATIC int
 xfs_sync_inode_attr(
 	struct xfs_inode	*ip,
+	struct xfs_perag	*pag,
 	int			flags)
 {
 	int			error = 0;
 
+	error = xfs_sync_inode_valid(ip, pag);
+	if (error)
+		return error;
+
 	xfs_ilock(ip, XFS_ILOCK_SHARED);
 	if (xfs_inode_clean(ip))
 		goto out_unlock;
@@ -136,84 +264,8 @@ xfs_sync_inode_attr(
 
  out_unlock:
 	xfs_iunlock(ip, XFS_ILOCK_SHARED);
+	IRELE(ip);
 	return error;
 }
 
-/*
- * Sync all the inodes in the given AG according to the
- * direction given by the flags.
- */
-STATIC int
-xfs_sync_inodes_ag(
-	xfs_mount_t	*mp,
-	int		ag,
-	int		flags)
-{
-	xfs_perag_t	*pag = &mp->m_perag[ag];
-	int		nr_found;
-	uint32_t	first_index = 0;
-	int		error = 0;
-	int		last_error = 0;
-
-	do {
-		xfs_inode_t	*ip = NULL;
-
-		/*
-		 * use a gang lookup to find the next inode in the tree
-		 * as the tree is sparse and a gang lookup walks to find
-		 * the number of objects requested.
-		 */
-		read_lock(&pag->pag_ici_lock);
-		nr_found = radix_tree_gang_lookup(&pag->pag_ici_root,
-				(void**)&ip, first_index, 1);
-
-		if (!nr_found) {
-			read_unlock(&pag->pag_ici_lock);
-			break;
-		}
-
-		/*
-		 * Update the index for the next lookup. Catch overflows
-		 * into the next AG range which can occur if we have inodes
-		 * in the last block of the AG and we are currently
-		 * pointing to the last inode.
-		 */
-		first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1);
-		if (first_index < XFS_INO_TO_AGINO(mp, ip->i_ino)) {
-			read_unlock(&pag->pag_ici_lock);
-			break;
-		}
-
-		error = xfs_sync_inode_valid(ip, pag);
-		if (error) {
-			if (error == EFSCORRUPTED)
-				return 0;
-			continue;
-		}
-
-		/*
-		 * If we have to flush data or wait for I/O completion
-		 * we need to hold the iolock.
-		 */
-		if (flags & SYNC_DELWRI)
-			error = xfs_sync_inode_data(ip, flags);
-
-		if (flags & SYNC_ATTR)
-			error = xfs_sync_inode_attr(ip, flags);
-
-		IRELE(ip);
-
-		if (error)
-			last_error = error;
-		/*
-		 * bail out if the filesystem is corrupted.
-		 */
-		if (error == EFSCORRUPTED)
-			return XFS_ERROR(error);
-
-	} while (nr_found);
-
-	return last_error;
-}
 
 int
@@ -221,32 +273,24 @@ xfs_sync_inodes(
 	xfs_mount_t	*mp,
 	int		flags)
 {
-	int		error;
-	int		last_error;
-	int		i;
+	int		error = 0;
 	int		lflags = XFS_LOG_FORCE;
 
 	if (mp->m_flags & XFS_MOUNT_RDONLY)
 		return 0;
-	error = 0;
-	last_error = 0;
 
 	if (flags & SYNC_WAIT)
 		lflags |= XFS_LOG_SYNC;
 
-	for (i = 0; i < mp->m_sb.sb_agcount; i++) {
-		if (!mp->m_perag[i].pag_ici_init)
-			continue;
-		error = xfs_sync_inodes_ag(mp, i, flags);
-		if (error)
-			last_error = error;
-		if (error == EFSCORRUPTED)
-			break;
-	}
 	if (flags & SYNC_DELWRI)
-		xfs_log_force(mp, 0, lflags);
+		error = xfs_inode_ag_iterator(mp, xfs_sync_inode_data, flags, XFS_ICI_NO_TAG);
 
-	return XFS_ERROR(last_error);
+	if (flags & SYNC_ATTR)
+		error = xfs_inode_ag_iterator(mp, xfs_sync_inode_attr, flags, XFS_ICI_NO_TAG);
+
+	if (!error && (flags & SYNC_DELWRI))
+		xfs_log_force(mp, 0, lflags);
+
+	return XFS_ERROR(error);
 }
 
 STATIC int
@@ -613,7 +657,7 @@ xfs_reclaim_inode(
 			xfs_ifunlock(ip);
 			xfs_iunlock(ip, XFS_ILOCK_EXCL);
 		}
-		return 1;
+		return -EAGAIN;
 	}
 	__xfs_iflags_set(ip, XFS_IRECLAIM);
 	spin_unlock(&ip->i_flags_lock);
@@ -698,72 +742,20 @@ xfs_inode_clear_reclaim_tag(
 	xfs_put_perag(mp, pag);
 }
 
-
-STATIC void
-xfs_reclaim_inodes_ag(
-	xfs_mount_t	*mp,
-	int		ag,
-	int		mode)
+STATIC int
+xfs_reclaim_inode_now(
+	struct xfs_inode	*ip,
+	struct xfs_perag	*pag,
+	int			flags)
 {
-	xfs_inode_t	*ip = NULL;
-	xfs_perag_t	*pag = &mp->m_perag[ag];
-	int		nr_found;
-	uint32_t	first_index;
-	int		skipped;
-
-restart:
-	first_index = 0;
-	skipped = 0;
-	do {
-		/*
-		 * use a gang lookup to find the next inode in the tree
-		 * as the tree is sparse and a gang lookup walks to find
-		 * the number of objects requested.
-		 */
-		read_lock(&pag->pag_ici_lock);
-		nr_found = radix_tree_gang_lookup_tag(&pag->pag_ici_root,
-					(void**)&ip, first_index, 1,
-					XFS_ICI_RECLAIM_TAG);
-		if (!nr_found) {
-			read_unlock(&pag->pag_ici_lock);
-			break;
-		}
-
-		/*
-		 * Update the index for the next lookup. Catch overflows
-		 * into the next AG range which can occur if we have inodes
-		 * in the last block of the AG and we are currently
-		 * pointing to the last inode.
-		 */
-		first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1);
-		if (first_index < XFS_INO_TO_AGINO(mp, ip->i_ino)) {
-			read_unlock(&pag->pag_ici_lock);
-			break;
-		}
-
-		/* ignore if already under reclaim */
-		if (xfs_iflags_test(ip, XFS_IRECLAIM)) {
-			read_unlock(&pag->pag_ici_lock);
-			continue;
-		}
-		read_unlock(&pag->pag_ici_lock);
-
-		/*
-		 * hmmm - this is an inode already in reclaim. Do
-		 * we even bother catching it here?
-		 */
-		if (xfs_reclaim_inode(ip, 0, mode))
-			skipped++;
-	} while (nr_found);
-
-	if (skipped) {
-		delay(1);
-		goto restart;
-	}
-	return;
-
+	/* ignore if already under reclaim */
+	if (xfs_iflags_test(ip, XFS_IRECLAIM)) {
+		read_unlock(&pag->pag_ici_lock);
+		return 0;
+	}
+	read_unlock(&pag->pag_ici_lock);
+
+	return xfs_reclaim_inode(ip, 0, flags);
 }
 
 int
@@ -771,14 +763,6 @@ xfs_reclaim_inodes(
 	xfs_mount_t	*mp,
 	int		mode)
 {
-	int		i;
-
-	for (i = 0; i < mp->m_sb.sb_agcount; i++) {
-		if (!mp->m_perag[i].pag_ici_init)
-			continue;
-		xfs_reclaim_inodes_ag(mp, i, mode);
-	}
-	return 0;
+	return xfs_inode_ag_iterator(mp, xfs_reclaim_inode_now, mode,
+					XFS_ICI_RECLAIM_TAG);
 }
@@ -212,6 +212,8 @@ typedef struct xfs_perag
 /*
  * tags for inode radix tree
  */
+#define XFS_ICI_NO_TAG		(-1)	/* special flag for an untagged lookup
+					   in xfs_inode_ag_iterator */
 #define XFS_ICI_RECLAIM_TAG	0	/* inode is to be reclaimed */
 
 #define XFS_AG_MAXLEVELS(mp)	((mp)->m_ag_maxlevels)
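A note on the wraparound check in xfs_inode_ag_lookup() above: *first_index is an AG-relative inode number (agino), so when the inode just returned is the last agino the AG can address, ip->i_ino + 1 lands in the next AG's number space and XFS_INO_TO_AGINO() wraps it back to a small value. Comparing the new index against the old one catches exactly that case and terminates the walk instead of rescanning the same AG from index 0 forever. A self-contained toy demonstration of the idiom (the 8-bit agino width is an invented parameter; the real width comes from the mount geometry):

    #include <stdint.h>
    #include <stdio.h>

    /* Simplified stand-in for XFS_INO_TO_AGINO(): assume 8 bits of
     * per-AG inode number, so agino 0xff is the last one in an AG. */
    #define AGINO_BITS	8
    #define INO_TO_AGINO(ino)	((uint32_t)((ino) & ((1u << AGINO_BITS) - 1)))

    int main(void)
    {
    	uint64_t last_ino = 0x2ff;	/* AG 2, agino 0xff: last in the AG */
    	uint32_t next = INO_TO_AGINO(last_ino + 1);

    	/* next wrapped to 0, which is < 0xff: stop the walk here */
    	if (next < INO_TO_AGINO(last_ino))
    		printf("wrap detected: next=%u, stop walk\n",
    		       (unsigned)next);
    	return 0;
    }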