Commit 1b040712 authored by Christoph Hellwig, committed by Alex Elder

xfs: do not use xfs_mod_incore_sb_batch for per-cpu counters

Update the per-cpu counters manually in xfs_trans_unreserve_and_mod_sb
and remove support for per-cpu counters from xfs_mod_incore_sb_batch
to simplify it.  An added benefit is that we don't have to take
m_sb_lock for transactions that only modify per-cpu counters.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Alex Elder <aelder@sgi.com>
Parent 96540c78
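
The patch replaces the old batch path with direct calls to xfs_icsb_modify_counters() for the free-block, inode-count and inode-free counters, unwinding the already-applied deltas in reverse order if a later one fails. The following user-space sketch illustrates only that goto-chain unwind pattern; the counter struct and the apply_delta() helper are invented for illustration and are not the kernel code.

#include <stdio.h>
#include <errno.h>

struct counter { long long value; };

/* Fail (like ENOSPC) if the delta would take the counter below zero. */
static int apply_delta(struct counter *c, long long delta)
{
	if (c->value + delta < 0)
		return -ENOSPC;
	c->value += delta;
	return 0;
}

int main(void)
{
	struct counter fdblocks = { .value = 100 };
	struct counter icount   = { .value = 10 };
	struct counter ifree    = { .value = 2 };
	long long blkdelta = -40, idelta = 5, ifreedelta = -5;
	int error;

	error = apply_delta(&fdblocks, blkdelta);
	if (error)
		goto out;

	error = apply_delta(&icount, idelta);
	if (error)
		goto out_undo_fdblocks;

	error = apply_delta(&ifree, ifreedelta);	/* fails: 2 - 5 < 0 */
	if (error)
		goto out_undo_icount;

	printf("all deltas applied\n");
	return 0;

out_undo_icount:
	apply_delta(&icount, -idelta);
out_undo_fdblocks:
	apply_delta(&fdblocks, -blkdelta);
out:
	printf("error %d, counters restored: %lld %lld %lld\n",
	       error, fdblocks.value, icount.value, ifree.value);
	return 1;
}
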
@@ -1856,98 +1856,54 @@ xfs_mod_incore_sb(
 }
 
 /*
- * xfs_mod_incore_sb_batch() is used to change more than one field
- * in the in-core superblock structure at a time.  This modification
- * is protected by a lock internal to this module.  The fields and
- * changes to those fields are specified in the array of xfs_mod_sb
- * structures passed in.
+ * Change more than one field in the in-core superblock structure at a time.
  *
- * Either all of the specified deltas will be applied or none of
- * them will.  If any modified field dips below 0, then all modifications
- * will be backed out and EINVAL will be returned.
+ * The fields and changes to those fields are specified in the array of
+ * xfs_mod_sb structures passed in.  Either all of the specified deltas
+ * will be applied or none of them will.  If any modified field dips below 0,
+ * then all modifications will be backed out and EINVAL will be returned.
+ *
+ * Note that this function may not be used for the superblock values that
+ * are tracked with the in-memory per-cpu counters - a direct call to
+ * xfs_icsb_modify_counters is required for these.
  */
 int
-xfs_mod_incore_sb_batch(xfs_mount_t *mp, xfs_mod_sb_t *msb, uint nmsb, int rsvd)
+xfs_mod_incore_sb_batch(
+	struct xfs_mount	*mp,
+	xfs_mod_sb_t		*msb,
+	uint			nmsb,
+	int			rsvd)
 {
-	int		status=0;
-	xfs_mod_sb_t	*msbp;
+	xfs_mod_sb_t		*msbp = &msb[0];
+	int			error = 0;
 
 	/*
-	 * Loop through the array of mod structures and apply each
-	 * individually.  If any fail, then back out all those
-	 * which have already been applied.  Do all of this within
-	 * the scope of the m_sb_lock so that all of the changes will
-	 * be atomic.
+	 * Loop through the array of mod structures and apply each individually.
+	 * If any fail, then back out all those which have already been applied.
+	 * Do all of this within the scope of the m_sb_lock so that all of the
+	 * changes will be atomic.
 	 */
 	spin_lock(&mp->m_sb_lock);
-	msbp = &msb[0];
 	for (msbp = &msbp[0]; msbp < (msb + nmsb); msbp++) {
-		/*
-		 * Apply the delta at index n.  If it fails, break
-		 * from the loop so we'll fall into the undo loop
-		 * below.
-		 */
-		switch (msbp->msb_field) {
-#ifdef HAVE_PERCPU_SB
-		case XFS_SBS_ICOUNT:
-		case XFS_SBS_IFREE:
-		case XFS_SBS_FDBLOCKS:
-			spin_unlock(&mp->m_sb_lock);
-			status = xfs_icsb_modify_counters(mp,
-						msbp->msb_field,
-						msbp->msb_delta, rsvd);
-			spin_lock(&mp->m_sb_lock);
-			break;
-#endif
-		default:
-			status = xfs_mod_incore_sb_unlocked(mp,
-						msbp->msb_field,
-						msbp->msb_delta, rsvd);
-			break;
-		}
+		ASSERT(msbp->msb_field < XFS_SBS_ICOUNT ||
+		       msbp->msb_field > XFS_SBS_FDBLOCKS);
 
-		if (status != 0) {
-			break;
-		}
+		error = xfs_mod_incore_sb_unlocked(mp, msbp->msb_field,
+						   msbp->msb_delta, rsvd);
+		if (error)
+			goto unwind;
 	}
+	spin_unlock(&mp->m_sb_lock);
+	return 0;
 
-	/*
-	 * If we didn't complete the loop above, then back out
-	 * any changes made to the superblock.  If you add code
-	 * between the loop above and here, make sure that you
-	 * preserve the value of status. Loop back until
-	 * we step below the beginning of the array.  Make sure
-	 * we don't touch anything back there.
-	 */
-	if (status != 0) {
-		msbp--;
-		while (msbp >= msb) {
-			switch (msbp->msb_field) {
-#ifdef HAVE_PERCPU_SB
-			case XFS_SBS_ICOUNT:
-			case XFS_SBS_IFREE:
-			case XFS_SBS_FDBLOCKS:
-				spin_unlock(&mp->m_sb_lock);
-				status = xfs_icsb_modify_counters(mp,
-						msbp->msb_field,
-						-(msbp->msb_delta),
-						rsvd);
-				spin_lock(&mp->m_sb_lock);
-				break;
-#endif
-			default:
-				status = xfs_mod_incore_sb_unlocked(mp,
-							msbp->msb_field,
-							-(msbp->msb_delta),
-							rsvd);
-				break;
-			}
-			ASSERT(status == 0);
-			msbp--;
-		}
-	}
+unwind:
+	while (--msbp >= msb) {
+		error = xfs_mod_incore_sb_unlocked(mp, msbp->msb_field,
+						   -msbp->msb_delta, rsvd);
+		ASSERT(error == 0);
+	}
 	spin_unlock(&mp->m_sb_lock);
-	return status;
+	return error;
 }
 
 /*
@@ -2478,7 +2434,7 @@ xfs_icsb_balance_counter(
 	spin_unlock(&mp->m_sb_lock);
 }
 
-STATIC int
+int
 xfs_icsb_modify_counters(
 	xfs_mount_t	*mp,
 	xfs_sb_field_t	field,
...
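
xfs_mod_incore_sb_batch() above keeps its all-or-nothing behaviour for the fields that stay under m_sb_lock: deltas are applied in order, and on failure the unwind loop walks backwards over the entries already applied and reverses them. Below is a minimal user-space sketch of that pattern, assuming an invented mod_sb struct, counters[] array and mod_one() helper; it is not the kernel implementation.

#include <stdio.h>
#include <errno.h>

struct mod_sb {
	int		field;	/* index into counters[] */
	long long	delta;
};

static long long counters[3] = { 50, 10, 0 };

/* Reject any delta that would make a counter negative. */
static int mod_one(int field, long long delta)
{
	if (counters[field] + delta < 0)
		return -EINVAL;
	counters[field] += delta;
	return 0;
}

static int mod_batch(struct mod_sb *msb, unsigned int nmsb)
{
	struct mod_sb	*msbp;
	int		error = 0;

	for (msbp = &msb[0]; msbp < (msb + nmsb); msbp++) {
		error = mod_one(msbp->field, msbp->delta);
		if (error)
			goto unwind;
	}
	return 0;

unwind:
	/* undo, in reverse order, only the entries already applied */
	while (--msbp >= msb)
		mod_one(msbp->field, -msbp->delta);
	return error;
}

int main(void)
{
	struct mod_sb mods[] = {
		{ .field = 0, .delta = -20 },
		{ .field = 1, .delta = -15 },	/* fails: 10 - 15 < 0 */
		{ .field = 2, .delta = +5 },
	};
	int error = mod_batch(mods, 3);

	printf("error=%d counters=%lld,%lld,%lld\n",
	       error, counters[0], counters[1], counters[2]);
	return error ? 1 : 0;
}
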
@@ -1009,7 +1009,7 @@ void
 xfs_trans_unreserve_and_mod_sb(
 	xfs_trans_t	*tp)
 {
-	xfs_mod_sb_t	msb[14];	/* If you add cases, add entries */
+	xfs_mod_sb_t	msb[9];		/* If you add cases, add entries */
 	xfs_mod_sb_t	*msbp;
 	xfs_mount_t	*mp = tp->t_mountp;
 	/* REFERENCED */
@@ -1017,53 +1017,59 @@ xfs_trans_unreserve_and_mod_sb(
 	int		rsvd;
 	int64_t		blkdelta = 0;
 	int64_t		rtxdelta = 0;
+	int64_t		idelta = 0;
+	int64_t		ifreedelta = 0;
 
 	msbp = msb;
 	rsvd = (tp->t_flags & XFS_TRANS_RESERVE) != 0;
 
-	/* calculate free blocks delta */
+	/* calculate deltas */
 	if (tp->t_blk_res > 0)
 		blkdelta = tp->t_blk_res;
 	if ((tp->t_fdblocks_delta != 0) &&
 	    (xfs_sb_version_haslazysbcount(&mp->m_sb) ||
 	     (tp->t_flags & XFS_TRANS_SB_DIRTY)))
 		blkdelta += tp->t_fdblocks_delta;
 
-	if (blkdelta != 0) {
-		msbp->msb_field = XFS_SBS_FDBLOCKS;
-		msbp->msb_delta = blkdelta;
-		msbp++;
-	}
-
-	/* calculate free realtime extents delta */
 	if (tp->t_rtx_res > 0)
 		rtxdelta = tp->t_rtx_res;
 	if ((tp->t_frextents_delta != 0) &&
 	    (tp->t_flags & XFS_TRANS_SB_DIRTY))
 		rtxdelta += tp->t_frextents_delta;
 
-	if (rtxdelta != 0) {
-		msbp->msb_field = XFS_SBS_FREXTENTS;
-		msbp->msb_delta = rtxdelta;
-		msbp++;
+	if (xfs_sb_version_haslazysbcount(&mp->m_sb) ||
+	     (tp->t_flags & XFS_TRANS_SB_DIRTY)) {
+		idelta = tp->t_icount_delta;
+		ifreedelta = tp->t_ifree_delta;
+	}
+
+	/* apply the per-cpu counters */
+	if (blkdelta) {
+		error = xfs_icsb_modify_counters(mp, XFS_SBS_FDBLOCKS,
+						 blkdelta, rsvd);
+		if (error)
+			goto out;
 	}
 
-	/* apply remaining deltas */
-	if (xfs_sb_version_haslazysbcount(&mp->m_sb) ||
-	    (tp->t_flags & XFS_TRANS_SB_DIRTY)) {
-		if (tp->t_icount_delta != 0) {
-			msbp->msb_field = XFS_SBS_ICOUNT;
-			msbp->msb_delta = tp->t_icount_delta;
-			msbp++;
-		}
-		if (tp->t_ifree_delta != 0) {
-			msbp->msb_field = XFS_SBS_IFREE;
-			msbp->msb_delta = tp->t_ifree_delta;
-			msbp++;
-		}
+	if (idelta) {
+		error = xfs_icsb_modify_counters(mp, XFS_SBS_ICOUNT,
+						 idelta, rsvd);
+		if (error)
+			goto out_undo_fdblocks;
+	}
+
+	if (ifreedelta) {
+		error = xfs_icsb_modify_counters(mp, XFS_SBS_IFREE,
+						 ifreedelta, rsvd);
+		if (error)
+			goto out_undo_icount;
+	}
+
+	/* apply remaining deltas */
+	if (rtxdelta != 0) {
+		msbp->msb_field = XFS_SBS_FREXTENTS;
+		msbp->msb_delta = rtxdelta;
+		msbp++;
 	}
 
 	if (tp->t_flags & XFS_TRANS_SB_DIRTY) {
@@ -1115,8 +1121,24 @@ xfs_trans_unreserve_and_mod_sb(
 	if (msbp > msb) {
 		error = xfs_mod_incore_sb_batch(tp->t_mountp, msb,
 			(uint)(msbp - msb), rsvd);
-		ASSERT(error == 0);
+		if (error)
+			goto out_undo_ifreecount;
 	}
+
+	return;
+
+out_undo_ifreecount:
+	if (ifreedelta)
+		xfs_icsb_modify_counters(mp, XFS_SBS_IFREE, -ifreedelta, rsvd);
+out_undo_icount:
+	if (idelta)
+		xfs_icsb_modify_counters(mp, XFS_SBS_ICOUNT, -idelta, rsvd);
+out_undo_fdblocks:
+	if (blkdelta)
+		xfs_icsb_modify_counters(mp, XFS_SBS_FDBLOCKS, -blkdelta, rsvd);
+out:
+	ASSERT(error = 0);
+	return;
 }
 
 /*
...