Commit 3685c2a1 authored by Eric Sandeen, committed by Lachlan McIlroy

[XFS] Unwrap XFS_SB_LOCK.

Un-obfuscate XFS_SB_LOCK, remove XFS_SB_LOCK->mutex_lock->spin_lock
macros, call spin_lock directly, remove extraneous cookie holdover from
old xfs code, and change lock type to spinlock_t.

SGI-PV: 970382
SGI-Modid: xfs-linux-melb:xfs-kern:29746a
Signed-off-by: Eric Sandeen <sandeen@sandeen.net>
Signed-off-by: Donald Douwsma <donaldd@sgi.com>
Signed-off-by: Tim Shimmin <tes@sgi.com>
Parent ba74d0cb
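For orientation before reading the diff: the patch replaces the XFS_SB_LOCK()/XFS_SB_UNLOCK() wrapper macros, which expanded to mutex_spinlock()/mutex_spinunlock() and threaded an unused "cookie" value through every caller, with direct spin_lock()/spin_unlock() calls on m_sb_lock, now declared as a plain spinlock_t. A minimal sketch of the before/after pattern at a typical call site (illustrative only, not an exact excerpt from the tree):

/* Before: the macro returned a cookie that Linux never actually needed. */
unsigned long s;
s = XFS_SB_LOCK(mp);                /* mutex_spinlock(&mp->m_sb_lock) */
mp->m_sb.sb_qflags = mp->m_qflags;
XFS_SB_UNLOCK(mp, s);               /* mutex_spinunlock(&mp->m_sb_lock, s) */

/* After: take and release the superblock spinlock directly. */
spin_lock(&mp->m_sb_lock);
mp->m_sb.sb_qflags = mp->m_qflags;
spin_unlock(&mp->m_sb_lock);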
@@ -310,7 +310,6 @@ xfs_qm_mount_quotas(
xfs_mount_t *mp,
int mfsi_flags)
{
unsigned long s;
int error = 0;
uint sbf;
@@ -367,13 +366,13 @@ xfs_qm_mount_quotas(
write_changes:
/*
* We actually don't have to acquire the SB_LOCK at all.
* We actually don't have to acquire the m_sb_lock at all.
* This can only be called from mount, and that's single threaded. XXX
*/
s = XFS_SB_LOCK(mp);
spin_lock(&mp->m_sb_lock);
sbf = mp->m_sb.sb_qflags;
mp->m_sb.sb_qflags = mp->m_qflags & XFS_MOUNT_QUOTA_ALL;
XFS_SB_UNLOCK(mp, s);
spin_unlock(&mp->m_sb_lock);
if (sbf != (mp->m_qflags & XFS_MOUNT_QUOTA_ALL)) {
if (xfs_qm_write_sb_changes(mp, XFS_SB_QFLAGS)) {
@@ -1370,7 +1369,6 @@ xfs_qm_qino_alloc(
{
xfs_trans_t *tp;
int error;
unsigned long s;
int committed;
tp = xfs_trans_alloc(mp, XFS_TRANS_QM_QINOCREATE);
@@ -1402,7 +1400,7 @@ xfs_qm_qino_alloc(
* sbfields arg may contain fields other than *QUOTINO;
* VERSIONNUM for example.
*/
s = XFS_SB_LOCK(mp);
spin_lock(&mp->m_sb_lock);
if (flags & XFS_QMOPT_SBVERSION) {
#if defined(DEBUG) && defined(XFS_LOUD_RECOVERY)
unsigned oldv = mp->m_sb.sb_versionnum;
@@ -1429,7 +1427,7 @@ xfs_qm_qino_alloc(
mp->m_sb.sb_uquotino = (*ip)->i_ino;
else
mp->m_sb.sb_gquotino = (*ip)->i_ino;
XFS_SB_UNLOCK(mp, s);
spin_unlock(&mp->m_sb_lock);
xfs_mod_sb(tp, sbfields);
if ((error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES))) {
@@ -200,7 +200,6 @@ xfs_qm_scall_quotaoff(
boolean_t force)
{
uint dqtype;
unsigned long s;
int error;
uint inactivate_flags;
xfs_qoff_logitem_t *qoffstart;
@@ -237,9 +236,9 @@ xfs_qm_scall_quotaoff(
if ((flags & XFS_ALL_QUOTA_ACCT) == 0) {
mp->m_qflags &= ~(flags);
s = XFS_SB_LOCK(mp);
spin_lock(&mp->m_sb_lock);
mp->m_sb.sb_qflags = mp->m_qflags;
XFS_SB_UNLOCK(mp, s);
spin_unlock(&mp->m_sb_lock);
mutex_unlock(&(XFS_QI_QOFFLOCK(mp)));
/* XXX what to do if error ? Revert back to old vals incore ? */
@@ -415,7 +414,6 @@ xfs_qm_scall_quotaon(
uint flags)
{
int error;
unsigned long s;
uint qf;
uint accflags;
__int64_t sbflags;
@@ -468,10 +466,10 @@ xfs_qm_scall_quotaon(
* Change sb_qflags on disk but not incore mp->qflags
* if this is the root filesystem.
*/
s = XFS_SB_LOCK(mp);
spin_lock(&mp->m_sb_lock);
qf = mp->m_sb.sb_qflags;
mp->m_sb.sb_qflags = qf | flags;
XFS_SB_UNLOCK(mp, s);
spin_unlock(&mp->m_sb_lock);
/*
* There's nothing to change if it's the same.
@@ -815,7 +813,6 @@ xfs_qm_log_quotaoff(
{
xfs_trans_t *tp;
int error;
unsigned long s;
xfs_qoff_logitem_t *qoffi=NULL;
uint oldsbqflag=0;
@@ -832,10 +829,10 @@ xfs_qm_log_quotaoff(
qoffi = xfs_trans_get_qoff_item(tp, NULL, flags & XFS_ALL_QUOTA_ACCT);
xfs_trans_log_quotaoff_item(tp, qoffi);
s = XFS_SB_LOCK(mp);
spin_lock(&mp->m_sb_lock);
oldsbqflag = mp->m_sb.sb_qflags;
mp->m_sb.sb_qflags = (mp->m_qflags & ~(flags)) & XFS_MOUNT_QUOTA_ALL;
XFS_SB_UNLOCK(mp, s);
spin_unlock(&mp->m_sb_lock);
xfs_mod_sb(tp, XFS_SB_QFLAGS);
@@ -854,9 +851,9 @@ xfs_qm_log_quotaoff(
* No one else is modifying sb_qflags, so this is OK.
* We still hold the quotaofflock.
*/
s = XFS_SB_LOCK(mp);
spin_lock(&mp->m_sb_lock);
mp->m_sb.sb_qflags = oldsbqflag;
XFS_SB_UNLOCK(mp, s);
spin_unlock(&mp->m_sb_lock);
}
*qoffstartp = qoffi;
return (error);
@@ -226,17 +226,15 @@ xfs_attr_shortform_bytesfit(xfs_inode_t *dp, int bytes)
STATIC void
xfs_sbversion_add_attr2(xfs_mount_t *mp, xfs_trans_t *tp)
{
unsigned long s;
if ((mp->m_flags & XFS_MOUNT_ATTR2) &&
!(XFS_SB_VERSION_HASATTR2(&mp->m_sb))) {
s = XFS_SB_LOCK(mp);
spin_lock(&mp->m_sb_lock);
if (!XFS_SB_VERSION_HASATTR2(&mp->m_sb)) {
XFS_SB_VERSION_ADDATTR2(&mp->m_sb);
XFS_SB_UNLOCK(mp, s);
spin_unlock(&mp->m_sb_lock);
xfs_mod_sb(tp, XFS_SB_VERSIONNUM | XFS_SB_FEATURES2);
} else
XFS_SB_UNLOCK(mp, s);
spin_unlock(&mp->m_sb_lock);
}
}
@@ -3956,7 +3956,6 @@ xfs_bmap_add_attrfork(
xfs_bmap_free_t flist; /* freed extent records */
xfs_mount_t *mp; /* mount structure */
xfs_trans_t *tp; /* transaction pointer */
unsigned long s; /* spinlock spl value */
int blks; /* space reservation */
int version = 1; /* superblock attr version */
int committed; /* xaction was committed */
@@ -4053,7 +4052,7 @@ xfs_bmap_add_attrfork(
(!XFS_SB_VERSION_HASATTR2(&mp->m_sb) && version == 2)) {
__int64_t sbfields = 0;
s = XFS_SB_LOCK(mp);
spin_lock(&mp->m_sb_lock);
if (!XFS_SB_VERSION_HASATTR(&mp->m_sb)) {
XFS_SB_VERSION_ADDATTR(&mp->m_sb);
sbfields |= XFS_SB_VERSIONNUM;
@@ -4063,10 +4062,10 @@ xfs_bmap_add_attrfork(
sbfields |= (XFS_SB_VERSIONNUM | XFS_SB_FEATURES2);
}
if (sbfields) {
XFS_SB_UNLOCK(mp, s);
spin_unlock(&mp->m_sb_lock);
xfs_mod_sb(tp, sbfields);
} else
XFS_SB_UNLOCK(mp, s);
spin_unlock(&mp->m_sb_lock);
}
if ((error = xfs_bmap_finish(&tp, &flist, &committed)))
goto error2;
@@ -462,15 +462,13 @@ xfs_fs_counts(
xfs_mount_t *mp,
xfs_fsop_counts_t *cnt)
{
unsigned long s;
xfs_icsb_sync_counters_flags(mp, XFS_ICSB_LAZY_COUNT);
s = XFS_SB_LOCK(mp);
spin_lock(&mp->m_sb_lock);
cnt->freedata = mp->m_sb.sb_fdblocks - XFS_ALLOC_SET_ASIDE(mp);
cnt->freertx = mp->m_sb.sb_frextents;
cnt->freeino = mp->m_sb.sb_ifree;
cnt->allocino = mp->m_sb.sb_icount;
XFS_SB_UNLOCK(mp, s);
spin_unlock(&mp->m_sb_lock);
return 0;
}
@@ -497,7 +495,6 @@ xfs_reserve_blocks(
{
__int64_t lcounter, delta, fdblks_delta;
__uint64_t request;
unsigned long s;
/* If inval is null, report current values and return */
if (inval == (__uint64_t *)NULL) {
@@ -515,7 +512,7 @@ xfs_reserve_blocks(
* problem. we needto work out if we are freeing or allocation
* blocks first, then we can do the modification as necessary.
*
* We do this under the XFS_SB_LOCK so that if we are near
* We do this under the m_sb_lock so that if we are near
* ENOSPC, we will hold out any changes while we work out
* what to do. This means that the amount of free space can
* change while we do this, so we need to retry if we end up
@@ -526,7 +523,7 @@ xfs_reserve_blocks(
* enabled, disabled or even compiled in....
*/
retry:
s = XFS_SB_LOCK(mp);
spin_lock(&mp->m_sb_lock);
xfs_icsb_sync_counters_flags(mp, XFS_ICSB_SB_LOCKED);
/*
@@ -569,7 +566,7 @@ xfs_reserve_blocks(
outval->resblks = mp->m_resblks;
outval->resblks_avail = mp->m_resblks_avail;
}
XFS_SB_UNLOCK(mp, s);
spin_unlock(&mp->m_sb_lock);
if (fdblks_delta) {
/*
@@ -696,7 +696,6 @@ xfs_initialize_perag_data(xfs_mount_t *mp, xfs_agnumber_t agcount)
uint64_t bfreelst = 0;
uint64_t btree = 0;
int error;
int s;
for (index = 0; index < agcount; index++) {
/*
@@ -721,11 +720,11 @@ xfs_initialize_perag_data(xfs_mount_t *mp, xfs_agnumber_t agcount)
/*
* Overwrite incore superblock counters with just-read data
*/
s = XFS_SB_LOCK(mp);
spin_lock(&mp->m_sb_lock);
sbp->sb_ifree = ifree;
sbp->sb_icount = ialloc;
sbp->sb_fdblocks = bfree + bfreelst + btree;
XFS_SB_UNLOCK(mp, s);
spin_unlock(&mp->m_sb_lock);
/* Fixup the per-cpu counters as well. */
xfs_icsb_reinit_counters(mp);
@@ -1440,7 +1439,7 @@ xfs_mod_sb(xfs_trans_t *tp, __int64_t fields)
* Fields are not allowed to dip below zero, so if the delta would
* do this do not apply it and return EINVAL.
*
* The SB_LOCK must be held when this routine is called.
* The m_sb_lock must be held when this routine is called.
*/
int
xfs_mod_incore_sb_unlocked(
@@ -1605,7 +1604,7 @@ xfs_mod_incore_sb_unlocked(
/*
* xfs_mod_incore_sb() is used to change a field in the in-core
* superblock structure by the specified delta. This modification
* is protected by the SB_LOCK. Just use the xfs_mod_incore_sb_unlocked()
* is protected by the m_sb_lock. Just use the xfs_mod_incore_sb_unlocked()
* routine to do the work.
*/
int
@@ -1615,7 +1614,6 @@ xfs_mod_incore_sb(
int64_t delta,
int rsvd)
{
unsigned long s;
int status;
/* check for per-cpu counters */
@@ -1632,9 +1630,9 @@ xfs_mod_incore_sb(
/* FALLTHROUGH */
#endif
default:
s = XFS_SB_LOCK(mp);
spin_lock(&mp->m_sb_lock);
status = xfs_mod_incore_sb_unlocked(mp, field, delta, rsvd);
XFS_SB_UNLOCK(mp, s);
spin_unlock(&mp->m_sb_lock);
break;
}
@@ -1655,7 +1653,6 @@ xfs_mod_incore_sb(
int
xfs_mod_incore_sb_batch(xfs_mount_t *mp, xfs_mod_sb_t *msb, uint nmsb, int rsvd)
{
unsigned long s;
int status=0;
xfs_mod_sb_t *msbp;
@@ -1663,10 +1660,10 @@ xfs_mod_incore_sb_batch(xfs_mount_t *mp, xfs_mod_sb_t *msb, uint nmsb, int rsvd)
* Loop through the array of mod structures and apply each
* individually. If any fail, then back out all those
* which have already been applied. Do all of this within
* the scope of the SB_LOCK so that all of the changes will
* the scope of the m_sb_lock so that all of the changes will
* be atomic.
*/
s = XFS_SB_LOCK(mp);
spin_lock(&mp->m_sb_lock);
msbp = &msb[0];
for (msbp = &msbp[0]; msbp < (msb + nmsb); msbp++) {
/*
@@ -1680,11 +1677,11 @@ xfs_mod_incore_sb_batch(xfs_mount_t *mp, xfs_mod_sb_t *msb, uint nmsb, int rsvd)
case XFS_SBS_IFREE:
case XFS_SBS_FDBLOCKS:
if (!(mp->m_flags & XFS_MOUNT_NO_PERCPU_SB)) {
XFS_SB_UNLOCK(mp, s);
spin_unlock(&mp->m_sb_lock);
status = xfs_icsb_modify_counters(mp,
msbp->msb_field,
msbp->msb_delta, rsvd);
s = XFS_SB_LOCK(mp);
spin_lock(&mp->m_sb_lock);
break;
}
/* FALLTHROUGH */
@@ -1718,12 +1715,12 @@ xfs_mod_incore_sb_batch(xfs_mount_t *mp, xfs_mod_sb_t *msb, uint nmsb, int rsvd)
case XFS_SBS_IFREE:
case XFS_SBS_FDBLOCKS:
if (!(mp->m_flags & XFS_MOUNT_NO_PERCPU_SB)) {
XFS_SB_UNLOCK(mp, s);
spin_unlock(&mp->m_sb_lock);
status = xfs_icsb_modify_counters(mp,
msbp->msb_field,
-(msbp->msb_delta),
rsvd);
s = XFS_SB_LOCK(mp);
spin_lock(&mp->m_sb_lock);
break;
}
/* FALLTHROUGH */
@@ -1739,7 +1736,7 @@ xfs_mod_incore_sb_batch(xfs_mount_t *mp, xfs_mod_sb_t *msb, uint nmsb, int rsvd)
msbp--;
}
}
XFS_SB_UNLOCK(mp, s);
spin_unlock(&mp->m_sb_lock);
return status;
}
@@ -1887,12 +1884,12 @@ xfs_mount_log_sbunit(
*
* Locking rules:
*
* 1. XFS_SB_LOCK() before picking up per-cpu locks
* 1. m_sb_lock before picking up per-cpu locks
* 2. per-cpu locks always picked up via for_each_online_cpu() order
* 3. accurate counter sync requires XFS_SB_LOCK + per cpu locks
* 3. accurate counter sync requires m_sb_lock + per cpu locks
* 4. modifying per-cpu counters requires holding per-cpu lock
* 5. modifying global counters requires holding XFS_SB_LOCK
* 6. enabling or disabling a counter requires holding the XFS_SB_LOCK
* 5. modifying global counters requires holding m_sb_lock
* 6. enabling or disabling a counter requires holding the m_sb_lock
* and _none_ of the per-cpu locks.
*
* Disabled counters are only ever re-enabled by a balance operation
@@ -1945,7 +1942,7 @@ xfs_icsb_cpu_notify(
* count into the total on the global superblock and
* re-enable the counters. */
xfs_icsb_lock(mp);
s = XFS_SB_LOCK(mp);
spin_lock(&mp->m_sb_lock);
xfs_icsb_disable_counter(mp, XFS_SBS_ICOUNT);
xfs_icsb_disable_counter(mp, XFS_SBS_IFREE);
xfs_icsb_disable_counter(mp, XFS_SBS_FDBLOCKS);
@@ -1962,7 +1959,7 @@ xfs_icsb_cpu_notify(
XFS_ICSB_SB_LOCKED, 0);
xfs_icsb_balance_counter(mp, XFS_SBS_FDBLOCKS,
XFS_ICSB_SB_LOCKED, 0);
XFS_SB_UNLOCK(mp, s);
spin_unlock(&mp->m_sb_lock);
xfs_icsb_unlock(mp);
break;
}
@@ -2197,7 +2194,7 @@ xfs_icsb_sync_counters_flags(
/* Pass 1: lock all counters */
if ((flags & XFS_ICSB_SB_LOCKED) == 0)
s = XFS_SB_LOCK(mp);
spin_lock(&mp->m_sb_lock);
xfs_icsb_count(mp, &cnt, flags);
@@ -2210,7 +2207,7 @@ xfs_icsb_sync_counters_flags(
mp->m_sb.sb_fdblocks = cnt.icsb_fdblocks;
if ((flags & XFS_ICSB_SB_LOCKED) == 0)
XFS_SB_UNLOCK(mp, s);
spin_unlock(&mp->m_sb_lock);
}
/*
@@ -2255,7 +2252,7 @@ xfs_icsb_balance_counter(
uint64_t min = (uint64_t)min_per_cpu;
if (!(flags & XFS_ICSB_SB_LOCKED))
s = XFS_SB_LOCK(mp);
spin_lock(&mp->m_sb_lock);
/* disable counter and sync counter */
xfs_icsb_disable_counter(mp, field);
@@ -2289,7 +2286,7 @@ xfs_icsb_balance_counter(
xfs_icsb_enable_counter(mp, field, count, resid);
out:
if (!(flags & XFS_ICSB_SB_LOCKED))
XFS_SB_UNLOCK(mp, s);
spin_unlock(&mp->m_sb_lock);
}
int
@@ -2379,15 +2376,15 @@ xfs_icsb_modify_counters(
* running atomically here, we know a rebalance cannot
* be in progress. Hence we can go straight to operating
* on the global superblock. We do not call xfs_mod_incore_sb()
* here even though we need to get the SB_LOCK. Doing so
* here even though we need to get the m_sb_lock. Doing so
* will cause us to re-enter this function and deadlock.
* Hence we get the SB_LOCK ourselves and then call
* Hence we get the m_sb_lock ourselves and then call
* xfs_mod_incore_sb_unlocked() as the unlocked path operates
* directly on the global counters.
*/
s = XFS_SB_LOCK(mp);
spin_lock(&mp->m_sb_lock);
ret = xfs_mod_incore_sb_unlocked(mp, field, delta, rsvd);
XFS_SB_UNLOCK(mp, s);
spin_unlock(&mp->m_sb_lock);
/*
* Now that we've modified the global superblock, we
@@ -227,7 +227,7 @@ typedef struct xfs_mount {
xfs_ail_entry_t m_ail; /* fs active log item list */
uint m_ail_gen; /* fs AIL generation count */
xfs_sb_t m_sb; /* copy of fs superblock */
lock_t m_sb_lock; /* sb counter mutex */
spinlock_t m_sb_lock; /* sb counter lock */
struct xfs_buf *m_sb_bp; /* buffer for superblock */
char *m_fsname; /* filesystem name */
int m_fsname_len; /* strlen of fs name */
@@ -503,8 +503,6 @@ typedef struct xfs_mod_sb {
#define XFS_MOUNT_ILOCK(mp) mutex_lock(&((mp)->m_ilock))
#define XFS_MOUNT_IUNLOCK(mp) mutex_unlock(&((mp)->m_ilock))
#define XFS_SB_LOCK(mp) mutex_spinlock(&(mp)->m_sb_lock)
#define XFS_SB_UNLOCK(mp,s) mutex_spinunlock(&(mp)->m_sb_lock,(s))
extern xfs_mount_t *xfs_mount_init(void);
extern void xfs_mod_sb(xfs_trans_t *, __int64_t);
@@ -49,18 +49,17 @@ xfs_mount_reset_sbqflags(xfs_mount_t *mp)
{
int error;
xfs_trans_t *tp;
unsigned long s;
mp->m_qflags = 0;
/*
* It is OK to look at sb_qflags here in mount path,
* without SB_LOCK.
* without m_sb_lock.
*/
if (mp->m_sb.sb_qflags == 0)
return 0;
s = XFS_SB_LOCK(mp);
spin_lock(&mp->m_sb_lock);
mp->m_sb.sb_qflags = 0;
XFS_SB_UNLOCK(mp, s);
spin_unlock(&mp->m_sb_lock);
/*
* if the fs is readonly, let the incore superblock run
@@ -330,7 +330,6 @@ xfs_bump_ino_vers2(
xfs_inode_t *ip)
{
xfs_mount_t *mp;
unsigned long s;
ASSERT(ismrlocked (&ip->i_lock, MR_UPDATE));
ASSERT(ip->i_d.di_version == XFS_DINODE_VERSION_1);
@@ -340,13 +339,13 @@ xfs_bump_ino_vers2(
memset(&(ip->i_d.di_pad[0]), 0, sizeof(ip->i_d.di_pad));
mp = tp->t_mountp;
if (!XFS_SB_VERSION_HASNLINK(&mp->m_sb)) {
s = XFS_SB_LOCK(mp);
spin_lock(&mp->m_sb_lock);
if (!XFS_SB_VERSION_HASNLINK(&mp->m_sb)) {
XFS_SB_VERSION_ADDNLINK(&mp->m_sb);
XFS_SB_UNLOCK(mp, s);
spin_unlock(&mp->m_sb_lock);
xfs_mod_sb(tp, XFS_SB_VERSIONNUM);
} else {
XFS_SB_UNLOCK(mp, s);
spin_unlock(&mp->m_sb_lock);
}
}
/* Caller must log the inode */
@@ -854,14 +854,13 @@ xfs_statvfs(
__uint64_t fakeinos;
xfs_extlen_t lsize;
xfs_sb_t *sbp;
unsigned long s;
sbp = &(mp->m_sb);
statp->f_type = XFS_SB_MAGIC;
xfs_icsb_sync_counters_flags(mp, XFS_ICSB_LAZY_COUNT);
s = XFS_SB_LOCK(mp);
spin_lock(&mp->m_sb_lock);
statp->f_bsize = sbp->sb_blocksize;
lsize = sbp->sb_logstart ? sbp->sb_logblocks : 0;
statp->f_blocks = sbp->sb_dblocks - lsize;
@@ -881,7 +880,7 @@ xfs_statvfs(
statp->f_files,
mp->m_maxicount);
statp->f_ffree = statp->f_files - (sbp->sb_icount - sbp->sb_ifree);
XFS_SB_UNLOCK(mp, s);
spin_unlock(&mp->m_sb_lock);
xfs_statvfs_fsid(statp, mp);
statp->f_namelen = MAXNAMELEN - 1;