Commit 0cadda1c authored by Christoph Hellwig, committed by Alex Elder

xfs: remove duplicate buffer flags

Currently we define aliases for the buffer flags in various
namespaces, which only adds confusion.  Remove all but the XBF_
flags to clean this up a bit.

Note that we still abuse XFS_B_ASYNC/XBF_ASYNC for some non-buffer
uses, but I'll clean that up later.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Dave Chinner <david@fromorbit.com>
Signed-off-by: Alex Elder <aelder@sgi.com>
Parent a210c1aa
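The change itself is mechanical: every alias (XFS_B_*, XFS_BUF_TRYLOCK, XFS_INCORE_TRYLOCK, XFS_BUF_LOCK, XFS_BUF_MAPPED, XFS_BUF_MANAGE, BUF_BUSY) is deleted from xfs_buf.h and each call site now uses the underlying XBF_ flag directly, as the hunks below show. As a minimal userspace sketch of the duplicate-alias pattern being removed (illustrative only, not XFS code; the flag value is an assumed placeholder, not the real XBF_ASYNC bit):

#include <stdio.h>

/* Canonical flag name; the bit value here is a placeholder for illustration. */
#define XBF_ASYNC	(1 << 4)
/* Duplicate alias in a second namespace -- the kind of define this patch removes. */
#define XFS_B_ASYNC	XBF_ASYNC

int main(void)
{
	unsigned int flags = XFS_B_ASYNC;	/* old-style caller sets the alias */

	/* Both names test the same bit, so the alias only obscures which API is meant. */
	printf("async set: %d\n", (flags & XBF_ASYNC) != 0);
	return 0;
}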
@@ -1169,7 +1169,7 @@ xfs_bioerror_relse(
 	XFS_BUF_STALE(bp);
 	XFS_BUF_CLR_IODONE_FUNC(bp);
 	XFS_BUF_CLR_BDSTRAT_FUNC(bp);
-	if (!(fl & XFS_B_ASYNC)) {
+	if (!(fl & XBF_ASYNC)) {
 		/*
 		 * Mark b_error and B_ERROR _both_.
 		 * Lot's of chunkcache code assumes that.
@@ -275,33 +275,19 @@ extern void xfs_buf_terminate(void);
 ({ char __b[BDEVNAME_SIZE]; bdevname((target)->bt_bdev, __b); __b; })
-#define XFS_B_ASYNC		XBF_ASYNC
-#define XFS_B_DELWRI		XBF_DELWRI
-#define XFS_B_READ		XBF_READ
-#define XFS_B_WRITE		XBF_WRITE
-#define XFS_B_STALE		XBF_STALE
-#define XFS_BUF_TRYLOCK		XBF_TRYLOCK
-#define XFS_INCORE_TRYLOCK	XBF_TRYLOCK
-#define XFS_BUF_LOCK		XBF_LOCK
-#define XFS_BUF_MAPPED		XBF_MAPPED
-#define BUF_BUSY		XBF_DONT_BLOCK
 #define XFS_BUF_BFLAGS(bp)	((bp)->b_flags)
 #define XFS_BUF_ZEROFLAGS(bp)	((bp)->b_flags &= \
 		~(XBF_READ|XBF_WRITE|XBF_ASYNC|XBF_DELWRI|XBF_ORDERED))
-#define XFS_BUF_STALE(bp)	((bp)->b_flags |= XFS_B_STALE)
-#define XFS_BUF_UNSTALE(bp)	((bp)->b_flags &= ~XFS_B_STALE)
-#define XFS_BUF_ISSTALE(bp)	((bp)->b_flags & XFS_B_STALE)
+#define XFS_BUF_STALE(bp)	((bp)->b_flags |= XBF_STALE)
+#define XFS_BUF_UNSTALE(bp)	((bp)->b_flags &= ~XBF_STALE)
+#define XFS_BUF_ISSTALE(bp)	((bp)->b_flags & XBF_STALE)
 #define XFS_BUF_SUPER_STALE(bp)	do {				\
 					XFS_BUF_STALE(bp);	\
 					xfs_buf_delwri_dequeue(bp);	\
 					XFS_BUF_DONE(bp);	\
 				} while (0)
-#define XFS_BUF_MANAGE		XBF_FS_MANAGED
 #define XFS_BUF_UNMANAGE(bp)	((bp)->b_flags &= ~XBF_FS_MANAGED)
 #define XFS_BUF_DELAYWRITE(bp)	((bp)->b_flags |= XBF_DELWRI)

@@ -390,7 +376,7 @@ static inline void xfs_buf_relse(xfs_buf_t *bp)
 #define xfs_biomove(bp, off, len, data, rw) \
 	xfs_buf_iomove((bp), (off), (len), (data), \
-		((rw) == XFS_B_WRITE) ? XBRW_WRITE : XBRW_READ)
+		((rw) == XBF_WRITE) ? XBRW_WRITE : XBRW_READ)
 #define xfs_biozero(bp, off, len) \
 	xfs_buf_iomove((bp), (off), (len), NULL, XBRW_ZERO)
@@ -79,7 +79,7 @@ xfs_flush_pages(
 		xfs_iflags_clear(ip, XFS_ITRUNCATED);
 		ret = -filemap_fdatawrite(mapping);
 	}
-	if (flags & XFS_B_ASYNC)
+	if (flags & XBF_ASYNC)
 		return ret;
 	ret2 = xfs_wait_on_pages(ip, first, last);
 	if (!ret)
@@ -234,7 +234,7 @@ xfs_sync_inode_data(
 	}
 	error = xfs_flush_pages(ip, 0, -1, (flags & SYNC_WAIT) ?
-				0 : XFS_B_ASYNC, FI_NONE);
+				0 : XBF_ASYNC, FI_NONE);
 	xfs_iunlock(ip, XFS_IOLOCK_SHARED);
 out_wait:

@@ -370,7 +370,7 @@ xfs_sync_fsdata(
 	if (flags & SYNC_TRYLOCK) {
 		ASSERT(!(flags & SYNC_WAIT));
-		bp = xfs_getsb(mp, XFS_BUF_TRYLOCK);
+		bp = xfs_getsb(mp, XBF_TRYLOCK);
 		if (!bp)
 			goto out;
@@ -1527,8 +1527,7 @@ xfs_qm_dqflock_pushbuf_wait(
 	 * the flush lock when the I/O completes.
 	 */
 	bp = xfs_incore(dqp->q_mount->m_ddev_targp, dqp->q_blkno,
-		    XFS_QI_DQCHUNKLEN(dqp->q_mount),
-		    XFS_INCORE_TRYLOCK);
+		    XFS_QI_DQCHUNKLEN(dqp->q_mount), XBF_TRYLOCK);
 	if (bp != NULL) {
 		if (XFS_BUF_ISDELAYWRITE(bp)) {
 			int error;

@@ -237,8 +237,7 @@ xfs_qm_dquot_logitem_pushbuf(
 	}
 	mp = dqp->q_mount;
 	bp = xfs_incore(mp->m_ddev_targp, qip->qli_format.qlf_blkno,
-		    XFS_QI_DQCHUNKLEN(mp),
-		    XFS_INCORE_TRYLOCK);
+		    XFS_QI_DQCHUNKLEN(mp), XBF_TRYLOCK);
 	if (bp != NULL) {
 		if (XFS_BUF_ISDELAYWRITE(bp)) {
 			dopush = ((qip->qli_item.li_flags & XFS_LI_IN_AIL) &&
@@ -2180,7 +2180,7 @@ xfs_alloc_read_agf(
 	ASSERT(agno != NULLAGNUMBER);
 	error = xfs_read_agf(mp, tp, agno,
-			(flags & XFS_ALLOC_FLAG_TRYLOCK) ? XFS_BUF_TRYLOCK : 0,
+			(flags & XFS_ALLOC_FLAG_TRYLOCK) ? XBF_TRYLOCK : 0,
 			bpp);
 	if (error)
 		return error;
@@ -2015,15 +2015,14 @@ xfs_attr_rmtval_get(xfs_da_args_t *args)
 			dblkno = XFS_FSB_TO_DADDR(mp, map[i].br_startblock);
 			blkcnt = XFS_FSB_TO_BB(mp, map[i].br_blockcount);
 			error = xfs_read_buf(mp, mp->m_ddev_targp, dblkno,
-					     blkcnt,
-					     XFS_BUF_LOCK | XBF_DONT_BLOCK,
+					     blkcnt, XBF_LOCK | XBF_DONT_BLOCK,
 					     &bp);
 			if (error)
 				return(error);
 			tmp = (valuelen < XFS_BUF_SIZE(bp))
 				? valuelen : XFS_BUF_SIZE(bp);
-			xfs_biomove(bp, 0, tmp, dst, XFS_B_READ);
+			xfs_biomove(bp, 0, tmp, dst, XBF_READ);
 			xfs_buf_relse(bp);
 			dst += tmp;
 			valuelen -= tmp;

@@ -2149,13 +2148,13 @@ xfs_attr_rmtval_set(xfs_da_args_t *args)
 		blkcnt = XFS_FSB_TO_BB(mp, map.br_blockcount);
 		bp = xfs_buf_get(mp->m_ddev_targp, dblkno, blkcnt,
-				 XFS_BUF_LOCK | XBF_DONT_BLOCK);
+				 XBF_LOCK | XBF_DONT_BLOCK);
 		ASSERT(bp);
 		ASSERT(!XFS_BUF_GETERROR(bp));
 		tmp = (valuelen < XFS_BUF_SIZE(bp)) ? valuelen :
 			XFS_BUF_SIZE(bp);
-		xfs_biomove(bp, 0, tmp, src, XFS_B_WRITE);
+		xfs_biomove(bp, 0, tmp, src, XBF_WRITE);
 		if (tmp < XFS_BUF_SIZE(bp))
 			xfs_biozero(bp, tmp, XFS_BUF_SIZE(bp) - tmp);
 		if ((error = xfs_bwrite(mp, bp))) {/* GROT: NOTE: synchronous write */

@@ -2216,8 +2215,7 @@ xfs_attr_rmtval_remove(xfs_da_args_t *args)
 		/*
 		 * If the "remote" value is in the cache, remove it.
 		 */
-		bp = xfs_incore(mp->m_ddev_targp, dblkno, blkcnt,
-				XFS_INCORE_TRYLOCK);
+		bp = xfs_incore(mp->m_ddev_targp, dblkno, blkcnt, XBF_TRYLOCK);
 		if (bp) {
 			XFS_BUF_STALE(bp);
 			XFS_BUF_UNDELAYWRITE(bp);

@@ -2950,7 +2950,7 @@ xfs_attr_leaf_freextent(xfs_trans_t **trans, xfs_inode_t *dp,
 						     map.br_blockcount);
 			bp = xfs_trans_get_buf(*trans,
 					dp->i_mount->m_ddev_targp,
-					dblkno, dblkcnt, XFS_BUF_LOCK);
+					dblkno, dblkcnt, XBF_LOCK);
 			xfs_trans_binval(*trans, bp);
 			/*
 			 * Roll to next transaction.
@@ -977,7 +977,7 @@ xfs_btree_get_buf_block(
 	xfs_daddr_t		d;
 	/* need to sort out how callers deal with failures first */
-	ASSERT(!(flags & XFS_BUF_TRYLOCK));
+	ASSERT(!(flags & XBF_TRYLOCK));
 	d = xfs_btree_ptr_to_daddr(cur, ptr);
 	*bpp = xfs_trans_get_buf(cur->bc_tp, mp->m_ddev_targp, d,

@@ -1008,7 +1008,7 @@ xfs_btree_read_buf_block(
 	int			error;
 	/* need to sort out how callers deal with failures first */
-	ASSERT(!(flags & XFS_BUF_TRYLOCK));
+	ASSERT(!(flags & XBF_TRYLOCK));
 	d = xfs_btree_ptr_to_daddr(cur, ptr);
 	error = xfs_trans_read_buf(mp, cur->bc_tp, mp->m_ddev_targp, d,
@@ -205,7 +205,7 @@ xfs_ialloc_inode_init(
 		d = XFS_AGB_TO_DADDR(mp, agno, agbno + (j * blks_per_cluster));
 		fbuf = xfs_trans_get_buf(tp, mp->m_ddev_targp, d,
 					 mp->m_bsize * blks_per_cluster,
-					 XFS_BUF_LOCK);
+					 XBF_LOCK);
 		ASSERT(fbuf);
 		ASSERT(!XFS_BUF_GETERROR(fbuf));
@@ -151,7 +151,7 @@ xfs_imap_to_bp(
 				"an error %d on %s. Returning error.",
 				error, mp->m_fsname);
 	} else {
-		ASSERT(buf_flags & XFS_BUF_TRYLOCK);
+		ASSERT(buf_flags & XBF_TRYLOCK);
 	}
 	return error;
 }

@@ -239,7 +239,7 @@ xfs_inotobp(
 	if (error)
 		return error;
-	error = xfs_imap_to_bp(mp, tp, &imap, &bp, XFS_BUF_LOCK, imap_flags);
+	error = xfs_imap_to_bp(mp, tp, &imap, &bp, XBF_LOCK, imap_flags);
 	if (error)
 		return error;

@@ -285,7 +285,7 @@ xfs_itobp(
 		return error;
 	if (!bp) {
-		ASSERT(buf_flags & XFS_BUF_TRYLOCK);
+		ASSERT(buf_flags & XBF_TRYLOCK);
 		ASSERT(tp == NULL);
 		*bpp = NULL;
 		return EAGAIN;

@@ -807,7 +807,7 @@ xfs_iread(
 	 * Get pointers to the on-disk inode and the buffer containing it.
 	 */
 	error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &bp,
-			       XFS_BUF_LOCK, iget_flags);
+			       XBF_LOCK, iget_flags);
 	if (error)
 		return error;
 	dip = (xfs_dinode_t *)xfs_buf_offset(bp, ip->i_imap.im_boffset);

@@ -1751,7 +1751,7 @@ xfs_iunlink(
 		 * Here we put the head pointer into our next pointer,
 		 * and then we fall through to point the head at us.
 		 */
-		error = xfs_itobp(mp, tp, ip, &dip, &ibp, XFS_BUF_LOCK);
+		error = xfs_itobp(mp, tp, ip, &dip, &ibp, XBF_LOCK);
 		if (error)
 			return error;

@@ -1833,7 +1833,7 @@ xfs_iunlink_remove(
 		 * of dealing with the buffer when there is no need to
 		 * change it.
 		 */
-		error = xfs_itobp(mp, tp, ip, &dip, &ibp, XFS_BUF_LOCK);
+		error = xfs_itobp(mp, tp, ip, &dip, &ibp, XBF_LOCK);
 		if (error) {
 			cmn_err(CE_WARN,
 				"xfs_iunlink_remove: xfs_itobp() returned an error %d on %s. Returning error.",

@@ -1895,7 +1895,7 @@ xfs_iunlink_remove(
 		 * Now last_ibp points to the buffer previous to us on
 		 * the unlinked list. Pull us from the list.
 		 */
-		error = xfs_itobp(mp, tp, ip, &dip, &ibp, XFS_BUF_LOCK);
+		error = xfs_itobp(mp, tp, ip, &dip, &ibp, XBF_LOCK);
 		if (error) {
 			cmn_err(CE_WARN,
 				"xfs_iunlink_remove: xfs_itobp() returned an error %d on %s. Returning error.",

@@ -2040,7 +2040,7 @@ xfs_ifree_cluster(
 		bp = xfs_trans_get_buf(tp, mp->m_ddev_targp, blkno,
 					mp->m_bsize * blks_per_cluster,
-					XFS_BUF_LOCK);
+					XBF_LOCK);
 		pre_flushed = 0;
 		lip = XFS_BUF_FSPRIVATE(bp, xfs_log_item_t *);

@@ -2151,7 +2151,7 @@ xfs_ifree(
 	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
-	error = xfs_itobp(ip->i_mount, tp, ip, &dip, &ibp, XFS_BUF_LOCK);
+	error = xfs_itobp(ip->i_mount, tp, ip, &dip, &ibp, XBF_LOCK);
 	if (error)
 		return error;

@@ -2952,7 +2952,7 @@ xfs_iflush(
 	 * Get the buffer containing the on-disk inode.
 	 */
 	error = xfs_itobp(mp, NULL, ip, &dip, &bp,
-				noblock ? XFS_BUF_TRYLOCK : XFS_BUF_LOCK);
+				noblock ? XBF_TRYLOCK : XBF_LOCK);
 	if (error || !bp) {
 		xfs_ifunlock(ip);
 		return error;
@@ -785,7 +785,7 @@ xfs_inode_item_pushbuf(
 	mp = ip->i_mount;
 	bp = xfs_incore(mp->m_ddev_targp, iip->ili_format.ilf_blkno,
-			iip->ili_format.ilf_len, XFS_INCORE_TRYLOCK);
+			iip->ili_format.ilf_len, XBF_TRYLOCK);
 	if (bp != NULL) {
 		if (XFS_BUF_ISDELAYWRITE(bp)) {
@@ -2184,9 +2184,9 @@ xlog_recover_do_buffer_trans(
 	}
 	mp = log->l_mp;
-	buf_flags = XFS_BUF_LOCK;
+	buf_flags = XBF_LOCK;
 	if (!(flags & XFS_BLI_INODE_BUF))
-		buf_flags |= XFS_BUF_MAPPED;
+		buf_flags |= XBF_MAPPED;
 	bp = xfs_buf_read(mp->m_ddev_targp, blkno, len, buf_flags);
 	if (XFS_BUF_ISERROR(bp)) {

@@ -2288,7 +2288,7 @@ xlog_recover_do_inode_trans(
 	}
 	bp = xfs_buf_read(mp->m_ddev_targp, in_f->ilf_blkno, in_f->ilf_len,
-			  XFS_BUF_LOCK);
+			  XBF_LOCK);
 	if (XFS_BUF_ISERROR(bp)) {
 		xfs_ioerror_alert("xlog_recover_do..(read#2)", mp,
 				  bp, in_f->ilf_blkno);

@@ -3146,7 +3146,7 @@ xlog_recover_process_one_iunlink(
 	/*
 	 * Get the on disk inode to find the next inode in the bucket.
 	 */
-	error = xfs_itobp(mp, NULL, ip, &dip, &ibp, XFS_BUF_LOCK);
+	error = xfs_itobp(mp, NULL, ip, &dip, &ibp, XBF_LOCK);
 	if (error)
 		goto fail_iput;
@@ -665,7 +665,7 @@ xfs_readsb(xfs_mount_t *mp, int flags)
 	 * access to the superblock.
 	 */
 	sector_size = xfs_getsize_buftarg(mp->m_ddev_targp);
-	extra_flags = XFS_BUF_LOCK | XFS_BUF_MANAGE | XFS_BUF_MAPPED;
+	extra_flags = XBF_LOCK | XBF_FS_MANAGED | XBF_MAPPED;
 	bp = xfs_buf_read(mp->m_ddev_targp, XFS_SB_DADDR, BTOBB(sector_size),
 			  extra_flags);

@@ -1969,7 +1969,7 @@ xfs_getsb(
 	ASSERT(mp->m_sb_bp != NULL);
 	bp = mp->m_sb_bp;
-	if (flags & XFS_BUF_TRYLOCK) {
+	if (flags & XBF_TRYLOCK) {
 		if (!XFS_BUF_CPSEMA(bp)) {
 			return NULL;
 		}
@@ -75,13 +75,14 @@ xfs_trans_get_buf(xfs_trans_t *tp,
 	xfs_buf_log_item_t	*bip;
 	if (flags == 0)
-		flags = XFS_BUF_LOCK | XFS_BUF_MAPPED;
+		flags = XBF_LOCK | XBF_MAPPED;
 	/*
 	 * Default to a normal get_buf() call if the tp is NULL.
 	 */
 	if (tp == NULL)
-		return xfs_buf_get(target_dev, blkno, len, flags | BUF_BUSY);
+		return xfs_buf_get(target_dev, blkno, len,
+				   flags | XBF_DONT_BLOCK);
 	/*
 	 * If we find the buffer in the cache with this transaction

@@ -117,14 +118,14 @@ xfs_trans_get_buf(xfs_trans_t *tp,
 	}
 	/*
-	 * We always specify the BUF_BUSY flag within a transaction so
-	 * that get_buf does not try to push out a delayed write buffer
+	 * We always specify the XBF_DONT_BLOCK flag within a transaction
+	 * so that get_buf does not try to push out a delayed write buffer
 	 * which might cause another transaction to take place (if the
 	 * buffer was delayed alloc). Such recursive transactions can
 	 * easily deadlock with our current transaction as well as cause
 	 * us to run out of stack space.
 	 */
-	bp = xfs_buf_get(target_dev, blkno, len, flags | BUF_BUSY);
+	bp = xfs_buf_get(target_dev, blkno, len, flags | XBF_DONT_BLOCK);
 	if (bp == NULL) {
 		return NULL;
 	}

@@ -290,15 +291,15 @@ xfs_trans_read_buf(
 	int			error;
 	if (flags == 0)
-		flags = XFS_BUF_LOCK | XFS_BUF_MAPPED;
+		flags = XBF_LOCK | XBF_MAPPED;
 	/*
 	 * Default to a normal get_buf() call if the tp is NULL.
 	 */
 	if (tp == NULL) {
-		bp = xfs_buf_read(target, blkno, len, flags | BUF_BUSY);
+		bp = xfs_buf_read(target, blkno, len, flags | XBF_DONT_BLOCK);
 		if (!bp)
-			return (flags & XFS_BUF_TRYLOCK) ?
+			return (flags & XBF_TRYLOCK) ?
 					EAGAIN : XFS_ERROR(ENOMEM);
 		if (XFS_BUF_GETERROR(bp) != 0) {

@@ -385,14 +386,14 @@ xfs_trans_read_buf(
 	}
 	/*
-	 * We always specify the BUF_BUSY flag within a transaction so
-	 * that get_buf does not try to push out a delayed write buffer
+	 * We always specify the XBF_DONT_BLOCK flag within a transaction
+	 * so that get_buf does not try to push out a delayed write buffer
 	 * which might cause another transaction to take place (if the
 	 * buffer was delayed alloc). Such recursive transactions can
 	 * easily deadlock with our current transaction as well as cause
 	 * us to run out of stack space.
 	 */
-	bp = xfs_buf_read(target, blkno, len, flags | BUF_BUSY);
+	bp = xfs_buf_read(target, blkno, len, flags | XBF_DONT_BLOCK);
 	if (bp == NULL) {
 		*bpp = NULL;
 		return 0;

@@ -472,8 +473,8 @@ xfs_trans_read_buf(
 		if (XFS_BUF_ISSTALE(bp) && XFS_BUF_ISDELAYWRITE(bp))
 			cmn_err(CE_NOTE, "about to pop assert, bp == 0x%p", bp);
 #endif
-		ASSERT((XFS_BUF_BFLAGS(bp) & (XFS_B_STALE|XFS_B_DELWRI)) !=
-						(XFS_B_STALE|XFS_B_DELWRI));
+		ASSERT((XFS_BUF_BFLAGS(bp) & (XBF_STALE|XBF_DELWRI)) !=
+						(XBF_STALE|XBF_DELWRI));
 		trace_xfs_trans_read_buf_shut(bp, _RET_IP_);
 		xfs_buf_relse(bp);
@@ -256,7 +256,7 @@ xfs_setattr(
 		    iattr->ia_size > ip->i_d.di_size) {
 			code = xfs_flush_pages(ip,
 					ip->i_d.di_size, iattr->ia_size,
-					XFS_B_ASYNC, FI_NONE);
+					XBF_ASYNC, FI_NONE);
 		}
 		/* wait for all I/O to complete */

@@ -1096,7 +1096,7 @@ xfs_release(
 		 */
 		truncated = xfs_iflags_test_and_clear(ip, XFS_ITRUNCATED);
 		if (truncated && VN_DIRTY(VFS_I(ip)) && ip->i_delayed_blks > 0)
-			xfs_flush_pages(ip, 0, -1, XFS_B_ASYNC, FI_NONE);
+			xfs_flush_pages(ip, 0, -1, XBF_ASYNC, FI_NONE);
 	}
 	if (ip->i_d.di_nlink != 0) {