提交 b524bfee 编写于 作者: C Christoph Hellwig 提交者: Lachlan McIlroy

[XFS] refactor xfs_btree_readahead

From: Dave Chinner <dgc@sgi.com>

Refactor xfs_btree_readahead to make it more readable:

(a) remove the inline xfs_btree_readahead wrapper and move all checks out

of line into the main routine.

(b) factor out helpers for short/long form btrees

(c) move check for root in inodes from the callers into
xfs_btree_readahead

[hch: split out from a big patch and minor cleanups]

SGI-PV: 985583

SGI-Modid: xfs-linux-melb:xfs-kern:32182a
Signed-off-by: Christoph Hellwig <hch@infradead.org>
Signed-off-by: Lachlan McIlroy <lachlan@sgi.com>
Signed-off-by: Bill O'Donnell <billodo@sgi.com>
Signed-off-by: David Chinner <david@fromorbit.com>
上级 e99ab90d
...@@ -1721,8 +1721,9 @@ xfs_bmbt_decrement( ...@@ -1721,8 +1721,9 @@ xfs_bmbt_decrement(
XFS_BMBT_TRACE_CURSOR(cur, ENTRY); XFS_BMBT_TRACE_CURSOR(cur, ENTRY);
XFS_BMBT_TRACE_ARGI(cur, level); XFS_BMBT_TRACE_ARGI(cur, level);
ASSERT(level < cur->bc_nlevels); ASSERT(level < cur->bc_nlevels);
if (level < cur->bc_nlevels - 1)
xfs_btree_readahead(cur, level, XFS_BTCUR_LEFTRA); xfs_btree_readahead(cur, level, XFS_BTCUR_LEFTRA);
if (--cur->bc_ptrs[level] > 0) { if (--cur->bc_ptrs[level] > 0) {
XFS_BMBT_TRACE_CURSOR(cur, EXIT); XFS_BMBT_TRACE_CURSOR(cur, EXIT);
*stat = 1; *stat = 1;
...@@ -1743,8 +1744,7 @@ xfs_bmbt_decrement( ...@@ -1743,8 +1744,7 @@ xfs_bmbt_decrement(
for (lev = level + 1; lev < cur->bc_nlevels; lev++) { for (lev = level + 1; lev < cur->bc_nlevels; lev++) {
if (--cur->bc_ptrs[lev] > 0) if (--cur->bc_ptrs[lev] > 0)
break; break;
if (lev < cur->bc_nlevels - 1) xfs_btree_readahead(cur, lev, XFS_BTCUR_LEFTRA);
xfs_btree_readahead(cur, lev, XFS_BTCUR_LEFTRA);
} }
if (lev == cur->bc_nlevels) { if (lev == cur->bc_nlevels) {
XFS_BMBT_TRACE_CURSOR(cur, EXIT); XFS_BMBT_TRACE_CURSOR(cur, EXIT);
...@@ -1995,8 +1995,8 @@ xfs_bmbt_increment( ...@@ -1995,8 +1995,8 @@ xfs_bmbt_increment(
XFS_BMBT_TRACE_CURSOR(cur, ENTRY); XFS_BMBT_TRACE_CURSOR(cur, ENTRY);
XFS_BMBT_TRACE_ARGI(cur, level); XFS_BMBT_TRACE_ARGI(cur, level);
ASSERT(level < cur->bc_nlevels); ASSERT(level < cur->bc_nlevels);
if (level < cur->bc_nlevels - 1)
xfs_btree_readahead(cur, level, XFS_BTCUR_RIGHTRA); xfs_btree_readahead(cur, level, XFS_BTCUR_RIGHTRA);
block = xfs_bmbt_get_block(cur, level, &bp); block = xfs_bmbt_get_block(cur, level, &bp);
#ifdef DEBUG #ifdef DEBUG
if ((error = xfs_btree_check_lblock(cur, block, level, bp))) { if ((error = xfs_btree_check_lblock(cur, block, level, bp))) {
...@@ -2024,8 +2024,7 @@ xfs_bmbt_increment( ...@@ -2024,8 +2024,7 @@ xfs_bmbt_increment(
#endif #endif
if (++cur->bc_ptrs[lev] <= be16_to_cpu(block->bb_numrecs)) if (++cur->bc_ptrs[lev] <= be16_to_cpu(block->bb_numrecs))
break; break;
if (lev < cur->bc_nlevels - 1) xfs_btree_readahead(cur, lev, XFS_BTCUR_RIGHTRA);
xfs_btree_readahead(cur, lev, XFS_BTCUR_RIGHTRA);
} }
if (lev == cur->bc_nlevels) { if (lev == cur->bc_nlevels) {
XFS_BMBT_TRACE_CURSOR(cur, EXIT); XFS_BMBT_TRACE_CURSOR(cur, EXIT);
......
...@@ -725,66 +725,84 @@ xfs_btree_reada_bufs( ...@@ -725,66 +725,84 @@ xfs_btree_reada_bufs(
xfs_baread(mp->m_ddev_targp, d, mp->m_bsize * count); xfs_baread(mp->m_ddev_targp, d, mp->m_bsize * count);
} }
/*
 * Issue readahead for the left and/or right siblings of a long-format
 * (64-bit pointer) btree block, as selected by the XFS_BTCUR_{LEFT,RIGHT}RA
 * bits in @lr.  Returns the number of readahead requests issued.
 */
STATIC int
xfs_btree_readahead_lblock(
	struct xfs_btree_cur	*cur,
	int			lr,
	struct xfs_btree_block	*block)
{
	xfs_fsblock_t		left;
	xfs_fsblock_t		right;
	int			count = 0;

	left = be64_to_cpu(block->bb_u.l.bb_leftsib);
	right = be64_to_cpu(block->bb_u.l.bb_rightsib);

	/* Only read ahead siblings that actually exist. */
	if ((lr & XFS_BTCUR_LEFTRA) && left != NULLDFSBNO) {
		xfs_btree_reada_bufl(cur->bc_mp, left, 1);
		count++;
	}

	if ((lr & XFS_BTCUR_RIGHTRA) && right != NULLDFSBNO) {
		xfs_btree_reada_bufl(cur->bc_mp, right, 1);
		count++;
	}

	return count;
}
/*
 * Issue readahead for the left and/or right siblings of a short-format
 * (32-bit AG-relative pointer) btree block, as selected by the
 * XFS_BTCUR_{LEFT,RIGHT}RA bits in @lr.  Returns the number of readahead
 * requests issued.
 */
STATIC int
xfs_btree_readahead_sblock(
	struct xfs_btree_cur	*cur,
	int			lr,
	struct xfs_btree_block	*block)
{
	xfs_agblock_t		left;
	xfs_agblock_t		right;
	int			count = 0;

	left = be32_to_cpu(block->bb_u.s.bb_leftsib);
	right = be32_to_cpu(block->bb_u.s.bb_rightsib);

	/* Only read ahead siblings that actually exist within this AG. */
	if ((lr & XFS_BTCUR_LEFTRA) && left != NULLAGBLOCK) {
		xfs_btree_reada_bufs(cur->bc_mp, cur->bc_private.a.agno,
				     left, 1);
		count++;
	}

	if ((lr & XFS_BTCUR_RIGHTRA) && right != NULLAGBLOCK) {
		xfs_btree_reada_bufs(cur->bc_mp, cur->bc_private.a.agno,
				     right, 1);
		count++;
	}

	return count;
}
/* /*
* Read-ahead btree blocks, at the given level. * Read-ahead btree blocks, at the given level.
* Bits in lr are set from XFS_BTCUR_{LEFT,RIGHT}RA. * Bits in lr are set from XFS_BTCUR_{LEFT,RIGHT}RA.
*/ */
int int
xfs_btree_readahead_core( xfs_btree_readahead(
xfs_btree_cur_t *cur, /* btree cursor */ struct xfs_btree_cur *cur, /* btree cursor */
int lev, /* level in btree */ int lev, /* level in btree */
int lr) /* left/right bits */ int lr) /* left/right bits */
{ {
xfs_alloc_block_t *a; struct xfs_btree_block *block;
xfs_bmbt_block_t *b;
xfs_inobt_block_t *i; /*
int rval = 0; * No readahead needed if we are at the root level and the
* btree root is stored in the inode.
*/
if ((cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) &&
(lev == cur->bc_nlevels - 1))
return 0;
if ((cur->bc_ra[lev] | lr) == cur->bc_ra[lev])
return 0;
ASSERT(cur->bc_bufs[lev] != NULL);
cur->bc_ra[lev] |= lr; cur->bc_ra[lev] |= lr;
switch (cur->bc_btnum) { block = XFS_BUF_TO_BLOCK(cur->bc_bufs[lev]);
case XFS_BTNUM_BNO:
case XFS_BTNUM_CNT: if (cur->bc_flags & XFS_BTREE_LONG_PTRS)
a = XFS_BUF_TO_ALLOC_BLOCK(cur->bc_bufs[lev]); return xfs_btree_readahead_lblock(cur, lr, block);
if ((lr & XFS_BTCUR_LEFTRA) && be32_to_cpu(a->bb_leftsib) != NULLAGBLOCK) { return xfs_btree_readahead_sblock(cur, lr, block);
xfs_btree_reada_bufs(cur->bc_mp, cur->bc_private.a.agno,
be32_to_cpu(a->bb_leftsib), 1);
rval++;
}
if ((lr & XFS_BTCUR_RIGHTRA) && be32_to_cpu(a->bb_rightsib) != NULLAGBLOCK) {
xfs_btree_reada_bufs(cur->bc_mp, cur->bc_private.a.agno,
be32_to_cpu(a->bb_rightsib), 1);
rval++;
}
break;
case XFS_BTNUM_BMAP:
b = XFS_BUF_TO_BMBT_BLOCK(cur->bc_bufs[lev]);
if ((lr & XFS_BTCUR_LEFTRA) && be64_to_cpu(b->bb_leftsib) != NULLDFSBNO) {
xfs_btree_reada_bufl(cur->bc_mp, be64_to_cpu(b->bb_leftsib), 1);
rval++;
}
if ((lr & XFS_BTCUR_RIGHTRA) && be64_to_cpu(b->bb_rightsib) != NULLDFSBNO) {
xfs_btree_reada_bufl(cur->bc_mp, be64_to_cpu(b->bb_rightsib), 1);
rval++;
}
break;
case XFS_BTNUM_INO:
i = XFS_BUF_TO_INOBT_BLOCK(cur->bc_bufs[lev]);
if ((lr & XFS_BTCUR_LEFTRA) && be32_to_cpu(i->bb_leftsib) != NULLAGBLOCK) {
xfs_btree_reada_bufs(cur->bc_mp, cur->bc_private.a.agno,
be32_to_cpu(i->bb_leftsib), 1);
rval++;
}
if ((lr & XFS_BTCUR_RIGHTRA) && be32_to_cpu(i->bb_rightsib) != NULLAGBLOCK) {
xfs_btree_reada_bufs(cur->bc_mp, cur->bc_private.a.agno,
be32_to_cpu(i->bb_rightsib), 1);
rval++;
}
break;
default:
ASSERT(0);
}
return rval;
} }
/* /*
......
...@@ -421,23 +421,10 @@ xfs_btree_reada_bufs( ...@@ -421,23 +421,10 @@ xfs_btree_reada_bufs(
* Bits in lr are set from XFS_BTCUR_{LEFT,RIGHT}RA. * Bits in lr are set from XFS_BTCUR_{LEFT,RIGHT}RA.
*/ */
int /* readahead block count */ int /* readahead block count */
xfs_btree_readahead_core(
xfs_btree_cur_t *cur, /* btree cursor */
int lev, /* level in btree */
int lr); /* left/right bits */
static inline int /* readahead block count */
xfs_btree_readahead( xfs_btree_readahead(
xfs_btree_cur_t *cur, /* btree cursor */ xfs_btree_cur_t *cur, /* btree cursor */
int lev, /* level in btree */ int lev, /* level in btree */
int lr) /* left/right bits */ int lr); /* left/right bits */
{
if ((cur->bc_ra[lev] | lr) == cur->bc_ra[lev])
return 0;
return xfs_btree_readahead_core(cur, lev, lr);
}
/* /*
* Set the buffer for level "lev" in the cursor to bp, releasing * Set the buffer for level "lev" in the cursor to bp, releasing
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册