Commit c517b3aa authored by Darrick J. Wong

xfs: shorten xfs_scrub_ prefix

Shorten all the metadata checking xfs_scrub_ prefixes to xchk_.  After
this, the only xfs_scrub* symbols are the ones that pertain to both
scrub and repair.  Whitespace damage will be fixed in a subsequent
patch.  There are no functional changes.
Signed-off-by: Darrick J. Wong <darrick.wong@oracle.com>
Reviewed-by: Brian Foster <bfoster@redhat.com>
Parent: ef97ef26
(One file's diff has been collapsed and is not shown.)
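Before the hunks, a small editorial sketch of the resulting convention may help; every name in it is taken from this commit, but the grouping and the prototypes-as-a-list presentation are illustrative rather than part of the patch:

/*
 * Illustrative sketch of the naming convention after this patch.
 *
 * Symbols used by both scrub and repair keep the xfs_scrub prefix,
 * e.g. the context structure threaded through every checker:
 */
struct xfs_scrub_context;

/* Check-only entry points now carry the shorter xchk_ prefix: */
int xchk_bnobt(struct xfs_scrub_context *sc);
int xchk_cntbt(struct xfs_scrub_context *sc);
int xchk_xattr(struct xfs_scrub_context *sc);
int xchk_bmap_data(struct xfs_scrub_context *sc);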
@@ -28,11 +28,11 @@
  * Set us up to scrub free space btrees.
  */
 int
-xfs_scrub_setup_ag_allocbt(
+xchk_setup_ag_allocbt(
        struct xfs_scrub_context *sc,
        struct xfs_inode *ip)
 {
-       return xfs_scrub_setup_ag_btree(sc, ip, false);
+       return xchk_setup_ag_btree(sc, ip, false);
 }

 /* Free space btree scrubber. */
@@ -41,7 +41,7 @@ xfs_scrub_setup_ag_allocbt(
  * bnobt/cntbt record, respectively.
  */
 STATIC void
-xfs_scrub_allocbt_xref_other(
+xchk_allocbt_xref_other(
        struct xfs_scrub_context *sc,
        xfs_agblock_t agbno,
        xfs_extlen_t len)
@@ -56,32 +56,32 @@ xfs_scrub_allocbt_xref_other(
                pcur = &sc->sa.cnt_cur;
        else
                pcur = &sc->sa.bno_cur;
-       if (!*pcur || xfs_scrub_skip_xref(sc->sm))
+       if (!*pcur || xchk_skip_xref(sc->sm))
                return;

        error = xfs_alloc_lookup_le(*pcur, agbno, len, &has_otherrec);
-       if (!xfs_scrub_should_check_xref(sc, &error, pcur))
+       if (!xchk_should_check_xref(sc, &error, pcur))
                return;
        if (!has_otherrec) {
-               xfs_scrub_btree_xref_set_corrupt(sc, *pcur, 0);
+               xchk_btree_xref_set_corrupt(sc, *pcur, 0);
                return;
        }

        error = xfs_alloc_get_rec(*pcur, &fbno, &flen, &has_otherrec);
-       if (!xfs_scrub_should_check_xref(sc, &error, pcur))
+       if (!xchk_should_check_xref(sc, &error, pcur))
                return;
        if (!has_otherrec) {
-               xfs_scrub_btree_xref_set_corrupt(sc, *pcur, 0);
+               xchk_btree_xref_set_corrupt(sc, *pcur, 0);
                return;
        }

        if (fbno != agbno || flen != len)
-               xfs_scrub_btree_xref_set_corrupt(sc, *pcur, 0);
+               xchk_btree_xref_set_corrupt(sc, *pcur, 0);
 }

 /* Cross-reference with the other btrees. */
 STATIC void
-xfs_scrub_allocbt_xref(
+xchk_allocbt_xref(
        struct xfs_scrub_context *sc,
        xfs_agblock_t agbno,
        xfs_extlen_t len)
@@ -89,16 +89,16 @@ xfs_scrub_allocbt_xref(
        if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
                return;

-       xfs_scrub_allocbt_xref_other(sc, agbno, len);
-       xfs_scrub_xref_is_not_inode_chunk(sc, agbno, len);
-       xfs_scrub_xref_has_no_owner(sc, agbno, len);
-       xfs_scrub_xref_is_not_shared(sc, agbno, len);
+       xchk_allocbt_xref_other(sc, agbno, len);
+       xchk_xref_is_not_inode_chunk(sc, agbno, len);
+       xchk_xref_has_no_owner(sc, agbno, len);
+       xchk_xref_is_not_shared(sc, agbno, len);
 }

 /* Scrub a bnobt/cntbt record. */
 STATIC int
-xfs_scrub_allocbt_rec(
-       struct xfs_scrub_btree *bs,
+xchk_allocbt_rec(
+       struct xchk_btree *bs,
        union xfs_btree_rec *rec)
 {
        struct xfs_mount *mp = bs->cur->bc_mp;
@@ -113,16 +113,16 @@ xfs_scrub_allocbt_rec(
        if (bno + len <= bno ||
            !xfs_verify_agbno(mp, agno, bno) ||
            !xfs_verify_agbno(mp, agno, bno + len - 1))
-               xfs_scrub_btree_set_corrupt(bs->sc, bs->cur, 0);
+               xchk_btree_set_corrupt(bs->sc, bs->cur, 0);

-       xfs_scrub_allocbt_xref(bs->sc, bno, len);
+       xchk_allocbt_xref(bs->sc, bno, len);

        return error;
 }

 /* Scrub the freespace btrees for some AG. */
 STATIC int
-xfs_scrub_allocbt(
+xchk_allocbt(
        struct xfs_scrub_context *sc,
        xfs_btnum_t which)
 {
@@ -131,26 +131,26 @@ xfs_scrub_allocbt(
        xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_AG);
        cur = which == XFS_BTNUM_BNO ? sc->sa.bno_cur : sc->sa.cnt_cur;
-       return xfs_scrub_btree(sc, cur, xfs_scrub_allocbt_rec, &oinfo, NULL);
+       return xchk_btree(sc, cur, xchk_allocbt_rec, &oinfo, NULL);
 }

 int
-xfs_scrub_bnobt(
+xchk_bnobt(
        struct xfs_scrub_context *sc)
 {
-       return xfs_scrub_allocbt(sc, XFS_BTNUM_BNO);
+       return xchk_allocbt(sc, XFS_BTNUM_BNO);
 }

 int
-xfs_scrub_cntbt(
+xchk_cntbt(
        struct xfs_scrub_context *sc)
 {
-       return xfs_scrub_allocbt(sc, XFS_BTNUM_CNT);
+       return xchk_allocbt(sc, XFS_BTNUM_CNT);
 }

 /* xref check that the extent is not free */
 void
-xfs_scrub_xref_is_used_space(
+xchk_xref_is_used_space(
        struct xfs_scrub_context *sc,
        xfs_agblock_t agbno,
        xfs_extlen_t len)
@@ -158,12 +158,12 @@ xfs_scrub_xref_is_used_space(
        bool is_freesp;
        int error;

-       if (!sc->sa.bno_cur || xfs_scrub_skip_xref(sc->sm))
+       if (!sc->sa.bno_cur || xchk_skip_xref(sc->sm))
                return;

        error = xfs_alloc_has_record(sc->sa.bno_cur, agbno, len, &is_freesp);
-       if (!xfs_scrub_should_check_xref(sc, &error, &sc->sa.bno_cur))
+       if (!xchk_should_check_xref(sc, &error, &sc->sa.bno_cur))
                return;

        if (is_freesp)
-               xfs_scrub_btree_xref_set_corrupt(sc, sc->sa.bno_cur, 0);
+               xchk_btree_xref_set_corrupt(sc, sc->sa.bno_cur, 0);
 }
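For orientation, none of these xchk_ functions are called directly by user I/O paths; the scrub core dispatches them through a table of operations, which this same patch also updates (that portion of the commit is not shown on this page). The sketch below is a hedged reconstruction from the kernel tree of this era, so the table name meta_scrub_ops, the struct xfs_scrub_meta_ops type, and the XFS_SCRUB_TYPE_* indices are assumptions relative to this excerpt:

/*
 * Hedged sketch, not visible in this excerpt: how the renamed entry
 * points are wired into scrub's dispatch table in fs/xfs/scrub/scrub.c
 * (fields trimmed to setup/scrub for brevity).
 */
static const struct xfs_scrub_meta_ops meta_scrub_ops[] = {
        [XFS_SCRUB_TYPE_BNOBT] = {      /* free space by block btree */
                .setup  = xchk_setup_ag_allocbt,
                .scrub  = xchk_bnobt,
        },
        [XFS_SCRUB_TYPE_CNTBT] = {      /* free space by length btree */
                .setup  = xchk_setup_ag_allocbt,
                .scrub  = xchk_cntbt,
        },
};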
@@ -32,7 +32,7 @@
 /* Set us up to scrub an inode's extended attributes. */
 int
-xfs_scrub_setup_xattr(
+xchk_setup_xattr(
        struct xfs_scrub_context *sc,
        struct xfs_inode *ip)
 {
@@ -50,12 +50,12 @@ xfs_scrub_setup_xattr(
        if (!sc->buf)
                return -ENOMEM;

-       return xfs_scrub_setup_inode_contents(sc, ip, 0);
+       return xchk_setup_inode_contents(sc, ip, 0);
 }

 /* Extended Attributes */

-struct xfs_scrub_xattr {
+struct xchk_xattr {
        struct xfs_attr_list_context context;
        struct xfs_scrub_context *sc;
 };
@@ -69,22 +69,22 @@ struct xfs_scrub_xattr {
  * or if we get more or less data than we expected.
  */
 static void
-xfs_scrub_xattr_listent(
+xchk_xattr_listent(
        struct xfs_attr_list_context *context,
        int flags,
        unsigned char *name,
        int namelen,
        int valuelen)
 {
-       struct xfs_scrub_xattr *sx;
+       struct xchk_xattr *sx;
        struct xfs_da_args args = { NULL };
        int error = 0;

-       sx = container_of(context, struct xfs_scrub_xattr, context);
+       sx = container_of(context, struct xchk_xattr, context);

        if (flags & XFS_ATTR_INCOMPLETE) {
                /* Incomplete attr key, just mark the inode for preening. */
-               xfs_scrub_ino_set_preen(sx->sc, context->dp->i_ino);
+               xchk_ino_set_preen(sx->sc, context->dp->i_ino);
                return;
        }
@@ -106,11 +106,11 @@ xfs_scrub_xattr_listent(
        error = xfs_attr_get_ilocked(context->dp, &args);
        if (error == -EEXIST)
                error = 0;
-       if (!xfs_scrub_fblock_process_error(sx->sc, XFS_ATTR_FORK, args.blkno,
+       if (!xchk_fblock_process_error(sx->sc, XFS_ATTR_FORK, args.blkno,
                        &error))
                goto fail_xref;
        if (args.valuelen != valuelen)
-               xfs_scrub_fblock_set_corrupt(sx->sc, XFS_ATTR_FORK,
+               xchk_fblock_set_corrupt(sx->sc, XFS_ATTR_FORK,
                                args.blkno);
 fail_xref:
        if (sx->sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
@@ -126,7 +126,7 @@ xfs_scrub_xattr_listent(
  * the smallest address
  */
 STATIC bool
-xfs_scrub_xattr_set_map(
+xchk_xattr_set_map(
        struct xfs_scrub_context *sc,
        unsigned long *map,
        unsigned int start,
@@ -154,7 +154,7 @@ xfs_scrub_xattr_set_map(
  * attr freemap has problems or points to used space.
  */
 STATIC bool
-xfs_scrub_xattr_check_freemap(
+xchk_xattr_check_freemap(
        struct xfs_scrub_context *sc,
        unsigned long *map,
        struct xfs_attr3_icleaf_hdr *leafhdr)
@@ -168,7 +168,7 @@ xfs_scrub_xattr_check_freemap(
        freemap = (unsigned long *)sc->buf + BITS_TO_LONGS(mapsize);
        bitmap_zero(freemap, mapsize);
        for (i = 0; i < XFS_ATTR_LEAF_MAPSIZE; i++) {
-               if (!xfs_scrub_xattr_set_map(sc, freemap,
+               if (!xchk_xattr_set_map(sc, freemap,
                                leafhdr->freemap[i].base,
                                leafhdr->freemap[i].size))
                        return false;
@@ -184,8 +184,8 @@ xfs_scrub_xattr_check_freemap(
  * Returns the number of bytes used for the name/value data.
  */
 STATIC void
-xfs_scrub_xattr_entry(
-       struct xfs_scrub_da_btree *ds,
+xchk_xattr_entry(
+       struct xchk_da_btree *ds,
        int level,
        char *buf_end,
        struct xfs_attr_leafblock *leaf,
@@ -204,17 +204,17 @@ xfs_scrub_xattr_entry(
        unsigned int namesize;

        if (ent->pad2 != 0)
-               xfs_scrub_da_set_corrupt(ds, level);
+               xchk_da_set_corrupt(ds, level);

        /* Hash values in order? */
        if (be32_to_cpu(ent->hashval) < *last_hashval)
-               xfs_scrub_da_set_corrupt(ds, level);
+               xchk_da_set_corrupt(ds, level);
        *last_hashval = be32_to_cpu(ent->hashval);

        nameidx = be16_to_cpu(ent->nameidx);
        if (nameidx < leafhdr->firstused ||
            nameidx >= mp->m_attr_geo->blksize) {
-               xfs_scrub_da_set_corrupt(ds, level);
+               xchk_da_set_corrupt(ds, level);
                return;
        }
@@ -225,27 +225,27 @@
                                be16_to_cpu(lentry->valuelen));
                name_end = (char *)lentry + namesize;
                if (lentry->namelen == 0)
-                       xfs_scrub_da_set_corrupt(ds, level);
+                       xchk_da_set_corrupt(ds, level);
        } else {
                rentry = xfs_attr3_leaf_name_remote(leaf, idx);
                namesize = xfs_attr_leaf_entsize_remote(rentry->namelen);
                name_end = (char *)rentry + namesize;
                if (rentry->namelen == 0 || rentry->valueblk == 0)
-                       xfs_scrub_da_set_corrupt(ds, level);
+                       xchk_da_set_corrupt(ds, level);
        }
        if (name_end > buf_end)
-               xfs_scrub_da_set_corrupt(ds, level);
+               xchk_da_set_corrupt(ds, level);

-       if (!xfs_scrub_xattr_set_map(ds->sc, usedmap, nameidx, namesize))
-               xfs_scrub_da_set_corrupt(ds, level);
+       if (!xchk_xattr_set_map(ds->sc, usedmap, nameidx, namesize))
+               xchk_da_set_corrupt(ds, level);

        if (!(ds->sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT))
                *usedbytes += namesize;
 }

 /* Scrub an attribute leaf. */
 STATIC int
-xfs_scrub_xattr_block(
-       struct xfs_scrub_da_btree *ds,
+xchk_xattr_block(
+       struct xchk_da_btree *ds,
        int level)
 {
        struct xfs_attr3_icleaf_hdr leafhdr;
@@ -275,10 +275,10 @@ xfs_scrub_xattr_block(
                if (leaf->hdr.pad1 != 0 || leaf->hdr.pad2 != 0 ||
                    leaf->hdr.info.hdr.pad != 0)
-                       xfs_scrub_da_set_corrupt(ds, level);
+                       xchk_da_set_corrupt(ds, level);
        } else {
                if (leaf->hdr.pad1 != 0 || leaf->hdr.info.pad != 0)
-                       xfs_scrub_da_set_corrupt(ds, level);
+                       xchk_da_set_corrupt(ds, level);
        }

        /* Check the leaf header */
@@ -286,44 +286,44 @@
        hdrsize = xfs_attr3_leaf_hdr_size(leaf);

        if (leafhdr.usedbytes > mp->m_attr_geo->blksize)
-               xfs_scrub_da_set_corrupt(ds, level);
+               xchk_da_set_corrupt(ds, level);
        if (leafhdr.firstused > mp->m_attr_geo->blksize)
-               xfs_scrub_da_set_corrupt(ds, level);
+               xchk_da_set_corrupt(ds, level);
        if (leafhdr.firstused < hdrsize)
-               xfs_scrub_da_set_corrupt(ds, level);
+               xchk_da_set_corrupt(ds, level);

-       if (!xfs_scrub_xattr_set_map(ds->sc, usedmap, 0, hdrsize))
-               xfs_scrub_da_set_corrupt(ds, level);
+       if (!xchk_xattr_set_map(ds->sc, usedmap, 0, hdrsize))
+               xchk_da_set_corrupt(ds, level);

        if (ds->sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
                goto out;

        entries = xfs_attr3_leaf_entryp(leaf);
        if ((char *)&entries[leafhdr.count] > (char *)leaf + leafhdr.firstused)
-               xfs_scrub_da_set_corrupt(ds, level);
+               xchk_da_set_corrupt(ds, level);

        buf_end = (char *)bp->b_addr + mp->m_attr_geo->blksize;
        for (i = 0, ent = entries; i < leafhdr.count; ent++, i++) {
                /* Mark the leaf entry itself. */
                off = (char *)ent - (char *)leaf;
-               if (!xfs_scrub_xattr_set_map(ds->sc, usedmap, off,
+               if (!xchk_xattr_set_map(ds->sc, usedmap, off,
                                sizeof(xfs_attr_leaf_entry_t))) {
-                       xfs_scrub_da_set_corrupt(ds, level);
+                       xchk_da_set_corrupt(ds, level);
                        goto out;
                }

                /* Check the entry and nameval. */
-               xfs_scrub_xattr_entry(ds, level, buf_end, leaf, &leafhdr,
+               xchk_xattr_entry(ds, level, buf_end, leaf, &leafhdr,
                                usedmap, ent, i, &usedbytes, &last_hashval);

                if (ds->sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
                        goto out;
        }

-       if (!xfs_scrub_xattr_check_freemap(ds->sc, usedmap, &leafhdr))
-               xfs_scrub_da_set_corrupt(ds, level);
+       if (!xchk_xattr_check_freemap(ds->sc, usedmap, &leafhdr))
+               xchk_da_set_corrupt(ds, level);

        if (leafhdr.usedbytes != usedbytes)
-               xfs_scrub_da_set_corrupt(ds, level);
+               xchk_da_set_corrupt(ds, level);

 out:
        return 0;
@@ -331,8 +331,8 @@ xfs_scrub_xattr_block(

 /* Scrub a attribute btree record. */
 STATIC int
-xfs_scrub_xattr_rec(
-       struct xfs_scrub_da_btree *ds,
+xchk_xattr_rec(
+       struct xchk_da_btree *ds,
        int level,
        void *rec)
 {
@@ -352,14 +352,14 @@ xfs_scrub_xattr_rec(
        blk = &ds->state->path.blk[level];

        /* Check the whole block, if necessary. */
-       error = xfs_scrub_xattr_block(ds, level);
+       error = xchk_xattr_block(ds, level);
        if (error)
                goto out;
        if (ds->sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
                goto out;

        /* Check the hash of the entry. */
-       error = xfs_scrub_da_btree_hash(ds, level, &ent->hashval);
+       error = xchk_da_btree_hash(ds, level, &ent->hashval);
        if (error)
                goto out;
@@ -368,7 +368,7 @@
        hdrsize = xfs_attr3_leaf_hdr_size(bp->b_addr);
        nameidx = be16_to_cpu(ent->nameidx);
        if (nameidx < hdrsize || nameidx >= mp->m_attr_geo->blksize) {
-               xfs_scrub_da_set_corrupt(ds, level);
+               xchk_da_set_corrupt(ds, level);
                goto out;
        }
@@ -377,12 +377,12 @@
        badflags = ~(XFS_ATTR_LOCAL | XFS_ATTR_ROOT | XFS_ATTR_SECURE |
                        XFS_ATTR_INCOMPLETE);
        if ((ent->flags & badflags) != 0)
-               xfs_scrub_da_set_corrupt(ds, level);
+               xchk_da_set_corrupt(ds, level);
        if (ent->flags & XFS_ATTR_LOCAL) {
                lentry = (struct xfs_attr_leaf_name_local *)
                                (((char *)bp->b_addr) + nameidx);
                if (lentry->namelen <= 0) {
-                       xfs_scrub_da_set_corrupt(ds, level);
+                       xchk_da_set_corrupt(ds, level);
                        goto out;
                }
                calc_hash = xfs_da_hashname(lentry->nameval, lentry->namelen);
@@ -390,13 +390,13 @@
                rentry = (struct xfs_attr_leaf_name_remote *)
                                (((char *)bp->b_addr) + nameidx);
                if (rentry->namelen <= 0) {
-                       xfs_scrub_da_set_corrupt(ds, level);
+                       xchk_da_set_corrupt(ds, level);
                        goto out;
                }
                calc_hash = xfs_da_hashname(rentry->name, rentry->namelen);
        }
        if (calc_hash != hash)
-               xfs_scrub_da_set_corrupt(ds, level);
+               xchk_da_set_corrupt(ds, level);

 out:
        return error;
@@ -404,10 +404,10 @@ xfs_scrub_xattr_rec(

 /* Scrub the extended attribute metadata. */
 int
-xfs_scrub_xattr(
+xchk_xattr(
        struct xfs_scrub_context *sc)
 {
-       struct xfs_scrub_xattr sx;
+       struct xchk_xattr sx;
        struct attrlist_cursor_kern cursor = { 0 };
        xfs_dablk_t last_checked = -1U;
        int error = 0;
@@ -417,7 +417,7 @@ xfs_scrub_xattr(
        memset(&sx, 0, sizeof(sx));

        /* Check attribute tree structure */
-       error = xfs_scrub_da_btree(sc, XFS_ATTR_FORK, xfs_scrub_xattr_rec,
+       error = xchk_da_btree(sc, XFS_ATTR_FORK, xchk_xattr_rec,
                        &last_checked);
        if (error)
                goto out;
@@ -429,7 +429,7 @@
        sx.context.dp = sc->ip;
        sx.context.cursor = &cursor;
        sx.context.resynch = 1;
-       sx.context.put_listent = xfs_scrub_xattr_listent;
+       sx.context.put_listent = xchk_xattr_listent;
        sx.context.tp = sc->tp;
        sx.context.flags = ATTR_INCOMPLETE;
        sx.sc = sc;
@@ -438,7 +438,7 @@
         * Look up every xattr in this file by name.
         *
         * Use the backend implementation of xfs_attr_list to call
-        * xfs_scrub_xattr_listent on every attribute key in this inode.
+        * xchk_xattr_listent on every attribute key in this inode.
         * In other words, we use the same iterator/callback mechanism
         * that listattr uses to scrub extended attributes, though in our
         * _listent function, we check the value of the attribute.
@@ -451,7 +451,7 @@
         * locking order.
         */
        error = xfs_attr_list_int_ilocked(&sx.context);
-       if (!xfs_scrub_fblock_process_error(sc, XFS_ATTR_FORK, 0, &error))
+       if (!xchk_fblock_process_error(sc, XFS_ATTR_FORK, 0, &error))
                goto out;
 out:
        return error;
...
@@ -33,13 +33,13 @@
 /* Set us up with an inode's bmap. */
 int
-xfs_scrub_setup_inode_bmap(
+xchk_setup_inode_bmap(
        struct xfs_scrub_context *sc,
        struct xfs_inode *ip)
 {
        int error;

-       error = xfs_scrub_get_inode(sc, ip);
+       error = xchk_get_inode(sc, ip);
        if (error)
                goto out;
@@ -60,7 +60,7 @@ xfs_scrub_setup_inode_bmap(
        }

        /* Got the inode, lock it and we're ready to go. */
-       error = xfs_scrub_trans_alloc(sc, 0);
+       error = xchk_trans_alloc(sc, 0);
        if (error)
                goto out;
        sc->ilock_flags |= XFS_ILOCK_EXCL;
@@ -78,7 +78,7 @@ xfs_scrub_setup_inode_bmap(
  * is in btree format.
  */

-struct xfs_scrub_bmap_info {
+struct xchk_bmap_info {
        struct xfs_scrub_context *sc;
        xfs_fileoff_t lastoff;
        bool is_rt;
@@ -88,8 +88,8 @@ struct xfs_scrub_bmap_info {
 /* Look for a corresponding rmap for this irec. */
 static inline bool
-xfs_scrub_bmap_get_rmap(
-       struct xfs_scrub_bmap_info *info,
+xchk_bmap_get_rmap(
+       struct xchk_bmap_info *info,
        struct xfs_bmbt_irec *irec,
        xfs_agblock_t agbno,
        uint64_t owner,
@@ -120,7 +120,7 @@ xfs_scrub_bmap_get_rmap(
        if (info->is_shared) {
                error = xfs_rmap_lookup_le_range(info->sc->sa.rmap_cur, agbno,
                                owner, offset, rflags, rmap, &has_rmap);
-               if (!xfs_scrub_should_check_xref(info->sc, &error,
+               if (!xchk_should_check_xref(info->sc, &error,
                                &info->sc->sa.rmap_cur))
                        return false;
                goto out;
@@ -131,28 +131,28 @@
         */
        error = xfs_rmap_lookup_le(info->sc->sa.rmap_cur, agbno, 0, owner,
                        offset, rflags, &has_rmap);
-       if (!xfs_scrub_should_check_xref(info->sc, &error,
+       if (!xchk_should_check_xref(info->sc, &error,
                        &info->sc->sa.rmap_cur))
                return false;
        if (!has_rmap)
                goto out;

        error = xfs_rmap_get_rec(info->sc->sa.rmap_cur, rmap, &has_rmap);
-       if (!xfs_scrub_should_check_xref(info->sc, &error,
+       if (!xchk_should_check_xref(info->sc, &error,
                        &info->sc->sa.rmap_cur))
                return false;

 out:
        if (!has_rmap)
-               xfs_scrub_fblock_xref_set_corrupt(info->sc, info->whichfork,
+               xchk_fblock_xref_set_corrupt(info->sc, info->whichfork,
                                irec->br_startoff);
        return has_rmap;
 }

 /* Make sure that we have rmapbt records for this extent. */
 STATIC void
-xfs_scrub_bmap_xref_rmap(
-       struct xfs_scrub_bmap_info *info,
+xchk_bmap_xref_rmap(
+       struct xchk_bmap_info *info,
        struct xfs_bmbt_irec *irec,
        xfs_agblock_t agbno)
 {
@@ -160,7 +160,7 @@ xfs_scrub_bmap_xref_rmap(
        unsigned long long rmap_end;
        uint64_t owner;

-       if (!info->sc->sa.rmap_cur || xfs_scrub_skip_xref(info->sc->sm))
+       if (!info->sc->sa.rmap_cur || xchk_skip_xref(info->sc->sm))
                return;

        if (info->whichfork == XFS_COW_FORK)
@@ -169,14 +169,14 @@
                owner = info->sc->ip->i_ino;

        /* Find the rmap record for this irec. */
-       if (!xfs_scrub_bmap_get_rmap(info, irec, agbno, owner, &rmap))
+       if (!xchk_bmap_get_rmap(info, irec, agbno, owner, &rmap))
                return;

        /* Check the rmap. */
        rmap_end = (unsigned long long)rmap.rm_startblock + rmap.rm_blockcount;
        if (rmap.rm_startblock > agbno ||
            agbno + irec->br_blockcount > rmap_end)
-               xfs_scrub_fblock_xref_set_corrupt(info->sc, info->whichfork,
+               xchk_fblock_xref_set_corrupt(info->sc, info->whichfork,
                                irec->br_startoff);

        /*
@@ -189,12 +189,12 @@
                                rmap.rm_blockcount;
                if (rmap.rm_offset > irec->br_startoff ||
                    irec->br_startoff + irec->br_blockcount > rmap_end)
-                       xfs_scrub_fblock_xref_set_corrupt(info->sc,
+                       xchk_fblock_xref_set_corrupt(info->sc,
                                        info->whichfork, irec->br_startoff);
        }

        if (rmap.rm_owner != owner)
-               xfs_scrub_fblock_xref_set_corrupt(info->sc, info->whichfork,
+               xchk_fblock_xref_set_corrupt(info->sc, info->whichfork,
                                irec->br_startoff);

        /*
@@ -207,22 +207,22 @@
        if (owner != XFS_RMAP_OWN_COW &&
            irec->br_state == XFS_EXT_UNWRITTEN &&
            !(rmap.rm_flags & XFS_RMAP_UNWRITTEN))
-               xfs_scrub_fblock_xref_set_corrupt(info->sc, info->whichfork,
+               xchk_fblock_xref_set_corrupt(info->sc, info->whichfork,
                                irec->br_startoff);

        if (info->whichfork == XFS_ATTR_FORK &&
            !(rmap.rm_flags & XFS_RMAP_ATTR_FORK))
-               xfs_scrub_fblock_xref_set_corrupt(info->sc, info->whichfork,
+               xchk_fblock_xref_set_corrupt(info->sc, info->whichfork,
                                irec->br_startoff);
        if (rmap.rm_flags & XFS_RMAP_BMBT_BLOCK)
-               xfs_scrub_fblock_xref_set_corrupt(info->sc, info->whichfork,
+               xchk_fblock_xref_set_corrupt(info->sc, info->whichfork,
                                irec->br_startoff);
 }

 /* Cross-reference a single rtdev extent record. */
 STATIC void
-xfs_scrub_bmap_rt_extent_xref(
-       struct xfs_scrub_bmap_info *info,
+xchk_bmap_rt_extent_xref(
+       struct xchk_bmap_info *info,
        struct xfs_inode *ip,
        struct xfs_btree_cur *cur,
        struct xfs_bmbt_irec *irec)
@@ -230,14 +230,14 @@ xfs_scrub_bmap_rt_extent_xref(
        if (info->sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
                return;

-       xfs_scrub_xref_is_used_rt_space(info->sc, irec->br_startblock,
+       xchk_xref_is_used_rt_space(info->sc, irec->br_startblock,
                        irec->br_blockcount);
 }

 /* Cross-reference a single datadev extent record. */
 STATIC void
-xfs_scrub_bmap_extent_xref(
-       struct xfs_scrub_bmap_info *info,
+xchk_bmap_extent_xref(
+       struct xchk_bmap_info *info,
        struct xfs_inode *ip,
        struct xfs_btree_cur *cur,
        struct xfs_bmbt_irec *irec)
@@ -255,38 +255,38 @@ xfs_scrub_bmap_extent_xref(
        agbno = XFS_FSB_TO_AGBNO(mp, irec->br_startblock);
        len = irec->br_blockcount;

-       error = xfs_scrub_ag_init(info->sc, agno, &info->sc->sa);
-       if (!xfs_scrub_fblock_process_error(info->sc, info->whichfork,
+       error = xchk_ag_init(info->sc, agno, &info->sc->sa);
+       if (!xchk_fblock_process_error(info->sc, info->whichfork,
                        irec->br_startoff, &error))
                return;

-       xfs_scrub_xref_is_used_space(info->sc, agbno, len);
-       xfs_scrub_xref_is_not_inode_chunk(info->sc, agbno, len);
-       xfs_scrub_bmap_xref_rmap(info, irec, agbno);
+       xchk_xref_is_used_space(info->sc, agbno, len);
+       xchk_xref_is_not_inode_chunk(info->sc, agbno, len);
+       xchk_bmap_xref_rmap(info, irec, agbno);
        switch (info->whichfork) {
        case XFS_DATA_FORK:
                if (xfs_is_reflink_inode(info->sc->ip))
                        break;
                /* fall through */
        case XFS_ATTR_FORK:
-               xfs_scrub_xref_is_not_shared(info->sc, agbno,
+               xchk_xref_is_not_shared(info->sc, agbno,
                                irec->br_blockcount);
                break;
        case XFS_COW_FORK:
-               xfs_scrub_xref_is_cow_staging(info->sc, agbno,
+               xchk_xref_is_cow_staging(info->sc, agbno,
                                irec->br_blockcount);
                break;
        }

-       xfs_scrub_ag_free(info->sc, &info->sc->sa);
+       xchk_ag_free(info->sc, &info->sc->sa);
 }

 /* Scrub a single extent record. */
 STATIC int
-xfs_scrub_bmap_extent(
+xchk_bmap_extent(
        struct xfs_inode *ip,
        struct xfs_btree_cur *cur,
-       struct xfs_scrub_bmap_info *info,
+       struct xchk_bmap_info *info,
        struct xfs_bmbt_irec *irec)
 {
        struct xfs_mount *mp = info->sc->mp;
@@ -302,12 +302,12 @@ xfs_scrub_bmap_extent(
         * from the incore list, for which there is no ordering check.
         */
        if (irec->br_startoff < info->lastoff)
-               xfs_scrub_fblock_set_corrupt(info->sc, info->whichfork,
+               xchk_fblock_set_corrupt(info->sc, info->whichfork,
                                irec->br_startoff);

        /* There should never be a "hole" extent in either extent list. */
        if (irec->br_startblock == HOLESTARTBLOCK)
-               xfs_scrub_fblock_set_corrupt(info->sc, info->whichfork,
+               xchk_fblock_set_corrupt(info->sc, info->whichfork,
                                irec->br_startoff);

        /*
@@ -315,40 +315,40 @@ xfs_scrub_bmap_extent(
         * in-core extent scan, and we should never see these in the bmbt.
         */
        if (isnullstartblock(irec->br_startblock))
-               xfs_scrub_fblock_set_corrupt(info->sc, info->whichfork,
+               xchk_fblock_set_corrupt(info->sc, info->whichfork,
                                irec->br_startoff);

        /* Make sure the extent points to a valid place. */
        if (irec->br_blockcount > MAXEXTLEN)
-               xfs_scrub_fblock_set_corrupt(info->sc, info->whichfork,
+               xchk_fblock_set_corrupt(info->sc, info->whichfork,
                                irec->br_startoff);
        if (irec->br_startblock + irec->br_blockcount <= irec->br_startblock)
-               xfs_scrub_fblock_set_corrupt(info->sc, info->whichfork,
+               xchk_fblock_set_corrupt(info->sc, info->whichfork,
                                irec->br_startoff);
        end = irec->br_startblock + irec->br_blockcount - 1;
        if (info->is_rt &&
            (!xfs_verify_rtbno(mp, irec->br_startblock) ||
             !xfs_verify_rtbno(mp, end)))
-               xfs_scrub_fblock_set_corrupt(info->sc, info->whichfork,
+               xchk_fblock_set_corrupt(info->sc, info->whichfork,
                                irec->br_startoff);
        if (!info->is_rt &&
            (!xfs_verify_fsbno(mp, irec->br_startblock) ||
             !xfs_verify_fsbno(mp, end) ||
             XFS_FSB_TO_AGNO(mp, irec->br_startblock) !=
                        XFS_FSB_TO_AGNO(mp, end)))
-               xfs_scrub_fblock_set_corrupt(info->sc, info->whichfork,
+               xchk_fblock_set_corrupt(info->sc, info->whichfork,
                                irec->br_startoff);

        /* We don't allow unwritten extents on attr forks. */
        if (irec->br_state == XFS_EXT_UNWRITTEN &&
            info->whichfork == XFS_ATTR_FORK)
-               xfs_scrub_fblock_set_corrupt(info->sc, info->whichfork,
+               xchk_fblock_set_corrupt(info->sc, info->whichfork,
                                irec->br_startoff);

        if (info->is_rt)
-               xfs_scrub_bmap_rt_extent_xref(info, ip, cur, irec);
+               xchk_bmap_rt_extent_xref(info, ip, cur, irec);
        else
-               xfs_scrub_bmap_extent_xref(info, ip, cur, irec);
+               xchk_bmap_extent_xref(info, ip, cur, irec);

        info->lastoff = irec->br_startoff + irec->br_blockcount;
        return error;
@@ -356,12 +356,12 @@ xfs_scrub_bmap_extent(

 /* Scrub a bmbt record. */
 STATIC int
-xfs_scrub_bmapbt_rec(
-       struct xfs_scrub_btree *bs,
+xchk_bmapbt_rec(
+       struct xchk_btree *bs,
        union xfs_btree_rec *rec)
 {
        struct xfs_bmbt_irec irec;
-       struct xfs_scrub_bmap_info *info = bs->private;
+       struct xchk_bmap_info *info = bs->private;
        struct xfs_inode *ip = bs->cur->bc_private.b.ip;
        struct xfs_buf *bp = NULL;
        struct xfs_btree_block *block;
@@ -378,22 +378,22 @@
                        block = xfs_btree_get_block(bs->cur, i, &bp);
                        owner = be64_to_cpu(block->bb_u.l.bb_owner);
                        if (owner != ip->i_ino)
-                               xfs_scrub_fblock_set_corrupt(bs->sc,
+                               xchk_fblock_set_corrupt(bs->sc,
                                                info->whichfork, 0);
                }
        }

        /* Set up the in-core record and scrub it. */
        xfs_bmbt_disk_get_all(&rec->bmbt, &irec);
-       return xfs_scrub_bmap_extent(ip, bs->cur, info, &irec);
+       return xchk_bmap_extent(ip, bs->cur, info, &irec);
 }

 /* Scan the btree records. */
 STATIC int
-xfs_scrub_bmap_btree(
+xchk_bmap_btree(
        struct xfs_scrub_context *sc,
        int whichfork,
-       struct xfs_scrub_bmap_info *info)
+       struct xchk_bmap_info *info)
 {
        struct xfs_owner_info oinfo;
        struct xfs_mount *mp = sc->mp;
@@ -403,12 +403,12 @@
        cur = xfs_bmbt_init_cursor(mp, sc->tp, ip, whichfork);
        xfs_rmap_ino_bmbt_owner(&oinfo, ip->i_ino, whichfork);
-       error = xfs_scrub_btree(sc, cur, xfs_scrub_bmapbt_rec, &oinfo, info);
+       error = xchk_btree(sc, cur, xchk_bmapbt_rec, &oinfo, info);
        xfs_btree_del_cursor(cur, error);
        return error;
 }

-struct xfs_scrub_bmap_check_rmap_info {
+struct xchk_bmap_check_rmap_info {
        struct xfs_scrub_context *sc;
        int whichfork;
        struct xfs_iext_cursor icur;
@@ -416,13 +416,13 @@ struct xfs_scrub_bmap_check_rmap_info {

 /* Can we find bmaps that fit this rmap? */
 STATIC int
-xfs_scrub_bmap_check_rmap(
+xchk_bmap_check_rmap(
        struct xfs_btree_cur *cur,
        struct xfs_rmap_irec *rec,
        void *priv)
 {
        struct xfs_bmbt_irec irec;
-       struct xfs_scrub_bmap_check_rmap_info *sbcri = priv;
+       struct xchk_bmap_check_rmap_info *sbcri = priv;
        struct xfs_ifork *ifp;
        struct xfs_scrub_context *sc = sbcri->sc;
        bool have_map;
@@ -439,14 +439,14 @@
        /* Now look up the bmbt record. */
        ifp = XFS_IFORK_PTR(sc->ip, sbcri->whichfork);
        if (!ifp) {
-               xfs_scrub_fblock_set_corrupt(sc, sbcri->whichfork,
+               xchk_fblock_set_corrupt(sc, sbcri->whichfork,
                                rec->rm_offset);
                goto out;
        }
        have_map = xfs_iext_lookup_extent(sc->ip, ifp, rec->rm_offset,
                        &sbcri->icur, &irec);
        if (!have_map)
-               xfs_scrub_fblock_set_corrupt(sc, sbcri->whichfork,
+               xchk_fblock_set_corrupt(sc, sbcri->whichfork,
                                rec->rm_offset);
        /*
@@ -457,14 +457,14 @@
         */
        while (have_map) {
                if (irec.br_startoff != rec->rm_offset)
-                       xfs_scrub_fblock_set_corrupt(sc, sbcri->whichfork,
+                       xchk_fblock_set_corrupt(sc, sbcri->whichfork,
                                        rec->rm_offset);
                if (irec.br_startblock != XFS_AGB_TO_FSB(sc->mp,
                                cur->bc_private.a.agno, rec->rm_startblock))
-                       xfs_scrub_fblock_set_corrupt(sc, sbcri->whichfork,
+                       xchk_fblock_set_corrupt(sc, sbcri->whichfork,
                                        rec->rm_offset);
                if (irec.br_blockcount > rec->rm_blockcount)
-                       xfs_scrub_fblock_set_corrupt(sc, sbcri->whichfork,
+                       xchk_fblock_set_corrupt(sc, sbcri->whichfork,
                                        rec->rm_offset);
                if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
                        break;
@@ -475,7 +475,7 @@
                        break;
                have_map = xfs_iext_next_extent(ifp, &sbcri->icur, &irec);
                if (!have_map)
-                       xfs_scrub_fblock_set_corrupt(sc, sbcri->whichfork,
+                       xchk_fblock_set_corrupt(sc, sbcri->whichfork,
                                        rec->rm_offset);
        }
@@ -487,12 +487,12 @@

 /* Make sure each rmap has a corresponding bmbt entry. */
 STATIC int
-xfs_scrub_bmap_check_ag_rmaps(
+xchk_bmap_check_ag_rmaps(
        struct xfs_scrub_context *sc,
        int whichfork,
        xfs_agnumber_t agno)
 {
-       struct xfs_scrub_bmap_check_rmap_info sbcri;
+       struct xchk_bmap_check_rmap_info sbcri;
        struct xfs_btree_cur *cur;
        struct xfs_buf *agf;
        int error;
@@ -509,7 +509,7 @@
        sbcri.sc = sc;
        sbcri.whichfork = whichfork;
-       error = xfs_rmap_query_all(cur, xfs_scrub_bmap_check_rmap, &sbcri);
+       error = xfs_rmap_query_all(cur, xchk_bmap_check_rmap, &sbcri);
        if (error == XFS_BTREE_QUERY_RANGE_ABORT)
                error = 0;
@@ -521,7 +521,7 @@

 /* Make sure each rmap has a corresponding bmbt entry. */
 STATIC int
-xfs_scrub_bmap_check_rmaps(
+xchk_bmap_check_rmaps(
        struct xfs_scrub_context *sc,
        int whichfork)
 {
@@ -561,7 +561,7 @@
                return 0;

        for (agno = 0; agno < sc->mp->m_sb.sb_agcount; agno++) {
-               error = xfs_scrub_bmap_check_ag_rmaps(sc, whichfork, agno);
+               error = xchk_bmap_check_ag_rmaps(sc, whichfork, agno);
                if (error)
                        return error;
                if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
@@ -578,12 +578,12 @@
  * Then we unconditionally scan the incore extent cache.
  */
 STATIC int
-xfs_scrub_bmap(
+xchk_bmap(
        struct xfs_scrub_context *sc,
        int whichfork)
 {
        struct xfs_bmbt_irec irec;
-       struct xfs_scrub_bmap_info info = { NULL };
+       struct xchk_bmap_info info = { NULL };
        struct xfs_mount *mp = sc->mp;
        struct xfs_inode *ip = sc->ip;
        struct xfs_ifork *ifp;
@@ -605,7 +605,7 @@
                        goto out;
                /* No CoW forks on non-reflink inodes/filesystems. */
                if (!xfs_is_reflink_inode(ip)) {
-                       xfs_scrub_ino_set_corrupt(sc, sc->ip->i_ino);
+                       xchk_ino_set_corrupt(sc, sc->ip->i_ino);
                        goto out;
                }
                break;
@@ -614,7 +614,7 @@
                        goto out_check_rmap;
                if (!xfs_sb_version_hasattr(&mp->m_sb) &&
                    !xfs_sb_version_hasattr2(&mp->m_sb))
-                       xfs_scrub_ino_set_corrupt(sc, sc->ip->i_ino);
+                       xchk_ino_set_corrupt(sc, sc->ip->i_ino);
                break;
        default:
                ASSERT(whichfork == XFS_DATA_FORK);
@@ -630,22 +630,22 @@
                goto out;
        case XFS_DINODE_FMT_EXTENTS:
                if (!(ifp->if_flags & XFS_IFEXTENTS)) {
-                       xfs_scrub_fblock_set_corrupt(sc, whichfork, 0);
+                       xchk_fblock_set_corrupt(sc, whichfork, 0);
                        goto out;
                }
                break;
        case XFS_DINODE_FMT_BTREE:
                if (whichfork == XFS_COW_FORK) {
-                       xfs_scrub_fblock_set_corrupt(sc, whichfork, 0);
+                       xchk_fblock_set_corrupt(sc, whichfork, 0);
                        goto out;
                }

-               error = xfs_scrub_bmap_btree(sc, whichfork, &info);
+               error = xchk_bmap_btree(sc, whichfork, &info);
                if (error)
                        goto out;
                break;
        default:
-               xfs_scrub_fblock_set_corrupt(sc, whichfork, 0);
+               xchk_fblock_set_corrupt(sc, whichfork, 0);
                goto out;
        }
@@ -655,37 +655,37 @@
        /* Now try to scrub the in-memory extent list. */
        if (!(ifp->if_flags & XFS_IFEXTENTS)) {
                error = xfs_iread_extents(sc->tp, ip, whichfork);
-               if (!xfs_scrub_fblock_process_error(sc, whichfork, 0, &error))
+               if (!xchk_fblock_process_error(sc, whichfork, 0, &error))
                        goto out;
        }

        /* Find the offset of the last extent in the mapping. */
        error = xfs_bmap_last_offset(ip, &endoff, whichfork);
-       if (!xfs_scrub_fblock_process_error(sc, whichfork, 0, &error))
+       if (!xchk_fblock_process_error(sc, whichfork, 0, &error))
                goto out;

        /* Scrub extent records. */
        info.lastoff = 0;
        ifp = XFS_IFORK_PTR(ip, whichfork);
        for_each_xfs_iext(ifp, &icur, &irec) {
-               if (xfs_scrub_should_terminate(sc, &error) ||
+               if (xchk_should_terminate(sc, &error) ||
                    (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT))
                        break;
                if (isnullstartblock(irec.br_startblock))
                        continue;
                if (irec.br_startoff >= endoff) {
-                       xfs_scrub_fblock_set_corrupt(sc, whichfork,
+                       xchk_fblock_set_corrupt(sc, whichfork,
                                        irec.br_startoff);
                        goto out;
                }
-               error = xfs_scrub_bmap_extent(ip, NULL, &info, &irec);
+               error = xchk_bmap_extent(ip, NULL, &info, &irec);
                if (error)
                        goto out;
        }

 out_check_rmap:
-       error = xfs_scrub_bmap_check_rmaps(sc, whichfork);
-       if (!xfs_scrub_fblock_xref_process_error(sc, whichfork, 0, &error))
+       error = xchk_bmap_check_rmaps(sc, whichfork);
+       if (!xchk_fblock_xref_process_error(sc, whichfork, 0, &error))
                goto out;
 out:
        return error;
@@ -693,27 +693,27 @@ xfs_scrub_bmap(

 /* Scrub an inode's data fork. */
 int
-xfs_scrub_bmap_data(
+xchk_bmap_data(
        struct xfs_scrub_context *sc)
 {
-       return xfs_scrub_bmap(sc, XFS_DATA_FORK);
+       return xchk_bmap(sc, XFS_DATA_FORK);
 }

 /* Scrub an inode's attr fork. */
 int
-xfs_scrub_bmap_attr(
+xchk_bmap_attr(
        struct xfs_scrub_context *sc)
 {
-       return xfs_scrub_bmap(sc, XFS_ATTR_FORK);
+       return xchk_bmap(sc, XFS_ATTR_FORK);
 }

 /* Scrub an inode's CoW fork. */
 int
-xfs_scrub_bmap_cow(
+xchk_bmap_cow(
        struct xfs_scrub_context *sc)
 {
        if (!xfs_is_reflink_inode(sc->ip))
                return -ENOENT;

-       return xfs_scrub_bmap(sc, XFS_COW_FORK);
+       return xchk_bmap(sc, XFS_COW_FORK);
 }
...@@ -29,7 +29,7 @@ ...@@ -29,7 +29,7 @@
* operational errors in common.c. * operational errors in common.c.
*/ */
static bool static bool
__xfs_scrub_btree_process_error( __xchk_btree_process_error(
struct xfs_scrub_context *sc, struct xfs_scrub_context *sc,
struct xfs_btree_cur *cur, struct xfs_btree_cur *cur,
int level, int level,
...@@ -43,7 +43,7 @@ __xfs_scrub_btree_process_error( ...@@ -43,7 +43,7 @@ __xfs_scrub_btree_process_error(
switch (*error) { switch (*error) {
case -EDEADLOCK: case -EDEADLOCK:
/* Used to restart an op with deadlock avoidance. */ /* Used to restart an op with deadlock avoidance. */
trace_xfs_scrub_deadlock_retry(sc->ip, sc->sm, *error); trace_xchk_deadlock_retry(sc->ip, sc->sm, *error);
break; break;
case -EFSBADCRC: case -EFSBADCRC:
case -EFSCORRUPTED: case -EFSCORRUPTED:
...@@ -53,10 +53,10 @@ __xfs_scrub_btree_process_error( ...@@ -53,10 +53,10 @@ __xfs_scrub_btree_process_error(
/* fall through */ /* fall through */
default: default:
if (cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) if (cur->bc_flags & XFS_BTREE_ROOT_IN_INODE)
trace_xfs_scrub_ifork_btree_op_error(sc, cur, level, trace_xchk_ifork_btree_op_error(sc, cur, level,
*error, ret_ip); *error, ret_ip);
else else
trace_xfs_scrub_btree_op_error(sc, cur, level, trace_xchk_btree_op_error(sc, cur, level,
*error, ret_ip); *error, ret_ip);
break; break;
} }
...@@ -64,30 +64,30 @@ __xfs_scrub_btree_process_error( ...@@ -64,30 +64,30 @@ __xfs_scrub_btree_process_error(
} }
bool bool
xfs_scrub_btree_process_error( xchk_btree_process_error(
struct xfs_scrub_context *sc, struct xfs_scrub_context *sc,
struct xfs_btree_cur *cur, struct xfs_btree_cur *cur,
int level, int level,
int *error) int *error)
{ {
return __xfs_scrub_btree_process_error(sc, cur, level, error, return __xchk_btree_process_error(sc, cur, level, error,
XFS_SCRUB_OFLAG_CORRUPT, __return_address); XFS_SCRUB_OFLAG_CORRUPT, __return_address);
} }
bool bool
xfs_scrub_btree_xref_process_error( xchk_btree_xref_process_error(
struct xfs_scrub_context *sc, struct xfs_scrub_context *sc,
struct xfs_btree_cur *cur, struct xfs_btree_cur *cur,
int level, int level,
int *error) int *error)
{ {
return __xfs_scrub_btree_process_error(sc, cur, level, error, return __xchk_btree_process_error(sc, cur, level, error,
XFS_SCRUB_OFLAG_XFAIL, __return_address); XFS_SCRUB_OFLAG_XFAIL, __return_address);
} }
/* Record btree block corruption. */ /* Record btree block corruption. */
static void static void
__xfs_scrub_btree_set_corrupt( __xchk_btree_set_corrupt(
struct xfs_scrub_context *sc, struct xfs_scrub_context *sc,
struct xfs_btree_cur *cur, struct xfs_btree_cur *cur,
int level, int level,
...@@ -97,30 +97,30 @@ __xfs_scrub_btree_set_corrupt( ...@@ -97,30 +97,30 @@ __xfs_scrub_btree_set_corrupt(
sc->sm->sm_flags |= errflag; sc->sm->sm_flags |= errflag;
if (cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) if (cur->bc_flags & XFS_BTREE_ROOT_IN_INODE)
trace_xfs_scrub_ifork_btree_error(sc, cur, level, trace_xchk_ifork_btree_error(sc, cur, level,
ret_ip); ret_ip);
else else
trace_xfs_scrub_btree_error(sc, cur, level, trace_xchk_btree_error(sc, cur, level,
ret_ip); ret_ip);
} }
void void
xfs_scrub_btree_set_corrupt( xchk_btree_set_corrupt(
struct xfs_scrub_context *sc, struct xfs_scrub_context *sc,
struct xfs_btree_cur *cur, struct xfs_btree_cur *cur,
int level) int level)
{ {
__xfs_scrub_btree_set_corrupt(sc, cur, level, XFS_SCRUB_OFLAG_CORRUPT, __xchk_btree_set_corrupt(sc, cur, level, XFS_SCRUB_OFLAG_CORRUPT,
__return_address); __return_address);
} }
void void
xfs_scrub_btree_xref_set_corrupt( xchk_btree_xref_set_corrupt(
struct xfs_scrub_context *sc, struct xfs_scrub_context *sc,
struct xfs_btree_cur *cur, struct xfs_btree_cur *cur,
int level) int level)
{ {
__xfs_scrub_btree_set_corrupt(sc, cur, level, XFS_SCRUB_OFLAG_XCORRUPT, __xchk_btree_set_corrupt(sc, cur, level, XFS_SCRUB_OFLAG_XCORRUPT,
__return_address); __return_address);
} }
...@@ -129,8 +129,8 @@ xfs_scrub_btree_xref_set_corrupt( ...@@ -129,8 +129,8 @@ xfs_scrub_btree_xref_set_corrupt(
* keys. * keys.
*/ */
STATIC void STATIC void
xfs_scrub_btree_rec( xchk_btree_rec(
struct xfs_scrub_btree *bs) struct xchk_btree *bs)
{ {
struct xfs_btree_cur *cur = bs->cur; struct xfs_btree_cur *cur = bs->cur;
union xfs_btree_rec *rec; union xfs_btree_rec *rec;
...@@ -144,11 +144,11 @@ xfs_scrub_btree_rec( ...@@ -144,11 +144,11 @@ xfs_scrub_btree_rec(
block = xfs_btree_get_block(cur, 0, &bp); block = xfs_btree_get_block(cur, 0, &bp);
rec = xfs_btree_rec_addr(cur, cur->bc_ptrs[0], block); rec = xfs_btree_rec_addr(cur, cur->bc_ptrs[0], block);
trace_xfs_scrub_btree_rec(bs->sc, cur, 0); trace_xchk_btree_rec(bs->sc, cur, 0);
/* If this isn't the first record, are they in order? */ /* If this isn't the first record, are they in order? */
if (!bs->firstrec && !cur->bc_ops->recs_inorder(cur, &bs->lastrec, rec)) if (!bs->firstrec && !cur->bc_ops->recs_inorder(cur, &bs->lastrec, rec))
xfs_scrub_btree_set_corrupt(bs->sc, cur, 0); xchk_btree_set_corrupt(bs->sc, cur, 0);
bs->firstrec = false; bs->firstrec = false;
memcpy(&bs->lastrec, rec, cur->bc_ops->rec_len); memcpy(&bs->lastrec, rec, cur->bc_ops->rec_len);
...@@ -160,7 +160,7 @@ xfs_scrub_btree_rec( ...@@ -160,7 +160,7 @@ xfs_scrub_btree_rec(
keyblock = xfs_btree_get_block(cur, 1, &bp); keyblock = xfs_btree_get_block(cur, 1, &bp);
keyp = xfs_btree_key_addr(cur, cur->bc_ptrs[1], keyblock); keyp = xfs_btree_key_addr(cur, cur->bc_ptrs[1], keyblock);
if (cur->bc_ops->diff_two_keys(cur, &key, keyp) < 0) if (cur->bc_ops->diff_two_keys(cur, &key, keyp) < 0)
xfs_scrub_btree_set_corrupt(bs->sc, cur, 1); xchk_btree_set_corrupt(bs->sc, cur, 1);
if (!(cur->bc_flags & XFS_BTREE_OVERLAPPING)) if (!(cur->bc_flags & XFS_BTREE_OVERLAPPING))
return; return;
...@@ -169,7 +169,7 @@ xfs_scrub_btree_rec( ...@@ -169,7 +169,7 @@ xfs_scrub_btree_rec(
cur->bc_ops->init_high_key_from_rec(&hkey, rec); cur->bc_ops->init_high_key_from_rec(&hkey, rec);
keyp = xfs_btree_high_key_addr(cur, cur->bc_ptrs[1], keyblock); keyp = xfs_btree_high_key_addr(cur, cur->bc_ptrs[1], keyblock);
if (cur->bc_ops->diff_two_keys(cur, keyp, &hkey) < 0) if (cur->bc_ops->diff_two_keys(cur, keyp, &hkey) < 0)
xfs_scrub_btree_set_corrupt(bs->sc, cur, 1); xchk_btree_set_corrupt(bs->sc, cur, 1);
} }
/* /*
...@@ -177,8 +177,8 @@ xfs_scrub_btree_rec( ...@@ -177,8 +177,8 @@ xfs_scrub_btree_rec(
* keys. * keys.
*/ */
STATIC void STATIC void
xfs_scrub_btree_key( xchk_btree_key(
struct xfs_scrub_btree *bs, struct xchk_btree *bs,
int level) int level)
{ {
struct xfs_btree_cur *cur = bs->cur; struct xfs_btree_cur *cur = bs->cur;
...@@ -191,12 +191,12 @@ xfs_scrub_btree_key( ...@@ -191,12 +191,12 @@ xfs_scrub_btree_key(
block = xfs_btree_get_block(cur, level, &bp); block = xfs_btree_get_block(cur, level, &bp);
key = xfs_btree_key_addr(cur, cur->bc_ptrs[level], block); key = xfs_btree_key_addr(cur, cur->bc_ptrs[level], block);
trace_xfs_scrub_btree_key(bs->sc, cur, level); trace_xchk_btree_key(bs->sc, cur, level);
/* If this isn't the first key, are they in order? */ /* If this isn't the first key, are they in order? */
if (!bs->firstkey[level] && if (!bs->firstkey[level] &&
!cur->bc_ops->keys_inorder(cur, &bs->lastkey[level], key)) !cur->bc_ops->keys_inorder(cur, &bs->lastkey[level], key))
xfs_scrub_btree_set_corrupt(bs->sc, cur, level); xchk_btree_set_corrupt(bs->sc, cur, level);
bs->firstkey[level] = false; bs->firstkey[level] = false;
memcpy(&bs->lastkey[level], key, cur->bc_ops->key_len); memcpy(&bs->lastkey[level], key, cur->bc_ops->key_len);
...@@ -207,7 +207,7 @@ xfs_scrub_btree_key( ...@@ -207,7 +207,7 @@ xfs_scrub_btree_key(
keyblock = xfs_btree_get_block(cur, level + 1, &bp); keyblock = xfs_btree_get_block(cur, level + 1, &bp);
keyp = xfs_btree_key_addr(cur, cur->bc_ptrs[level + 1], keyblock); keyp = xfs_btree_key_addr(cur, cur->bc_ptrs[level + 1], keyblock);
if (cur->bc_ops->diff_two_keys(cur, key, keyp) < 0) if (cur->bc_ops->diff_two_keys(cur, key, keyp) < 0)
xfs_scrub_btree_set_corrupt(bs->sc, cur, level); xchk_btree_set_corrupt(bs->sc, cur, level);
if (!(cur->bc_flags & XFS_BTREE_OVERLAPPING)) if (!(cur->bc_flags & XFS_BTREE_OVERLAPPING))
return; return;
...@@ -216,7 +216,7 @@ xfs_scrub_btree_key( ...@@ -216,7 +216,7 @@ xfs_scrub_btree_key(
key = xfs_btree_high_key_addr(cur, cur->bc_ptrs[level], block); key = xfs_btree_high_key_addr(cur, cur->bc_ptrs[level], block);
keyp = xfs_btree_high_key_addr(cur, cur->bc_ptrs[level + 1], keyblock); keyp = xfs_btree_high_key_addr(cur, cur->bc_ptrs[level + 1], keyblock);
if (cur->bc_ops->diff_two_keys(cur, keyp, key) < 0) if (cur->bc_ops->diff_two_keys(cur, keyp, key) < 0)
xfs_scrub_btree_set_corrupt(bs->sc, cur, level); xchk_btree_set_corrupt(bs->sc, cur, level);
} }
/* /*
...@@ -224,8 +224,8 @@ xfs_scrub_btree_key( ...@@ -224,8 +224,8 @@ xfs_scrub_btree_key(
* Callers do not need to set the corrupt flag. * Callers do not need to set the corrupt flag.
*/ */
static bool static bool
xfs_scrub_btree_ptr_ok( xchk_btree_ptr_ok(
struct xfs_scrub_btree *bs, struct xchk_btree *bs,
int level, int level,
union xfs_btree_ptr *ptr) union xfs_btree_ptr *ptr)
{ {
...@@ -242,15 +242,15 @@ xfs_scrub_btree_ptr_ok( ...@@ -242,15 +242,15 @@ xfs_scrub_btree_ptr_ok(
else else
res = xfs_btree_check_sptr(bs->cur, be32_to_cpu(ptr->s), level); res = xfs_btree_check_sptr(bs->cur, be32_to_cpu(ptr->s), level);
if (!res) if (!res)
xfs_scrub_btree_set_corrupt(bs->sc, bs->cur, level); xchk_btree_set_corrupt(bs->sc, bs->cur, level);
return res; return res;
} }
/* Check that a btree block's sibling matches what we expect. */ /* Check that a btree block's sibling matches what we expect. */
STATIC int STATIC int
xfs_scrub_btree_block_check_sibling( xchk_btree_block_check_sibling(
struct xfs_scrub_btree *bs, struct xchk_btree *bs,
int level, int level,
int direction, int direction,
union xfs_btree_ptr *sibling) union xfs_btree_ptr *sibling)
...@@ -264,7 +264,7 @@ xfs_scrub_btree_block_check_sibling( ...@@ -264,7 +264,7 @@ xfs_scrub_btree_block_check_sibling(
int error; int error;
error = xfs_btree_dup_cursor(cur, &ncur); error = xfs_btree_dup_cursor(cur, &ncur);
if (!xfs_scrub_btree_process_error(bs->sc, cur, level + 1, &error) || if (!xchk_btree_process_error(bs->sc, cur, level + 1, &error) ||
!ncur) !ncur)
return error; return error;
...@@ -278,7 +278,7 @@ xfs_scrub_btree_block_check_sibling( ...@@ -278,7 +278,7 @@ xfs_scrub_btree_block_check_sibling(
else else
error = xfs_btree_decrement(ncur, level + 1, &success); error = xfs_btree_decrement(ncur, level + 1, &success);
if (error == 0 && success) if (error == 0 && success)
xfs_scrub_btree_set_corrupt(bs->sc, cur, level); xchk_btree_set_corrupt(bs->sc, cur, level);
error = 0; error = 0;
goto out; goto out;
} }
...@@ -288,23 +288,23 @@ xfs_scrub_btree_block_check_sibling( ...@@ -288,23 +288,23 @@ xfs_scrub_btree_block_check_sibling(
error = xfs_btree_increment(ncur, level + 1, &success); error = xfs_btree_increment(ncur, level + 1, &success);
else else
error = xfs_btree_decrement(ncur, level + 1, &success); error = xfs_btree_decrement(ncur, level + 1, &success);
if (!xfs_scrub_btree_process_error(bs->sc, cur, level + 1, &error)) if (!xchk_btree_process_error(bs->sc, cur, level + 1, &error))
goto out; goto out;
if (!success) { if (!success) {
xfs_scrub_btree_set_corrupt(bs->sc, cur, level + 1); xchk_btree_set_corrupt(bs->sc, cur, level + 1);
goto out; goto out;
} }
/* Compare upper level pointer to sibling pointer. */ /* Compare upper level pointer to sibling pointer. */
pblock = xfs_btree_get_block(ncur, level + 1, &pbp); pblock = xfs_btree_get_block(ncur, level + 1, &pbp);
pp = xfs_btree_ptr_addr(ncur, ncur->bc_ptrs[level + 1], pblock); pp = xfs_btree_ptr_addr(ncur, ncur->bc_ptrs[level + 1], pblock);
if (!xfs_scrub_btree_ptr_ok(bs, level + 1, pp)) if (!xchk_btree_ptr_ok(bs, level + 1, pp))
goto out; goto out;
if (pbp) if (pbp)
xfs_scrub_buffer_recheck(bs->sc, pbp); xchk_buffer_recheck(bs->sc, pbp);
if (xfs_btree_diff_two_ptrs(cur, pp, sibling)) if (xfs_btree_diff_two_ptrs(cur, pp, sibling))
xfs_scrub_btree_set_corrupt(bs->sc, cur, level); xchk_btree_set_corrupt(bs->sc, cur, level);
out: out:
xfs_btree_del_cursor(ncur, XFS_BTREE_ERROR); xfs_btree_del_cursor(ncur, XFS_BTREE_ERROR);
return error; return error;
...@@ -312,8 +312,8 @@ xfs_scrub_btree_block_check_sibling( ...@@ -312,8 +312,8 @@ xfs_scrub_btree_block_check_sibling(
/* Check the siblings of a btree block. */ /* Check the siblings of a btree block. */
STATIC int STATIC int
xfs_scrub_btree_block_check_siblings( xchk_btree_block_check_siblings(
struct xfs_scrub_btree *bs, struct xchk_btree *bs,
struct xfs_btree_block *block) struct xfs_btree_block *block)
{ {
struct xfs_btree_cur *cur = bs->cur; struct xfs_btree_cur *cur = bs->cur;
...@@ -330,7 +330,7 @@ xfs_scrub_btree_block_check_siblings( ...@@ -330,7 +330,7 @@ xfs_scrub_btree_block_check_siblings(
if (level == cur->bc_nlevels - 1) { if (level == cur->bc_nlevels - 1) {
if (!xfs_btree_ptr_is_null(cur, &leftsib) || if (!xfs_btree_ptr_is_null(cur, &leftsib) ||
!xfs_btree_ptr_is_null(cur, &rightsib)) !xfs_btree_ptr_is_null(cur, &rightsib))
xfs_scrub_btree_set_corrupt(bs->sc, cur, level); xchk_btree_set_corrupt(bs->sc, cur, level);
goto out; goto out;
} }
...@@ -339,10 +339,10 @@ xfs_scrub_btree_block_check_siblings( ...@@ -339,10 +339,10 @@ xfs_scrub_btree_block_check_siblings(
* parent level pointers? * parent level pointers?
* (These functions absorb error codes for us.) * (These functions absorb error codes for us.)
*/ */
error = xfs_scrub_btree_block_check_sibling(bs, level, -1, &leftsib); error = xchk_btree_block_check_sibling(bs, level, -1, &leftsib);
if (error) if (error)
return error; return error;
error = xfs_scrub_btree_block_check_sibling(bs, level, 1, &rightsib); error = xchk_btree_block_check_sibling(bs, level, 1, &rightsib);
if (error) if (error)
return error; return error;
out: out:
...@@ -360,8 +360,8 @@ struct check_owner { ...@@ -360,8 +360,8 @@ struct check_owner {
* an rmap record for it. * an rmap record for it.
*/ */
STATIC int STATIC int
xfs_scrub_btree_check_block_owner( xchk_btree_check_block_owner(
struct xfs_scrub_btree *bs, struct xchk_btree *bs,
int level, int level,
xfs_daddr_t daddr) xfs_daddr_t daddr)
{ {
...@@ -380,13 +380,13 @@ xfs_scrub_btree_check_block_owner( ...@@ -380,13 +380,13 @@ xfs_scrub_btree_check_block_owner(
init_sa = bs->cur->bc_flags & XFS_BTREE_LONG_PTRS; init_sa = bs->cur->bc_flags & XFS_BTREE_LONG_PTRS;
if (init_sa) { if (init_sa) {
error = xfs_scrub_ag_init(bs->sc, agno, &bs->sc->sa); error = xchk_ag_init(bs->sc, agno, &bs->sc->sa);
if (!xfs_scrub_btree_xref_process_error(bs->sc, bs->cur, if (!xchk_btree_xref_process_error(bs->sc, bs->cur,
level, &error)) level, &error))
return error; return error;
} }
xfs_scrub_xref_is_used_space(bs->sc, agbno, 1); xchk_xref_is_used_space(bs->sc, agbno, 1);
/* /*
* The bnobt scrubber aliases bs->cur to bs->sc->sa.bno_cur, so we * The bnobt scrubber aliases bs->cur to bs->sc->sa.bno_cur, so we
* have to nullify it (to shut down further block owner checks) if * have to nullify it (to shut down further block owner checks) if
...@@ -395,20 +395,20 @@ xfs_scrub_btree_check_block_owner( ...@@ -395,20 +395,20 @@ xfs_scrub_btree_check_block_owner(
if (!bs->sc->sa.bno_cur && btnum == XFS_BTNUM_BNO) if (!bs->sc->sa.bno_cur && btnum == XFS_BTNUM_BNO)
bs->cur = NULL; bs->cur = NULL;
xfs_scrub_xref_is_owned_by(bs->sc, agbno, 1, bs->oinfo); xchk_xref_is_owned_by(bs->sc, agbno, 1, bs->oinfo);
if (!bs->sc->sa.rmap_cur && btnum == XFS_BTNUM_RMAP) if (!bs->sc->sa.rmap_cur && btnum == XFS_BTNUM_RMAP)
bs->cur = NULL; bs->cur = NULL;
if (init_sa) if (init_sa)
xfs_scrub_ag_free(bs->sc, &bs->sc->sa); xchk_ag_free(bs->sc, &bs->sc->sa);
return error; return error;
} }
/* Check the owner of a btree block. */ /* Check the owner of a btree block. */
STATIC int STATIC int
xfs_scrub_btree_check_owner( xchk_btree_check_owner(
struct xfs_scrub_btree *bs, struct xchk_btree *bs,
int level, int level,
struct xfs_buf *bp) struct xfs_buf *bp)
{ {
...@@ -437,7 +437,7 @@ xfs_scrub_btree_check_owner( ...@@ -437,7 +437,7 @@ xfs_scrub_btree_check_owner(
return 0; return 0;
} }
return xfs_scrub_btree_check_block_owner(bs, level, XFS_BUF_ADDR(bp)); return xchk_btree_check_block_owner(bs, level, XFS_BUF_ADDR(bp));
} }
/* /*
...@@ -445,8 +445,8 @@ xfs_scrub_btree_check_owner( ...@@ -445,8 +445,8 @@ xfs_scrub_btree_check_owner(
* special blocks that don't require that. * special blocks that don't require that.
*/ */
STATIC void STATIC void
xfs_scrub_btree_check_minrecs( xchk_btree_check_minrecs(
struct xfs_scrub_btree *bs, struct xchk_btree *bs,
int level, int level,
struct xfs_btree_block *block) struct xfs_btree_block *block)
{ {
...@@ -475,7 +475,7 @@ xfs_scrub_btree_check_minrecs( ...@@ -475,7 +475,7 @@ xfs_scrub_btree_check_minrecs(
if (level >= ok_level) if (level >= ok_level)
return; return;
xfs_scrub_btree_set_corrupt(bs->sc, bs->cur, level); xchk_btree_set_corrupt(bs->sc, bs->cur, level);
} }
/* /*
...@@ -483,8 +483,8 @@ xfs_scrub_btree_check_minrecs( ...@@ -483,8 +483,8 @@ xfs_scrub_btree_check_minrecs(
* and buffer pointers (if applicable) if they're ok to use. * and buffer pointers (if applicable) if they're ok to use.
*/ */
STATIC int STATIC int
xfs_scrub_btree_get_block( xchk_btree_get_block(
struct xfs_scrub_btree *bs, struct xchk_btree *bs,
int level, int level,
union xfs_btree_ptr *pp, union xfs_btree_ptr *pp,
struct xfs_btree_block **pblock, struct xfs_btree_block **pblock,
...@@ -497,7 +497,7 @@ xfs_scrub_btree_get_block( ...@@ -497,7 +497,7 @@ xfs_scrub_btree_get_block(
*pbp = NULL; *pbp = NULL;
error = xfs_btree_lookup_get_block(bs->cur, level, pp, pblock); error = xfs_btree_lookup_get_block(bs->cur, level, pp, pblock);
if (!xfs_scrub_btree_process_error(bs->sc, bs->cur, level, &error) || if (!xchk_btree_process_error(bs->sc, bs->cur, level, &error) ||
!*pblock) !*pblock)
return error; return error;
...@@ -509,19 +509,19 @@ xfs_scrub_btree_get_block( ...@@ -509,19 +509,19 @@ xfs_scrub_btree_get_block(
failed_at = __xfs_btree_check_sblock(bs->cur, *pblock, failed_at = __xfs_btree_check_sblock(bs->cur, *pblock,
level, *pbp); level, *pbp);
if (failed_at) { if (failed_at) {
xfs_scrub_btree_set_corrupt(bs->sc, bs->cur, level); xchk_btree_set_corrupt(bs->sc, bs->cur, level);
return 0; return 0;
} }
if (*pbp) if (*pbp)
xfs_scrub_buffer_recheck(bs->sc, *pbp); xchk_buffer_recheck(bs->sc, *pbp);
xfs_scrub_btree_check_minrecs(bs, level, *pblock); xchk_btree_check_minrecs(bs, level, *pblock);
/* /*
* Check the block's owner; this function absorbs error codes * Check the block's owner; this function absorbs error codes
* for us. * for us.
*/ */
error = xfs_scrub_btree_check_owner(bs, level, *pbp); error = xchk_btree_check_owner(bs, level, *pbp);
if (error) if (error)
return error; return error;
...@@ -529,7 +529,7 @@ xfs_scrub_btree_get_block( ...@@ -529,7 +529,7 @@ xfs_scrub_btree_get_block(
* Check the block's siblings; this function absorbs error codes * Check the block's siblings; this function absorbs error codes
* for us. * for us.
*/ */
return xfs_scrub_btree_block_check_siblings(bs, *pblock); return xchk_btree_block_check_siblings(bs, *pblock);
} }
/* /*
...@@ -537,8 +537,8 @@ xfs_scrub_btree_get_block( ...@@ -537,8 +537,8 @@ xfs_scrub_btree_get_block(
* in the parent block. * in the parent block.
*/ */
STATIC void STATIC void
xfs_scrub_btree_block_keys( xchk_btree_block_keys(
struct xfs_scrub_btree *bs, struct xchk_btree *bs,
int level, int level,
struct xfs_btree_block *block) struct xfs_btree_block *block)
{ {
...@@ -562,7 +562,7 @@ xfs_scrub_btree_block_keys( ...@@ -562,7 +562,7 @@ xfs_scrub_btree_block_keys(
parent_block); parent_block);
if (cur->bc_ops->diff_two_keys(cur, &block_keys, parent_keys) != 0) if (cur->bc_ops->diff_two_keys(cur, &block_keys, parent_keys) != 0)
xfs_scrub_btree_set_corrupt(bs->sc, cur, 1); xchk_btree_set_corrupt(bs->sc, cur, 1);
if (!(cur->bc_flags & XFS_BTREE_OVERLAPPING)) if (!(cur->bc_flags & XFS_BTREE_OVERLAPPING))
return; return;
...@@ -573,7 +573,7 @@ xfs_scrub_btree_block_keys( ...@@ -573,7 +573,7 @@ xfs_scrub_btree_block_keys(
parent_block); parent_block);
if (cur->bc_ops->diff_two_keys(cur, high_bk, high_pk) != 0) if (cur->bc_ops->diff_two_keys(cur, high_bk, high_pk) != 0)
xfs_scrub_btree_set_corrupt(bs->sc, cur, 1); xchk_btree_set_corrupt(bs->sc, cur, 1);
} }
/* /*
...@@ -582,14 +582,14 @@ xfs_scrub_btree_block_keys( ...@@ -582,14 +582,14 @@ xfs_scrub_btree_block_keys(
* so that the caller can verify individual records. * so that the caller can verify individual records.
*/ */
int int
xfs_scrub_btree( xchk_btree(
struct xfs_scrub_context *sc, struct xfs_scrub_context *sc,
struct xfs_btree_cur *cur, struct xfs_btree_cur *cur,
xfs_scrub_btree_rec_fn scrub_fn, xchk_btree_rec_fn scrub_fn,
struct xfs_owner_info *oinfo, struct xfs_owner_info *oinfo,
void *private) void *private)
{ {
struct xfs_scrub_btree bs = { NULL }; struct xchk_btree bs = { NULL };
union xfs_btree_ptr ptr; union xfs_btree_ptr ptr;
union xfs_btree_ptr *pp; union xfs_btree_ptr *pp;
union xfs_btree_rec *recp; union xfs_btree_rec *recp;
...@@ -614,7 +614,7 @@ xfs_scrub_btree( ...@@ -614,7 +614,7 @@ xfs_scrub_btree(
/* Don't try to check a tree with a height we can't handle. */ /* Don't try to check a tree with a height we can't handle. */
if (cur->bc_nlevels > XFS_BTREE_MAXLEVELS) { if (cur->bc_nlevels > XFS_BTREE_MAXLEVELS) {
xfs_scrub_btree_set_corrupt(sc, cur, 0); xchk_btree_set_corrupt(sc, cur, 0);
goto out; goto out;
} }
...@@ -624,9 +624,9 @@ xfs_scrub_btree( ...@@ -624,9 +624,9 @@ xfs_scrub_btree(
*/ */
level = cur->bc_nlevels - 1; level = cur->bc_nlevels - 1;
cur->bc_ops->init_ptr_from_cur(cur, &ptr); cur->bc_ops->init_ptr_from_cur(cur, &ptr);
if (!xfs_scrub_btree_ptr_ok(&bs, cur->bc_nlevels, &ptr)) if (!xchk_btree_ptr_ok(&bs, cur->bc_nlevels, &ptr))
goto out; goto out;
error = xfs_scrub_btree_get_block(&bs, level, &ptr, &block, &bp); error = xchk_btree_get_block(&bs, level, &ptr, &block, &bp);
if (error || !block) if (error || !block)
goto out; goto out;
...@@ -639,7 +639,7 @@ xfs_scrub_btree( ...@@ -639,7 +639,7 @@ xfs_scrub_btree(
/* End of leaf, pop back towards the root. */ /* End of leaf, pop back towards the root. */
if (cur->bc_ptrs[level] > if (cur->bc_ptrs[level] >
be16_to_cpu(block->bb_numrecs)) { be16_to_cpu(block->bb_numrecs)) {
xfs_scrub_btree_block_keys(&bs, level, block); xchk_btree_block_keys(&bs, level, block);
if (level < cur->bc_nlevels - 1) if (level < cur->bc_nlevels - 1)
cur->bc_ptrs[level + 1]++; cur->bc_ptrs[level + 1]++;
level++; level++;
...@@ -647,14 +647,14 @@ xfs_scrub_btree( ...@@ -647,14 +647,14 @@ xfs_scrub_btree(
} }
/* Records in order for scrub? */ /* Records in order for scrub? */
xfs_scrub_btree_rec(&bs); xchk_btree_rec(&bs);
/* Call out to the record checker. */ /* Call out to the record checker. */
recp = xfs_btree_rec_addr(cur, cur->bc_ptrs[0], block); recp = xfs_btree_rec_addr(cur, cur->bc_ptrs[0], block);
error = bs.scrub_rec(&bs, recp); error = bs.scrub_rec(&bs, recp);
if (error) if (error)
break; break;
if (xfs_scrub_should_terminate(sc, &error) || if (xchk_should_terminate(sc, &error) ||
(sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)) (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT))
break; break;
...@@ -664,7 +664,7 @@ xfs_scrub_btree( ...@@ -664,7 +664,7 @@ xfs_scrub_btree(
/* End of node, pop back towards the root. */ /* End of node, pop back towards the root. */
if (cur->bc_ptrs[level] > be16_to_cpu(block->bb_numrecs)) { if (cur->bc_ptrs[level] > be16_to_cpu(block->bb_numrecs)) {
xfs_scrub_btree_block_keys(&bs, level, block); xchk_btree_block_keys(&bs, level, block);
if (level < cur->bc_nlevels - 1) if (level < cur->bc_nlevels - 1)
cur->bc_ptrs[level + 1]++; cur->bc_ptrs[level + 1]++;
level++; level++;
...@@ -672,16 +672,16 @@ xfs_scrub_btree( ...@@ -672,16 +672,16 @@ xfs_scrub_btree(
} }
/* Keys in order for scrub? */ /* Keys in order for scrub? */
xfs_scrub_btree_key(&bs, level); xchk_btree_key(&bs, level);
/* Drill another level deeper. */ /* Drill another level deeper. */
pp = xfs_btree_ptr_addr(cur, cur->bc_ptrs[level], block); pp = xfs_btree_ptr_addr(cur, cur->bc_ptrs[level], block);
if (!xfs_scrub_btree_ptr_ok(&bs, level, pp)) { if (!xchk_btree_ptr_ok(&bs, level, pp)) {
cur->bc_ptrs[level]++; cur->bc_ptrs[level]++;
continue; continue;
} }
level--; level--;
error = xfs_scrub_btree_get_block(&bs, level, pp, &block, &bp); error = xchk_btree_get_block(&bs, level, pp, &block, &bp);
if (error || !block) if (error || !block)
goto out; goto out;
...@@ -692,7 +692,7 @@ xfs_scrub_btree( ...@@ -692,7 +692,7 @@ xfs_scrub_btree(
/* Process deferred owner checks on btree blocks. */ /* Process deferred owner checks on btree blocks. */
list_for_each_entry_safe(co, n, &bs.to_check, list) { list_for_each_entry_safe(co, n, &bs.to_check, list) {
if (!error && bs.cur) if (!error && bs.cur)
error = xfs_scrub_btree_check_block_owner(&bs, error = xchk_btree_check_block_owner(&bs,
co->level, co->daddr); co->level, co->daddr);
list_del(&co->list); list_del(&co->list);
kmem_free(co); kmem_free(co);
......
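The deferred owner checks that close out xchk_btree() above hang off bs->to_check; the bookkeeping struct itself sits in an elided hunk, but its fields can be read off the loop (co->level, co->daddr, co->list). As a reading aid, here is a sketch of the deferral path, mirroring what xchk_btree_check_owner() does when the AG headers cannot be taken yet; the allocation snippet is illustrative, not a line from this patch:

struct check_owner {
	struct list_head	list;
	xfs_daddr_t		daddr;
	int			level;
};

/* Remember this block so the post-walk loop can verify its owner. */
co = kmem_alloc(sizeof(struct check_owner), KM_MAYFAIL);
if (!co)
	return -ENOMEM;
co->level = level;
co->daddr = XFS_BUF_ADDR(bp);
list_add_tail(&co->list, &bs->to_check);
return 0;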
...@@ -9,32 +9,32 @@ ...@@ -9,32 +9,32 @@
/* btree scrub */ /* btree scrub */
/* Check for btree operation errors. */ /* Check for btree operation errors. */
bool xfs_scrub_btree_process_error(struct xfs_scrub_context *sc, bool xchk_btree_process_error(struct xfs_scrub_context *sc,
struct xfs_btree_cur *cur, int level, int *error); struct xfs_btree_cur *cur, int level, int *error);
/* Check for btree xref operation errors. */ /* Check for btree xref operation errors. */
bool xfs_scrub_btree_xref_process_error(struct xfs_scrub_context *sc, bool xchk_btree_xref_process_error(struct xfs_scrub_context *sc,
struct xfs_btree_cur *cur, int level, struct xfs_btree_cur *cur, int level,
int *error); int *error);
/* Check for btree corruption. */ /* Check for btree corruption. */
void xfs_scrub_btree_set_corrupt(struct xfs_scrub_context *sc, void xchk_btree_set_corrupt(struct xfs_scrub_context *sc,
struct xfs_btree_cur *cur, int level); struct xfs_btree_cur *cur, int level);
/* Check for btree xref discrepancies. */ /* Check for btree xref discrepancies. */
void xfs_scrub_btree_xref_set_corrupt(struct xfs_scrub_context *sc, void xchk_btree_xref_set_corrupt(struct xfs_scrub_context *sc,
struct xfs_btree_cur *cur, int level); struct xfs_btree_cur *cur, int level);
struct xfs_scrub_btree; struct xchk_btree;
typedef int (*xfs_scrub_btree_rec_fn)( typedef int (*xchk_btree_rec_fn)(
struct xfs_scrub_btree *bs, struct xchk_btree *bs,
union xfs_btree_rec *rec); union xfs_btree_rec *rec);
struct xfs_scrub_btree { struct xchk_btree {
/* caller-provided scrub state */ /* caller-provided scrub state */
struct xfs_scrub_context *sc; struct xfs_scrub_context *sc;
struct xfs_btree_cur *cur; struct xfs_btree_cur *cur;
xfs_scrub_btree_rec_fn scrub_rec; xchk_btree_rec_fn scrub_rec;
struct xfs_owner_info *oinfo; struct xfs_owner_info *oinfo;
void *private; void *private;
...@@ -45,8 +45,8 @@ struct xfs_scrub_btree { ...@@ -45,8 +45,8 @@ struct xfs_scrub_btree {
bool firstkey[XFS_BTREE_MAXLEVELS]; bool firstkey[XFS_BTREE_MAXLEVELS];
struct list_head to_check; struct list_head to_check;
}; };
int xfs_scrub_btree(struct xfs_scrub_context *sc, struct xfs_btree_cur *cur, int xchk_btree(struct xfs_scrub_context *sc, struct xfs_btree_cur *cur,
xfs_scrub_btree_rec_fn scrub_fn, xchk_btree_rec_fn scrub_fn,
struct xfs_owner_info *oinfo, void *private); struct xfs_owner_info *oinfo, void *private);
#endif /* __XFS_SCRUB_BTREE_H__ */ #endif /* __XFS_SCRUB_BTREE_H__ */
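With the header renamed, a btree scrubber plugs an xchk_btree_rec_fn into xchk_btree(). A hypothetical caller under the new names (xchk_foobt and xchk_foobt_rec are illustrative, not symbols touched by this patch; the owner-info setup follows the usual AG-btree pattern):

/* Per-record callback: flag whatever violates an invariant. */
STATIC int
xchk_foobt_rec(
	struct xchk_btree		*bs,
	union xfs_btree_rec		*rec)
{
	if (0 /* record fails some per-record check */)
		xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
	return 0;
}

/* Walk the tree, invoking the callback on every leaf record. */
STATIC int
xchk_foobt(
	struct xfs_scrub_context	*sc)
{
	struct xfs_owner_info		oinfo;

	xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_AG);
	return xchk_btree(sc, sc->sa.bno_cur, xchk_foobt_rec, &oinfo, NULL);
}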
...@@ -68,7 +68,7 @@ ...@@ -68,7 +68,7 @@
/* Check for operational errors. */ /* Check for operational errors. */
static bool static bool
__xfs_scrub_process_error( __xchk_process_error(
struct xfs_scrub_context *sc, struct xfs_scrub_context *sc,
xfs_agnumber_t agno, xfs_agnumber_t agno,
xfs_agblock_t bno, xfs_agblock_t bno,
...@@ -81,7 +81,7 @@ __xfs_scrub_process_error( ...@@ -81,7 +81,7 @@ __xfs_scrub_process_error(
return true; return true;
case -EDEADLOCK: case -EDEADLOCK:
/* Used to restart an op with deadlock avoidance. */ /* Used to restart an op with deadlock avoidance. */
trace_xfs_scrub_deadlock_retry(sc->ip, sc->sm, *error); trace_xchk_deadlock_retry(sc->ip, sc->sm, *error);
break; break;
case -EFSBADCRC: case -EFSBADCRC:
case -EFSCORRUPTED: case -EFSCORRUPTED:
...@@ -90,7 +90,7 @@ __xfs_scrub_process_error( ...@@ -90,7 +90,7 @@ __xfs_scrub_process_error(
*error = 0; *error = 0;
/* fall through */ /* fall through */
default: default:
trace_xfs_scrub_op_error(sc, agno, bno, *error, trace_xchk_op_error(sc, agno, bno, *error,
ret_ip); ret_ip);
break; break;
} }
...@@ -98,30 +98,30 @@ __xfs_scrub_process_error( ...@@ -98,30 +98,30 @@ __xfs_scrub_process_error(
} }
bool bool
xfs_scrub_process_error( xchk_process_error(
struct xfs_scrub_context *sc, struct xfs_scrub_context *sc,
xfs_agnumber_t agno, xfs_agnumber_t agno,
xfs_agblock_t bno, xfs_agblock_t bno,
int *error) int *error)
{ {
return __xfs_scrub_process_error(sc, agno, bno, error, return __xchk_process_error(sc, agno, bno, error,
XFS_SCRUB_OFLAG_CORRUPT, __return_address); XFS_SCRUB_OFLAG_CORRUPT, __return_address);
} }
bool bool
xfs_scrub_xref_process_error( xchk_xref_process_error(
struct xfs_scrub_context *sc, struct xfs_scrub_context *sc,
xfs_agnumber_t agno, xfs_agnumber_t agno,
xfs_agblock_t bno, xfs_agblock_t bno,
int *error) int *error)
{ {
return __xfs_scrub_process_error(sc, agno, bno, error, return __xchk_process_error(sc, agno, bno, error,
XFS_SCRUB_OFLAG_XFAIL, __return_address); XFS_SCRUB_OFLAG_XFAIL, __return_address);
} }
/* Check for operational errors for a file offset. */ /* Check for operational errors for a file offset. */
static bool static bool
__xfs_scrub_fblock_process_error( __xchk_fblock_process_error(
struct xfs_scrub_context *sc, struct xfs_scrub_context *sc,
int whichfork, int whichfork,
xfs_fileoff_t offset, xfs_fileoff_t offset,
...@@ -134,7 +134,7 @@ __xfs_scrub_fblock_process_error( ...@@ -134,7 +134,7 @@ __xfs_scrub_fblock_process_error(
return true; return true;
case -EDEADLOCK: case -EDEADLOCK:
/* Used to restart an op with deadlock avoidance. */ /* Used to restart an op with deadlock avoidance. */
trace_xfs_scrub_deadlock_retry(sc->ip, sc->sm, *error); trace_xchk_deadlock_retry(sc->ip, sc->sm, *error);
break; break;
case -EFSBADCRC: case -EFSBADCRC:
case -EFSCORRUPTED: case -EFSCORRUPTED:
...@@ -143,7 +143,7 @@ __xfs_scrub_fblock_process_error( ...@@ -143,7 +143,7 @@ __xfs_scrub_fblock_process_error(
*error = 0; *error = 0;
/* fall through */ /* fall through */
default: default:
trace_xfs_scrub_file_op_error(sc, whichfork, offset, *error, trace_xchk_file_op_error(sc, whichfork, offset, *error,
ret_ip); ret_ip);
break; break;
} }
...@@ -151,24 +151,24 @@ __xfs_scrub_fblock_process_error( ...@@ -151,24 +151,24 @@ __xfs_scrub_fblock_process_error(
} }
bool bool
xfs_scrub_fblock_process_error( xchk_fblock_process_error(
struct xfs_scrub_context *sc, struct xfs_scrub_context *sc,
int whichfork, int whichfork,
xfs_fileoff_t offset, xfs_fileoff_t offset,
int *error) int *error)
{ {
return __xfs_scrub_fblock_process_error(sc, whichfork, offset, error, return __xchk_fblock_process_error(sc, whichfork, offset, error,
XFS_SCRUB_OFLAG_CORRUPT, __return_address); XFS_SCRUB_OFLAG_CORRUPT, __return_address);
} }
bool bool
xfs_scrub_fblock_xref_process_error( xchk_fblock_xref_process_error(
struct xfs_scrub_context *sc, struct xfs_scrub_context *sc,
int whichfork, int whichfork,
xfs_fileoff_t offset, xfs_fileoff_t offset,
int *error) int *error)
{ {
return __xfs_scrub_fblock_process_error(sc, whichfork, offset, error, return __xchk_fblock_process_error(sc, whichfork, offset, error,
XFS_SCRUB_OFLAG_XFAIL, __return_address); XFS_SCRUB_OFLAG_XFAIL, __return_address);
} }
...@@ -186,12 +186,12 @@ xfs_scrub_fblock_xref_process_error( ...@@ -186,12 +186,12 @@ xfs_scrub_fblock_xref_process_error(
/* Record a block which could be optimized. */ /* Record a block which could be optimized. */
void void
xfs_scrub_block_set_preen( xchk_block_set_preen(
struct xfs_scrub_context *sc, struct xfs_scrub_context *sc,
struct xfs_buf *bp) struct xfs_buf *bp)
{ {
sc->sm->sm_flags |= XFS_SCRUB_OFLAG_PREEN; sc->sm->sm_flags |= XFS_SCRUB_OFLAG_PREEN;
trace_xfs_scrub_block_preen(sc, bp->b_bn, __return_address); trace_xchk_block_preen(sc, bp->b_bn, __return_address);
} }
/* /*
...@@ -200,32 +200,32 @@ xfs_scrub_block_set_preen( ...@@ -200,32 +200,32 @@ xfs_scrub_block_set_preen(
* the block location of the inode record itself. * the block location of the inode record itself.
*/ */
void void
xfs_scrub_ino_set_preen( xchk_ino_set_preen(
struct xfs_scrub_context *sc, struct xfs_scrub_context *sc,
xfs_ino_t ino) xfs_ino_t ino)
{ {
sc->sm->sm_flags |= XFS_SCRUB_OFLAG_PREEN; sc->sm->sm_flags |= XFS_SCRUB_OFLAG_PREEN;
trace_xfs_scrub_ino_preen(sc, ino, __return_address); trace_xchk_ino_preen(sc, ino, __return_address);
} }
/* Record a corrupt block. */ /* Record a corrupt block. */
void void
xfs_scrub_block_set_corrupt( xchk_block_set_corrupt(
struct xfs_scrub_context *sc, struct xfs_scrub_context *sc,
struct xfs_buf *bp) struct xfs_buf *bp)
{ {
sc->sm->sm_flags |= XFS_SCRUB_OFLAG_CORRUPT; sc->sm->sm_flags |= XFS_SCRUB_OFLAG_CORRUPT;
trace_xfs_scrub_block_error(sc, bp->b_bn, __return_address); trace_xchk_block_error(sc, bp->b_bn, __return_address);
} }
/* Record a corruption while cross-referencing. */ /* Record a corruption while cross-referencing. */
void void
xfs_scrub_block_xref_set_corrupt( xchk_block_xref_set_corrupt(
struct xfs_scrub_context *sc, struct xfs_scrub_context *sc,
struct xfs_buf *bp) struct xfs_buf *bp)
{ {
sc->sm->sm_flags |= XFS_SCRUB_OFLAG_XCORRUPT; sc->sm->sm_flags |= XFS_SCRUB_OFLAG_XCORRUPT;
trace_xfs_scrub_block_error(sc, bp->b_bn, __return_address); trace_xchk_block_error(sc, bp->b_bn, __return_address);
} }
/* /*
...@@ -234,44 +234,44 @@ xfs_scrub_block_xref_set_corrupt( ...@@ -234,44 +234,44 @@ xfs_scrub_block_xref_set_corrupt(
* inode record itself. * inode record itself.
*/ */
void void
xfs_scrub_ino_set_corrupt( xchk_ino_set_corrupt(
struct xfs_scrub_context *sc, struct xfs_scrub_context *sc,
xfs_ino_t ino) xfs_ino_t ino)
{ {
sc->sm->sm_flags |= XFS_SCRUB_OFLAG_CORRUPT; sc->sm->sm_flags |= XFS_SCRUB_OFLAG_CORRUPT;
trace_xfs_scrub_ino_error(sc, ino, __return_address); trace_xchk_ino_error(sc, ino, __return_address);
} }
/* Record a corruption while cross-referencing with an inode. */ /* Record a corruption while cross-referencing with an inode. */
void void
xfs_scrub_ino_xref_set_corrupt( xchk_ino_xref_set_corrupt(
struct xfs_scrub_context *sc, struct xfs_scrub_context *sc,
xfs_ino_t ino) xfs_ino_t ino)
{ {
sc->sm->sm_flags |= XFS_SCRUB_OFLAG_XCORRUPT; sc->sm->sm_flags |= XFS_SCRUB_OFLAG_XCORRUPT;
trace_xfs_scrub_ino_error(sc, ino, __return_address); trace_xchk_ino_error(sc, ino, __return_address);
} }
/* Record corruption in a block indexed by a file fork. */ /* Record corruption in a block indexed by a file fork. */
void void
xfs_scrub_fblock_set_corrupt( xchk_fblock_set_corrupt(
struct xfs_scrub_context *sc, struct xfs_scrub_context *sc,
int whichfork, int whichfork,
xfs_fileoff_t offset) xfs_fileoff_t offset)
{ {
sc->sm->sm_flags |= XFS_SCRUB_OFLAG_CORRUPT; sc->sm->sm_flags |= XFS_SCRUB_OFLAG_CORRUPT;
trace_xfs_scrub_fblock_error(sc, whichfork, offset, __return_address); trace_xchk_fblock_error(sc, whichfork, offset, __return_address);
} }
/* Record a corruption while cross-referencing a fork block. */ /* Record a corruption while cross-referencing a fork block. */
void void
xfs_scrub_fblock_xref_set_corrupt( xchk_fblock_xref_set_corrupt(
struct xfs_scrub_context *sc, struct xfs_scrub_context *sc,
int whichfork, int whichfork,
xfs_fileoff_t offset) xfs_fileoff_t offset)
{ {
sc->sm->sm_flags |= XFS_SCRUB_OFLAG_XCORRUPT; sc->sm->sm_flags |= XFS_SCRUB_OFLAG_XCORRUPT;
trace_xfs_scrub_fblock_error(sc, whichfork, offset, __return_address); trace_xchk_fblock_error(sc, whichfork, offset, __return_address);
} }
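All of the setters above share one shape, differing only in the sm_flags bit raised and the tracepoint fired: primary checks record XFS_SCRUB_OFLAG_CORRUPT, cross-reference checks record XFS_SCRUB_OFLAG_XCORRUPT against the same trace event. A hypothetical call site (agf here is assumed to point at the AGF header carried by sc->sa.agf_bp):

/* Illustrative only: flag a bad AGF magic during a primary check. */
if (be32_to_cpu(agf->agf_magicnum) != XFS_AGF_MAGIC)
	xchk_block_set_corrupt(sc, sc->sa.agf_bp);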
/* /*
...@@ -279,32 +279,32 @@ xfs_scrub_fblock_xref_set_corrupt( ...@@ -279,32 +279,32 @@ xfs_scrub_fblock_xref_set_corrupt(
* incorrect. * incorrect.
*/ */
void void
xfs_scrub_ino_set_warning( xchk_ino_set_warning(
struct xfs_scrub_context *sc, struct xfs_scrub_context *sc,
xfs_ino_t ino) xfs_ino_t ino)
{ {
sc->sm->sm_flags |= XFS_SCRUB_OFLAG_WARNING; sc->sm->sm_flags |= XFS_SCRUB_OFLAG_WARNING;
trace_xfs_scrub_ino_warning(sc, ino, __return_address); trace_xchk_ino_warning(sc, ino, __return_address);
} }
/* Warn about a block indexed by a file fork that needs review. */ /* Warn about a block indexed by a file fork that needs review. */
void void
xfs_scrub_fblock_set_warning( xchk_fblock_set_warning(
struct xfs_scrub_context *sc, struct xfs_scrub_context *sc,
int whichfork, int whichfork,
xfs_fileoff_t offset) xfs_fileoff_t offset)
{ {
sc->sm->sm_flags |= XFS_SCRUB_OFLAG_WARNING; sc->sm->sm_flags |= XFS_SCRUB_OFLAG_WARNING;
trace_xfs_scrub_fblock_warning(sc, whichfork, offset, __return_address); trace_xchk_fblock_warning(sc, whichfork, offset, __return_address);
} }
/* Signal an incomplete scrub. */ /* Signal an incomplete scrub. */
void void
xfs_scrub_set_incomplete( xchk_set_incomplete(
struct xfs_scrub_context *sc) struct xfs_scrub_context *sc)
{ {
sc->sm->sm_flags |= XFS_SCRUB_OFLAG_INCOMPLETE; sc->sm->sm_flags |= XFS_SCRUB_OFLAG_INCOMPLETE;
trace_xfs_scrub_incomplete(sc, __return_address); trace_xchk_incomplete(sc, __return_address);
} }
/* /*
...@@ -312,18 +312,18 @@ xfs_scrub_set_incomplete( ...@@ -312,18 +312,18 @@ xfs_scrub_set_incomplete(
* at least according to the reverse mapping data. * at least according to the reverse mapping data.
*/ */
struct xfs_scrub_rmap_ownedby_info { struct xchk_rmap_ownedby_info {
struct xfs_owner_info *oinfo; struct xfs_owner_info *oinfo;
xfs_filblks_t *blocks; xfs_filblks_t *blocks;
}; };
STATIC int STATIC int
xfs_scrub_count_rmap_ownedby_irec( xchk_count_rmap_ownedby_irec(
struct xfs_btree_cur *cur, struct xfs_btree_cur *cur,
struct xfs_rmap_irec *rec, struct xfs_rmap_irec *rec,
void *priv) void *priv)
{ {
struct xfs_scrub_rmap_ownedby_info *sroi = priv; struct xchk_rmap_ownedby_info *sroi = priv;
bool irec_attr; bool irec_attr;
bool oinfo_attr; bool oinfo_attr;
...@@ -344,19 +344,19 @@ xfs_scrub_count_rmap_ownedby_irec( ...@@ -344,19 +344,19 @@ xfs_scrub_count_rmap_ownedby_irec(
* The caller should pass us an rmapbt cursor. * The caller should pass us an rmapbt cursor.
*/ */
int int
xfs_scrub_count_rmap_ownedby_ag( xchk_count_rmap_ownedby_ag(
struct xfs_scrub_context *sc, struct xfs_scrub_context *sc,
struct xfs_btree_cur *cur, struct xfs_btree_cur *cur,
struct xfs_owner_info *oinfo, struct xfs_owner_info *oinfo,
xfs_filblks_t *blocks) xfs_filblks_t *blocks)
{ {
struct xfs_scrub_rmap_ownedby_info sroi; struct xchk_rmap_ownedby_info sroi;
sroi.oinfo = oinfo; sroi.oinfo = oinfo;
*blocks = 0; *blocks = 0;
sroi.blocks = blocks; sroi.blocks = blocks;
return xfs_rmap_query_all(cur, xfs_scrub_count_rmap_ownedby_irec, return xfs_rmap_query_all(cur, xchk_count_rmap_ownedby_irec,
&sroi); &sroi);
} }
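xchk_count_rmap_ownedby_ag() is the query half of an owner-accounting check: it sums the blocks the rmapbt attributes to one owner so the caller can compare that total against its own bookkeeping. A usage sketch (variable names illustrative):

struct xfs_owner_info	oinfo;
xfs_filblks_t		blocks;
int			error;

/* How many blocks does the rmapbt say the AG btrees own? */
xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_AG);
error = xchk_count_rmap_ownedby_ag(sc, sc->sa.rmap_cur, &oinfo, &blocks);
/* ... the caller then checks 'blocks' against its own count ... */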
...@@ -392,12 +392,12 @@ want_ag_read_header_failure( ...@@ -392,12 +392,12 @@ want_ag_read_header_failure(
/* /*
* Grab all the headers for an AG. * Grab all the headers for an AG.
* *
* The headers should be released by xfs_scrub_ag_free, but as a fail * The headers should be released by xchk_ag_free, but as a fail
* safe we attach all the buffers we grab to the scrub transaction so * safe we attach all the buffers we grab to the scrub transaction so
* they'll all be freed when we cancel it. * they'll all be freed when we cancel it.
*/ */
int int
xfs_scrub_ag_read_headers( xchk_ag_read_headers(
struct xfs_scrub_context *sc, struct xfs_scrub_context *sc,
xfs_agnumber_t agno, xfs_agnumber_t agno,
struct xfs_buf **agi, struct xfs_buf **agi,
...@@ -425,8 +425,8 @@ xfs_scrub_ag_read_headers( ...@@ -425,8 +425,8 @@ xfs_scrub_ag_read_headers(
/* Release all the AG btree cursors. */ /* Release all the AG btree cursors. */
void void
xfs_scrub_ag_btcur_free( xchk_ag_btcur_free(
struct xfs_scrub_ag *sa) struct xchk_ag *sa)
{ {
if (sa->refc_cur) if (sa->refc_cur)
xfs_btree_del_cursor(sa->refc_cur, XFS_BTREE_ERROR); xfs_btree_del_cursor(sa->refc_cur, XFS_BTREE_ERROR);
...@@ -451,9 +451,9 @@ xfs_scrub_ag_btcur_free( ...@@ -451,9 +451,9 @@ xfs_scrub_ag_btcur_free(
/* Initialize all the btree cursors for an AG. */ /* Initialize all the btree cursors for an AG. */
int int
xfs_scrub_ag_btcur_init( xchk_ag_btcur_init(
struct xfs_scrub_context *sc, struct xfs_scrub_context *sc,
struct xfs_scrub_ag *sa) struct xchk_ag *sa)
{ {
struct xfs_mount *mp = sc->mp; struct xfs_mount *mp = sc->mp;
xfs_agnumber_t agno = sa->agno; xfs_agnumber_t agno = sa->agno;
...@@ -511,11 +511,11 @@ xfs_scrub_ag_btcur_init( ...@@ -511,11 +511,11 @@ xfs_scrub_ag_btcur_init(
/* Release the AG header context and btree cursors. */ /* Release the AG header context and btree cursors. */
void void
xfs_scrub_ag_free( xchk_ag_free(
struct xfs_scrub_context *sc, struct xfs_scrub_context *sc,
struct xfs_scrub_ag *sa) struct xchk_ag *sa)
{ {
xfs_scrub_ag_btcur_free(sa); xchk_ag_btcur_free(sa);
if (sa->agfl_bp) { if (sa->agfl_bp) {
xfs_trans_brelse(sc->tp, sa->agfl_bp); xfs_trans_brelse(sc->tp, sa->agfl_bp);
sa->agfl_bp = NULL; sa->agfl_bp = NULL;
...@@ -543,30 +543,30 @@ xfs_scrub_ag_free( ...@@ -543,30 +543,30 @@ xfs_scrub_ag_free(
* transaction ourselves. * transaction ourselves.
*/ */
int int
xfs_scrub_ag_init( xchk_ag_init(
struct xfs_scrub_context *sc, struct xfs_scrub_context *sc,
xfs_agnumber_t agno, xfs_agnumber_t agno,
struct xfs_scrub_ag *sa) struct xchk_ag *sa)
{ {
int error; int error;
sa->agno = agno; sa->agno = agno;
error = xfs_scrub_ag_read_headers(sc, agno, &sa->agi_bp, error = xchk_ag_read_headers(sc, agno, &sa->agi_bp,
&sa->agf_bp, &sa->agfl_bp); &sa->agf_bp, &sa->agfl_bp);
if (error) if (error)
return error; return error;
return xfs_scrub_ag_btcur_init(sc, sa); return xchk_ag_btcur_init(sc, sa);
} }
/* /*
* Grab the per-ag structure if we haven't already gotten it. Teardown of the * Grab the per-ag structure if we haven't already gotten it. Teardown of the
* xfs_scrub_ag will release it for us. * xchk_ag will release it for us.
*/ */
void void
xfs_scrub_perag_get( xchk_perag_get(
struct xfs_mount *mp, struct xfs_mount *mp,
struct xfs_scrub_ag *sa) struct xchk_ag *sa)
{ {
if (!sa->pag) if (!sa->pag)
sa->pag = xfs_perag_get(mp, sa->agno); sa->pag = xfs_perag_get(mp, sa->agno);
...@@ -585,7 +585,7 @@ xfs_scrub_perag_get( ...@@ -585,7 +585,7 @@ xfs_scrub_perag_get(
* the metadata object. * the metadata object.
*/ */
int int
xfs_scrub_trans_alloc( xchk_trans_alloc(
struct xfs_scrub_context *sc, struct xfs_scrub_context *sc,
uint resblks) uint resblks)
{ {
...@@ -598,19 +598,19 @@ xfs_scrub_trans_alloc( ...@@ -598,19 +598,19 @@ xfs_scrub_trans_alloc(
/* Set us up with a transaction and an empty context. */ /* Set us up with a transaction and an empty context. */
int int
xfs_scrub_setup_fs( xchk_setup_fs(
struct xfs_scrub_context *sc, struct xfs_scrub_context *sc,
struct xfs_inode *ip) struct xfs_inode *ip)
{ {
uint resblks; uint resblks;
resblks = xfs_repair_calc_ag_resblks(sc); resblks = xfs_repair_calc_ag_resblks(sc);
return xfs_scrub_trans_alloc(sc, resblks); return xchk_trans_alloc(sc, resblks);
} }
/* Set us up with AG headers and btree cursors. */ /* Set us up with AG headers and btree cursors. */
int int
xfs_scrub_setup_ag_btree( xchk_setup_ag_btree(
struct xfs_scrub_context *sc, struct xfs_scrub_context *sc,
struct xfs_inode *ip, struct xfs_inode *ip,
bool force_log) bool force_log)
...@@ -625,21 +625,21 @@ xfs_scrub_setup_ag_btree( ...@@ -625,21 +625,21 @@ xfs_scrub_setup_ag_btree(
* document why they need to do so. * document why they need to do so.
*/ */
if (force_log) { if (force_log) {
error = xfs_scrub_checkpoint_log(mp); error = xchk_checkpoint_log(mp);
if (error) if (error)
return error; return error;
} }
error = xfs_scrub_setup_fs(sc, ip); error = xchk_setup_fs(sc, ip);
if (error) if (error)
return error; return error;
return xfs_scrub_ag_init(sc, sc->sm->sm_agno, &sc->sa); return xchk_ag_init(sc, sc->sm->sm_agno, &sc->sa);
} }
/* Push everything out of the log onto disk. */ /* Push everything out of the log onto disk. */
int int
xfs_scrub_checkpoint_log( xchk_checkpoint_log(
struct xfs_mount *mp) struct xfs_mount *mp)
{ {
int error; int error;
...@@ -657,7 +657,7 @@ xfs_scrub_checkpoint_log( ...@@ -657,7 +657,7 @@ xfs_scrub_checkpoint_log(
* The inode is not locked. * The inode is not locked.
*/ */
int int
xfs_scrub_get_inode( xchk_get_inode(
struct xfs_scrub_context *sc, struct xfs_scrub_context *sc,
struct xfs_inode *ip_in) struct xfs_inode *ip_in)
{ {
...@@ -704,7 +704,7 @@ xfs_scrub_get_inode( ...@@ -704,7 +704,7 @@ xfs_scrub_get_inode(
error = -EFSCORRUPTED; error = -EFSCORRUPTED;
/* fall through */ /* fall through */
default: default:
trace_xfs_scrub_op_error(sc, trace_xchk_op_error(sc,
XFS_INO_TO_AGNO(mp, sc->sm->sm_ino), XFS_INO_TO_AGNO(mp, sc->sm->sm_ino),
XFS_INO_TO_AGBNO(mp, sc->sm->sm_ino), XFS_INO_TO_AGBNO(mp, sc->sm->sm_ino),
error, __return_address); error, __return_address);
...@@ -721,21 +721,21 @@ xfs_scrub_get_inode( ...@@ -721,21 +721,21 @@ xfs_scrub_get_inode(
/* Set us up to scrub a file's contents. */ /* Set us up to scrub a file's contents. */
int int
xfs_scrub_setup_inode_contents( xchk_setup_inode_contents(
struct xfs_scrub_context *sc, struct xfs_scrub_context *sc,
struct xfs_inode *ip, struct xfs_inode *ip,
unsigned int resblks) unsigned int resblks)
{ {
int error; int error;
error = xfs_scrub_get_inode(sc, ip); error = xchk_get_inode(sc, ip);
if (error) if (error)
return error; return error;
/* Got the inode, lock it and we're ready to go. */ /* Got the inode, lock it and we're ready to go. */
sc->ilock_flags = XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL; sc->ilock_flags = XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL;
xfs_ilock(sc->ip, sc->ilock_flags); xfs_ilock(sc->ip, sc->ilock_flags);
error = xfs_scrub_trans_alloc(sc, resblks); error = xchk_trans_alloc(sc, resblks);
if (error) if (error)
goto out; goto out;
sc->ilock_flags |= XFS_ILOCK_EXCL; sc->ilock_flags |= XFS_ILOCK_EXCL;
...@@ -752,13 +752,13 @@ xfs_scrub_setup_inode_contents( ...@@ -752,13 +752,13 @@ xfs_scrub_setup_inode_contents(
* the cursor and skip the check. * the cursor and skip the check.
*/ */
bool bool
xfs_scrub_should_check_xref( xchk_should_check_xref(
struct xfs_scrub_context *sc, struct xfs_scrub_context *sc,
int *error, int *error,
struct xfs_btree_cur **curpp) struct xfs_btree_cur **curpp)
{ {
/* No point in xref if we already know we're corrupt. */ /* No point in xref if we already know we're corrupt. */
if (xfs_scrub_skip_xref(sc->sm)) if (xchk_skip_xref(sc->sm))
return false; return false;
if (*error == 0) if (*error == 0)
...@@ -775,7 +775,7 @@ xfs_scrub_should_check_xref( ...@@ -775,7 +775,7 @@ xfs_scrub_should_check_xref(
} }
sc->sm->sm_flags |= XFS_SCRUB_OFLAG_XFAIL; sc->sm->sm_flags |= XFS_SCRUB_OFLAG_XFAIL;
trace_xfs_scrub_xref_error(sc, *error, __return_address); trace_xchk_xref_error(sc, *error, __return_address);
/* /*
* Errors encountered during cross-referencing with another * Errors encountered during cross-referencing with another
...@@ -787,25 +787,25 @@ xfs_scrub_should_check_xref( ...@@ -787,25 +787,25 @@ xfs_scrub_should_check_xref(
/* Run the structure verifiers on in-memory buffers to detect bad memory. */ /* Run the structure verifiers on in-memory buffers to detect bad memory. */
void void
xfs_scrub_buffer_recheck( xchk_buffer_recheck(
struct xfs_scrub_context *sc, struct xfs_scrub_context *sc,
struct xfs_buf *bp) struct xfs_buf *bp)
{ {
xfs_failaddr_t fa; xfs_failaddr_t fa;
if (bp->b_ops == NULL) { if (bp->b_ops == NULL) {
xfs_scrub_block_set_corrupt(sc, bp); xchk_block_set_corrupt(sc, bp);
return; return;
} }
if (bp->b_ops->verify_struct == NULL) { if (bp->b_ops->verify_struct == NULL) {
xfs_scrub_set_incomplete(sc); xchk_set_incomplete(sc);
return; return;
} }
fa = bp->b_ops->verify_struct(bp); fa = bp->b_ops->verify_struct(bp);
if (!fa) if (!fa)
return; return;
sc->sm->sm_flags |= XFS_SCRUB_OFLAG_CORRUPT; sc->sm->sm_flags |= XFS_SCRUB_OFLAG_CORRUPT;
trace_xfs_scrub_block_error(sc, bp->b_bn, fa); trace_xchk_block_error(sc, bp->b_bn, fa);
} }
/* /*
...@@ -813,7 +813,7 @@ xfs_scrub_buffer_recheck( ...@@ -813,7 +813,7 @@ xfs_scrub_buffer_recheck(
* pointed to by sc->ip and the ILOCK must be held. * pointed to by sc->ip and the ILOCK must be held.
*/ */
int int
xfs_scrub_metadata_inode_forks( xchk_metadata_inode_forks(
struct xfs_scrub_context *sc) struct xfs_scrub_context *sc)
{ {
__u32 smtype; __u32 smtype;
...@@ -825,26 +825,26 @@ xfs_scrub_metadata_inode_forks( ...@@ -825,26 +825,26 @@ xfs_scrub_metadata_inode_forks(
/* Metadata inodes don't live on the rt device. */ /* Metadata inodes don't live on the rt device. */
if (sc->ip->i_d.di_flags & XFS_DIFLAG_REALTIME) { if (sc->ip->i_d.di_flags & XFS_DIFLAG_REALTIME) {
xfs_scrub_ino_set_corrupt(sc, sc->ip->i_ino); xchk_ino_set_corrupt(sc, sc->ip->i_ino);
return 0; return 0;
} }
/* They should never participate in reflink. */ /* They should never participate in reflink. */
if (xfs_is_reflink_inode(sc->ip)) { if (xfs_is_reflink_inode(sc->ip)) {
xfs_scrub_ino_set_corrupt(sc, sc->ip->i_ino); xchk_ino_set_corrupt(sc, sc->ip->i_ino);
return 0; return 0;
} }
/* They also should never have extended attributes. */ /* They also should never have extended attributes. */
if (xfs_inode_hasattr(sc->ip)) { if (xfs_inode_hasattr(sc->ip)) {
xfs_scrub_ino_set_corrupt(sc, sc->ip->i_ino); xchk_ino_set_corrupt(sc, sc->ip->i_ino);
return 0; return 0;
} }
/* Invoke the data fork scrubber. */ /* Invoke the data fork scrubber. */
smtype = sc->sm->sm_type; smtype = sc->sm->sm_type;
sc->sm->sm_type = XFS_SCRUB_TYPE_BMBTD; sc->sm->sm_type = XFS_SCRUB_TYPE_BMBTD;
error = xfs_scrub_bmap_data(sc); error = xchk_bmap_data(sc);
sc->sm->sm_type = smtype; sc->sm->sm_type = smtype;
if (error || (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)) if (error || (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT))
return error; return error;
...@@ -853,11 +853,11 @@ xfs_scrub_metadata_inode_forks( ...@@ -853,11 +853,11 @@ xfs_scrub_metadata_inode_forks(
if (xfs_sb_version_hasreflink(&sc->mp->m_sb)) { if (xfs_sb_version_hasreflink(&sc->mp->m_sb)) {
error = xfs_reflink_inode_has_shared_extents(sc->tp, sc->ip, error = xfs_reflink_inode_has_shared_extents(sc->tp, sc->ip,
&shared); &shared);
if (!xfs_scrub_fblock_process_error(sc, XFS_DATA_FORK, 0, if (!xchk_fblock_process_error(sc, XFS_DATA_FORK, 0,
&error)) &error))
return error; return error;
if (shared) if (shared)
xfs_scrub_ino_set_corrupt(sc, sc->ip->i_ino); xchk_ino_set_corrupt(sc, sc->ip->i_ino);
} }
return error; return error;
...@@ -871,7 +871,7 @@ xfs_scrub_metadata_inode_forks( ...@@ -871,7 +871,7 @@ xfs_scrub_metadata_inode_forks(
* we can't. * we can't.
*/ */
int int
xfs_scrub_ilock_inverted( xchk_ilock_inverted(
struct xfs_inode *ip, struct xfs_inode *ip,
uint lock_mode) uint lock_mode)
{ {
......
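All the process_error variants in this file follow one calling convention: the caller passes a pointer to its error code, the helper absorbs -EFSBADCRC/-EFSCORRUPTED into sm_flags (CORRUPT for primary checks, XFAIL for cross-references), and a false return tells the caller to stop. A hypothetical call site under the new names (the AGF read is just a convenient example):

error = xfs_alloc_read_agf(mp, sc->tp, agno, 0, &agf_bp);
if (!xchk_process_error(sc, agno, XFS_AGF_BLOCK(mp), &error))
	return error;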
...@@ -12,7 +12,7 @@ ...@@ -12,7 +12,7 @@
* Note that we're careful not to make any judgements about *error. * Note that we're careful not to make any judgements about *error.
*/ */
static inline bool static inline bool
xfs_scrub_should_terminate( xchk_should_terminate(
struct xfs_scrub_context *sc, struct xfs_scrub_context *sc,
int *error) int *error)
{ {
...@@ -24,121 +24,121 @@ xfs_scrub_should_terminate( ...@@ -24,121 +24,121 @@ xfs_scrub_should_terminate(
return false; return false;
} }
int xfs_scrub_trans_alloc(struct xfs_scrub_context *sc, uint resblks); int xchk_trans_alloc(struct xfs_scrub_context *sc, uint resblks);
bool xfs_scrub_process_error(struct xfs_scrub_context *sc, xfs_agnumber_t agno, bool xchk_process_error(struct xfs_scrub_context *sc, xfs_agnumber_t agno,
xfs_agblock_t bno, int *error); xfs_agblock_t bno, int *error);
bool xfs_scrub_fblock_process_error(struct xfs_scrub_context *sc, int whichfork, bool xchk_fblock_process_error(struct xfs_scrub_context *sc, int whichfork,
xfs_fileoff_t offset, int *error); xfs_fileoff_t offset, int *error);
bool xfs_scrub_xref_process_error(struct xfs_scrub_context *sc, bool xchk_xref_process_error(struct xfs_scrub_context *sc,
xfs_agnumber_t agno, xfs_agblock_t bno, int *error); xfs_agnumber_t agno, xfs_agblock_t bno, int *error);
bool xfs_scrub_fblock_xref_process_error(struct xfs_scrub_context *sc, bool xchk_fblock_xref_process_error(struct xfs_scrub_context *sc,
int whichfork, xfs_fileoff_t offset, int *error); int whichfork, xfs_fileoff_t offset, int *error);
void xfs_scrub_block_set_preen(struct xfs_scrub_context *sc, void xchk_block_set_preen(struct xfs_scrub_context *sc,
struct xfs_buf *bp); struct xfs_buf *bp);
void xfs_scrub_ino_set_preen(struct xfs_scrub_context *sc, xfs_ino_t ino); void xchk_ino_set_preen(struct xfs_scrub_context *sc, xfs_ino_t ino);
void xfs_scrub_block_set_corrupt(struct xfs_scrub_context *sc, void xchk_block_set_corrupt(struct xfs_scrub_context *sc,
struct xfs_buf *bp); struct xfs_buf *bp);
void xfs_scrub_ino_set_corrupt(struct xfs_scrub_context *sc, xfs_ino_t ino); void xchk_ino_set_corrupt(struct xfs_scrub_context *sc, xfs_ino_t ino);
void xfs_scrub_fblock_set_corrupt(struct xfs_scrub_context *sc, int whichfork, void xchk_fblock_set_corrupt(struct xfs_scrub_context *sc, int whichfork,
xfs_fileoff_t offset); xfs_fileoff_t offset);
void xfs_scrub_block_xref_set_corrupt(struct xfs_scrub_context *sc, void xchk_block_xref_set_corrupt(struct xfs_scrub_context *sc,
struct xfs_buf *bp); struct xfs_buf *bp);
void xfs_scrub_ino_xref_set_corrupt(struct xfs_scrub_context *sc, void xchk_ino_xref_set_corrupt(struct xfs_scrub_context *sc,
xfs_ino_t ino); xfs_ino_t ino);
void xfs_scrub_fblock_xref_set_corrupt(struct xfs_scrub_context *sc, void xchk_fblock_xref_set_corrupt(struct xfs_scrub_context *sc,
int whichfork, xfs_fileoff_t offset); int whichfork, xfs_fileoff_t offset);
void xfs_scrub_ino_set_warning(struct xfs_scrub_context *sc, xfs_ino_t ino); void xchk_ino_set_warning(struct xfs_scrub_context *sc, xfs_ino_t ino);
void xfs_scrub_fblock_set_warning(struct xfs_scrub_context *sc, int whichfork, void xchk_fblock_set_warning(struct xfs_scrub_context *sc, int whichfork,
xfs_fileoff_t offset); xfs_fileoff_t offset);
void xfs_scrub_set_incomplete(struct xfs_scrub_context *sc); void xchk_set_incomplete(struct xfs_scrub_context *sc);
int xfs_scrub_checkpoint_log(struct xfs_mount *mp); int xchk_checkpoint_log(struct xfs_mount *mp);
/* Are we set up for a cross-referencing check? */ /* Are we set up for a cross-referencing check? */
bool xfs_scrub_should_check_xref(struct xfs_scrub_context *sc, int *error, bool xchk_should_check_xref(struct xfs_scrub_context *sc, int *error,
struct xfs_btree_cur **curpp); struct xfs_btree_cur **curpp);
/* Setup functions */ /* Setup functions */
int xfs_scrub_setup_fs(struct xfs_scrub_context *sc, struct xfs_inode *ip); int xchk_setup_fs(struct xfs_scrub_context *sc, struct xfs_inode *ip);
int xfs_scrub_setup_ag_allocbt(struct xfs_scrub_context *sc, int xchk_setup_ag_allocbt(struct xfs_scrub_context *sc,
struct xfs_inode *ip); struct xfs_inode *ip);
int xfs_scrub_setup_ag_iallocbt(struct xfs_scrub_context *sc, int xchk_setup_ag_iallocbt(struct xfs_scrub_context *sc,
struct xfs_inode *ip); struct xfs_inode *ip);
int xfs_scrub_setup_ag_rmapbt(struct xfs_scrub_context *sc, int xchk_setup_ag_rmapbt(struct xfs_scrub_context *sc,
struct xfs_inode *ip); struct xfs_inode *ip);
int xfs_scrub_setup_ag_refcountbt(struct xfs_scrub_context *sc, int xchk_setup_ag_refcountbt(struct xfs_scrub_context *sc,
struct xfs_inode *ip); struct xfs_inode *ip);
int xfs_scrub_setup_inode(struct xfs_scrub_context *sc, int xchk_setup_inode(struct xfs_scrub_context *sc,
struct xfs_inode *ip); struct xfs_inode *ip);
int xfs_scrub_setup_inode_bmap(struct xfs_scrub_context *sc, int xchk_setup_inode_bmap(struct xfs_scrub_context *sc,
struct xfs_inode *ip); struct xfs_inode *ip);
int xfs_scrub_setup_inode_bmap_data(struct xfs_scrub_context *sc, int xchk_setup_inode_bmap_data(struct xfs_scrub_context *sc,
struct xfs_inode *ip); struct xfs_inode *ip);
int xfs_scrub_setup_directory(struct xfs_scrub_context *sc, int xchk_setup_directory(struct xfs_scrub_context *sc,
struct xfs_inode *ip); struct xfs_inode *ip);
int xfs_scrub_setup_xattr(struct xfs_scrub_context *sc, int xchk_setup_xattr(struct xfs_scrub_context *sc,
struct xfs_inode *ip); struct xfs_inode *ip);
int xfs_scrub_setup_symlink(struct xfs_scrub_context *sc, int xchk_setup_symlink(struct xfs_scrub_context *sc,
struct xfs_inode *ip); struct xfs_inode *ip);
int xfs_scrub_setup_parent(struct xfs_scrub_context *sc, int xchk_setup_parent(struct xfs_scrub_context *sc,
struct xfs_inode *ip); struct xfs_inode *ip);
#ifdef CONFIG_XFS_RT #ifdef CONFIG_XFS_RT
int xfs_scrub_setup_rt(struct xfs_scrub_context *sc, struct xfs_inode *ip); int xchk_setup_rt(struct xfs_scrub_context *sc, struct xfs_inode *ip);
#else #else
static inline int static inline int
xfs_scrub_setup_rt(struct xfs_scrub_context *sc, struct xfs_inode *ip) xchk_setup_rt(struct xfs_scrub_context *sc, struct xfs_inode *ip)
{ {
return -ENOENT; return -ENOENT;
} }
#endif #endif
#ifdef CONFIG_XFS_QUOTA #ifdef CONFIG_XFS_QUOTA
int xfs_scrub_setup_quota(struct xfs_scrub_context *sc, struct xfs_inode *ip); int xchk_setup_quota(struct xfs_scrub_context *sc, struct xfs_inode *ip);
#else #else
static inline int static inline int
xfs_scrub_setup_quota(struct xfs_scrub_context *sc, struct xfs_inode *ip) xchk_setup_quota(struct xfs_scrub_context *sc, struct xfs_inode *ip)
{ {
return -ENOENT; return -ENOENT;
} }
#endif #endif
void xfs_scrub_ag_free(struct xfs_scrub_context *sc, struct xfs_scrub_ag *sa); void xchk_ag_free(struct xfs_scrub_context *sc, struct xchk_ag *sa);
int xfs_scrub_ag_init(struct xfs_scrub_context *sc, xfs_agnumber_t agno, int xchk_ag_init(struct xfs_scrub_context *sc, xfs_agnumber_t agno,
struct xfs_scrub_ag *sa); struct xchk_ag *sa);
void xfs_scrub_perag_get(struct xfs_mount *mp, struct xfs_scrub_ag *sa); void xchk_perag_get(struct xfs_mount *mp, struct xchk_ag *sa);
int xfs_scrub_ag_read_headers(struct xfs_scrub_context *sc, xfs_agnumber_t agno, int xchk_ag_read_headers(struct xfs_scrub_context *sc, xfs_agnumber_t agno,
struct xfs_buf **agi, struct xfs_buf **agf, struct xfs_buf **agi, struct xfs_buf **agf,
struct xfs_buf **agfl); struct xfs_buf **agfl);
void xfs_scrub_ag_btcur_free(struct xfs_scrub_ag *sa); void xchk_ag_btcur_free(struct xchk_ag *sa);
int xfs_scrub_ag_btcur_init(struct xfs_scrub_context *sc, int xchk_ag_btcur_init(struct xfs_scrub_context *sc,
struct xfs_scrub_ag *sa); struct xchk_ag *sa);
int xfs_scrub_count_rmap_ownedby_ag(struct xfs_scrub_context *sc, int xchk_count_rmap_ownedby_ag(struct xfs_scrub_context *sc,
struct xfs_btree_cur *cur, struct xfs_btree_cur *cur,
struct xfs_owner_info *oinfo, struct xfs_owner_info *oinfo,
xfs_filblks_t *blocks); xfs_filblks_t *blocks);
int xfs_scrub_setup_ag_btree(struct xfs_scrub_context *sc, int xchk_setup_ag_btree(struct xfs_scrub_context *sc,
struct xfs_inode *ip, bool force_log); struct xfs_inode *ip, bool force_log);
int xfs_scrub_get_inode(struct xfs_scrub_context *sc, struct xfs_inode *ip_in); int xchk_get_inode(struct xfs_scrub_context *sc, struct xfs_inode *ip_in);
int xfs_scrub_setup_inode_contents(struct xfs_scrub_context *sc, int xchk_setup_inode_contents(struct xfs_scrub_context *sc,
struct xfs_inode *ip, unsigned int resblks); struct xfs_inode *ip, unsigned int resblks);
void xfs_scrub_buffer_recheck(struct xfs_scrub_context *sc, struct xfs_buf *bp); void xchk_buffer_recheck(struct xfs_scrub_context *sc, struct xfs_buf *bp);
/* /*
* Don't bother cross-referencing if we already found corruption or cross * Don't bother cross-referencing if we already found corruption or cross
* referencing discrepancies. * referencing discrepancies.
*/ */
static inline bool xfs_scrub_skip_xref(struct xfs_scrub_metadata *sm) static inline bool xchk_skip_xref(struct xfs_scrub_metadata *sm)
{ {
return sm->sm_flags & (XFS_SCRUB_OFLAG_CORRUPT | return sm->sm_flags & (XFS_SCRUB_OFLAG_CORRUPT |
XFS_SCRUB_OFLAG_XCORRUPT); XFS_SCRUB_OFLAG_XCORRUPT);
} }
int xfs_scrub_metadata_inode_forks(struct xfs_scrub_context *sc); int xchk_metadata_inode_forks(struct xfs_scrub_context *sc);
int xfs_scrub_ilock_inverted(struct xfs_inode *ip, uint lock_mode); int xchk_ilock_inverted(struct xfs_inode *ip, uint lock_mode);
#endif /* __XFS_SCRUB_COMMON_H__ */ #endif /* __XFS_SCRUB_COMMON_H__ */
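xchk_should_terminate() is the cooperative bail-out hook for long scans; its elided body returns true (and latches an error into *error) when a fatal signal is pending. A hypothetical scan loop using it:

/* Illustrative: give up an AG-by-AG scan cleanly on fatal signals. */
for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
	if (xchk_should_terminate(sc, &error))
		break;
	/* ... examine this AG's metadata ... */
}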
...@@ -35,8 +35,8 @@ ...@@ -35,8 +35,8 @@
* operational errors in common.c. * operational errors in common.c.
*/ */
bool bool
xfs_scrub_da_process_error( xchk_da_process_error(
struct xfs_scrub_da_btree *ds, struct xchk_da_btree *ds,
int level, int level,
int *error) int *error)
{ {
...@@ -48,7 +48,7 @@ xfs_scrub_da_process_error( ...@@ -48,7 +48,7 @@ xfs_scrub_da_process_error(
switch (*error) { switch (*error) {
case -EDEADLOCK: case -EDEADLOCK:
/* Used to restart an op with deadlock avoidance. */ /* Used to restart an op with deadlock avoidance. */
trace_xfs_scrub_deadlock_retry(sc->ip, sc->sm, *error); trace_xchk_deadlock_retry(sc->ip, sc->sm, *error);
break; break;
case -EFSBADCRC: case -EFSBADCRC:
case -EFSCORRUPTED: case -EFSCORRUPTED:
...@@ -57,7 +57,7 @@ xfs_scrub_da_process_error( ...@@ -57,7 +57,7 @@ xfs_scrub_da_process_error(
*error = 0; *error = 0;
/* fall through */ /* fall through */
default: default:
trace_xfs_scrub_file_op_error(sc, ds->dargs.whichfork, trace_xchk_file_op_error(sc, ds->dargs.whichfork,
xfs_dir2_da_to_db(ds->dargs.geo, xfs_dir2_da_to_db(ds->dargs.geo,
ds->state->path.blk[level].blkno), ds->state->path.blk[level].blkno),
*error, __return_address); *error, __return_address);
...@@ -71,15 +71,15 @@ xfs_scrub_da_process_error( ...@@ -71,15 +71,15 @@ xfs_scrub_da_process_error(
* operational errors in common.c. * operational errors in common.c.
*/ */
void void
xfs_scrub_da_set_corrupt( xchk_da_set_corrupt(
struct xfs_scrub_da_btree *ds, struct xchk_da_btree *ds,
int level) int level)
{ {
struct xfs_scrub_context *sc = ds->sc; struct xfs_scrub_context *sc = ds->sc;
sc->sm->sm_flags |= XFS_SCRUB_OFLAG_CORRUPT; sc->sm->sm_flags |= XFS_SCRUB_OFLAG_CORRUPT;
trace_xfs_scrub_fblock_error(sc, ds->dargs.whichfork, trace_xchk_fblock_error(sc, ds->dargs.whichfork,
xfs_dir2_da_to_db(ds->dargs.geo, xfs_dir2_da_to_db(ds->dargs.geo,
ds->state->path.blk[level].blkno), ds->state->path.blk[level].blkno),
__return_address); __return_address);
...@@ -87,8 +87,8 @@ xfs_scrub_da_set_corrupt( ...@@ -87,8 +87,8 @@ xfs_scrub_da_set_corrupt(
/* Find an entry at a certain level in a da btree. */ /* Find an entry at a certain level in a da btree. */
STATIC void * STATIC void *
xfs_scrub_da_btree_entry( xchk_da_btree_entry(
struct xfs_scrub_da_btree *ds, struct xchk_da_btree *ds,
int level, int level,
int rec) int rec)
{ {
...@@ -123,8 +123,8 @@ xfs_scrub_da_btree_entry( ...@@ -123,8 +123,8 @@ xfs_scrub_da_btree_entry(
/* Scrub a da btree hash (key). */ /* Scrub a da btree hash (key). */
int int
xfs_scrub_da_btree_hash( xchk_da_btree_hash(
struct xfs_scrub_da_btree *ds, struct xchk_da_btree *ds,
int level, int level,
__be32 *hashp) __be32 *hashp)
{ {
...@@ -136,7 +136,7 @@ xfs_scrub_da_btree_hash( ...@@ -136,7 +136,7 @@ xfs_scrub_da_btree_hash(
/* Is this hash in order? */ /* Is this hash in order? */
hash = be32_to_cpu(*hashp); hash = be32_to_cpu(*hashp);
if (hash < ds->hashes[level]) if (hash < ds->hashes[level])
xfs_scrub_da_set_corrupt(ds, level); xchk_da_set_corrupt(ds, level);
ds->hashes[level] = hash; ds->hashes[level] = hash;
if (level == 0) if (level == 0)
...@@ -144,10 +144,10 @@ xfs_scrub_da_btree_hash( ...@@ -144,10 +144,10 @@ xfs_scrub_da_btree_hash(
/* Is this hash no larger than the parent hash? */ /* Is this hash no larger than the parent hash? */
blks = ds->state->path.blk; blks = ds->state->path.blk;
entry = xfs_scrub_da_btree_entry(ds, level - 1, blks[level - 1].index); entry = xchk_da_btree_entry(ds, level - 1, blks[level - 1].index);
parent_hash = be32_to_cpu(entry->hashval); parent_hash = be32_to_cpu(entry->hashval);
if (parent_hash < hash) if (parent_hash < hash)
xfs_scrub_da_set_corrupt(ds, level); xchk_da_set_corrupt(ds, level);
return 0; return 0;
} }
...@@ -157,13 +157,13 @@ xfs_scrub_da_btree_hash( ...@@ -157,13 +157,13 @@ xfs_scrub_da_btree_hash(
* pointer. * pointer.
*/ */
STATIC bool STATIC bool
xfs_scrub_da_btree_ptr_ok( xchk_da_btree_ptr_ok(
struct xfs_scrub_da_btree *ds, struct xchk_da_btree *ds,
int level, int level,
xfs_dablk_t blkno) xfs_dablk_t blkno)
{ {
if (blkno < ds->lowest || (ds->highest != 0 && blkno >= ds->highest)) { if (blkno < ds->lowest || (ds->highest != 0 && blkno >= ds->highest)) {
xfs_scrub_da_set_corrupt(ds, level); xchk_da_set_corrupt(ds, level);
return false; return false;
} }
...@@ -176,7 +176,7 @@ xfs_scrub_da_btree_ptr_ok( ...@@ -176,7 +176,7 @@ xfs_scrub_da_btree_ptr_ok(
* leaf1, we must multiplex the verifiers. * leaf1, we must multiplex the verifiers.
*/ */
static void static void
xfs_scrub_da_btree_read_verify( xchk_da_btree_read_verify(
struct xfs_buf *bp) struct xfs_buf *bp)
{ {
struct xfs_da_blkinfo *info = bp->b_addr; struct xfs_da_blkinfo *info = bp->b_addr;
...@@ -198,7 +198,7 @@ xfs_scrub_da_btree_read_verify( ...@@ -198,7 +198,7 @@ xfs_scrub_da_btree_read_verify(
} }
} }
static void static void
xfs_scrub_da_btree_write_verify( xchk_da_btree_write_verify(
struct xfs_buf *bp) struct xfs_buf *bp)
{ {
struct xfs_da_blkinfo *info = bp->b_addr; struct xfs_da_blkinfo *info = bp->b_addr;
...@@ -220,7 +220,7 @@ xfs_scrub_da_btree_write_verify( ...@@ -220,7 +220,7 @@ xfs_scrub_da_btree_write_verify(
} }
} }
static void * static void *
xfs_scrub_da_btree_verify( xchk_da_btree_verify(
struct xfs_buf *bp) struct xfs_buf *bp)
{ {
struct xfs_da_blkinfo *info = bp->b_addr; struct xfs_da_blkinfo *info = bp->b_addr;
...@@ -236,17 +236,17 @@ xfs_scrub_da_btree_verify( ...@@ -236,17 +236,17 @@ xfs_scrub_da_btree_verify(
} }
} }
static const struct xfs_buf_ops xfs_scrub_da_btree_buf_ops = { static const struct xfs_buf_ops xchk_da_btree_buf_ops = {
.name = "xfs_scrub_da_btree", .name = "xchk_da_btree",
.verify_read = xfs_scrub_da_btree_read_verify, .verify_read = xchk_da_btree_read_verify,
.verify_write = xfs_scrub_da_btree_write_verify, .verify_write = xchk_da_btree_write_verify,
.verify_struct = xfs_scrub_da_btree_verify, .verify_struct = xchk_da_btree_verify,
}; };
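Because one xfs_buf_ops instance covers da nodes, attr leaves, and both dir leaf formats, the verify hooks above must first dispatch on the magic number in the common header before handing the buffer to the matching verifier. A hedged sketch of that dispatch shape (the magic values and verifier bodies here are placeholders, not the real xfs_da3/dir3/attr3 ops):

#include <stdint.h>
#include <stdio.h>

#define DEMO_NODE_MAGIC	0x0101
#define DEMO_LEAF_MAGIC	0x0202

static void demo_verify_node(const void *blk) { (void)blk; /* node checks */ }
static void demo_verify_leaf(const void *blk) { (void)blk; /* leaf checks */ }

/* One read verifier multiplexed across several block types. */
static void demo_read_verify(uint16_t magic, const void *blk)
{
	switch (magic) {
	case DEMO_NODE_MAGIC: demo_verify_node(blk); break;
	case DEMO_LEAF_MAGIC: demo_verify_leaf(blk); break;
	default: fprintf(stderr, "unknown magic 0x%x\n", magic);
	}
}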
/* Check a block's sibling. */ /* Check a block's sibling. */
STATIC int STATIC int
xfs_scrub_da_btree_block_check_sibling( xchk_da_btree_block_check_sibling(
struct xfs_scrub_da_btree *ds, struct xchk_da_btree *ds,
int level, int level,
int direction, int direction,
xfs_dablk_t sibling) xfs_dablk_t sibling)
...@@ -265,7 +265,7 @@ xfs_scrub_da_btree_block_check_sibling( ...@@ -265,7 +265,7 @@ xfs_scrub_da_btree_block_check_sibling(
error = xfs_da3_path_shift(ds->state, &ds->state->altpath, error = xfs_da3_path_shift(ds->state, &ds->state->altpath,
direction, false, &retval); direction, false, &retval);
if (error == 0 && retval == 0) if (error == 0 && retval == 0)
xfs_scrub_da_set_corrupt(ds, level); xchk_da_set_corrupt(ds, level);
error = 0; error = 0;
goto out; goto out;
} }
...@@ -273,19 +273,19 @@ xfs_scrub_da_btree_block_check_sibling( ...@@ -273,19 +273,19 @@ xfs_scrub_da_btree_block_check_sibling(
/* Move the alternate cursor one block in the direction given. */ /* Move the alternate cursor one block in the direction given. */
error = xfs_da3_path_shift(ds->state, &ds->state->altpath, error = xfs_da3_path_shift(ds->state, &ds->state->altpath,
direction, false, &retval); direction, false, &retval);
if (!xfs_scrub_da_process_error(ds, level, &error)) if (!xchk_da_process_error(ds, level, &error))
return error; return error;
if (retval) { if (retval) {
xfs_scrub_da_set_corrupt(ds, level); xchk_da_set_corrupt(ds, level);
return error; return error;
} }
if (ds->state->altpath.blk[level].bp) if (ds->state->altpath.blk[level].bp)
xfs_scrub_buffer_recheck(ds->sc, xchk_buffer_recheck(ds->sc,
ds->state->altpath.blk[level].bp); ds->state->altpath.blk[level].bp);
/* Compare upper level pointer to sibling pointer. */ /* Compare upper level pointer to sibling pointer. */
if (ds->state->altpath.blk[level].blkno != sibling) if (ds->state->altpath.blk[level].blkno != sibling)
xfs_scrub_da_set_corrupt(ds, level); xchk_da_set_corrupt(ds, level);
xfs_trans_brelse(ds->dargs.trans, ds->state->altpath.blk[level].bp); xfs_trans_brelse(ds->dargs.trans, ds->state->altpath.blk[level].bp);
out: out:
return error; return error;
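The check above verifies a sibling pointer from two independent directions: it clones the cursor into altpath, shifts the clone one block sideways, and requires that where the shift lands agrees with the forw/back pointer stored in the block, treating "no sibling" and "shift ran off the tree" as the only consistent pair. The final comparison in isolation (purely illustrative types):

#include <stdbool.h>
#include <stddef.h>

struct demo_blk {
	long blkno;
};

/* 'claimed' is the sibling pointer stored in the block under scrub;
 * 'stepped' is where an independent path shift actually landed
 * (NULL when the shift fell off the end of the tree). */
static bool demo_sibling_ok(const struct demo_blk *claimed,
			    const struct demo_blk *stepped)
{
	if (claimed == NULL || stepped == NULL)
		return claimed == stepped;	/* both absent, or corrupt */
	return claimed->blkno == stepped->blkno;
}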
...@@ -293,8 +293,8 @@ xfs_scrub_da_btree_block_check_sibling( ...@@ -293,8 +293,8 @@ xfs_scrub_da_btree_block_check_sibling(
/* Check a block's sibling pointers. */ /* Check a block's sibling pointers. */
STATIC int STATIC int
xfs_scrub_da_btree_block_check_siblings( xchk_da_btree_block_check_siblings(
struct xfs_scrub_da_btree *ds, struct xchk_da_btree *ds,
int level, int level,
struct xfs_da_blkinfo *hdr) struct xfs_da_blkinfo *hdr)
{ {
...@@ -308,7 +308,7 @@ xfs_scrub_da_btree_block_check_siblings( ...@@ -308,7 +308,7 @@ xfs_scrub_da_btree_block_check_siblings(
/* Top level blocks should not have sibling pointers. */ /* Top level blocks should not have sibling pointers. */
if (level == 0) { if (level == 0) {
if (forw != 0 || back != 0) if (forw != 0 || back != 0)
xfs_scrub_da_set_corrupt(ds, level); xchk_da_set_corrupt(ds, level);
return 0; return 0;
} }
...@@ -316,10 +316,10 @@ xfs_scrub_da_btree_block_check_siblings( ...@@ -316,10 +316,10 @@ xfs_scrub_da_btree_block_check_siblings(
* Check back (left) and forw (right) pointers. These functions * Check back (left) and forw (right) pointers. These functions
* absorb error codes for us. * absorb error codes for us.
*/ */
error = xfs_scrub_da_btree_block_check_sibling(ds, level, 0, back); error = xchk_da_btree_block_check_sibling(ds, level, 0, back);
if (error) if (error)
goto out; goto out;
error = xfs_scrub_da_btree_block_check_sibling(ds, level, 1, forw); error = xchk_da_btree_block_check_sibling(ds, level, 1, forw);
out: out:
memset(&ds->state->altpath, 0, sizeof(ds->state->altpath)); memset(&ds->state->altpath, 0, sizeof(ds->state->altpath));
...@@ -328,8 +328,8 @@ xfs_scrub_da_btree_block_check_siblings( ...@@ -328,8 +328,8 @@ xfs_scrub_da_btree_block_check_siblings(
/* Load a dir/attribute block from a btree. */ /* Load a dir/attribute block from a btree. */
STATIC int STATIC int
xfs_scrub_da_btree_block( xchk_da_btree_block(
struct xfs_scrub_da_btree *ds, struct xchk_da_btree *ds,
int level, int level,
xfs_dablk_t blkno) xfs_dablk_t blkno)
{ {
...@@ -355,17 +355,17 @@ xfs_scrub_da_btree_block( ...@@ -355,17 +355,17 @@ xfs_scrub_da_btree_block(
/* Check the pointer. */ /* Check the pointer. */
blk->blkno = blkno; blk->blkno = blkno;
if (!xfs_scrub_da_btree_ptr_ok(ds, level, blkno)) if (!xchk_da_btree_ptr_ok(ds, level, blkno))
goto out_nobuf; goto out_nobuf;
/* Read the buffer. */ /* Read the buffer. */
error = xfs_da_read_buf(dargs->trans, dargs->dp, blk->blkno, -2, error = xfs_da_read_buf(dargs->trans, dargs->dp, blk->blkno, -2,
&blk->bp, dargs->whichfork, &blk->bp, dargs->whichfork,
&xfs_scrub_da_btree_buf_ops); &xchk_da_btree_buf_ops);
if (!xfs_scrub_da_process_error(ds, level, &error)) if (!xchk_da_process_error(ds, level, &error))
goto out_nobuf; goto out_nobuf;
if (blk->bp) if (blk->bp)
xfs_scrub_buffer_recheck(ds->sc, blk->bp); xchk_buffer_recheck(ds->sc, blk->bp);
/* /*
* We didn't find a dir btree root block, which means that * We didn't find a dir btree root block, which means that
...@@ -378,7 +378,7 @@ xfs_scrub_da_btree_block( ...@@ -378,7 +378,7 @@ xfs_scrub_da_btree_block(
/* It's /not/ ok for attr trees not to have a da btree. */ /* It's /not/ ok for attr trees not to have a da btree. */
if (blk->bp == NULL) { if (blk->bp == NULL) {
xfs_scrub_da_set_corrupt(ds, level); xchk_da_set_corrupt(ds, level);
goto out_nobuf; goto out_nobuf;
} }
...@@ -388,17 +388,17 @@ xfs_scrub_da_btree_block( ...@@ -388,17 +388,17 @@ xfs_scrub_da_btree_block(
/* We only started zeroing the header on v5 filesystems. */ /* We only started zeroing the header on v5 filesystems. */
if (xfs_sb_version_hascrc(&ds->sc->mp->m_sb) && hdr3->hdr.pad) if (xfs_sb_version_hascrc(&ds->sc->mp->m_sb) && hdr3->hdr.pad)
xfs_scrub_da_set_corrupt(ds, level); xchk_da_set_corrupt(ds, level);
/* Check the owner. */ /* Check the owner. */
if (xfs_sb_version_hascrc(&ip->i_mount->m_sb)) { if (xfs_sb_version_hascrc(&ip->i_mount->m_sb)) {
owner = be64_to_cpu(hdr3->owner); owner = be64_to_cpu(hdr3->owner);
if (owner != ip->i_ino) if (owner != ip->i_ino)
xfs_scrub_da_set_corrupt(ds, level); xchk_da_set_corrupt(ds, level);
} }
/* Check the siblings. */ /* Check the siblings. */
error = xfs_scrub_da_btree_block_check_siblings(ds, level, &hdr3->hdr); error = xchk_da_btree_block_check_siblings(ds, level, &hdr3->hdr);
if (error) if (error)
goto out; goto out;
...@@ -411,7 +411,7 @@ xfs_scrub_da_btree_block( ...@@ -411,7 +411,7 @@ xfs_scrub_da_btree_block(
blk->magic = XFS_ATTR_LEAF_MAGIC; blk->magic = XFS_ATTR_LEAF_MAGIC;
blk->hashval = xfs_attr_leaf_lasthash(blk->bp, pmaxrecs); blk->hashval = xfs_attr_leaf_lasthash(blk->bp, pmaxrecs);
if (ds->tree_level != 0) if (ds->tree_level != 0)
xfs_scrub_da_set_corrupt(ds, level); xchk_da_set_corrupt(ds, level);
break; break;
case XFS_DIR2_LEAFN_MAGIC: case XFS_DIR2_LEAFN_MAGIC:
case XFS_DIR3_LEAFN_MAGIC: case XFS_DIR3_LEAFN_MAGIC:
...@@ -420,7 +420,7 @@ xfs_scrub_da_btree_block( ...@@ -420,7 +420,7 @@ xfs_scrub_da_btree_block(
blk->magic = XFS_DIR2_LEAFN_MAGIC; blk->magic = XFS_DIR2_LEAFN_MAGIC;
blk->hashval = xfs_dir2_leaf_lasthash(ip, blk->bp, pmaxrecs); blk->hashval = xfs_dir2_leaf_lasthash(ip, blk->bp, pmaxrecs);
if (ds->tree_level != 0) if (ds->tree_level != 0)
xfs_scrub_da_set_corrupt(ds, level); xchk_da_set_corrupt(ds, level);
break; break;
case XFS_DIR2_LEAF1_MAGIC: case XFS_DIR2_LEAF1_MAGIC:
case XFS_DIR3_LEAF1_MAGIC: case XFS_DIR3_LEAF1_MAGIC:
...@@ -429,7 +429,7 @@ xfs_scrub_da_btree_block( ...@@ -429,7 +429,7 @@ xfs_scrub_da_btree_block(
blk->magic = XFS_DIR2_LEAF1_MAGIC; blk->magic = XFS_DIR2_LEAF1_MAGIC;
blk->hashval = xfs_dir2_leaf_lasthash(ip, blk->bp, pmaxrecs); blk->hashval = xfs_dir2_leaf_lasthash(ip, blk->bp, pmaxrecs);
if (ds->tree_level != 0) if (ds->tree_level != 0)
xfs_scrub_da_set_corrupt(ds, level); xchk_da_set_corrupt(ds, level);
break; break;
case XFS_DA_NODE_MAGIC: case XFS_DA_NODE_MAGIC:
case XFS_DA3_NODE_MAGIC: case XFS_DA3_NODE_MAGIC:
...@@ -443,13 +443,13 @@ xfs_scrub_da_btree_block( ...@@ -443,13 +443,13 @@ xfs_scrub_da_btree_block(
blk->hashval = be32_to_cpu(btree[*pmaxrecs - 1].hashval); blk->hashval = be32_to_cpu(btree[*pmaxrecs - 1].hashval);
if (level == 0) { if (level == 0) {
if (nodehdr.level >= XFS_DA_NODE_MAXDEPTH) { if (nodehdr.level >= XFS_DA_NODE_MAXDEPTH) {
xfs_scrub_da_set_corrupt(ds, level); xchk_da_set_corrupt(ds, level);
goto out_freebp; goto out_freebp;
} }
ds->tree_level = nodehdr.level; ds->tree_level = nodehdr.level;
} else { } else {
if (ds->tree_level != nodehdr.level) { if (ds->tree_level != nodehdr.level) {
xfs_scrub_da_set_corrupt(ds, level); xchk_da_set_corrupt(ds, level);
goto out_freebp; goto out_freebp;
} }
} }
...@@ -457,7 +457,7 @@ xfs_scrub_da_btree_block( ...@@ -457,7 +457,7 @@ xfs_scrub_da_btree_block(
/* XXX: Check hdr3.pad32 once we know how to fix it. */ /* XXX: Check hdr3.pad32 once we know how to fix it. */
break; break;
default: default:
xfs_scrub_da_set_corrupt(ds, level); xchk_da_set_corrupt(ds, level);
goto out_freebp; goto out_freebp;
} }
...@@ -473,13 +473,13 @@ xfs_scrub_da_btree_block( ...@@ -473,13 +473,13 @@ xfs_scrub_da_btree_block(
/* Visit all nodes and leaves of a da btree. */ /* Visit all nodes and leaves of a da btree. */
int int
xfs_scrub_da_btree( xchk_da_btree(
struct xfs_scrub_context *sc, struct xfs_scrub_context *sc,
int whichfork, int whichfork,
xfs_scrub_da_btree_rec_fn scrub_fn, xchk_da_btree_rec_fn scrub_fn,
void *private) void *private)
{ {
struct xfs_scrub_da_btree ds = {}; struct xchk_da_btree ds = {};
struct xfs_mount *mp = sc->mp; struct xfs_mount *mp = sc->mp;
struct xfs_da_state_blk *blks; struct xfs_da_state_blk *blks;
struct xfs_da_node_entry *key; struct xfs_da_node_entry *key;
...@@ -517,7 +517,7 @@ xfs_scrub_da_btree( ...@@ -517,7 +517,7 @@ xfs_scrub_da_btree(
/* Find the root of the da tree, if present. */ /* Find the root of the da tree, if present. */
blks = ds.state->path.blk; blks = ds.state->path.blk;
error = xfs_scrub_da_btree_block(&ds, level, blkno); error = xchk_da_btree_block(&ds, level, blkno);
if (error) if (error)
goto out_state; goto out_state;
/* /*
...@@ -542,12 +542,12 @@ xfs_scrub_da_btree( ...@@ -542,12 +542,12 @@ xfs_scrub_da_btree(
} }
/* Dispatch record scrubbing. */ /* Dispatch record scrubbing. */
rec = xfs_scrub_da_btree_entry(&ds, level, rec = xchk_da_btree_entry(&ds, level,
blks[level].index); blks[level].index);
error = scrub_fn(&ds, level, rec); error = scrub_fn(&ds, level, rec);
if (error) if (error)
break; break;
if (xfs_scrub_should_terminate(sc, &error) || if (xchk_should_terminate(sc, &error) ||
(sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)) (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT))
break; break;
...@@ -566,8 +566,8 @@ xfs_scrub_da_btree( ...@@ -566,8 +566,8 @@ xfs_scrub_da_btree(
} }
/* Hashes in order for scrub? */ /* Hashes in order for scrub? */
key = xfs_scrub_da_btree_entry(&ds, level, blks[level].index); key = xchk_da_btree_entry(&ds, level, blks[level].index);
error = xfs_scrub_da_btree_hash(&ds, level, &key->hashval); error = xchk_da_btree_hash(&ds, level, &key->hashval);
if (error) if (error)
goto out; goto out;
...@@ -575,7 +575,7 @@ xfs_scrub_da_btree( ...@@ -575,7 +575,7 @@ xfs_scrub_da_btree(
blkno = be32_to_cpu(key->before); blkno = be32_to_cpu(key->before);
level++; level++;
ds.tree_level--; ds.tree_level--;
error = xfs_scrub_da_btree_block(&ds, level, blkno); error = xchk_da_btree_block(&ds, level, blkno);
if (error) if (error)
goto out; goto out;
if (blks[level].bp == NULL) if (blks[level].bp == NULL)
......
...@@ -8,7 +8,7 @@ ...@@ -8,7 +8,7 @@
/* dir/attr btree */ /* dir/attr btree */
struct xfs_scrub_da_btree { struct xchk_da_btree {
struct xfs_da_args dargs; struct xfs_da_args dargs;
xfs_dahash_t hashes[XFS_DA_NODE_MAXDEPTH]; xfs_dahash_t hashes[XFS_DA_NODE_MAXDEPTH];
int maxrecs[XFS_DA_NODE_MAXDEPTH]; int maxrecs[XFS_DA_NODE_MAXDEPTH];
...@@ -28,18 +28,18 @@ struct xfs_scrub_da_btree { ...@@ -28,18 +28,18 @@ struct xfs_scrub_da_btree {
int tree_level; int tree_level;
}; };
typedef int (*xfs_scrub_da_btree_rec_fn)(struct xfs_scrub_da_btree *ds, typedef int (*xchk_da_btree_rec_fn)(struct xchk_da_btree *ds,
int level, void *rec); int level, void *rec);
/* Check for da btree operation errors. */ /* Check for da btree operation errors. */
bool xfs_scrub_da_process_error(struct xfs_scrub_da_btree *ds, int level, int *error); bool xchk_da_process_error(struct xchk_da_btree *ds, int level, int *error);
/* Check for da btree corruption. */ /* Check for da btree corruption. */
void xfs_scrub_da_set_corrupt(struct xfs_scrub_da_btree *ds, int level); void xchk_da_set_corrupt(struct xchk_da_btree *ds, int level);
int xfs_scrub_da_btree_hash(struct xfs_scrub_da_btree *ds, int level, int xchk_da_btree_hash(struct xchk_da_btree *ds, int level,
__be32 *hashp); __be32 *hashp);
int xfs_scrub_da_btree(struct xfs_scrub_context *sc, int whichfork, int xchk_da_btree(struct xfs_scrub_context *sc, int whichfork,
xfs_scrub_da_btree_rec_fn scrub_fn, void *private); xchk_da_btree_rec_fn scrub_fn, void *private);
#endif /* __XFS_SCRUB_DABTREE_H__ */ #endif /* __XFS_SCRUB_DABTREE_H__ */
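xchk_da_btree() is the generic walker; the directory and extended-attribute scrubbers plug into it through xchk_da_btree_rec_fn. A hedged sketch of a caller-side record callback matching the typedef above (demo_scrub_rec is hypothetical, and it assumes a record layout whose first field is the hash, as the dir/attr leaf entries have):

/* Hypothetical record callback matching xchk_da_btree_rec_fn. */
STATIC int
demo_scrub_rec(
	struct xchk_da_btree	*ds,
	int			level,
	void			*rec)
{
	__be32			*hashp = rec;

	/* Reuse the shared hash-order check, then add type-specific checks. */
	return xchk_da_btree_hash(ds, level, hashp);
}

/* A scrubber would then invoke the walker as:
 *	error = xchk_da_btree(sc, XFS_DATA_FORK, demo_scrub_rec, NULL);
 */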
This diff is collapsed.
...@@ -35,11 +35,11 @@ ...@@ -35,11 +35,11 @@
* try again after forcing logged inode cores out to disk. * try again after forcing logged inode cores out to disk.
*/ */
int int
xfs_scrub_setup_ag_iallocbt( xchk_setup_ag_iallocbt(
struct xfs_scrub_context *sc, struct xfs_scrub_context *sc,
struct xfs_inode *ip) struct xfs_inode *ip)
{ {
return xfs_scrub_setup_ag_btree(sc, ip, sc->try_harder); return xchk_setup_ag_btree(sc, ip, sc->try_harder);
} }
/* Inode btree scrubber. */ /* Inode btree scrubber. */
...@@ -50,7 +50,7 @@ xfs_scrub_setup_ag_iallocbt( ...@@ -50,7 +50,7 @@ xfs_scrub_setup_ag_iallocbt(
* we have a record or not depending on freecount. * we have a record or not depending on freecount.
*/ */
static inline void static inline void
xfs_scrub_iallocbt_chunk_xref_other( xchk_iallocbt_chunk_xref_other(
struct xfs_scrub_context *sc, struct xfs_scrub_context *sc,
struct xfs_inobt_rec_incore *irec, struct xfs_inobt_rec_incore *irec,
xfs_agino_t agino) xfs_agino_t agino)
...@@ -66,16 +66,16 @@ xfs_scrub_iallocbt_chunk_xref_other( ...@@ -66,16 +66,16 @@ xfs_scrub_iallocbt_chunk_xref_other(
if (!(*pcur)) if (!(*pcur))
return; return;
error = xfs_ialloc_has_inode_record(*pcur, agino, agino, &has_irec); error = xfs_ialloc_has_inode_record(*pcur, agino, agino, &has_irec);
if (!xfs_scrub_should_check_xref(sc, &error, pcur)) if (!xchk_should_check_xref(sc, &error, pcur))
return; return;
if (((irec->ir_freecount > 0 && !has_irec) || if (((irec->ir_freecount > 0 && !has_irec) ||
(irec->ir_freecount == 0 && has_irec))) (irec->ir_freecount == 0 && has_irec)))
xfs_scrub_btree_xref_set_corrupt(sc, *pcur, 0); xchk_btree_xref_set_corrupt(sc, *pcur, 0);
} }
/* Cross-reference with the other btrees. */ /* Cross-reference with the other btrees. */
STATIC void STATIC void
xfs_scrub_iallocbt_chunk_xref( xchk_iallocbt_chunk_xref(
struct xfs_scrub_context *sc, struct xfs_scrub_context *sc,
struct xfs_inobt_rec_incore *irec, struct xfs_inobt_rec_incore *irec,
xfs_agino_t agino, xfs_agino_t agino,
...@@ -87,17 +87,17 @@ xfs_scrub_iallocbt_chunk_xref( ...@@ -87,17 +87,17 @@ xfs_scrub_iallocbt_chunk_xref(
if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT) if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
return; return;
xfs_scrub_xref_is_used_space(sc, agbno, len); xchk_xref_is_used_space(sc, agbno, len);
xfs_scrub_iallocbt_chunk_xref_other(sc, irec, agino); xchk_iallocbt_chunk_xref_other(sc, irec, agino);
xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_INODES); xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_INODES);
xfs_scrub_xref_is_owned_by(sc, agbno, len, &oinfo); xchk_xref_is_owned_by(sc, agbno, len, &oinfo);
xfs_scrub_xref_is_not_shared(sc, agbno, len); xchk_xref_is_not_shared(sc, agbno, len);
} }
/* Is this chunk worth checking? */ /* Is this chunk worth checking? */
STATIC bool STATIC bool
xfs_scrub_iallocbt_chunk( xchk_iallocbt_chunk(
struct xfs_scrub_btree *bs, struct xchk_btree *bs,
struct xfs_inobt_rec_incore *irec, struct xfs_inobt_rec_incore *irec,
xfs_agino_t agino, xfs_agino_t agino,
xfs_extlen_t len) xfs_extlen_t len)
...@@ -110,16 +110,16 @@ xfs_scrub_iallocbt_chunk( ...@@ -110,16 +110,16 @@ xfs_scrub_iallocbt_chunk(
if (bno + len <= bno || if (bno + len <= bno ||
!xfs_verify_agbno(mp, agno, bno) || !xfs_verify_agbno(mp, agno, bno) ||
!xfs_verify_agbno(mp, agno, bno + len - 1)) !xfs_verify_agbno(mp, agno, bno + len - 1))
xfs_scrub_btree_set_corrupt(bs->sc, bs->cur, 0); xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
xfs_scrub_iallocbt_chunk_xref(bs->sc, irec, agino, bno, len); xchk_iallocbt_chunk_xref(bs->sc, irec, agino, bno, len);
return true; return true;
} }
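The bounds test above rejects both arithmetic wraparound (bno + len overflowing back below bno) and extents whose first or last block falls outside the AG. The same arithmetic as a standalone predicate (xfs_verify_agbno modeled as a plain range check against the AG length):

#include <stdbool.h>
#include <stdint.h>

/* Extent sanity: nonzero length, no overflow, both ends inside the AG. */
static bool demo_ag_extent_ok(uint32_t bno, uint32_t len, uint32_t agblocks)
{
	if (bno + len <= bno)		/* catches len == 0 and wraparound */
		return false;
	return bno < agblocks && bno + len - 1 < agblocks;
}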
/* Count the number of free inodes. */ /* Count the number of free inodes. */
static unsigned int static unsigned int
xfs_scrub_iallocbt_freecount( xchk_iallocbt_freecount(
xfs_inofree_t freemask) xfs_inofree_t freemask)
{ {
BUILD_BUG_ON(sizeof(freemask) != sizeof(__u64)); BUILD_BUG_ON(sizeof(freemask) != sizeof(__u64));
...@@ -128,8 +128,8 @@ xfs_scrub_iallocbt_freecount( ...@@ -128,8 +128,8 @@ xfs_scrub_iallocbt_freecount(
/* Check a particular inode with ir_free. */ /* Check a particular inode with ir_free. */
STATIC int STATIC int
xfs_scrub_iallocbt_check_cluster_freemask( xchk_iallocbt_check_cluster_freemask(
struct xfs_scrub_btree *bs, struct xchk_btree *bs,
xfs_ino_t fsino, xfs_ino_t fsino,
xfs_agino_t chunkino, xfs_agino_t chunkino,
xfs_agino_t clusterino, xfs_agino_t clusterino,
...@@ -143,14 +143,14 @@ xfs_scrub_iallocbt_check_cluster_freemask( ...@@ -143,14 +143,14 @@ xfs_scrub_iallocbt_check_cluster_freemask(
bool inuse; bool inuse;
int error = 0; int error = 0;
if (xfs_scrub_should_terminate(bs->sc, &error)) if (xchk_should_terminate(bs->sc, &error))
return error; return error;
dip = xfs_buf_offset(bp, clusterino * mp->m_sb.sb_inodesize); dip = xfs_buf_offset(bp, clusterino * mp->m_sb.sb_inodesize);
if (be16_to_cpu(dip->di_magic) != XFS_DINODE_MAGIC || if (be16_to_cpu(dip->di_magic) != XFS_DINODE_MAGIC ||
(dip->di_version >= 3 && (dip->di_version >= 3 &&
be64_to_cpu(dip->di_ino) != fsino + clusterino)) { be64_to_cpu(dip->di_ino) != fsino + clusterino)) {
xfs_scrub_btree_set_corrupt(bs->sc, bs->cur, 0); xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
goto out; goto out;
} }
...@@ -175,15 +175,15 @@ xfs_scrub_iallocbt_check_cluster_freemask( ...@@ -175,15 +175,15 @@ xfs_scrub_iallocbt_check_cluster_freemask(
freemask_ok = inode_is_free ^ inuse; freemask_ok = inode_is_free ^ inuse;
} }
if (!freemask_ok) if (!freemask_ok)
xfs_scrub_btree_set_corrupt(bs->sc, bs->cur, 0); xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
out: out:
return 0; return 0;
} }
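The heart of the function above is the XOR near the end: for a clean filesystem, "the inobt says this inode is free" and "the inode core says it is in use" must never agree, so freemask_ok is true exactly when the two views differ. Reduced to a predicate (this skips the in-memory-inode special cases the real code handles):

#include <stdbool.h>

/* Exactly one of "btree says free" / "inode says in use" may hold. */
static bool demo_freemask_ok(bool btree_says_free, bool inode_in_use)
{
	return btree_says_free ^ inode_in_use;
}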
/* Make sure the free mask is consistent with what the inodes think. */ /* Make sure the free mask is consistent with what the inodes think. */
STATIC int STATIC int
xfs_scrub_iallocbt_check_freemask( xchk_iallocbt_check_freemask(
struct xfs_scrub_btree *bs, struct xchk_btree *bs,
struct xfs_inobt_rec_incore *irec) struct xfs_inobt_rec_incore *irec)
{ {
struct xfs_owner_info oinfo; struct xfs_owner_info oinfo;
...@@ -223,18 +223,18 @@ xfs_scrub_iallocbt_check_freemask( ...@@ -223,18 +223,18 @@ xfs_scrub_iallocbt_check_freemask(
/* The whole cluster must be a hole or not a hole. */ /* The whole cluster must be a hole or not a hole. */
ir_holemask = (irec->ir_holemask & holemask); ir_holemask = (irec->ir_holemask & holemask);
if (ir_holemask != holemask && ir_holemask != 0) { if (ir_holemask != holemask && ir_holemask != 0) {
xfs_scrub_btree_set_corrupt(bs->sc, bs->cur, 0); xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
continue; continue;
} }
/* If any part of this is a hole, skip it. */ /* If any part of this is a hole, skip it. */
if (ir_holemask) { if (ir_holemask) {
xfs_scrub_xref_is_not_owned_by(bs->sc, agbno, xchk_xref_is_not_owned_by(bs->sc, agbno,
blks_per_cluster, &oinfo); blks_per_cluster, &oinfo);
continue; continue;
} }
xfs_scrub_xref_is_owned_by(bs->sc, agbno, blks_per_cluster, xchk_xref_is_owned_by(bs->sc, agbno, blks_per_cluster,
&oinfo); &oinfo);
/* Grab the inode cluster buffer. */ /* Grab the inode cluster buffer. */
...@@ -245,13 +245,13 @@ xfs_scrub_iallocbt_check_freemask( ...@@ -245,13 +245,13 @@ xfs_scrub_iallocbt_check_freemask(
error = xfs_imap_to_bp(mp, bs->cur->bc_tp, &imap, error = xfs_imap_to_bp(mp, bs->cur->bc_tp, &imap,
&dip, &bp, 0, 0); &dip, &bp, 0, 0);
if (!xfs_scrub_btree_xref_process_error(bs->sc, bs->cur, 0, if (!xchk_btree_xref_process_error(bs->sc, bs->cur, 0,
&error)) &error))
continue; continue;
/* Which inodes are free? */ /* Which inodes are free? */
for (clusterino = 0; clusterino < nr_inodes; clusterino++) { for (clusterino = 0; clusterino < nr_inodes; clusterino++) {
error = xfs_scrub_iallocbt_check_cluster_freemask(bs, error = xchk_iallocbt_check_cluster_freemask(bs,
fsino, chunkino, clusterino, irec, bp); fsino, chunkino, clusterino, irec, bp);
if (error) { if (error) {
xfs_trans_brelse(bs->cur->bc_tp, bp); xfs_trans_brelse(bs->cur->bc_tp, bp);
...@@ -267,8 +267,8 @@ xfs_scrub_iallocbt_check_freemask( ...@@ -267,8 +267,8 @@ xfs_scrub_iallocbt_check_freemask(
/* Scrub an inobt/finobt record. */ /* Scrub an inobt/finobt record. */
STATIC int STATIC int
xfs_scrub_iallocbt_rec( xchk_iallocbt_rec(
struct xfs_scrub_btree *bs, struct xchk_btree *bs,
union xfs_btree_rec *rec) union xfs_btree_rec *rec)
{ {
struct xfs_mount *mp = bs->cur->bc_mp; struct xfs_mount *mp = bs->cur->bc_mp;
...@@ -289,18 +289,18 @@ xfs_scrub_iallocbt_rec( ...@@ -289,18 +289,18 @@ xfs_scrub_iallocbt_rec(
if (irec.ir_count > XFS_INODES_PER_CHUNK || if (irec.ir_count > XFS_INODES_PER_CHUNK ||
irec.ir_freecount > XFS_INODES_PER_CHUNK) irec.ir_freecount > XFS_INODES_PER_CHUNK)
xfs_scrub_btree_set_corrupt(bs->sc, bs->cur, 0); xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
real_freecount = irec.ir_freecount + real_freecount = irec.ir_freecount +
(XFS_INODES_PER_CHUNK - irec.ir_count); (XFS_INODES_PER_CHUNK - irec.ir_count);
if (real_freecount != xfs_scrub_iallocbt_freecount(irec.ir_free)) if (real_freecount != xchk_iallocbt_freecount(irec.ir_free))
xfs_scrub_btree_set_corrupt(bs->sc, bs->cur, 0); xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
agino = irec.ir_startino; agino = irec.ir_startino;
/* Record has to be properly aligned within the AG. */ /* Record has to be properly aligned within the AG. */
if (!xfs_verify_agino(mp, agno, agino) || if (!xfs_verify_agino(mp, agno, agino) ||
!xfs_verify_agino(mp, agno, agino + XFS_INODES_PER_CHUNK - 1)) { !xfs_verify_agino(mp, agno, agino + XFS_INODES_PER_CHUNK - 1)) {
xfs_scrub_btree_set_corrupt(bs->sc, bs->cur, 0); xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
goto out; goto out;
} }
...@@ -308,7 +308,7 @@ xfs_scrub_iallocbt_rec( ...@@ -308,7 +308,7 @@ xfs_scrub_iallocbt_rec(
agbno = XFS_AGINO_TO_AGBNO(mp, irec.ir_startino); agbno = XFS_AGINO_TO_AGBNO(mp, irec.ir_startino);
if ((agbno & (xfs_ialloc_cluster_alignment(mp) - 1)) || if ((agbno & (xfs_ialloc_cluster_alignment(mp) - 1)) ||
(agbno & (xfs_icluster_size_fsb(mp) - 1))) (agbno & (xfs_icluster_size_fsb(mp) - 1)))
xfs_scrub_btree_set_corrupt(bs->sc, bs->cur, 0); xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
*inode_blocks += XFS_B_TO_FSB(mp, *inode_blocks += XFS_B_TO_FSB(mp,
irec.ir_count * mp->m_sb.sb_inodesize); irec.ir_count * mp->m_sb.sb_inodesize);
...@@ -318,9 +318,9 @@ xfs_scrub_iallocbt_rec( ...@@ -318,9 +318,9 @@ xfs_scrub_iallocbt_rec(
len = XFS_B_TO_FSB(mp, len = XFS_B_TO_FSB(mp,
XFS_INODES_PER_CHUNK * mp->m_sb.sb_inodesize); XFS_INODES_PER_CHUNK * mp->m_sb.sb_inodesize);
if (irec.ir_count != XFS_INODES_PER_CHUNK) if (irec.ir_count != XFS_INODES_PER_CHUNK)
xfs_scrub_btree_set_corrupt(bs->sc, bs->cur, 0); xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
if (!xfs_scrub_iallocbt_chunk(bs, &irec, agino, len)) if (!xchk_iallocbt_chunk(bs, &irec, agino, len))
goto out; goto out;
goto check_freemask; goto check_freemask;
} }
...@@ -333,12 +333,12 @@ xfs_scrub_iallocbt_rec( ...@@ -333,12 +333,12 @@ xfs_scrub_iallocbt_rec(
holes = ~xfs_inobt_irec_to_allocmask(&irec); holes = ~xfs_inobt_irec_to_allocmask(&irec);
if ((holes & irec.ir_free) != holes || if ((holes & irec.ir_free) != holes ||
irec.ir_freecount > irec.ir_count) irec.ir_freecount > irec.ir_count)
xfs_scrub_btree_set_corrupt(bs->sc, bs->cur, 0); xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
for (i = 0; i < XFS_INOBT_HOLEMASK_BITS; i++) { for (i = 0; i < XFS_INOBT_HOLEMASK_BITS; i++) {
if (holemask & 1) if (holemask & 1)
holecount += XFS_INODES_PER_HOLEMASK_BIT; holecount += XFS_INODES_PER_HOLEMASK_BIT;
else if (!xfs_scrub_iallocbt_chunk(bs, &irec, agino, len)) else if (!xchk_iallocbt_chunk(bs, &irec, agino, len))
break; break;
holemask >>= 1; holemask >>= 1;
agino += XFS_INODES_PER_HOLEMASK_BIT; agino += XFS_INODES_PER_HOLEMASK_BIT;
...@@ -346,10 +346,10 @@ xfs_scrub_iallocbt_rec( ...@@ -346,10 +346,10 @@ xfs_scrub_iallocbt_rec(
if (holecount > XFS_INODES_PER_CHUNK || if (holecount > XFS_INODES_PER_CHUNK ||
holecount + irec.ir_count != XFS_INODES_PER_CHUNK) holecount + irec.ir_count != XFS_INODES_PER_CHUNK)
xfs_scrub_btree_set_corrupt(bs->sc, bs->cur, 0); xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
check_freemask: check_freemask:
error = xfs_scrub_iallocbt_check_freemask(bs, &irec); error = xchk_iallocbt_check_freemask(bs, &irec);
if (error) if (error)
goto out; goto out;
...@@ -362,7 +362,7 @@ xfs_scrub_iallocbt_rec( ...@@ -362,7 +362,7 @@ xfs_scrub_iallocbt_rec(
* Don't bother if we're missing btree cursors, as we're already corrupt. * Don't bother if we're missing btree cursors, as we're already corrupt.
*/ */
STATIC void STATIC void
xfs_scrub_iallocbt_xref_rmap_btreeblks( xchk_iallocbt_xref_rmap_btreeblks(
struct xfs_scrub_context *sc, struct xfs_scrub_context *sc,
int which) int which)
{ {
...@@ -374,27 +374,27 @@ xfs_scrub_iallocbt_xref_rmap_btreeblks( ...@@ -374,27 +374,27 @@ xfs_scrub_iallocbt_xref_rmap_btreeblks(
if (!sc->sa.ino_cur || !sc->sa.rmap_cur || if (!sc->sa.ino_cur || !sc->sa.rmap_cur ||
(xfs_sb_version_hasfinobt(&sc->mp->m_sb) && !sc->sa.fino_cur) || (xfs_sb_version_hasfinobt(&sc->mp->m_sb) && !sc->sa.fino_cur) ||
xfs_scrub_skip_xref(sc->sm)) xchk_skip_xref(sc->sm))
return; return;
/* Check that we saw as many inobt blocks as the rmap says. */ /* Check that we saw as many inobt blocks as the rmap says. */
error = xfs_btree_count_blocks(sc->sa.ino_cur, &inobt_blocks); error = xfs_btree_count_blocks(sc->sa.ino_cur, &inobt_blocks);
if (!xfs_scrub_process_error(sc, 0, 0, &error)) if (!xchk_process_error(sc, 0, 0, &error))
return; return;
if (sc->sa.fino_cur) { if (sc->sa.fino_cur) {
error = xfs_btree_count_blocks(sc->sa.fino_cur, &finobt_blocks); error = xfs_btree_count_blocks(sc->sa.fino_cur, &finobt_blocks);
if (!xfs_scrub_process_error(sc, 0, 0, &error)) if (!xchk_process_error(sc, 0, 0, &error))
return; return;
} }
xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_INOBT); xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_INOBT);
error = xfs_scrub_count_rmap_ownedby_ag(sc, sc->sa.rmap_cur, &oinfo, error = xchk_count_rmap_ownedby_ag(sc, sc->sa.rmap_cur, &oinfo,
&blocks); &blocks);
if (!xfs_scrub_should_check_xref(sc, &error, &sc->sa.rmap_cur)) if (!xchk_should_check_xref(sc, &error, &sc->sa.rmap_cur))
return; return;
if (blocks != inobt_blocks + finobt_blocks) if (blocks != inobt_blocks + finobt_blocks)
xfs_scrub_btree_set_corrupt(sc, sc->sa.ino_cur, 0); xchk_btree_set_corrupt(sc, sc->sa.ino_cur, 0);
} }
/* /*
...@@ -402,7 +402,7 @@ xfs_scrub_iallocbt_xref_rmap_btreeblks( ...@@ -402,7 +402,7 @@ xfs_scrub_iallocbt_xref_rmap_btreeblks(
* the rmap says are owned by inodes. * the rmap says are owned by inodes.
*/ */
STATIC void STATIC void
xfs_scrub_iallocbt_xref_rmap_inodes( xchk_iallocbt_xref_rmap_inodes(
struct xfs_scrub_context *sc, struct xfs_scrub_context *sc,
int which, int which,
xfs_filblks_t inode_blocks) xfs_filblks_t inode_blocks)
...@@ -411,22 +411,22 @@ xfs_scrub_iallocbt_xref_rmap_inodes( ...@@ -411,22 +411,22 @@ xfs_scrub_iallocbt_xref_rmap_inodes(
xfs_filblks_t blocks; xfs_filblks_t blocks;
int error; int error;
if (!sc->sa.rmap_cur || xfs_scrub_skip_xref(sc->sm)) if (!sc->sa.rmap_cur || xchk_skip_xref(sc->sm))
return; return;
/* Check that we saw as many inode blocks as the rmap knows about. */ /* Check that we saw as many inode blocks as the rmap knows about. */
xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_INODES); xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_INODES);
error = xfs_scrub_count_rmap_ownedby_ag(sc, sc->sa.rmap_cur, &oinfo, error = xchk_count_rmap_ownedby_ag(sc, sc->sa.rmap_cur, &oinfo,
&blocks); &blocks);
if (!xfs_scrub_should_check_xref(sc, &error, &sc->sa.rmap_cur)) if (!xchk_should_check_xref(sc, &error, &sc->sa.rmap_cur))
return; return;
if (blocks != inode_blocks) if (blocks != inode_blocks)
xfs_scrub_btree_xref_set_corrupt(sc, sc->sa.rmap_cur, 0); xchk_btree_xref_set_corrupt(sc, sc->sa.rmap_cur, 0);
} }
/* Scrub the inode btrees for some AG. */ /* Scrub the inode btrees for some AG. */
STATIC int STATIC int
xfs_scrub_iallocbt( xchk_iallocbt(
struct xfs_scrub_context *sc, struct xfs_scrub_context *sc,
xfs_btnum_t which) xfs_btnum_t which)
{ {
...@@ -437,12 +437,12 @@ xfs_scrub_iallocbt( ...@@ -437,12 +437,12 @@ xfs_scrub_iallocbt(
xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_INOBT); xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_INOBT);
cur = which == XFS_BTNUM_INO ? sc->sa.ino_cur : sc->sa.fino_cur; cur = which == XFS_BTNUM_INO ? sc->sa.ino_cur : sc->sa.fino_cur;
error = xfs_scrub_btree(sc, cur, xfs_scrub_iallocbt_rec, &oinfo, error = xchk_btree(sc, cur, xchk_iallocbt_rec, &oinfo,
&inode_blocks); &inode_blocks);
if (error) if (error)
return error; return error;
xfs_scrub_iallocbt_xref_rmap_btreeblks(sc, which); xchk_iallocbt_xref_rmap_btreeblks(sc, which);
/* /*
* If we're scrubbing the inode btree, inode_blocks is the number of * If we're scrubbing the inode btree, inode_blocks is the number of
...@@ -452,28 +452,28 @@ xfs_scrub_iallocbt( ...@@ -452,28 +452,28 @@ xfs_scrub_iallocbt(
* to inode chunks with free inodes. * to inode chunks with free inodes.
*/ */
if (which == XFS_BTNUM_INO) if (which == XFS_BTNUM_INO)
xfs_scrub_iallocbt_xref_rmap_inodes(sc, which, inode_blocks); xchk_iallocbt_xref_rmap_inodes(sc, which, inode_blocks);
return error; return error;
} }
int int
xfs_scrub_inobt( xchk_inobt(
struct xfs_scrub_context *sc) struct xfs_scrub_context *sc)
{ {
return xfs_scrub_iallocbt(sc, XFS_BTNUM_INO); return xchk_iallocbt(sc, XFS_BTNUM_INO);
} }
int int
xfs_scrub_finobt( xchk_finobt(
struct xfs_scrub_context *sc) struct xfs_scrub_context *sc)
{ {
return xfs_scrub_iallocbt(sc, XFS_BTNUM_FINO); return xchk_iallocbt(sc, XFS_BTNUM_FINO);
} }
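xchk_iallocbt_rec above ties three derived quantities together: real_freecount adds the uncounted (hole) inodes back to ir_freecount so it can be compared against the popcount of ir_free, and walking the holemask must yield holecount + ir_count equal to one full chunk. That arithmetic in standalone form (the constants mirror the 64-inode chunk and 16-bit holemask the code implies; __builtin_popcountll is a GCC/Clang builtin):

#include <stdbool.h>
#include <stdint.h>

#define DEMO_INODES_PER_CHUNK	64
#define DEMO_HOLEMASK_BITS	16
#define DEMO_INODES_PER_BIT	(DEMO_INODES_PER_CHUNK / DEMO_HOLEMASK_BITS)

static bool demo_irec_counts_ok(uint16_t holemask, unsigned int count,
				unsigned int freecount, uint64_t freemask)
{
	unsigned int holecount = 0;
	int i;

	for (i = 0; i < DEMO_HOLEMASK_BITS; i++)
		if (holemask & (1u << i))
			holecount += DEMO_INODES_PER_BIT;

	/* Holes plus allocated inodes must cover the chunk exactly, and
	 * freecount plus holes must equal the set bits in the free mask. */
	return holecount + count == DEMO_INODES_PER_CHUNK &&
	       freecount + (DEMO_INODES_PER_CHUNK - count) ==
			(unsigned int)__builtin_popcountll(freemask);
}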
/* See if an inode btree has (or doesn't have) an inode chunk record. */ /* See if an inode btree has (or doesn't have) an inode chunk record. */
static inline void static inline void
xfs_scrub_xref_inode_check( xchk_xref_inode_check(
struct xfs_scrub_context *sc, struct xfs_scrub_context *sc,
xfs_agblock_t agbno, xfs_agblock_t agbno,
xfs_extlen_t len, xfs_extlen_t len,
...@@ -483,33 +483,33 @@ xfs_scrub_xref_inode_check( ...@@ -483,33 +483,33 @@ xfs_scrub_xref_inode_check(
bool has_inodes; bool has_inodes;
int error; int error;
if (!(*icur) || xfs_scrub_skip_xref(sc->sm)) if (!(*icur) || xchk_skip_xref(sc->sm))
return; return;
error = xfs_ialloc_has_inodes_at_extent(*icur, agbno, len, &has_inodes); error = xfs_ialloc_has_inodes_at_extent(*icur, agbno, len, &has_inodes);
if (!xfs_scrub_should_check_xref(sc, &error, icur)) if (!xchk_should_check_xref(sc, &error, icur))
return; return;
if (has_inodes != should_have_inodes) if (has_inodes != should_have_inodes)
xfs_scrub_btree_xref_set_corrupt(sc, *icur, 0); xchk_btree_xref_set_corrupt(sc, *icur, 0);
} }
/* xref check that the extent is not covered by inodes */ /* xref check that the extent is not covered by inodes */
void void
xfs_scrub_xref_is_not_inode_chunk( xchk_xref_is_not_inode_chunk(
struct xfs_scrub_context *sc, struct xfs_scrub_context *sc,
xfs_agblock_t agbno, xfs_agblock_t agbno,
xfs_extlen_t len) xfs_extlen_t len)
{ {
xfs_scrub_xref_inode_check(sc, agbno, len, &sc->sa.ino_cur, false); xchk_xref_inode_check(sc, agbno, len, &sc->sa.ino_cur, false);
xfs_scrub_xref_inode_check(sc, agbno, len, &sc->sa.fino_cur, false); xchk_xref_inode_check(sc, agbno, len, &sc->sa.fino_cur, false);
} }
/* xref check that the extent is covered by inodes */ /* xref check that the extent is covered by inodes */
void void
xfs_scrub_xref_is_inode_chunk( xchk_xref_is_inode_chunk(
struct xfs_scrub_context *sc, struct xfs_scrub_context *sc,
xfs_agblock_t agbno, xfs_agblock_t agbno,
xfs_extlen_t len) xfs_extlen_t len)
{ {
xfs_scrub_xref_inode_check(sc, agbno, len, &sc->sa.ino_cur, true); xchk_xref_inode_check(sc, agbno, len, &sc->sa.ino_cur, true);
} }
This diff is collapsed.
...@@ -27,18 +27,18 @@ ...@@ -27,18 +27,18 @@
/* Set us up to scrub parents. */ /* Set us up to scrub parents. */
int int
xfs_scrub_setup_parent( xchk_setup_parent(
struct xfs_scrub_context *sc, struct xfs_scrub_context *sc,
struct xfs_inode *ip) struct xfs_inode *ip)
{ {
return xfs_scrub_setup_inode_contents(sc, ip, 0); return xchk_setup_inode_contents(sc, ip, 0);
} }
/* Parent pointers */ /* Parent pointers */
/* Look for an entry in a parent pointing to this inode. */ /* Look for an entry in a parent pointing to this inode. */
struct xfs_scrub_parent_ctx { struct xchk_parent_ctx {
struct dir_context dc; struct dir_context dc;
xfs_ino_t ino; xfs_ino_t ino;
xfs_nlink_t nlink; xfs_nlink_t nlink;
...@@ -46,7 +46,7 @@ struct xfs_scrub_parent_ctx { ...@@ -46,7 +46,7 @@ struct xfs_scrub_parent_ctx {
/* Look for a single entry in a directory pointing to an inode. */ /* Look for a single entry in a directory pointing to an inode. */
STATIC int STATIC int
xfs_scrub_parent_actor( xchk_parent_actor(
struct dir_context *dc, struct dir_context *dc,
const char *name, const char *name,
int namelen, int namelen,
...@@ -54,9 +54,9 @@ xfs_scrub_parent_actor( ...@@ -54,9 +54,9 @@ xfs_scrub_parent_actor(
u64 ino, u64 ino,
unsigned type) unsigned type)
{ {
struct xfs_scrub_parent_ctx *spc; struct xchk_parent_ctx *spc;
spc = container_of(dc, struct xfs_scrub_parent_ctx, dc); spc = container_of(dc, struct xchk_parent_ctx, dc);
if (spc->ino == ino) if (spc->ino == ino)
spc->nlink++; spc->nlink++;
return 0; return 0;
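xchk_parent_actor is the usual dir_context pattern: the directory iterator calls it once per entry, and it simply counts the entries whose inode number matches the child being scrubbed. The same shape in a userspace model (the real callback is driven by xfs_readdir through struct dir_context):

#include <stdint.h>

struct demo_parent_ctx {
	uint64_t	target_ino;	/* child inode we expect to find */
	unsigned int	nlink;		/* entries that point at it */
};

/* Invoked once per directory entry during iteration. */
static int demo_parent_actor(struct demo_parent_ctx *spc,
			     const char *name, int namelen, uint64_t ino)
{
	(void)name; (void)namelen;	/* the count only needs the inumber */
	if (spc->target_ino == ino)
		spc->nlink++;
	return 0;			/* keep iterating */
}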
...@@ -64,13 +64,13 @@ xfs_scrub_parent_actor( ...@@ -64,13 +64,13 @@ xfs_scrub_parent_actor(
/* Count the number of dentries in the parent dir that point to this inode. */ /* Count the number of dentries in the parent dir that point to this inode. */
STATIC int STATIC int
xfs_scrub_parent_count_parent_dentries( xchk_parent_count_parent_dentries(
struct xfs_scrub_context *sc, struct xfs_scrub_context *sc,
struct xfs_inode *parent, struct xfs_inode *parent,
xfs_nlink_t *nlink) xfs_nlink_t *nlink)
{ {
struct xfs_scrub_parent_ctx spc = { struct xchk_parent_ctx spc = {
.dc.actor = xfs_scrub_parent_actor, .dc.actor = xchk_parent_actor,
.dc.pos = 0, .dc.pos = 0,
.ino = sc->ip->i_ino, .ino = sc->ip->i_ino,
.nlink = 0, .nlink = 0,
...@@ -120,7 +120,7 @@ xfs_scrub_parent_count_parent_dentries( ...@@ -120,7 +120,7 @@ xfs_scrub_parent_count_parent_dentries(
* entry pointing back to the inode being scrubbed. * entry pointing back to the inode being scrubbed.
*/ */
STATIC int STATIC int
xfs_scrub_parent_validate( xchk_parent_validate(
struct xfs_scrub_context *sc, struct xfs_scrub_context *sc,
xfs_ino_t dnum, xfs_ino_t dnum,
bool *try_again) bool *try_again)
...@@ -138,7 +138,7 @@ xfs_scrub_parent_validate( ...@@ -138,7 +138,7 @@ xfs_scrub_parent_validate(
/* '..' must not point to ourselves. */ /* '..' must not point to ourselves. */
if (sc->ip->i_ino == dnum) { if (sc->ip->i_ino == dnum) {
xfs_scrub_fblock_set_corrupt(sc, XFS_DATA_FORK, 0); xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, 0);
goto out; goto out;
} }
...@@ -165,13 +165,13 @@ xfs_scrub_parent_validate( ...@@ -165,13 +165,13 @@ xfs_scrub_parent_validate(
error = xfs_iget(mp, sc->tp, dnum, XFS_IGET_UNTRUSTED, 0, &dp); error = xfs_iget(mp, sc->tp, dnum, XFS_IGET_UNTRUSTED, 0, &dp);
if (error == -EINVAL) { if (error == -EINVAL) {
error = -EFSCORRUPTED; error = -EFSCORRUPTED;
xfs_scrub_fblock_process_error(sc, XFS_DATA_FORK, 0, &error); xchk_fblock_process_error(sc, XFS_DATA_FORK, 0, &error);
goto out; goto out;
} }
if (!xfs_scrub_fblock_xref_process_error(sc, XFS_DATA_FORK, 0, &error)) if (!xchk_fblock_xref_process_error(sc, XFS_DATA_FORK, 0, &error))
goto out; goto out;
if (dp == sc->ip || !S_ISDIR(VFS_I(dp)->i_mode)) { if (dp == sc->ip || !S_ISDIR(VFS_I(dp)->i_mode)) {
xfs_scrub_fblock_set_corrupt(sc, XFS_DATA_FORK, 0); xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, 0);
goto out_rele; goto out_rele;
} }
...@@ -183,12 +183,12 @@ xfs_scrub_parent_validate( ...@@ -183,12 +183,12 @@ xfs_scrub_parent_validate(
* the child inodes. * the child inodes.
*/ */
if (xfs_ilock_nowait(dp, XFS_IOLOCK_SHARED)) { if (xfs_ilock_nowait(dp, XFS_IOLOCK_SHARED)) {
error = xfs_scrub_parent_count_parent_dentries(sc, dp, &nlink); error = xchk_parent_count_parent_dentries(sc, dp, &nlink);
if (!xfs_scrub_fblock_xref_process_error(sc, XFS_DATA_FORK, 0, if (!xchk_fblock_xref_process_error(sc, XFS_DATA_FORK, 0,
&error)) &error))
goto out_unlock; goto out_unlock;
if (nlink != expected_nlink) if (nlink != expected_nlink)
xfs_scrub_fblock_set_corrupt(sc, XFS_DATA_FORK, 0); xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, 0);
goto out_unlock; goto out_unlock;
} }
...@@ -200,18 +200,18 @@ xfs_scrub_parent_validate( ...@@ -200,18 +200,18 @@ xfs_scrub_parent_validate(
*/ */
xfs_iunlock(sc->ip, sc->ilock_flags); xfs_iunlock(sc->ip, sc->ilock_flags);
sc->ilock_flags = 0; sc->ilock_flags = 0;
error = xfs_scrub_ilock_inverted(dp, XFS_IOLOCK_SHARED); error = xchk_ilock_inverted(dp, XFS_IOLOCK_SHARED);
if (error) if (error)
goto out_rele; goto out_rele;
/* Go looking for our dentry. */ /* Go looking for our dentry. */
error = xfs_scrub_parent_count_parent_dentries(sc, dp, &nlink); error = xchk_parent_count_parent_dentries(sc, dp, &nlink);
if (!xfs_scrub_fblock_xref_process_error(sc, XFS_DATA_FORK, 0, &error)) if (!xchk_fblock_xref_process_error(sc, XFS_DATA_FORK, 0, &error))
goto out_unlock; goto out_unlock;
/* Drop the parent lock, relock this inode. */ /* Drop the parent lock, relock this inode. */
xfs_iunlock(dp, XFS_IOLOCK_SHARED); xfs_iunlock(dp, XFS_IOLOCK_SHARED);
error = xfs_scrub_ilock_inverted(sc->ip, XFS_IOLOCK_EXCL); error = xchk_ilock_inverted(sc->ip, XFS_IOLOCK_EXCL);
if (error) if (error)
goto out_rele; goto out_rele;
sc->ilock_flags = XFS_IOLOCK_EXCL; sc->ilock_flags = XFS_IOLOCK_EXCL;
...@@ -225,7 +225,7 @@ xfs_scrub_parent_validate( ...@@ -225,7 +225,7 @@ xfs_scrub_parent_validate(
/* Look up '..' to see if the inode changed. */ /* Look up '..' to see if the inode changed. */
error = xfs_dir_lookup(sc->tp, sc->ip, &xfs_name_dotdot, &dnum, NULL); error = xfs_dir_lookup(sc->tp, sc->ip, &xfs_name_dotdot, &dnum, NULL);
if (!xfs_scrub_fblock_process_error(sc, XFS_DATA_FORK, 0, &error)) if (!xchk_fblock_process_error(sc, XFS_DATA_FORK, 0, &error))
goto out_rele; goto out_rele;
/* Drat, parent changed. Try again! */ /* Drat, parent changed. Try again! */
...@@ -241,7 +241,7 @@ xfs_scrub_parent_validate( ...@@ -241,7 +241,7 @@ xfs_scrub_parent_validate(
* for us in the parent. * for us in the parent.
*/ */
if (nlink != expected_nlink) if (nlink != expected_nlink)
xfs_scrub_fblock_set_corrupt(sc, XFS_DATA_FORK, 0); xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, 0);
return error; return error;
out_unlock: out_unlock:
...@@ -254,7 +254,7 @@ xfs_scrub_parent_validate( ...@@ -254,7 +254,7 @@ xfs_scrub_parent_validate(
/* Scrub a parent pointer. */ /* Scrub a parent pointer. */
int int
xfs_scrub_parent( xchk_parent(
struct xfs_scrub_context *sc) struct xfs_scrub_context *sc)
{ {
struct xfs_mount *mp = sc->mp; struct xfs_mount *mp = sc->mp;
...@@ -272,7 +272,7 @@ xfs_scrub_parent( ...@@ -272,7 +272,7 @@ xfs_scrub_parent(
/* We're not a special inode, are we? */ /* We're not a special inode, are we? */
if (!xfs_verify_dir_ino(mp, sc->ip->i_ino)) { if (!xfs_verify_dir_ino(mp, sc->ip->i_ino)) {
xfs_scrub_fblock_set_corrupt(sc, XFS_DATA_FORK, 0); xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, 0);
goto out; goto out;
} }
...@@ -288,10 +288,10 @@ xfs_scrub_parent( ...@@ -288,10 +288,10 @@ xfs_scrub_parent(
/* Look up '..' */ /* Look up '..' */
error = xfs_dir_lookup(sc->tp, sc->ip, &xfs_name_dotdot, &dnum, NULL); error = xfs_dir_lookup(sc->tp, sc->ip, &xfs_name_dotdot, &dnum, NULL);
if (!xfs_scrub_fblock_process_error(sc, XFS_DATA_FORK, 0, &error)) if (!xchk_fblock_process_error(sc, XFS_DATA_FORK, 0, &error))
goto out; goto out;
if (!xfs_verify_dir_ino(mp, dnum)) { if (!xfs_verify_dir_ino(mp, dnum)) {
xfs_scrub_fblock_set_corrupt(sc, XFS_DATA_FORK, 0); xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, 0);
goto out; goto out;
} }
...@@ -299,12 +299,12 @@ xfs_scrub_parent( ...@@ -299,12 +299,12 @@ xfs_scrub_parent(
if (sc->ip == mp->m_rootip) { if (sc->ip == mp->m_rootip) {
if (sc->ip->i_ino != mp->m_sb.sb_rootino || if (sc->ip->i_ino != mp->m_sb.sb_rootino ||
sc->ip->i_ino != dnum) sc->ip->i_ino != dnum)
xfs_scrub_fblock_set_corrupt(sc, XFS_DATA_FORK, 0); xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, 0);
goto out; goto out;
} }
do { do {
error = xfs_scrub_parent_validate(sc, dnum, &try_again); error = xchk_parent_validate(sc, dnum, &try_again);
if (error) if (error)
goto out; goto out;
} while (try_again && ++tries < 20); } while (try_again && ++tries < 20);
...@@ -314,7 +314,7 @@ xfs_scrub_parent( ...@@ -314,7 +314,7 @@ xfs_scrub_parent(
* incomplete. Userspace can decide if it wants to try again. * incomplete. Userspace can decide if it wants to try again.
*/ */
if (try_again && tries == 20) if (try_again && tries == 20)
xfs_scrub_set_incomplete(sc); xchk_set_incomplete(sc);
out: out:
/* /*
* If we failed to lock the parent inode even after a retry, just mark * If we failed to lock the parent inode even after a retry, just mark
...@@ -322,7 +322,7 @@ xfs_scrub_parent( ...@@ -322,7 +322,7 @@ xfs_scrub_parent(
*/ */
if (sc->try_harder && error == -EDEADLOCK) { if (sc->try_harder && error == -EDEADLOCK) {
error = 0; error = 0;
xfs_scrub_set_incomplete(sc); xchk_set_incomplete(sc);
} }
return error; return error;
} }
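Because the child's IOLOCK must be dropped before the parent's can be taken, the '..' entry can change underneath the scrubber; xchk_parent therefore retries xchk_parent_validate up to 20 times and marks the scan incomplete instead of looping forever. The control flow as a self-contained skeleton (the demo_* functions stand in for the validate and set-incomplete calls):

#include <stdbool.h>

static int demo_validate(bool *try_again) { *try_again = false; return 0; }
static void demo_set_incomplete(void) { }

static int demo_bounded_retry(void)
{
	bool try_again = false;
	int tries = 0;
	int error;

	do {
		error = demo_validate(&try_again);
		if (error)
			return error;
	} while (try_again && ++tries < 20);

	if (try_again && tries == 20)
		demo_set_incomplete();	/* report, never spin forever */
	return 0;
}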
...@@ -30,7 +30,7 @@ ...@@ -30,7 +30,7 @@
/* Convert a scrub type code to a DQ flag, or return 0 if error. */ /* Convert a scrub type code to a DQ flag, or return 0 if error. */
static inline uint static inline uint
xfs_scrub_quota_to_dqtype( xchk_quota_to_dqtype(
struct xfs_scrub_context *sc) struct xfs_scrub_context *sc)
{ {
switch (sc->sm->sm_type) { switch (sc->sm->sm_type) {
...@@ -47,7 +47,7 @@ xfs_scrub_quota_to_dqtype( ...@@ -47,7 +47,7 @@ xfs_scrub_quota_to_dqtype(
/* Set us up to scrub a quota. */ /* Set us up to scrub a quota. */
int int
xfs_scrub_setup_quota( xchk_setup_quota(
struct xfs_scrub_context *sc, struct xfs_scrub_context *sc,
struct xfs_inode *ip) struct xfs_inode *ip)
{ {
...@@ -57,14 +57,14 @@ xfs_scrub_setup_quota( ...@@ -57,14 +57,14 @@ xfs_scrub_setup_quota(
if (!XFS_IS_QUOTA_RUNNING(sc->mp) || !XFS_IS_QUOTA_ON(sc->mp)) if (!XFS_IS_QUOTA_RUNNING(sc->mp) || !XFS_IS_QUOTA_ON(sc->mp))
return -ENOENT; return -ENOENT;
dqtype = xfs_scrub_quota_to_dqtype(sc); dqtype = xchk_quota_to_dqtype(sc);
if (dqtype == 0) if (dqtype == 0)
return -EINVAL; return -EINVAL;
sc->has_quotaofflock = true; sc->has_quotaofflock = true;
mutex_lock(&sc->mp->m_quotainfo->qi_quotaofflock); mutex_lock(&sc->mp->m_quotainfo->qi_quotaofflock);
if (!xfs_this_quota_on(sc->mp, dqtype)) if (!xfs_this_quota_on(sc->mp, dqtype))
return -ENOENT; return -ENOENT;
error = xfs_scrub_setup_fs(sc, ip); error = xchk_setup_fs(sc, ip);
if (error) if (error)
return error; return error;
sc->ip = xfs_quota_inode(sc->mp, dqtype); sc->ip = xfs_quota_inode(sc->mp, dqtype);
...@@ -75,19 +75,19 @@ xfs_scrub_setup_quota( ...@@ -75,19 +75,19 @@ xfs_scrub_setup_quota(
/* Quotas. */ /* Quotas. */
struct xfs_scrub_quota_info { struct xchk_quota_info {
struct xfs_scrub_context *sc; struct xfs_scrub_context *sc;
xfs_dqid_t last_id; xfs_dqid_t last_id;
}; };
/* Scrub the fields in an individual quota item. */ /* Scrub the fields in an individual quota item. */
STATIC int STATIC int
xfs_scrub_quota_item( xchk_quota_item(
struct xfs_dquot *dq, struct xfs_dquot *dq,
uint dqtype, uint dqtype,
void *priv) void *priv)
{ {
struct xfs_scrub_quota_info *sqi = priv; struct xchk_quota_info *sqi = priv;
struct xfs_scrub_context *sc = sqi->sc; struct xfs_scrub_context *sc = sqi->sc;
struct xfs_mount *mp = sc->mp; struct xfs_mount *mp = sc->mp;
struct xfs_disk_dquot *d = &dq->q_core; struct xfs_disk_dquot *d = &dq->q_core;
...@@ -111,16 +111,16 @@ xfs_scrub_quota_item( ...@@ -111,16 +111,16 @@ xfs_scrub_quota_item(
*/ */
offset = id / qi->qi_dqperchunk; offset = id / qi->qi_dqperchunk;
if (id && id <= sqi->last_id) if (id && id <= sqi->last_id)
xfs_scrub_fblock_set_corrupt(sc, XFS_DATA_FORK, offset); xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, offset);
sqi->last_id = id; sqi->last_id = id;
/* Did we get the dquot type we wanted? */ /* Did we get the dquot type we wanted? */
if (dqtype != (d->d_flags & XFS_DQ_ALLTYPES)) if (dqtype != (d->d_flags & XFS_DQ_ALLTYPES))
xfs_scrub_fblock_set_corrupt(sc, XFS_DATA_FORK, offset); xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, offset);
if (d->d_pad0 != cpu_to_be32(0) || d->d_pad != cpu_to_be16(0)) if (d->d_pad0 != cpu_to_be32(0) || d->d_pad != cpu_to_be16(0))
xfs_scrub_fblock_set_corrupt(sc, XFS_DATA_FORK, offset); xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, offset);
/* Check the limits. */ /* Check the limits. */
bhard = be64_to_cpu(d->d_blk_hardlimit); bhard = be64_to_cpu(d->d_blk_hardlimit);
...@@ -140,19 +140,19 @@ xfs_scrub_quota_item( ...@@ -140,19 +140,19 @@ xfs_scrub_quota_item(
* the hard limit. * the hard limit.
*/ */
if (bhard > mp->m_sb.sb_dblocks) if (bhard > mp->m_sb.sb_dblocks)
xfs_scrub_fblock_set_warning(sc, XFS_DATA_FORK, offset); xchk_fblock_set_warning(sc, XFS_DATA_FORK, offset);
if (bsoft > bhard) if (bsoft > bhard)
xfs_scrub_fblock_set_corrupt(sc, XFS_DATA_FORK, offset); xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, offset);
if (ihard > mp->m_maxicount) if (ihard > mp->m_maxicount)
xfs_scrub_fblock_set_warning(sc, XFS_DATA_FORK, offset); xchk_fblock_set_warning(sc, XFS_DATA_FORK, offset);
if (isoft > ihard) if (isoft > ihard)
xfs_scrub_fblock_set_corrupt(sc, XFS_DATA_FORK, offset); xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, offset);
if (rhard > mp->m_sb.sb_rblocks) if (rhard > mp->m_sb.sb_rblocks)
xfs_scrub_fblock_set_warning(sc, XFS_DATA_FORK, offset); xchk_fblock_set_warning(sc, XFS_DATA_FORK, offset);
if (rsoft > rhard) if (rsoft > rhard)
xfs_scrub_fblock_set_corrupt(sc, XFS_DATA_FORK, offset); xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, offset);
/* Check the resource counts. */ /* Check the resource counts. */
bcount = be64_to_cpu(d->d_bcount); bcount = be64_to_cpu(d->d_bcount);
...@@ -167,15 +167,15 @@ xfs_scrub_quota_item( ...@@ -167,15 +167,15 @@ xfs_scrub_quota_item(
*/ */
if (xfs_sb_version_hasreflink(&mp->m_sb)) { if (xfs_sb_version_hasreflink(&mp->m_sb)) {
if (mp->m_sb.sb_dblocks < bcount) if (mp->m_sb.sb_dblocks < bcount)
xfs_scrub_fblock_set_warning(sc, XFS_DATA_FORK, xchk_fblock_set_warning(sc, XFS_DATA_FORK,
offset); offset);
} else { } else {
if (mp->m_sb.sb_dblocks < bcount) if (mp->m_sb.sb_dblocks < bcount)
xfs_scrub_fblock_set_corrupt(sc, XFS_DATA_FORK, xchk_fblock_set_corrupt(sc, XFS_DATA_FORK,
offset); offset);
} }
if (icount > fs_icount || rcount > mp->m_sb.sb_rblocks) if (icount > fs_icount || rcount > mp->m_sb.sb_rblocks)
xfs_scrub_fblock_set_corrupt(sc, XFS_DATA_FORK, offset); xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, offset);
/* /*
* We can violate the hard limits if the admin suddenly sets a * We can violate the hard limits if the admin suddenly sets a
...@@ -183,18 +183,18 @@ xfs_scrub_quota_item( ...@@ -183,18 +183,18 @@ xfs_scrub_quota_item(
* admin review. * admin review.
*/ */
if (id != 0 && bhard != 0 && bcount > bhard) if (id != 0 && bhard != 0 && bcount > bhard)
xfs_scrub_fblock_set_warning(sc, XFS_DATA_FORK, offset); xchk_fblock_set_warning(sc, XFS_DATA_FORK, offset);
if (id != 0 && ihard != 0 && icount > ihard) if (id != 0 && ihard != 0 && icount > ihard)
xfs_scrub_fblock_set_warning(sc, XFS_DATA_FORK, offset); xchk_fblock_set_warning(sc, XFS_DATA_FORK, offset);
if (id != 0 && rhard != 0 && rcount > rhard) if (id != 0 && rhard != 0 && rcount > rhard)
xfs_scrub_fblock_set_warning(sc, XFS_DATA_FORK, offset); xchk_fblock_set_warning(sc, XFS_DATA_FORK, offset);
return 0; return 0;
} }
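The limit checks above apply one rule twice over: a soft limit above its matching hard limit is outright corruption, while a hard limit above what the filesystem can physically hold only earns a warning, since an administrator may set it deliberately. As a standalone predicate (the three-way result models the ok/warning/corrupt split):

#include <stdint.h>

enum demo_verdict { DEMO_OK, DEMO_WARN, DEMO_CORRUPT };

/* soft > hard is corruption; hard beyond fs capacity is only a warning. */
static enum demo_verdict demo_check_limits(uint64_t soft, uint64_t hard,
					   uint64_t fs_max)
{
	if (soft > hard)
		return DEMO_CORRUPT;
	if (hard > fs_max)
		return DEMO_WARN;
	return DEMO_OK;
}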
/* Check the quota's data fork. */ /* Check the quota's data fork. */
STATIC int STATIC int
xfs_scrub_quota_data_fork( xchk_quota_data_fork(
struct xfs_scrub_context *sc) struct xfs_scrub_context *sc)
{ {
struct xfs_bmbt_irec irec = { 0 }; struct xfs_bmbt_irec irec = { 0 };
...@@ -205,7 +205,7 @@ xfs_scrub_quota_data_fork( ...@@ -205,7 +205,7 @@ xfs_scrub_quota_data_fork(
int error = 0; int error = 0;
/* Invoke the fork scrubber. */ /* Invoke the fork scrubber. */
error = xfs_scrub_metadata_inode_forks(sc); error = xchk_metadata_inode_forks(sc);
if (error || (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)) if (error || (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT))
return error; return error;
...@@ -213,7 +213,7 @@ xfs_scrub_quota_data_fork( ...@@ -213,7 +213,7 @@ xfs_scrub_quota_data_fork(
max_dqid_off = ((xfs_dqid_t)-1) / qi->qi_dqperchunk; max_dqid_off = ((xfs_dqid_t)-1) / qi->qi_dqperchunk;
ifp = XFS_IFORK_PTR(sc->ip, XFS_DATA_FORK); ifp = XFS_IFORK_PTR(sc->ip, XFS_DATA_FORK);
for_each_xfs_iext(ifp, &icur, &irec) { for_each_xfs_iext(ifp, &icur, &irec) {
if (xfs_scrub_should_terminate(sc, &error)) if (xchk_should_terminate(sc, &error))
break; break;
/* /*
* delalloc extents or blocks mapped above the highest * delalloc extents or blocks mapped above the highest
...@@ -222,7 +222,7 @@ xfs_scrub_quota_data_fork( ...@@ -222,7 +222,7 @@ xfs_scrub_quota_data_fork(
if (isnullstartblock(irec.br_startblock) || if (isnullstartblock(irec.br_startblock) ||
irec.br_startoff > max_dqid_off || irec.br_startoff > max_dqid_off ||
irec.br_startoff + irec.br_blockcount - 1 > max_dqid_off) { irec.br_startoff + irec.br_blockcount - 1 > max_dqid_off) {
xfs_scrub_fblock_set_corrupt(sc, XFS_DATA_FORK, xchk_fblock_set_corrupt(sc, XFS_DATA_FORK,
irec.br_startoff); irec.br_startoff);
break; break;
} }
...@@ -233,19 +233,19 @@ xfs_scrub_quota_data_fork( ...@@ -233,19 +233,19 @@ xfs_scrub_quota_data_fork(
/* Scrub all of a quota type's items. */ /* Scrub all of a quota type's items. */
int int
xfs_scrub_quota( xchk_quota(
struct xfs_scrub_context *sc) struct xfs_scrub_context *sc)
{ {
struct xfs_scrub_quota_info sqi; struct xchk_quota_info sqi;
struct xfs_mount *mp = sc->mp; struct xfs_mount *mp = sc->mp;
struct xfs_quotainfo *qi = mp->m_quotainfo; struct xfs_quotainfo *qi = mp->m_quotainfo;
uint dqtype; uint dqtype;
int error = 0; int error = 0;
dqtype = xfs_scrub_quota_to_dqtype(sc); dqtype = xchk_quota_to_dqtype(sc);
/* Look for problem extents. */ /* Look for problem extents. */
error = xfs_scrub_quota_data_fork(sc); error = xchk_quota_data_fork(sc);
if (error) if (error)
goto out; goto out;
if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT) if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
...@@ -260,10 +260,10 @@ xfs_scrub_quota( ...@@ -260,10 +260,10 @@ xfs_scrub_quota(
sc->ilock_flags = 0; sc->ilock_flags = 0;
sqi.sc = sc; sqi.sc = sc;
sqi.last_id = 0; sqi.last_id = 0;
error = xfs_qm_dqiterate(mp, dqtype, xfs_scrub_quota_item, &sqi); error = xfs_qm_dqiterate(mp, dqtype, xchk_quota_item, &sqi);
sc->ilock_flags = XFS_ILOCK_EXCL; sc->ilock_flags = XFS_ILOCK_EXCL;
xfs_ilock(sc->ip, sc->ilock_flags); xfs_ilock(sc->ip, sc->ilock_flags);
if (!xfs_scrub_fblock_process_error(sc, XFS_DATA_FORK, if (!xchk_fblock_process_error(sc, XFS_DATA_FORK,
sqi.last_id * qi->qi_dqperchunk, &error)) sqi.last_id * qi->qi_dqperchunk, &error))
goto out; goto out;
......
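The data-fork walk above leans on a fixed mapping from dquot ID to file offset, offset = id / dqperchunk, so the largest representable 32-bit ID bounds the highest block that may legally be mapped; anything beyond max_dqid_off, like any delalloc extent, is corruption. The offset arithmetic in isolation (types simplified; dqperchunk is how many dquots fit in one filesystem block):

#include <stdbool.h>
#include <stdint.h>

/* Highest file offset (in blocks) any dquot ID can map to. */
static uint64_t demo_max_dqid_off(uint32_t dqperchunk)
{
	return (uint64_t)UINT32_MAX / dqperchunk;
}

/* An extent [startoff, startoff + blockcount) must stay in range. */
static bool demo_quota_extent_ok(uint64_t startoff, uint64_t blockcount,
				 uint32_t dqperchunk)
{
	uint64_t max_off = demo_max_dqid_off(dqperchunk);

	return blockcount != 0 && startoff <= max_off &&
	       startoff + blockcount - 1 <= max_off;
}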
This diff is collapsed.
...@@ -50,7 +50,7 @@ xfs_repair_attempt( ...@@ -50,7 +50,7 @@ xfs_repair_attempt(
trace_xfs_repair_attempt(ip, sc->sm, error); trace_xfs_repair_attempt(ip, sc->sm, error);
xfs_scrub_ag_btcur_free(&sc->sa); xchk_ag_btcur_free(&sc->sa);
/* Repair whatever's broken. */ /* Repair whatever's broken. */
ASSERT(sc->ops->repair); ASSERT(sc->ops->repair);
...@@ -110,7 +110,7 @@ xfs_repair_probe( ...@@ -110,7 +110,7 @@ xfs_repair_probe(
{ {
int error = 0; int error = 0;
if (xfs_scrub_should_terminate(sc, &error)) if (xchk_should_terminate(sc, &error))
return error; return error;
return 0; return 0;
......
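xchk_should_terminate (used in xfs_repair_probe above and throughout the scrubbers) is the long-loop courtesy check: stop early when a fatal signal is pending, recording an error code only if none is set yet. A userspace analogue of the pattern (the signal source is reduced to a flag, and the specific errno is an assumption):

#include <errno.h>
#include <signal.h>
#include <stdbool.h>

static volatile sig_atomic_t demo_stop_requested;

/* Preserve any earlier error; report the stop request only once. */
static bool demo_should_terminate(int *error)
{
	if (!demo_stop_requested)
		return false;
	if (*error == 0)
		*error = -EAGAIN;	/* assumed to mirror the kernel's pick */
	return true;
}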
7 more file diffs are collapsed.