Commit 6bdcf26a authored by Christoph Hellwig, committed by Darrick J. Wong

xfs: use a b+tree for the in-core extent list

Replace the current linear list and the indirection array for the in-core
extent list with a b+tree to avoid the need for larger memory allocations
for the indirection array when lots of extents are present.  The current
extent list implementation leads to heavy pressure on the memory
allocator when modifying files with a high extent count, and can lead
to high latencies because of that.

The replacement is a b+tree with a few quirks.  The leaf nodes directly
store the extent record in two u64 values.  The encoding is a little bit
different from the existing in-core extent records so that the start
offset and length, which are required for lookups, can be retrieved with
simple mask operations.  The inner nodes store the 64-bit keys containing
the start offsets in the first half of the node, and the pointers to the
next lower level in the second half.  In either case we walk the node
from the beginning to the end and do a linear search, as that is more
efficient than a binary search for the low number of cache lines touched
during a search (2 for the inner nodes, 4 for the leaf nodes).  Instead
of storing a count we terminate the key / record list with markers (a
zero length for the leaf nodes, an otherwise impossible high bit for the
inner nodes), which lets us use the available cache lines as efficiently
as possible.
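
To make the layout concrete, here is a minimal userspace sketch of such
a node layout and lookup.  The struct, constant and helper names
(iext_rec, iext_leaf, iext_node, RECS_PER_LEAF, KEYS_PER_NODE and the
lookup helpers) and the exact field widths are illustrative assumptions
chosen to match the cache line counts above, not the actual encoding in
xfs_iext_tree.c:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/*
 * Illustrative record encoding: the start offset lives in the low bits
 * of lo and the length in the low bits of hi, so both can be retrieved
 * with a single mask; the remaining bits would hold the start block and
 * the unwritten flag.
 */
struct iext_rec {
        uint64_t lo;    /* bits 0..53: start offset */
        uint64_t hi;    /* bits 0..20: length */
};

#define IEXT_STARTOFF_MASK      ((1ULL << 54) - 1)
#define IEXT_LENGTH_MASK        ((1ULL << 21) - 1)

/* 16 records of 16 bytes: a 256 byte leaf, i.e. 4 cache lines. */
#define RECS_PER_LEAF           16

struct iext_leaf {
        struct iext_rec recs[RECS_PER_LEAF];
};

/* 16 keys (2 cache lines) in the first half, child pointers after. */
#define KEYS_PER_NODE           16
#define IEXT_KEY_INVALID        (1ULL << 63)    /* impossible start offset */

struct iext_node {
        uint64_t keys[KEYS_PER_NODE];
        void *ptrs[KEYS_PER_NODE];      /* node or leaf one level down */
};

static inline uint64_t iext_rec_startoff(const struct iext_rec *rec)
{
        return rec->lo & IEXT_STARTOFF_MASK;
}

static inline uint64_t iext_rec_length(const struct iext_rec *rec)
{
        return rec->hi & IEXT_LENGTH_MASK;
}

/* A zero length terminates the record list, so no count is stored. */
static inline bool iext_rec_is_empty(const struct iext_rec *rec)
{
        return iext_rec_length(rec) == 0;
}

/* Pick the child covering @offset with a linear scan over the keys. */
static void *iext_node_lookup(const struct iext_node *node, uint64_t offset)
{
        int i;

        for (i = 1; i < KEYS_PER_NODE; i++)
                if ((node->keys[i] & IEXT_KEY_INVALID) || node->keys[i] > offset)
                        break;
        return node->ptrs[i - 1];
}

/* Find the record containing @offset, again with a short linear scan. */
static struct iext_rec *iext_leaf_lookup(struct iext_leaf *leaf, uint64_t offset)
{
        for (int i = 0; i < RECS_PER_LEAF; i++) {
                struct iext_rec *rec = &leaf->recs[i];

                if (iext_rec_is_empty(rec))
                        break;
                if (offset >= iext_rec_startoff(rec) &&
                    offset < iext_rec_startoff(rec) + iext_rec_length(rec))
                        return rec;
        }
        return NULL;
}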

One quirk of the algorithm is that while we normally split a node in half
like usual btree implementations, an entry added at the very end of the
list simply spills over into a new node of its own.  This means we get a
100% fill grade for the common cases of bulk insertion when reading an
inode into memory, and when only sequentially appending to a file.  The
downside is a slightly higher chance of splits on the first random
insertions.
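
A hypothetical sketch of that split policy, reusing the illustrative
structures from the sketch above (parent linkage, allocation failure
handling and the actual record insertion are omitted):

#include <stdlib.h>
#include <string.h>

/*
 * Split a full leaf before inserting at *pos.  Returns the leaf that
 * should receive the new record and stores the freshly allocated right
 * sibling in *newp; the caller still links the sibling into the parent.
 */
static struct iext_leaf *
iext_split_leaf(struct iext_leaf *leaf, int *pos, struct iext_leaf **newp)
{
        struct iext_leaf *new = calloc(1, sizeof(*new));
        int nr_keep;

        if (*pos == RECS_PER_LEAF) {
                /* Appending at the very end: the old leaf stays 100% full. */
                nr_keep = RECS_PER_LEAF;
        } else {
                /* Insert in the middle: conventional half and half split. */
                nr_keep = RECS_PER_LEAF / 2;
                memcpy(new->recs, &leaf->recs[nr_keep],
                       (RECS_PER_LEAF - nr_keep) * sizeof(struct iext_rec));
                memset(&leaf->recs[nr_keep], 0,
                       (RECS_PER_LEAF - nr_keep) * sizeof(struct iext_rec));
        }

        *newp = new;
        if (*pos < nr_keep)
                return leaf;
        *pos -= nr_keep;
        return new;
}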

Both insertion and removal manually recurse into the lower levels, but
the bulk deletion of the whole tree is still implemented as a recursive
function call, although one limited by the overall depth and with very
little stack usage in each iteration.
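
A minimal sketch of that teardown, again building on the illustrative
types and includes above; the name and the level convention (the level
gives the height of the subtree, so level 1 means ptr points to a leaf)
are assumptions:

/*
 * Free the whole tree.  The recursion depth equals the tree height and
 * each frame only holds a pointer and a loop counter, so stack usage
 * stays small even for very large trees.
 */
static void iext_destroy_node(void *ptr, int level)
{
        if (level > 1) {
                struct iext_node *node = ptr;

                for (int i = 0; i < KEYS_PER_NODE && node->ptrs[i]; i++)
                        iext_destroy_node(node->ptrs[i], level - 1);
        }
        free(ptr);      /* inner nodes and leaves are both heap allocations */
}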

For the first few extents we dynamically grow the list from a single
extent through the next powers of two until we have a full first leaf
block, and only then build the actual tree.
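
With the 16-record leaves assumed above, that growth sequence is
1 -> 2 -> 4 -> 8 -> 16 record slots.  A hypothetical sizing helper (not
the kernel function) that captures the policy:

/*
 * Number of record slots to allocate so that nr_recs + 1 records fit:
 * grow in powers of two until a full leaf is reached, after which the
 * flat array is turned into an actual tree.
 */
static int iext_alloc_size(int nr_recs)
{
        int size = 1;

        while (size < nr_recs + 1 && size < RECS_PER_LEAF)
                size *= 2;
        return size;
}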

The code started out based on the generic lib/btree.c code from Joern
Engel, which in turn was based on earlier work from Peter Zijlstra, but
has since been rewritten beyond recognition.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Darrick J. Wong <darrick.wong@oracle.com>
Signed-off-by: Darrick J. Wong <darrick.wong@oracle.com>
Parent 135dcc10
......@@ -49,6 +49,7 @@ xfs-y += $(addprefix libxfs/, \
xfs_dquot_buf.o \
xfs_ialloc.o \
xfs_ialloc_btree.o \
xfs_iext_tree.o \
xfs_inode_fork.o \
xfs_inode_buf.o \
xfs_log_rlimit.o \
......
......@@ -806,6 +806,8 @@ xfs_bmap_local_to_extents_empty(
xfs_bmap_forkoff_reset(ip, whichfork);
ifp->if_flags &= ~XFS_IFINLINE;
ifp->if_flags |= XFS_IFEXTENTS;
ifp->if_u1.if_root = NULL;
ifp->if_height = 0;
XFS_IFORK_FMT_SET(ip, whichfork, XFS_DINODE_FMT_EXTENTS);
}
......@@ -847,8 +849,7 @@ xfs_bmap_local_to_extents(
flags = 0;
error = 0;
ASSERT((ifp->if_flags & (XFS_IFINLINE|XFS_IFEXTENTS|XFS_IFEXTIREC)) ==
XFS_IFINLINE);
ASSERT((ifp->if_flags & (XFS_IFINLINE|XFS_IFEXTENTS)) == XFS_IFINLINE);
memset(&args, 0, sizeof(args));
args.tp = tp;
args.mp = ip->i_mount;
......@@ -892,6 +893,9 @@ xfs_bmap_local_to_extents(
xfs_bmap_local_to_extents_empty(ip, whichfork);
flags |= XFS_ILOG_CORE;
ifp->if_u1.if_root = NULL;
ifp->if_height = 0;
rec.br_startoff = 0;
rec.br_startblock = args.fsbno;
rec.br_blockcount = 1;
......@@ -1178,6 +1182,7 @@ xfs_iread_extents(
xfs_extnum_t nextents = XFS_IFORK_NEXTENTS(ip, whichfork);
struct xfs_btree_block *block = ifp->if_broot;
struct xfs_iext_cursor icur;
struct xfs_bmbt_irec new;
xfs_fsblock_t bno;
struct xfs_buf *bp;
xfs_extnum_t i, j;
......@@ -1192,10 +1197,6 @@ xfs_iread_extents(
return -EFSCORRUPTED;
}
ifp->if_bytes = 0;
ifp->if_real_bytes = 0;
xfs_iext_add(ifp, 0, nextents);
/*
* Root level must use BMAP_BROOT_PTR_ADDR macro to get ptr out.
*/
......@@ -1259,16 +1260,15 @@ xfs_iread_extents(
* Copy records into the extent records.
*/
frp = XFS_BMBT_REC_ADDR(mp, block, 1);
for (j = 0; j < num_recs; j++, i++, frp++) {
xfs_bmbt_rec_host_t *trp = xfs_iext_get_ext(ifp, i);
for (j = 0; j < num_recs; j++, frp++, i++) {
if (!xfs_bmbt_validate_extent(mp, whichfork, frp)) {
XFS_ERROR_REPORT("xfs_bmap_read_extents(2)",
XFS_ERRLEVEL_LOW, mp);
error = -EFSCORRUPTED;
goto out_brelse;
}
trp->l0 = be64_to_cpu(frp->l0);
trp->l1 = be64_to_cpu(frp->l1);
xfs_bmbt_disk_get_all(frp, &new);
xfs_iext_insert(ip, &icur, 1, &new, state);
trace_xfs_read_extent(ip, &icur, state, _THIS_IP_);
xfs_iext_next(ifp, &icur);
}
......
......@@ -71,73 +71,21 @@ xfs_bmdr_to_bmbt(
memcpy(tpp, fpp, sizeof(*fpp) * dmxr);
}
/*
* Convert a compressed bmap extent record to an uncompressed form.
* This code must be in sync with the routines xfs_bmbt_get_startoff,
* xfs_bmbt_get_startblock and xfs_bmbt_get_blockcount.
*/
STATIC void
__xfs_bmbt_get_all(
uint64_t l0,
uint64_t l1,
xfs_bmbt_irec_t *s)
{
int ext_flag;
xfs_exntst_t st;
ext_flag = (int)(l0 >> (64 - BMBT_EXNTFLAG_BITLEN));
s->br_startoff = ((xfs_fileoff_t)l0 &
xfs_mask64lo(64 - BMBT_EXNTFLAG_BITLEN)) >> 9;
s->br_startblock = (((xfs_fsblock_t)l0 & xfs_mask64lo(9)) << 43) |
(((xfs_fsblock_t)l1) >> 21);
s->br_blockcount = (xfs_filblks_t)(l1 & xfs_mask64lo(21));
/* This is xfs_extent_state() in-line */
if (ext_flag) {
ASSERT(s->br_blockcount != 0); /* saved for DMIG */
st = XFS_EXT_UNWRITTEN;
} else
st = XFS_EXT_NORM;
s->br_state = st;
}
void
xfs_bmbt_get_all(
xfs_bmbt_rec_host_t *r,
xfs_bmbt_irec_t *s)
{
__xfs_bmbt_get_all(r->l0, r->l1, s);
}
/*
* Extract the blockcount field from an in memory bmap extent record.
*/
xfs_filblks_t
xfs_bmbt_get_blockcount(
xfs_bmbt_rec_host_t *r)
{
return (xfs_filblks_t)(r->l1 & xfs_mask64lo(21));
}
/*
* Extract the startblock field from an in memory bmap extent record.
*/
xfs_fsblock_t
xfs_bmbt_get_startblock(
xfs_bmbt_rec_host_t *r)
{
return (((xfs_fsblock_t)r->l0 & xfs_mask64lo(9)) << 43) |
(((xfs_fsblock_t)r->l1) >> 21);
}
/*
* Extract the startoff field from an in memory bmap extent record.
*/
xfs_fileoff_t
xfs_bmbt_get_startoff(
xfs_bmbt_rec_host_t *r)
{
return ((xfs_fileoff_t)r->l0 &
xfs_mask64lo(64 - BMBT_EXNTFLAG_BITLEN)) >> 9;
xfs_bmbt_disk_get_all(
struct xfs_bmbt_rec *rec,
struct xfs_bmbt_irec *irec)
{
uint64_t l0 = get_unaligned_be64(&rec->l0);
uint64_t l1 = get_unaligned_be64(&rec->l1);
irec->br_startoff = (l0 & xfs_mask64lo(64 - BMBT_EXNTFLAG_BITLEN)) >> 9;
irec->br_startblock = ((l0 & xfs_mask64lo(9)) << 43) | (l1 >> 21);
irec->br_blockcount = l1 & xfs_mask64lo(21);
if (l0 >> (64 - BMBT_EXNTFLAG_BITLEN))
irec->br_state = XFS_EXT_UNWRITTEN;
else
irec->br_state = XFS_EXT_NORM;
}
/*
......@@ -161,29 +109,6 @@ xfs_bmbt_disk_get_startoff(
xfs_mask64lo(64 - BMBT_EXNTFLAG_BITLEN)) >> 9;
}
/*
* Set all the fields in a bmap extent record from the uncompressed form.
*/
void
xfs_bmbt_set_all(
struct xfs_bmbt_rec_host *r,
struct xfs_bmbt_irec *s)
{
int extent_flag = (s->br_state != XFS_EXT_NORM);
ASSERT(s->br_state == XFS_EXT_NORM || s->br_state == XFS_EXT_UNWRITTEN);
ASSERT(!(s->br_startoff & xfs_mask64hi(64-BMBT_STARTOFF_BITLEN)));
ASSERT(!(s->br_blockcount & xfs_mask64hi(64-BMBT_BLOCKCOUNT_BITLEN)));
ASSERT(!(s->br_startblock & xfs_mask64hi(64-BMBT_STARTBLOCK_BITLEN)));
r->l0 = ((xfs_bmbt_rec_base_t)extent_flag << 63) |
((xfs_bmbt_rec_base_t)s->br_startoff << 9) |
((xfs_bmbt_rec_base_t)s->br_startblock >> 43);
r->l1 = ((xfs_bmbt_rec_base_t)s->br_startblock << 21) |
((xfs_bmbt_rec_base_t)s->br_blockcount &
(xfs_bmbt_rec_base_t)xfs_mask64lo(21));
}
/*
* Set all the fields in a bmap extent record from the uncompressed form.
*/
......
......@@ -98,16 +98,11 @@ struct xfs_trans;
*/
extern void xfs_bmdr_to_bmbt(struct xfs_inode *, xfs_bmdr_block_t *, int,
struct xfs_btree_block *, int);
extern void xfs_bmbt_get_all(xfs_bmbt_rec_host_t *r, xfs_bmbt_irec_t *s);
extern xfs_filblks_t xfs_bmbt_get_blockcount(xfs_bmbt_rec_host_t *r);
extern xfs_fsblock_t xfs_bmbt_get_startblock(xfs_bmbt_rec_host_t *r);
extern xfs_fileoff_t xfs_bmbt_get_startoff(xfs_bmbt_rec_host_t *r);
void xfs_bmbt_disk_set_all(struct xfs_bmbt_rec *r, struct xfs_bmbt_irec *s);
extern xfs_filblks_t xfs_bmbt_disk_get_blockcount(xfs_bmbt_rec_t *r);
extern xfs_fileoff_t xfs_bmbt_disk_get_startoff(xfs_bmbt_rec_t *r);
extern void xfs_bmbt_set_all(xfs_bmbt_rec_host_t *r, xfs_bmbt_irec_t *s);
extern void xfs_bmbt_disk_get_all(xfs_bmbt_rec_t *r, xfs_bmbt_irec_t *s);
extern void xfs_bmbt_to_bmdr(struct xfs_mount *, struct xfs_btree_block *, int,
xfs_bmdr_block_t *, int);
......
......@@ -1553,10 +1553,6 @@ typedef struct xfs_bmbt_rec {
typedef uint64_t xfs_bmbt_rec_base_t; /* use this for casts */
typedef xfs_bmbt_rec_t xfs_bmdr_rec_t;
typedef struct xfs_bmbt_rec_host {
uint64_t l0, l1;
} xfs_bmbt_rec_host_t;
/*
* Values and macros for delayed-allocation startblock fields.
*/
......
This diff is collapsed.
This diff is collapsed.
......@@ -21,45 +21,18 @@
struct xfs_inode_log_item;
struct xfs_dinode;
/*
* The following xfs_ext_irec_t struct introduces a second (top) level
* to the in-core extent allocation scheme. These structs are allocated
* in a contiguous block, creating an indirection array where each entry
* (irec) contains a pointer to a buffer of in-core extent records which
* it manages. Each extent buffer is 4k in size, since 4k is the system
* page size on Linux i386 and systems with larger page sizes don't seem
* to gain much, if anything, by using their native page size as the
* extent buffer size. Also, using 4k extent buffers everywhere provides
* a consistent interface for CXFS across different platforms.
*
* There is currently no limit on the number of irec's (extent lists)
* allowed, so heavily fragmented files may require an indirection array
* which spans multiple system pages of memory. The number of extents
* which would require this amount of contiguous memory is very large
* and should not cause problems in the foreseeable future. However,
* if the memory needed for the contiguous array ever becomes a problem,
* it is possible that a third level of indirection may be required.
*/
typedef struct xfs_ext_irec {
xfs_bmbt_rec_host_t *er_extbuf; /* block of extent records */
xfs_extnum_t er_extoff; /* extent offset in file */
xfs_extnum_t er_extcount; /* number of extents in page/block */
} xfs_ext_irec_t;
/*
* File incore extent information, present for each of data & attr forks.
*/
#define XFS_IEXT_BUFSZ 4096
#define XFS_LINEAR_EXTS (XFS_IEXT_BUFSZ / (uint)sizeof(xfs_bmbt_rec_t))
typedef struct xfs_ifork {
int if_bytes; /* bytes in if_u1 */
int if_real_bytes; /* bytes allocated in if_u1 */
struct xfs_btree_block *if_broot; /* file's incore btree root */
short if_broot_bytes; /* bytes allocated for root */
unsigned char if_flags; /* per-fork flags */
int if_height; /* height of the extent tree */
union {
xfs_bmbt_rec_host_t *if_extents;/* linear map file exts */
xfs_ext_irec_t *if_ext_irec; /* irec map file exts */
void *if_root; /* extent tree root */
char *if_data; /* inline file data */
} if_u1;
} xfs_ifork_t;
......@@ -70,7 +43,6 @@ typedef struct xfs_ifork {
#define XFS_IFINLINE 0x01 /* Inline data is read in */
#define XFS_IFEXTENTS 0x02 /* All extent pointers are read in */
#define XFS_IFBROOT 0x04 /* i_broot points to the bmap b-tree root */
#define XFS_IFEXTIREC 0x08 /* Indirection array of extent blocks */
/*
* Fork handling.
......@@ -140,35 +112,12 @@ int xfs_iextents_copy(struct xfs_inode *, struct xfs_bmbt_rec *,
int);
void xfs_init_local_fork(struct xfs_inode *, int, const void *, int);
struct xfs_bmbt_rec_host *
xfs_iext_get_ext(struct xfs_ifork *, xfs_extnum_t);
xfs_extnum_t xfs_iext_count(struct xfs_ifork *);
xfs_extnum_t xfs_iext_count(struct xfs_ifork *ifp);
void xfs_iext_insert(struct xfs_inode *, struct xfs_iext_cursor *cur,
xfs_extnum_t, struct xfs_bmbt_irec *, int);
void xfs_iext_add(struct xfs_ifork *, xfs_extnum_t, int);
void xfs_iext_add_indirect_multi(struct xfs_ifork *, int,
xfs_extnum_t, int);
void xfs_iext_remove(struct xfs_inode *, struct xfs_iext_cursor *,
int, int);
void xfs_iext_remove_direct(struct xfs_ifork *, xfs_extnum_t, int);
void xfs_iext_remove_indirect(struct xfs_ifork *, xfs_extnum_t, int);
void xfs_iext_realloc_direct(struct xfs_ifork *, int);
void xfs_iext_destroy(struct xfs_ifork *);
struct xfs_bmbt_rec_host *
xfs_iext_bno_to_ext(struct xfs_ifork *, xfs_fileoff_t, int *);
struct xfs_ext_irec *
xfs_iext_bno_to_irec(struct xfs_ifork *, xfs_fileoff_t, int *);
struct xfs_ext_irec *
xfs_iext_idx_to_irec(struct xfs_ifork *, xfs_extnum_t *, int *,
int);
void xfs_iext_irec_init(struct xfs_ifork *);
struct xfs_ext_irec *
xfs_iext_irec_new(struct xfs_ifork *, int);
void xfs_iext_irec_remove(struct xfs_ifork *, int);
void xfs_iext_irec_compact(struct xfs_ifork *);
void xfs_iext_irec_compact_pages(struct xfs_ifork *);
void xfs_iext_irec_compact_full(struct xfs_ifork *);
void xfs_iext_irec_update_extoffs(struct xfs_ifork *, int, int);
bool xfs_iext_lookup_extent(struct xfs_inode *ip,
struct xfs_ifork *ifp, xfs_fileoff_t bno,
......@@ -185,29 +134,10 @@ void xfs_iext_update_extent(struct xfs_inode *ip, int state,
struct xfs_iext_cursor *cur,
struct xfs_bmbt_irec *gotp);
static inline void xfs_iext_first(struct xfs_ifork *ifp,
struct xfs_iext_cursor *cur)
{
cur->idx = 0;
}
static inline void xfs_iext_last(struct xfs_ifork *ifp,
struct xfs_iext_cursor *cur)
{
cur->idx = xfs_iext_count(ifp) - 1;
}
static inline void xfs_iext_next(struct xfs_ifork *ifp,
struct xfs_iext_cursor *cur)
{
cur->idx++;
}
static inline void xfs_iext_prev(struct xfs_ifork *ifp,
struct xfs_iext_cursor *cur)
{
cur->idx--;
}
void xfs_iext_first(struct xfs_ifork *, struct xfs_iext_cursor *);
void xfs_iext_last(struct xfs_ifork *, struct xfs_iext_cursor *);
void xfs_iext_next(struct xfs_ifork *, struct xfs_iext_cursor *);
void xfs_iext_prev(struct xfs_ifork *, struct xfs_iext_cursor *);
static inline bool xfs_iext_next_extent(struct xfs_ifork *ifp,
struct xfs_iext_cursor *cur, struct xfs_bmbt_irec *gotp)
......
......@@ -143,7 +143,8 @@ typedef uint32_t xfs_dqid_t;
#define XFS_WORDMASK ((1 << XFS_WORDLOG) - 1)
struct xfs_iext_cursor {
xfs_extnum_t idx;
struct xfs_iext_leaf *leaf;
int pos;
};
#endif /* __XFS_TYPES_H__ */
......@@ -168,7 +168,6 @@ xfs_scrub_bmapbt_rec(
struct xfs_scrub_btree *bs,
union xfs_btree_rec *rec)
{
struct xfs_bmbt_rec_host ihost;
struct xfs_bmbt_irec irec;
struct xfs_scrub_bmap_info *info = bs->private;
struct xfs_inode *ip = bs->cur->bc_private.b.ip;
......@@ -193,9 +192,7 @@ xfs_scrub_bmapbt_rec(
}
/* Set up the in-core record and scrub it. */
ihost.l0 = be64_to_cpu(rec->bmbt.l0);
ihost.l1 = be64_to_cpu(rec->bmbt.l1);
xfs_bmbt_get_all(&ihost, &irec);
xfs_bmbt_disk_get_all(&rec->bmbt, &irec);
return xfs_scrub_bmap_extent(ip, bs->cur, info, &irec);
}
......
......@@ -934,7 +934,7 @@ xfs_ialloc(
ip->i_d.di_format = XFS_DINODE_FMT_EXTENTS;
ip->i_df.if_flags = XFS_IFEXTENTS;
ip->i_df.if_bytes = ip->i_df.if_real_bytes = 0;
ip->i_df.if_u1.if_extents = NULL;
ip->i_df.if_u1.if_root = NULL;
break;
default:
ASSERT(0);
......
......@@ -162,7 +162,6 @@ xfs_inode_item_format_data_fork(
ip->i_df.if_bytes > 0) {
struct xfs_bmbt_rec *p;
ASSERT(ip->i_df.if_u1.if_extents != NULL);
ASSERT(xfs_iext_count(&ip->i_df) > 0);
p = xlog_prepare_iovec(lv, vecp, XLOG_REG_TYPE_IEXT);
......@@ -252,7 +251,6 @@ xfs_inode_item_format_attr_fork(
ASSERT(xfs_iext_count(ip->i_afp) ==
ip->i_d.di_anextents);
ASSERT(ip->i_afp->if_u1.if_extents != NULL);
p = xlog_prepare_iovec(lv, vecp, XLOG_REG_TYPE_IATTR_EXT);
data_bytes = xfs_iextents_copy(ip, p, XFS_ATTR_FORK);
......
......@@ -218,45 +218,6 @@ TRACE_EVENT(xfs_attr_list_node_descend,
__entry->bt_before)
);
TRACE_EVENT(xfs_iext_insert,
TP_PROTO(struct xfs_inode *ip, xfs_extnum_t idx,
struct xfs_bmbt_irec *r, int state, unsigned long caller_ip),
TP_ARGS(ip, idx, r, state, caller_ip),
TP_STRUCT__entry(
__field(dev_t, dev)
__field(xfs_ino_t, ino)
__field(xfs_extnum_t, idx)
__field(xfs_fileoff_t, startoff)
__field(xfs_fsblock_t, startblock)
__field(xfs_filblks_t, blockcount)
__field(xfs_exntst_t, state)
__field(int, bmap_state)
__field(unsigned long, caller_ip)
),
TP_fast_assign(
__entry->dev = VFS_I(ip)->i_sb->s_dev;
__entry->ino = ip->i_ino;
__entry->idx = idx;
__entry->startoff = r->br_startoff;
__entry->startblock = r->br_startblock;
__entry->blockcount = r->br_blockcount;
__entry->state = r->br_state;
__entry->bmap_state = state;
__entry->caller_ip = caller_ip;
),
TP_printk("dev %d:%d ino 0x%llx state %s idx %ld "
"offset %lld block %lld count %lld flag %d caller %ps",
MAJOR(__entry->dev), MINOR(__entry->dev),
__entry->ino,
__print_flags(__entry->bmap_state, "|", XFS_BMAP_EXT_FLAGS),
(long)__entry->idx,
__entry->startoff,
(int64_t)__entry->startblock,
__entry->blockcount,
__entry->state,
(char *)__entry->caller_ip)
);
DECLARE_EVENT_CLASS(xfs_bmap_class,
TP_PROTO(struct xfs_inode *ip, struct xfs_iext_cursor *cur, int state,
unsigned long caller_ip),
......@@ -264,7 +225,8 @@ DECLARE_EVENT_CLASS(xfs_bmap_class,
TP_STRUCT__entry(
__field(dev_t, dev)
__field(xfs_ino_t, ino)
__field(xfs_extnum_t, idx)
__field(void *, leaf);
__field(int, pos);
__field(xfs_fileoff_t, startoff)
__field(xfs_fsblock_t, startblock)
__field(xfs_filblks_t, blockcount)
......@@ -280,7 +242,8 @@ DECLARE_EVENT_CLASS(xfs_bmap_class,
xfs_iext_get_extent(ifp, cur, &r);
__entry->dev = VFS_I(ip)->i_sb->s_dev;
__entry->ino = ip->i_ino;
__entry->idx = cur->idx;
__entry->leaf = cur->leaf;
__entry->pos = cur->pos;
__entry->startoff = r.br_startoff;
__entry->startblock = r.br_startblock;
__entry->blockcount = r.br_blockcount;
......@@ -288,12 +251,13 @@ DECLARE_EVENT_CLASS(xfs_bmap_class,
__entry->bmap_state = state;
__entry->caller_ip = caller_ip;
),
TP_printk("dev %d:%d ino 0x%llx state %s idx %ld "
TP_printk("dev %d:%d ino 0x%llx state %s cur 0x%p/%d "
"offset %lld block %lld count %lld flag %d caller %ps",
MAJOR(__entry->dev), MINOR(__entry->dev),
__entry->ino,
__print_flags(__entry->bmap_state, "|", XFS_BMAP_EXT_FLAGS),
(long)__entry->idx,
__entry->leaf,
__entry->pos,
__entry->startoff,
(int64_t)__entry->startblock,
__entry->blockcount,
......@@ -306,6 +270,7 @@ DEFINE_EVENT(xfs_bmap_class, name, \
TP_PROTO(struct xfs_inode *ip, struct xfs_iext_cursor *cur, int state, \
unsigned long caller_ip), \
TP_ARGS(ip, cur, state, caller_ip))
DEFINE_BMAP_EVENT(xfs_iext_insert);
DEFINE_BMAP_EVENT(xfs_iext_remove);
DEFINE_BMAP_EVENT(xfs_bmap_pre_update);
DEFINE_BMAP_EVENT(xfs_bmap_post_update);
......