Commit 5dc7c10b authored by Dennis Zhou, committed by David Sterba

btrfs: keep track of discardable_bytes for async discard

Keep track of this metric so that we can understand how far ahead of or
behind the discard rate we are. This uses the same accounting method as
discardable_extents: deltas between the previous and current values are
computed and propagated up to the fs_info-level counter (illustrated by
the standalone sketch below the commit metadata).
Signed-off-by: Dennis Zhou <dennis@kernel.org>
Reviewed-by: David Sterba <dsterba@suse.com>
[ update changelog ]
Signed-off-by: David Sterba <dsterba@suse.com>
Parent dfb79ddb
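
To make the accounting described in the commit message concrete, here is a minimal, self-contained sketch of the same pattern: each group keeps a CURR/PREV pair and only the delta is folded into a shared 64-bit counter. All names (group_ctl, global_ctl, update_discardable) are illustrative placeholders, not the kernel's; this is a userspace model of the technique, not the btrfs implementation.

/*
 * Standalone illustration of the CURR/PREV delta accounting used by the
 * patch below.  A group tracks a current value and the value it last
 * published; only the difference is added to the shared atomic counter.
 */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

enum { STAT_CURR, STAT_PREV, STAT_NR_ENTRIES };

struct group_ctl {
	int64_t discardable_bytes[STAT_NR_ENTRIES];
};

struct global_ctl {
	atomic_llong discardable_bytes;
};

/* Publish any change in the group's CURR value to the global counter. */
static void update_discardable(struct global_ctl *global, struct group_ctl *grp)
{
	int64_t delta = grp->discardable_bytes[STAT_CURR] -
			grp->discardable_bytes[STAT_PREV];

	if (delta) {
		atomic_fetch_add(&global->discardable_bytes, delta);
		grp->discardable_bytes[STAT_PREV] =
			grp->discardable_bytes[STAT_CURR];
	}
}

int main(void)
{
	struct global_ctl global = { .discardable_bytes = 0 };
	struct group_ctl grp = { { 0, 0 } };

	grp.discardable_bytes[STAT_CURR] += 4096;	/* untrimmed space added */
	update_discardable(&global, &grp);
	grp.discardable_bytes[STAT_CURR] -= 1024;	/* part of it trimmed */
	update_discardable(&global, &grp);

	/* prints 3072: the net amount still awaiting discard */
	printf("%lld\n", (long long)atomic_load(&global.discardable_bytes));
	return 0;
}
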
@@ -467,6 +467,7 @@ struct btrfs_discard_ctl {
 	struct btrfs_block_group *block_group;
 	struct list_head discard_list[BTRFS_NR_DISCARD_LISTS];
 	atomic_t discardable_extents;
+	atomic64_t discardable_bytes;
 };
 
 /* delayed seq elem */
@@ -356,6 +356,7 @@ void btrfs_discard_update_discardable(struct btrfs_block_group *block_group,
 {
 	struct btrfs_discard_ctl *discard_ctl;
 	s32 extents_delta;
+	s64 bytes_delta;
 
 	if (!block_group || !btrfs_test_opt(block_group->fs_info, DISCARD_ASYNC))
 		return;
@@ -369,6 +370,14 @@ void btrfs_discard_update_discardable(struct btrfs_block_group *block_group,
 		ctl->discardable_extents[BTRFS_STAT_PREV] =
 			ctl->discardable_extents[BTRFS_STAT_CURR];
 	}
+
+	bytes_delta = ctl->discardable_bytes[BTRFS_STAT_CURR] -
+		      ctl->discardable_bytes[BTRFS_STAT_PREV];
+	if (bytes_delta) {
+		atomic64_add(bytes_delta, &discard_ctl->discardable_bytes);
+		ctl->discardable_bytes[BTRFS_STAT_PREV] =
+			ctl->discardable_bytes[BTRFS_STAT_CURR];
+	}
 }
 
 /**
@@ -454,6 +463,7 @@ void btrfs_discard_init(struct btrfs_fs_info *fs_info)
 		INIT_LIST_HEAD(&discard_ctl->discard_list[i]);
 
 	atomic_set(&discard_ctl->discardable_extents, 0);
+	atomic64_set(&discard_ctl->discardable_bytes, 0);
 }
 
 void btrfs_discard_cleanup(struct btrfs_fs_info *fs_info)
@@ -819,9 +819,11 @@ static int __load_free_space_cache(struct btrfs_root *root, struct inode *inode,
 		if (ret)
 			goto free_cache;
 		e->bitmap_extents = count_bitmap_extents(ctl, e);
-		if (!btrfs_free_space_trimmed(e))
+		if (!btrfs_free_space_trimmed(e)) {
 			ctl->discardable_extents[BTRFS_STAT_CURR] +=
 				e->bitmap_extents;
+			ctl->discardable_bytes[BTRFS_STAT_CURR] += e->bytes;
+		}
 	}
 
 	io_ctl_drop_pages(&io_ctl);
@@ -1643,8 +1645,10 @@ __unlink_free_space(struct btrfs_free_space_ctl *ctl,
 	rb_erase(&info->offset_index, &ctl->free_space_offset);
 	ctl->free_extents--;
 
-	if (!info->bitmap && !btrfs_free_space_trimmed(info))
+	if (!info->bitmap && !btrfs_free_space_trimmed(info)) {
 		ctl->discardable_extents[BTRFS_STAT_CURR]--;
+		ctl->discardable_bytes[BTRFS_STAT_CURR] -= info->bytes;
+	}
 }
 
 static void unlink_free_space(struct btrfs_free_space_ctl *ctl,
@@ -1665,8 +1669,10 @@ static int link_free_space(struct btrfs_free_space_ctl *ctl,
 	if (ret)
 		return ret;
 
-	if (!info->bitmap && !btrfs_free_space_trimmed(info))
+	if (!info->bitmap && !btrfs_free_space_trimmed(info)) {
 		ctl->discardable_extents[BTRFS_STAT_CURR]++;
+		ctl->discardable_bytes[BTRFS_STAT_CURR] += info->bytes;
+	}
 
 	ctl->free_space += info->bytes;
 	ctl->free_extents++;
@@ -1745,8 +1751,10 @@ static inline void __bitmap_clear_bits(struct btrfs_free_space_ctl *ctl,
 		extent_delta++;
 
 	info->bitmap_extents += extent_delta;
-	if (!btrfs_free_space_trimmed(info))
+	if (!btrfs_free_space_trimmed(info)) {
 		ctl->discardable_extents[BTRFS_STAT_CURR] += extent_delta;
+		ctl->discardable_bytes[BTRFS_STAT_CURR] -= bytes;
+	}
 }
 
 static void bitmap_clear_bits(struct btrfs_free_space_ctl *ctl,
@@ -1781,8 +1789,10 @@ static void bitmap_set_bits(struct btrfs_free_space_ctl *ctl,
 		extent_delta--;
 
 	info->bitmap_extents += extent_delta;
-	if (!btrfs_free_space_trimmed(info))
+	if (!btrfs_free_space_trimmed(info)) {
 		ctl->discardable_extents[BTRFS_STAT_CURR] += extent_delta;
+		ctl->discardable_bytes[BTRFS_STAT_CURR] += bytes;
+	}
 }
 
 /*
@@ -2053,9 +2063,11 @@ static u64 add_bytes_to_bitmap(struct btrfs_free_space_ctl *ctl,
 	 * whole bitmap untrimmed if at any point we add untrimmed regions.
 	 */
 	if (trim_state == BTRFS_TRIM_STATE_UNTRIMMED) {
-		if (btrfs_free_space_trimmed(info))
+		if (btrfs_free_space_trimmed(info)) {
 			ctl->discardable_extents[BTRFS_STAT_CURR] +=
 				info->bitmap_extents;
+			ctl->discardable_bytes[BTRFS_STAT_CURR] += info->bytes;
+		}
 		info->trim_state = BTRFS_TRIM_STATE_UNTRIMMED;
 	}
 
@@ -2713,15 +2725,21 @@ __btrfs_return_cluster_to_free_space(
 		bitmap = (entry->bitmap != NULL);
 		if (!bitmap) {
 			/* Merging treats extents as if they were new */
-			if (!btrfs_free_space_trimmed(entry))
+			if (!btrfs_free_space_trimmed(entry)) {
 				ctl->discardable_extents[BTRFS_STAT_CURR]--;
+				ctl->discardable_bytes[BTRFS_STAT_CURR] -=
+					entry->bytes;
+			}
 
 			try_merge_free_space(ctl, entry, false);
 			steal_from_bitmap(ctl, entry, false);
 
 			/* As we insert directly, update these statistics */
-			if (!btrfs_free_space_trimmed(entry))
+			if (!btrfs_free_space_trimmed(entry)) {
 				ctl->discardable_extents[BTRFS_STAT_CURR]++;
+				ctl->discardable_bytes[BTRFS_STAT_CURR] +=
+					entry->bytes;
+			}
 		}
 		tree_insert_offset(&ctl->free_space_offset,
 				   entry->offset, &entry->offset_index, bitmap);
@@ -3011,6 +3029,8 @@ u64 btrfs_alloc_from_cluster(struct btrfs_block_group *block_group,
 	spin_lock(&ctl->tree_lock);
 
 	ctl->free_space -= bytes;
+	if (!entry->bitmap && !btrfs_free_space_trimmed(entry))
+		ctl->discardable_bytes[BTRFS_STAT_CURR] -= bytes;
 	if (entry->bytes == 0) {
 		ctl->free_extents--;
 		if (entry->bitmap) {
@@ -3515,9 +3535,11 @@ static void reset_trimming_bitmap(struct btrfs_free_space_ctl *ctl, u64 offset)
 	spin_lock(&ctl->tree_lock);
 	entry = tree_search_offset(ctl, offset, 1, 0);
 	if (entry) {
-		if (btrfs_free_space_trimmed(entry))
+		if (btrfs_free_space_trimmed(entry)) {
 			ctl->discardable_extents[BTRFS_STAT_CURR] +=
 				entry->bitmap_extents;
+			ctl->discardable_bytes[BTRFS_STAT_CURR] += entry->bytes;
+		}
 		entry->trim_state = BTRFS_TRIM_STATE_UNTRIMMED;
 	}
 
@@ -3531,6 +3553,7 @@ static void end_trimming_bitmap(struct btrfs_free_space_ctl *ctl,
 		entry->trim_state = BTRFS_TRIM_STATE_TRIMMED;
 		ctl->discardable_extents[BTRFS_STAT_CURR] -=
 			entry->bitmap_extents;
+		ctl->discardable_bytes[BTRFS_STAT_CURR] -= entry->bytes;
 	}
 }
 
@@ -52,6 +52,7 @@ struct btrfs_free_space_ctl {
 	int unit;
 	u64 start;
 	s32 discardable_extents[BTRFS_STAT_NR_ENTRIES];
+	s64 discardable_bytes[BTRFS_STAT_NR_ENTRIES];
 	const struct btrfs_free_space_op *op;
 	void *private;
 	struct mutex cache_writeout_mutex;
@@ -344,6 +344,17 @@ static const struct attribute_group btrfs_static_feature_attr_group = {
  */
 #define discard_to_fs_info(_kobj) to_fs_info((_kobj)->parent->parent)
 
+static ssize_t btrfs_discardable_bytes_show(struct kobject *kobj,
+					    struct kobj_attribute *a,
+					    char *buf)
+{
+	struct btrfs_fs_info *fs_info = discard_to_fs_info(kobj);
+
+	return snprintf(buf, PAGE_SIZE, "%lld\n",
+			atomic64_read(&fs_info->discard_ctl.discardable_bytes));
+}
+BTRFS_ATTR(discard, discardable_bytes, btrfs_discardable_bytes_show);
+
 static ssize_t btrfs_discardable_extents_show(struct kobject *kobj,
 					      struct kobj_attribute *a,
 					      char *buf)
@@ -356,6 +367,7 @@ static ssize_t btrfs_discardable_extents_show(struct kobject *kobj,
 BTRFS_ATTR(discard, discardable_extents, btrfs_discardable_extents_show);
 
 static const struct attribute *discard_debug_attrs[] = {
+	BTRFS_ATTR_PTR(discard, discardable_bytes),
 	BTRFS_ATTR_PTR(discard, discardable_extents),
 	NULL,
 };
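
Usage note: the attribute registered above should appear as a read-only sysfs file on kernels built with CONFIG_BTRFS_DEBUG. The exact path is an assumption inferred from the discard_debug_attrs naming (something like /sys/fs/btrfs/<UUID>/debug/discard/discardable_bytes); the program name and argument handling below are likewise hypothetical. A minimal userspace reader sketch:

/*
 * Hypothetical reader; pass the sysfs attribute path as the only argument,
 * e.g. ./read_discardable /sys/fs/btrfs/<UUID>/debug/discard/discardable_bytes
 * (path assumed, present only on CONFIG_BTRFS_DEBUG kernels).
 */
#include <stdio.h>

int main(int argc, char **argv)
{
	long long bytes;
	FILE *f;

	if (argc != 2) {
		fprintf(stderr, "usage: %s <sysfs attribute path>\n", argv[0]);
		return 1;
	}
	f = fopen(argv[1], "r");
	if (!f) {
		perror("fopen");
		return 1;
	}
	if (fscanf(f, "%lld", &bytes) != 1) {
		fclose(f);
		fprintf(stderr, "unexpected attribute contents\n");
		return 1;
	}
	fclose(f);
	printf("discardable: %lld bytes\n", bytes);
	return 0;
}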