Commit dc89e982 authored by Josef Bacik

Btrfs: use a slab for the free space entries

Since we alloc/free free space entries a whole lot, let's use a slab to keep
track of them.  This makes some of my tests slightly faster.  Thanks,
Signed-off-by: Josef Bacik <josef@redhat.com>
Parent 57a45ced
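For context, here is the pattern the patch adopts, shown in isolation. This is a minimal illustrative sketch, not part of the patch: the demo_* identifiers are hypothetical, but the slab API it exercises (kmem_cache_create(), kmem_cache_zalloc(), kmem_cache_free(), kmem_cache_destroy()) is exactly what the diff below switches to, replacing per-object kzalloc()/kfree() with a dedicated cache sized for the object.

/*
 * Illustrative sketch only -- not part of this patch. The demo_* names
 * are hypothetical; the slab calls mirror the ones the diff introduces.
 */
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/types.h>

struct demo_entry {
        u64 offset;
        u64 bytes;
};

static struct kmem_cache *demo_entry_cachep;

/* One cache sized exactly for the object, created once at init time. */
static int demo_cache_init(void)
{
        demo_entry_cachep = kmem_cache_create("demo_entry",
                        sizeof(struct demo_entry), 0,
                        SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
        return demo_entry_cachep ? 0 : -ENOMEM;
}

/* Replaces kzalloc(sizeof(struct demo_entry), GFP_NOFS). */
static struct demo_entry *demo_entry_alloc(void)
{
        return kmem_cache_zalloc(demo_entry_cachep, GFP_NOFS);
}

/* Replaces kfree(e); objects must go back to the cache they came from. */
static void demo_entry_free(struct demo_entry *e)
{
        kmem_cache_free(demo_entry_cachep, e);
}

static void demo_cache_exit(void)
{
        kmem_cache_destroy(demo_entry_cachep);
}

The expected win is that a dedicated cache keeps per-CPU freelists of same-sized objects and avoids rounding each allocation up to a generic kmalloc size bucket, which helps on a hot alloc/free path like this one.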
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -40,6 +40,7 @@ extern struct kmem_cache *btrfs_trans_handle_cachep;
 extern struct kmem_cache *btrfs_transaction_cachep;
 extern struct kmem_cache *btrfs_bit_radix_cachep;
 extern struct kmem_cache *btrfs_path_cachep;
+extern struct kmem_cache *btrfs_free_space_cachep;
 struct btrfs_ordered_sum;
 #define BTRFS_MAGIC "_BHRfS_M"
--- a/fs/btrfs/free-space-cache.c
+++ b/fs/btrfs/free-space-cache.c
@@ -393,7 +393,8 @@ int load_free_space_cache(struct btrfs_fs_info *fs_info,
                         break;
                 need_loop = 1;
-                e = kzalloc(sizeof(struct btrfs_free_space), GFP_NOFS);
+                e = kmem_cache_zalloc(btrfs_free_space_cachep,
+                                      GFP_NOFS);
                 if (!e) {
                         kunmap(page);
                         unlock_page(page);
@@ -405,7 +406,7 @@ int load_free_space_cache(struct btrfs_fs_info *fs_info,
                 e->bytes = le64_to_cpu(entry->bytes);
                 if (!e->bytes) {
                         kunmap(page);
-                        kfree(e);
+                        kmem_cache_free(btrfs_free_space_cachep, e);
                         unlock_page(page);
                         page_cache_release(page);
                         goto free_cache;
@@ -420,7 +421,8 @@ int load_free_space_cache(struct btrfs_fs_info *fs_info,
                         e->bitmap = kzalloc(PAGE_CACHE_SIZE, GFP_NOFS);
                         if (!e->bitmap) {
                                 kunmap(page);
-                                kfree(e);
+                                kmem_cache_free(
+                                                btrfs_free_space_cachep, e);
                                 unlock_page(page);
                                 page_cache_release(page);
                                 goto free_cache;
@@ -1187,7 +1189,7 @@ static void free_bitmap(struct btrfs_block_group_cache *block_group,
 {
         unlink_free_space(block_group, bitmap_info);
         kfree(bitmap_info->bitmap);
-        kfree(bitmap_info);
+        kmem_cache_free(btrfs_free_space_cachep, bitmap_info);
         block_group->total_bitmaps--;
         recalculate_thresholds(block_group);
 }
@@ -1342,8 +1344,8 @@ static int insert_into_bitmap(struct btrfs_block_group_cache *block_group,
         /* no pre-allocated info, allocate a new one */
         if (!info) {
-                info = kzalloc(sizeof(struct btrfs_free_space),
-                               GFP_NOFS);
+                info = kmem_cache_zalloc(btrfs_free_space_cachep,
+                                         GFP_NOFS);
                 if (!info) {
                         spin_lock(&block_group->tree_lock);
                         ret = -ENOMEM;
@@ -1365,7 +1367,7 @@ static int insert_into_bitmap(struct btrfs_block_group_cache *block_group,
         if (info) {
                 if (info->bitmap)
                         kfree(info->bitmap);
-                kfree(info);
+                kmem_cache_free(btrfs_free_space_cachep, info);
         }
         return ret;
@@ -1398,7 +1400,7 @@ bool try_merge_free_space(struct btrfs_block_group_cache *block_group,
                 else
                         __unlink_free_space(block_group, right_info);
                 info->bytes += right_info->bytes;
-                kfree(right_info);
+                kmem_cache_free(btrfs_free_space_cachep, right_info);
                 merged = true;
         }
@@ -1410,7 +1412,7 @@ bool try_merge_free_space(struct btrfs_block_group_cache *block_group,
                         __unlink_free_space(block_group, left_info);
                 info->offset = left_info->offset;
                 info->bytes += left_info->bytes;
-                kfree(left_info);
+                kmem_cache_free(btrfs_free_space_cachep, left_info);
                 merged = true;
         }
@@ -1423,7 +1425,7 @@ int btrfs_add_free_space(struct btrfs_block_group_cache *block_group,
         struct btrfs_free_space *info;
         int ret = 0;
-        info = kzalloc(sizeof(struct btrfs_free_space), GFP_NOFS);
+        info = kmem_cache_zalloc(btrfs_free_space_cachep, GFP_NOFS);
         if (!info)
                 return -ENOMEM;
@@ -1450,7 +1452,7 @@ int btrfs_add_free_space(struct btrfs_block_group_cache *block_group,
 link:
         ret = link_free_space(block_group, info);
         if (ret)
-                kfree(info);
+                kmem_cache_free(btrfs_free_space_cachep, info);
 out:
         spin_unlock(&block_group->tree_lock);
@@ -1520,7 +1522,7 @@ int btrfs_remove_free_space(struct btrfs_block_group_cache *block_group,
                         kfree(info->bitmap);
                         block_group->total_bitmaps--;
                 }
-                kfree(info);
+                kmem_cache_free(btrfs_free_space_cachep, info);
                 goto out_lock;
         }
@@ -1556,7 +1558,7 @@ int btrfs_remove_free_space(struct btrfs_block_group_cache *block_group,
                         /* the hole we're creating ends at the end
                          * of the info struct, just free the info
                          */
-                        kfree(info);
+                        kmem_cache_free(btrfs_free_space_cachep, info);
                 }
                 spin_unlock(&block_group->tree_lock);
@@ -1689,7 +1691,7 @@ void btrfs_remove_free_space_cache(struct btrfs_block_group_cache *block_group)
                 unlink_free_space(block_group, info);
                 if (info->bitmap)
                         kfree(info->bitmap);
-                kfree(info);
+                kmem_cache_free(btrfs_free_space_cachep, info);
                 if (need_resched()) {
                         spin_unlock(&block_group->tree_lock);
                         cond_resched();
@@ -1722,7 +1724,7 @@ u64 btrfs_find_space_for_alloc(struct btrfs_block_group_cache *block_group,
                         entry->offset += bytes;
                         entry->bytes -= bytes;
                         if (!entry->bytes)
-                                kfree(entry);
+                                kmem_cache_free(btrfs_free_space_cachep, entry);
                         else
                                 link_free_space(block_group, entry);
                 }
@@ -1884,7 +1886,7 @@ u64 btrfs_alloc_from_cluster(struct btrfs_block_group_cache *block_group,
         block_group->free_space -= bytes;
         if (entry->bytes == 0) {
                 block_group->free_extents--;
-                kfree(entry);
+                kmem_cache_free(btrfs_free_space_cachep, entry);
         }
         spin_unlock(&block_group->tree_lock);
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -50,6 +50,7 @@
 #include "tree-log.h"
 #include "compression.h"
 #include "locking.h"
+#include "free-space-cache.h"
 struct btrfs_iget_args {
         u64 ino;
@@ -70,6 +71,7 @@ static struct kmem_cache *btrfs_inode_cachep;
 struct kmem_cache *btrfs_trans_handle_cachep;
 struct kmem_cache *btrfs_transaction_cachep;
 struct kmem_cache *btrfs_path_cachep;
+struct kmem_cache *btrfs_free_space_cachep;
 #define S_SHIFT 12
 static unsigned char btrfs_type_by_mode[S_IFMT >> S_SHIFT] = {
@@ -6761,6 +6763,8 @@ void btrfs_destroy_cachep(void)
                 kmem_cache_destroy(btrfs_transaction_cachep);
         if (btrfs_path_cachep)
                 kmem_cache_destroy(btrfs_path_cachep);
+        if (btrfs_free_space_cachep)
+                kmem_cache_destroy(btrfs_free_space_cachep);
 }
 int btrfs_init_cachep(void)
@@ -6789,6 +6793,12 @@ int btrfs_init_cachep(void)
         if (!btrfs_path_cachep)
                 goto fail;
+        btrfs_free_space_cachep = kmem_cache_create("btrfs_free_space_cache",
+                        sizeof(struct btrfs_free_space), 0,
+                        SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
+        if (!btrfs_free_space_cachep)
+                goto fail;
 return 0;
 fail:
         btrfs_destroy_cachep();
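A note on the flags passed to kmem_cache_create() above: SLAB_RECLAIM_ACCOUNT accounts the cache's pages as reclaimable slab, which fits objects like these that are freed in bulk when a block group's free-space cache is dropped, and SLAB_MEM_SPREAD spreads the cache's pages across memory nodes when cpuset memory spreading is enabled. kmem_cache_zalloc() returns zero-filled objects, so behavior matches the kzalloc() calls being replaced.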