Commit f28491e0 authored by Josef Bacik, committed by Chris Mason

Btrfs: move the extent buffer radix tree into the fs_info

I need to create a fake tree to test qgroups and I don't want to have to set up a
fake btree_inode.  The fact is we only use the radix tree for the fs_info, so
everybody else who allocates an extent_io_tree is just wasting the space anyway.
This patch moves the radix tree and its lock into btrfs_fs_info so there is less
stuff I have to fake to do qgroup sanity tests.  Thanks,
Signed-off-by: Josef Bacik <jbacik@fb.com>
Signed-off-by: Chris Mason <clm@fb.com>
Parent 34b41ace
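For context, a rough sketch of what the commit message is aiming at: once buffer_lock and buffer_radix live in btrfs_fs_info, a qgroup sanity test can hand the extent-buffer code a bare, heap-allocated fs_info instead of wiring up a fake btree_inode. The helper name, GFP flags, and error handling below are assumptions for illustration, not part of this patch.

/*
 * Illustrative only -- not part of this patch.  Something along these
 * lines is all a sanity test would need to fake once extent buffer
 * tracking lives in btrfs_fs_info.
 */
static struct btrfs_fs_info *alloc_test_fs_info(void)
{
	struct btrfs_fs_info *fs_info;

	fs_info = kzalloc(sizeof(*fs_info), GFP_KERNEL);
	if (!fs_info)
		return NULL;

	/* The two fields this patch adds to btrfs_fs_info. */
	spin_lock_init(&fs_info->buffer_lock);
	INIT_RADIX_TREE(&fs_info->buffer_radix, GFP_ATOMIC);
	return fs_info;
}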
@@ -1659,6 +1659,10 @@ struct btrfs_fs_info {
 	spinlock_t reada_lock;
 	struct radix_tree_root reada_tree;
+	/* Extent buffer radix tree */
+	spinlock_t buffer_lock;
+	struct radix_tree_root buffer_radix;
 	/* next backup root to be overwritten */
 	int backup_root_index;
@@ -1089,21 +1089,13 @@ int reada_tree_block_flagged(struct btrfs_root *root, u64 bytenr, u32 blocksize,
 struct extent_buffer *btrfs_find_tree_block(struct btrfs_root *root,
 					    u64 bytenr, u32 blocksize)
 {
-	struct inode *btree_inode = root->fs_info->btree_inode;
-	struct extent_buffer *eb;
-	eb = find_extent_buffer(&BTRFS_I(btree_inode)->io_tree, bytenr);
-	return eb;
+	return find_extent_buffer(root->fs_info, bytenr);
 }
 struct extent_buffer *btrfs_find_create_tree_block(struct btrfs_root *root,
 						   u64 bytenr, u32 blocksize)
 {
-	struct inode *btree_inode = root->fs_info->btree_inode;
-	struct extent_buffer *eb;
-	eb = alloc_extent_buffer(&BTRFS_I(btree_inode)->io_tree,
-				 bytenr, blocksize);
-	return eb;
+	return alloc_extent_buffer(root->fs_info, bytenr, blocksize);
 }
@@ -2145,6 +2137,7 @@ int open_ctree(struct super_block *sb,
 	mapping_set_gfp_mask(fs_info->btree_inode->i_mapping, GFP_NOFS);
 	INIT_RADIX_TREE(&fs_info->fs_roots_radix, GFP_ATOMIC);
+	INIT_RADIX_TREE(&fs_info->buffer_radix, GFP_ATOMIC);
 	INIT_LIST_HEAD(&fs_info->trans_list);
 	INIT_LIST_HEAD(&fs_info->dead_roots);
 	INIT_LIST_HEAD(&fs_info->delayed_iputs);
@@ -2158,6 +2151,7 @@ int open_ctree(struct super_block *sb,
 	spin_lock_init(&fs_info->free_chunk_lock);
 	spin_lock_init(&fs_info->tree_mod_seq_lock);
 	spin_lock_init(&fs_info->super_lock);
+	spin_lock_init(&fs_info->buffer_lock);
 	rwlock_init(&fs_info->tree_mod_log_lock);
 	mutex_init(&fs_info->reloc_mutex);
 	seqlock_init(&fs_info->profiles_lock);
@@ -194,11 +194,9 @@ void extent_io_tree_init(struct extent_io_tree *tree,
 			 struct address_space *mapping)
 {
 	tree->state = RB_ROOT;
-	INIT_RADIX_TREE(&tree->buffer, GFP_ATOMIC);
 	tree->ops = NULL;
 	tree->dirty_bytes = 0;
 	spin_lock_init(&tree->lock);
-	spin_lock_init(&tree->buffer_lock);
 	tree->mapping = mapping;
 }
@@ -3489,6 +3487,7 @@ static int write_one_eb(struct extent_buffer *eb,
 			struct extent_page_data *epd)
 {
 	struct block_device *bdev = fs_info->fs_devices->latest_bdev;
+	struct extent_io_tree *tree = &BTRFS_I(fs_info->btree_inode)->io_tree;
 	u64 offset = eb->start;
 	unsigned long i, num_pages;
 	unsigned long bio_flags = 0;
@@ -3506,7 +3505,7 @@ static int write_one_eb(struct extent_buffer *eb,
 		clear_page_dirty_for_io(p);
 		set_page_writeback(p);
-		ret = submit_extent_page(rw, eb->tree, p, offset >> 9,
+		ret = submit_extent_page(rw, tree, p, offset >> 9,
 					 PAGE_CACHE_SIZE, 0, bdev, &epd->bio,
 					 -1, end_bio_extent_buffer_writepage,
 					 0, epd->bio_flags, bio_flags);
@@ -4370,10 +4369,9 @@ static inline void btrfs_release_extent_buffer(struct extent_buffer *eb)
 	__free_extent_buffer(eb);
 }
-static struct extent_buffer *__alloc_extent_buffer(struct extent_io_tree *tree,
-						   u64 start,
-						   unsigned long len,
-						   gfp_t mask)
+static struct extent_buffer *
+__alloc_extent_buffer(struct btrfs_fs_info *fs_info, u64 start,
+		      unsigned long len, gfp_t mask)
 {
 	struct extent_buffer *eb = NULL;
@@ -4382,7 +4380,7 @@ static struct extent_buffer *__alloc_extent_buffer(struct extent_io_tree *tree,
 		return NULL;
 	eb->start = start;
 	eb->len = len;
-	eb->tree = tree;
+	eb->fs_info = fs_info;
 	eb->bflags = 0;
 	rwlock_init(&eb->lock);
 	atomic_set(&eb->write_locks, 0);
@@ -4514,13 +4512,14 @@ static void mark_extent_buffer_accessed(struct extent_buffer *eb)
 	}
 }
-struct extent_buffer *find_extent_buffer(struct extent_io_tree *tree,
-					 u64 start)
+struct extent_buffer *find_extent_buffer(struct btrfs_fs_info *fs_info,
+					 u64 start)
 {
 	struct extent_buffer *eb;
 	rcu_read_lock();
-	eb = radix_tree_lookup(&tree->buffer, start >> PAGE_CACHE_SHIFT);
+	eb = radix_tree_lookup(&fs_info->buffer_radix,
+			       start >> PAGE_CACHE_SHIFT);
 	if (eb && atomic_inc_not_zero(&eb->refs)) {
 		rcu_read_unlock();
 		mark_extent_buffer_accessed(eb);
@@ -4531,7 +4530,7 @@ struct extent_buffer *find_extent_buffer(struct extent_io_tree *tree,
 	return NULL;
 }
-struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree,
+struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
 					  u64 start, unsigned long len)
 {
 	unsigned long num_pages = num_extent_pages(start, len);
@@ -4540,16 +4539,15 @@ struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree,
 	struct extent_buffer *eb;
 	struct extent_buffer *exists = NULL;
 	struct page *p;
-	struct address_space *mapping = tree->mapping;
+	struct address_space *mapping = fs_info->btree_inode->i_mapping;
 	int uptodate = 1;
 	int ret;
-	eb = find_extent_buffer(tree, start);
+	eb = find_extent_buffer(fs_info, start);
 	if (eb)
 		return eb;
-	eb = __alloc_extent_buffer(tree, start, len, GFP_NOFS);
+	eb = __alloc_extent_buffer(fs_info, start, len, GFP_NOFS);
 	if (!eb)
 		return NULL;
@@ -4604,12 +4602,13 @@ struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree,
 	if (ret)
 		goto free_eb;
-	spin_lock(&tree->buffer_lock);
-	ret = radix_tree_insert(&tree->buffer, start >> PAGE_CACHE_SHIFT, eb);
-	spin_unlock(&tree->buffer_lock);
+	spin_lock(&fs_info->buffer_lock);
+	ret = radix_tree_insert(&fs_info->buffer_radix,
+				start >> PAGE_CACHE_SHIFT, eb);
+	spin_unlock(&fs_info->buffer_lock);
 	radix_tree_preload_end();
 	if (ret == -EEXIST) {
-		exists = find_extent_buffer(tree, start);
+		exists = find_extent_buffer(fs_info, start);
 		if (exists)
 			goto free_eb;
 		else
@@ -4662,14 +4661,14 @@ static int release_extent_buffer(struct extent_buffer *eb)
 	WARN_ON(atomic_read(&eb->refs) == 0);
 	if (atomic_dec_and_test(&eb->refs)) {
 		if (test_and_clear_bit(EXTENT_BUFFER_IN_TREE, &eb->bflags)) {
-			struct extent_io_tree *tree = eb->tree;
+			struct btrfs_fs_info *fs_info = eb->fs_info;
 			spin_unlock(&eb->refs_lock);
-			spin_lock(&tree->buffer_lock);
-			radix_tree_delete(&tree->buffer,
+			spin_lock(&fs_info->buffer_lock);
+			radix_tree_delete(&fs_info->buffer_radix,
 					  eb->start >> PAGE_CACHE_SHIFT);
-			spin_unlock(&tree->buffer_lock);
+			spin_unlock(&fs_info->buffer_lock);
 		} else {
 			spin_unlock(&eb->refs_lock);
 		}
@@ -95,12 +95,10 @@ struct extent_io_ops {
 struct extent_io_tree {
 	struct rb_root state;
-	struct radix_tree_root buffer;
 	struct address_space *mapping;
 	u64 dirty_bytes;
 	int track_uptodate;
 	spinlock_t lock;
-	spinlock_t buffer_lock;
 	struct extent_io_ops *ops;
 };
@@ -131,7 +129,7 @@ struct extent_buffer {
 	unsigned long map_start;
 	unsigned long map_len;
 	unsigned long bflags;
-	struct extent_io_tree *tree;
+	struct btrfs_fs_info *fs_info;
 	spinlock_t refs_lock;
 	atomic_t refs;
 	atomic_t io_pages;
@@ -267,11 +265,11 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
 int get_state_private(struct extent_io_tree *tree, u64 start, u64 *private);
 void set_page_extent_mapped(struct page *page);
-struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree,
+struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
 					  u64 start, unsigned long len);
 struct extent_buffer *alloc_dummy_extent_buffer(u64 start, unsigned long len);
 struct extent_buffer *btrfs_clone_extent_buffer(struct extent_buffer *src);
-struct extent_buffer *find_extent_buffer(struct extent_io_tree *tree,
+struct extent_buffer *find_extent_buffer(struct btrfs_fs_info *fs_info,
 					 u64 start);
 void free_extent_buffer(struct extent_buffer *eb);
 void free_extent_buffer_stale(struct extent_buffer *eb);
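With the prototypes above now taking a btrfs_fs_info, a caller-side sketch of the new lookup path could look like the following. Only find_extent_buffer() and free_extent_buffer() come from this patch; the wrapper function and its name are hypothetical.

/*
 * Illustrative caller: look up a cached tree block by bytenr without
 * going through btree_inode's io_tree.  find_extent_buffer() takes a
 * reference (atomic_inc_not_zero), so the caller must drop it.
 */
static void peek_tree_block(struct btrfs_fs_info *fs_info, u64 bytenr)
{
	struct extent_buffer *eb;

	eb = find_extent_buffer(fs_info, bytenr);
	if (!eb)
		return;	/* not present in fs_info->buffer_radix */

	/* ... read-only inspection of eb would go here ... */

	free_extent_buffer(eb);	/* drop the reference from the lookup */
}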