Commit a3058101 authored by Chris Mason

Merge branch 'misc-for-4.5' of git://git.kernel.org/pub/scm/linux/kernel/git/kdave/linux into for-linus-4.5
fs/btrfs/btrfs_inode.h
@@ -192,6 +192,10 @@ struct btrfs_inode {
 	/* File creation time. */
 	struct timespec i_otime;

+	/* Hook into fs_info->delayed_iputs */
+	struct list_head delayed_iput;
+	long delayed_iput_count;
+
 	struct inode vfs_inode;
 };
......
fs/btrfs/ctree.c
@@ -2248,7 +2248,6 @@ static void reada_for_search(struct btrfs_root *root,
 	u64 target;
 	u64 nread = 0;
 	u64 gen;
-	int direction = path->reada;
 	struct extent_buffer *eb;
 	u32 nr;
 	u32 blocksize;
@@ -2276,16 +2275,16 @@ static void reada_for_search(struct btrfs_root *root,
 	nr = slot;
 	while (1) {
-		if (direction < 0) {
+		if (path->reada == READA_BACK) {
 			if (nr == 0)
 				break;
 			nr--;
-		} else if (direction > 0) {
+		} else if (path->reada == READA_FORWARD) {
 			nr++;
 			if (nr >= nritems)
 				break;
 		}
-		if (path->reada < 0 && objectid) {
+		if (path->reada == READA_BACK && objectid) {
 			btrfs_node_key(node, &disk_key, nr);
 			if (btrfs_disk_key_objectid(&disk_key) != objectid)
 				break;
@@ -2493,7 +2492,7 @@ read_block_for_search(struct btrfs_trans_handle *trans,
 		btrfs_set_path_blocking(p);
 		free_extent_buffer(tmp);
-		if (p->reada)
+		if (p->reada != READA_NONE)
 			reada_for_search(root, p, level, slot, key->objectid);
 		btrfs_release_path(p);
......
fs/btrfs/ctree.h
@@ -177,7 +177,7 @@ struct btrfs_ordered_sum;
 /* csum types */
 #define BTRFS_CSUM_TYPE_CRC32	0
-static int btrfs_csum_sizes[] = { 4 };
+static const int btrfs_csum_sizes[] = { 4 };
 /* four bytes for CRC32 */
 #define BTRFS_EMPTY_DIR_SIZE 0
@@ -598,14 +598,15 @@ struct btrfs_node {
  * The slots array records the index of the item or block pointer
  * used while walking the tree.
  */
+enum { READA_NONE = 0, READA_BACK, READA_FORWARD };
 struct btrfs_path {
 	struct extent_buffer *nodes[BTRFS_MAX_LEVEL];
 	int slots[BTRFS_MAX_LEVEL];
 	/* if there is real range locking, this locks field will change */
-	int locks[BTRFS_MAX_LEVEL];
-	int reada;
+	u8 locks[BTRFS_MAX_LEVEL];
+	u8 reada;
 	/* keep some upper locks as we walk down */
-	int lowest_level;
+	u8 lowest_level;
 	/*
 	 * set by btrfs_split_item, tells search_slot to keep all locks
......
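Aside (not part of the commit): the new enum replaces the magic values 1, 2 and -1 previously stored in `path->reada` with named directions, which is also what lets the field shrink to a `u8`. A minimal sketch of a caller before and after, assuming the usual path helpers:

```c
/* Sketch only -- how a btrfs tree walk requests readahead after this change. */
struct btrfs_path *path;

path = btrfs_alloc_path();
if (!path)
	return -ENOMEM;

path->reada = READA_FORWARD;	/* previously: path->reada = 1; (or 2) */
/* reada_for_search() now compares against READA_BACK/READA_FORWARD
 * instead of testing the sign of the field. */
ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
/* ... use the results ... */
btrfs_free_path(path);
```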
fs/btrfs/delayed-ref.c
@@ -493,12 +493,12 @@ update_existing_head_ref(struct btrfs_delayed_ref_root *delayed_refs,
 			memcpy(&existing_ref->extent_op->key,
 			       &ref->extent_op->key,
 			       sizeof(ref->extent_op->key));
-			existing_ref->extent_op->update_key = 1;
+			existing_ref->extent_op->update_key = true;
 		}
 		if (ref->extent_op->update_flags) {
 			existing_ref->extent_op->flags_to_set |=
 				ref->extent_op->flags_to_set;
-			existing_ref->extent_op->update_flags = 1;
+			existing_ref->extent_op->update_flags = true;
 		}
 		btrfs_free_delayed_extent_op(ref->extent_op);
 	}
......
fs/btrfs/delayed-ref.h
@@ -75,11 +75,11 @@ struct btrfs_delayed_ref_node {
 struct btrfs_delayed_extent_op {
 	struct btrfs_disk_key key;
+	u8 level;
+	bool update_key;
+	bool update_flags;
+	bool is_data;
 	u64 flags_to_set;
-	int level;
-	unsigned int update_key:1;
-	unsigned int update_flags:1;
-	unsigned int is_data:1;
 };
 /*
......
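Aside (illustration, not from the patch): besides reading more naturally, grouping the `u8` and `bool` members ahead of the `u64` can shrink the struct, since the old layout left an `int` plus a bitfield word dangling after `flags_to_set`. A userspace stand-in one could compile to compare layouts; the stand-in types are hypothetical and the sizes depend on the ABI:

```c
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for struct btrfs_disk_key (packed, 17 bytes). */
struct key_standin {
	unsigned long long objectid;
	unsigned char type;
	unsigned long long offset;
} __attribute__((packed));

struct old_layout {		/* int + bitfields after the u64 */
	struct key_standin key;
	unsigned long long flags_to_set;
	int level;
	unsigned int update_key:1, update_flags:1, is_data:1;
};

struct new_layout {		/* small members grouped before the u64 */
	struct key_standin key;
	unsigned char level;
	bool update_key, update_flags, is_data;
	unsigned long long flags_to_set;
};

int main(void)
{
	printf("old: %zu bytes, new: %zu bytes\n",
	       sizeof(struct old_layout), sizeof(struct new_layout));
	return 0;
}
```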
fs/btrfs/disk-io.c
@@ -2682,6 +2682,7 @@ int open_ctree(struct super_block *sb,
 	if (btrfs_check_super_csum(bh->b_data)) {
 		printk(KERN_ERR "BTRFS: superblock checksum mismatch\n");
 		err = -EINVAL;
+		brelse(bh);
 		goto fail_alloc;
 	}
......
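Aside: this one-liner fixes a buffer_head leak; the checksum-mismatch path jumped out of `open_ctree()` without dropping the reference taken when the superblock was read. A simplified sketch of the shape of the fix (not the kernel source verbatim):

```c
/* Simplified sketch: every early exit taken after the superblock
 * buffer_head has been read must brelse() it. */
struct buffer_head *bh;

bh = btrfs_read_dev_super(fs_devices->latest_bdev);
if (!bh) {
	err = -EINVAL;
	goto fail_alloc;		/* nothing to release yet */
}

if (btrfs_check_super_csum(bh->b_data)) {
	printk(KERN_ERR "BTRFS: superblock checksum mismatch\n");
	err = -EINVAL;
	brelse(bh);			/* this was the missing release */
	goto fail_alloc;
}
```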
fs/btrfs/extent-tree.c
@@ -437,7 +437,7 @@ static int load_extent_tree_free(struct btrfs_caching_control *caching_ctl)
 	 */
 	path->skip_locking = 1;
 	path->search_commit_root = 1;
-	path->reada = 1;
+	path->reada = READA_FORWARD;
 	key.objectid = last;
 	key.offset = 0;
@@ -2131,7 +2131,7 @@ static int __btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
 	if (!path)
 		return -ENOMEM;
-	path->reada = 1;
+	path->reada = READA_FORWARD;
 	path->leave_spinning = 1;
 	/* this will setup the path even if it fails to insert the back ref */
 	ret = insert_inline_extent_backref(trans, fs_info->extent_root, path,
@@ -2157,7 +2157,7 @@ static int __btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
 	btrfs_mark_buffer_dirty(leaf);
 	btrfs_release_path(path);
-	path->reada = 1;
+	path->reada = READA_FORWARD;
 	path->leave_spinning = 1;
 	/* now insert the actual backref */
 	ret = insert_extent_backref(trans, root->fs_info->extent_root,
@@ -2270,7 +2270,7 @@ static int run_delayed_extent_op(struct btrfs_trans_handle *trans,
 	}
 again:
-	path->reada = 1;
+	path->reada = READA_FORWARD;
 	path->leave_spinning = 1;
 	ret = btrfs_search_slot(trans, root->fs_info->extent_root, &key,
 				path, 0, 1);
@@ -3007,9 +3007,9 @@ int btrfs_set_disk_extent_flags(struct btrfs_trans_handle *trans,
 		return -ENOMEM;
 	extent_op->flags_to_set = flags;
-	extent_op->update_flags = 1;
-	extent_op->update_key = 0;
-	extent_op->is_data = is_data ? 1 : 0;
+	extent_op->update_flags = true;
+	extent_op->update_key = false;
+	extent_op->is_data = is_data ? true : false;
 	extent_op->level = level;
 	ret = btrfs_add_delayed_extent_op(root->fs_info, trans, bytenr,
@@ -6472,7 +6472,7 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
 	if (!path)
 		return -ENOMEM;
-	path->reada = 1;
+	path->reada = READA_FORWARD;
 	path->leave_spinning = 1;
 	is_data = owner_objectid >= BTRFS_FIRST_FREE_OBJECTID;
@@ -8031,12 +8031,9 @@ struct extent_buffer *btrfs_alloc_tree_block(struct btrfs_trans_handle *trans,
 		else
 			memset(&extent_op->key, 0, sizeof(extent_op->key));
 		extent_op->flags_to_set = flags;
-		if (skinny_metadata)
-			extent_op->update_key = 0;
-		else
-			extent_op->update_key = 1;
-		extent_op->update_flags = 1;
-		extent_op->is_data = 0;
+		extent_op->update_key = skinny_metadata ? false : true;
+		extent_op->update_flags = true;
+		extent_op->is_data = false;
 		extent_op->level = level;
 		ret = btrfs_add_delayed_tree_ref(root->fs_info, trans,
@@ -9745,7 +9742,7 @@ int btrfs_read_block_groups(struct btrfs_root *root)
 	path = btrfs_alloc_path();
 	if (!path)
 		return -ENOMEM;
-	path->reada = 1;
+	path->reada = READA_FORWARD;
 	cache_gen = btrfs_super_cache_generation(root->fs_info->super_copy);
 	if (btrfs_test_opt(root, SPACE_CACHE) &&
......
fs/btrfs/file-item.c
@@ -202,7 +202,7 @@ static int __btrfs_lookup_bio_sums(struct btrfs_root *root,
 	}
 	if (bio->bi_iter.bi_size > PAGE_CACHE_SIZE * 8)
-		path->reada = 2;
+		path->reada = READA_FORWARD;
 	WARN_ON(bio->bi_vcnt <= 0);
@@ -328,7 +328,7 @@ int btrfs_lookup_csums_range(struct btrfs_root *root, u64 start, u64 end,
 	if (search_commit) {
 		path->skip_locking = 1;
-		path->reada = 2;
+		path->reada = READA_FORWARD;
 		path->search_commit_root = 1;
 	}
......
fs/btrfs/free-space-cache.c
@@ -2016,7 +2016,7 @@ static bool use_bitmap(struct btrfs_free_space_ctl *ctl,
 	return true;
 }
-static struct btrfs_free_space_op free_space_op = {
+static const struct btrfs_free_space_op free_space_op = {
 	.recalc_thresholds	= recalculate_thresholds,
 	.use_bitmap		= use_bitmap,
 };
......
fs/btrfs/free-space-cache.h
@@ -37,7 +37,7 @@ struct btrfs_free_space_ctl {
 	int total_bitmaps;
 	int unit;
 	u64 start;
-	struct btrfs_free_space_op *op;
+	const struct btrfs_free_space_op *op;
 	void *private;
 	struct mutex cache_writeout_mutex;
 	struct list_head trimming_ranges;
......
fs/btrfs/inode-map.c
@@ -48,7 +48,7 @@ static int caching_kthread(void *data)
 	/* Since the commit root is read-only, we can safely skip locking. */
 	path->skip_locking = 1;
 	path->search_commit_root = 1;
-	path->reada = 2;
+	path->reada = READA_FORWARD;
 	key.objectid = BTRFS_FIRST_FREE_OBJECTID;
 	key.offset = 0;
@@ -334,7 +334,7 @@ static bool use_bitmap(struct btrfs_free_space_ctl *ctl,
 	return true;
 }
-static struct btrfs_free_space_op free_ino_op = {
+static const struct btrfs_free_space_op free_ino_op = {
 	.recalc_thresholds	= recalculate_thresholds,
 	.use_bitmap		= use_bitmap,
 };
@@ -356,7 +356,7 @@ static bool pinned_use_bitmap(struct btrfs_free_space_ctl *ctl,
 	return false;
 }
-static struct btrfs_free_space_op pinned_free_ino_op = {
+static const struct btrfs_free_space_op pinned_free_ino_op = {
 	.recalc_thresholds	= pinned_recalc_thresholds,
 	.use_bitmap		= pinned_use_bitmap,
 };
......
fs/btrfs/inode.c
@@ -81,17 +81,16 @@ static const struct inode_operations btrfs_file_inode_operations;
 static const struct address_space_operations btrfs_aops;
 static const struct address_space_operations btrfs_symlink_aops;
 static const struct file_operations btrfs_dir_file_operations;
-static struct extent_io_ops btrfs_extent_io_ops;
+static const struct extent_io_ops btrfs_extent_io_ops;
 static struct kmem_cache *btrfs_inode_cachep;
-static struct kmem_cache *btrfs_delalloc_work_cachep;
 struct kmem_cache *btrfs_trans_handle_cachep;
 struct kmem_cache *btrfs_transaction_cachep;
 struct kmem_cache *btrfs_path_cachep;
 struct kmem_cache *btrfs_free_space_cachep;
 #define S_SHIFT 12
-static unsigned char btrfs_type_by_mode[S_IFMT >> S_SHIFT] = {
+static const unsigned char btrfs_type_by_mode[S_IFMT >> S_SHIFT] = {
 	[S_IFREG >> S_SHIFT]	= BTRFS_FT_REG_FILE,
 	[S_IFDIR >> S_SHIFT]	= BTRFS_FT_DIR,
 	[S_IFCHR >> S_SHIFT]	= BTRFS_FT_CHRDEV,
@@ -3113,55 +3112,47 @@ static int btrfs_readpage_end_io_hook(struct btrfs_io_bio *io_bio,
 			  start, (size_t)(end - start + 1));
 }
-struct delayed_iput {
-	struct list_head list;
-	struct inode *inode;
-};
-
-/* JDM: If this is fs-wide, why can't we add a pointer to
- * btrfs_inode instead and avoid the allocation? */
 void btrfs_add_delayed_iput(struct inode *inode)
 {
 	struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
-	struct delayed_iput *delayed;
+	struct btrfs_inode *binode = BTRFS_I(inode);
 	if (atomic_add_unless(&inode->i_count, -1, 1))
 		return;
-	delayed = kmalloc(sizeof(*delayed), GFP_NOFS | __GFP_NOFAIL);
-	delayed->inode = inode;
-
 	spin_lock(&fs_info->delayed_iput_lock);
-	list_add_tail(&delayed->list, &fs_info->delayed_iputs);
+	if (binode->delayed_iput_count == 0) {
+		ASSERT(list_empty(&binode->delayed_iput));
+		list_add_tail(&binode->delayed_iput, &fs_info->delayed_iputs);
+	} else {
+		binode->delayed_iput_count++;
+	}
 	spin_unlock(&fs_info->delayed_iput_lock);
 }
 void btrfs_run_delayed_iputs(struct btrfs_root *root)
 {
-	LIST_HEAD(list);
 	struct btrfs_fs_info *fs_info = root->fs_info;
-	struct delayed_iput *delayed;
-	int empty;
-
-	spin_lock(&fs_info->delayed_iput_lock);
-	empty = list_empty(&fs_info->delayed_iputs);
-	spin_unlock(&fs_info->delayed_iput_lock);
-	if (empty)
-		return;
 	down_read(&fs_info->delayed_iput_sem);
 	spin_lock(&fs_info->delayed_iput_lock);
-	list_splice_init(&fs_info->delayed_iputs, &list);
-	spin_unlock(&fs_info->delayed_iput_lock);
-
-	while (!list_empty(&list)) {
-		delayed = list_entry(list.next, struct delayed_iput, list);
-		list_del(&delayed->list);
-		iput(delayed->inode);
-		kfree(delayed);
+	while (!list_empty(&fs_info->delayed_iputs)) {
+		struct btrfs_inode *inode;
+
+		inode = list_first_entry(&fs_info->delayed_iputs,
+				struct btrfs_inode, delayed_iput);
+		if (inode->delayed_iput_count) {
+			inode->delayed_iput_count--;
+			list_move_tail(&inode->delayed_iput,
+					&fs_info->delayed_iputs);
+		} else {
+			list_del_init(&inode->delayed_iput);
+		}
+		spin_unlock(&fs_info->delayed_iput_lock);
+		iput(&inode->vfs_inode);
+		spin_lock(&fs_info->delayed_iput_lock);
 	}
+	spin_unlock(&fs_info->delayed_iput_lock);
 	up_read(&root->fs_info->delayed_iput_sem);
 }
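Aside: this rework answers the old JDM comment removed above. Instead of kmalloc'ing a carrier struct for every deferred iput (with `__GFP_NOFAIL`, no less), the list node and an overflow counter now live in `btrfs_inode` itself. A distilled sketch of the pattern, with hypothetical names, not the btrfs code itself:

```c
#include <linux/list.h>
#include <linux/spinlock.h>

/* Distilled sketch: embed the queue node in the object instead of
 * allocating a small carrier struct per queued request. */
struct obj {
	struct list_head node;	/* list_empty() when not queued */
	long pending;		/* extra requests while already queued */
};

static void queue_request(struct obj *o, struct list_head *queue,
			  spinlock_t *lock)
{
	spin_lock(lock);
	if (o->pending == 0 && list_empty(&o->node))
		list_add_tail(&o->node, queue);	/* first request: enqueue */
	else
		o->pending++;	/* already queued: just remember the extra */
	spin_unlock(lock);
}

static void drain(struct list_head *queue, spinlock_t *lock)
{
	spin_lock(lock);
	while (!list_empty(queue)) {
		struct obj *o = list_first_entry(queue, struct obj, node);

		if (o->pending) {
			o->pending--;			/* consume one, */
			list_move_tail(&o->node, queue);/* keep it queued */
		} else {
			list_del_init(&o->node);	/* last request */
		}
		spin_unlock(lock);
		/* ... perform the deferred operation on o (may sleep) ... */
		spin_lock(lock);
	}
	spin_unlock(lock);
}
```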
@@ -3358,7 +3349,7 @@ int btrfs_orphan_cleanup(struct btrfs_root *root)
 		ret = -ENOMEM;
 		goto out;
 	}
-	path->reada = -1;
+	path->reada = READA_BACK;
 	key.objectid = BTRFS_ORPHAN_OBJECTID;
 	key.type = BTRFS_ORPHAN_ITEM_KEY;
@@ -4324,7 +4315,7 @@ int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
 	path = btrfs_alloc_path();
 	if (!path)
 		return -ENOMEM;
-	path->reada = -1;
+	path->reada = READA_BACK;
 	/*
 	 * We want to drop from the next block forward in case this new size is
@@ -5760,7 +5751,7 @@ static int btrfs_real_readdir(struct file *file, struct dir_context *ctx)
 	if (!path)
 		return -ENOMEM;
-	path->reada = 1;
+	path->reada = READA_FORWARD;
 	if (key_type == BTRFS_DIR_INDEX_KEY) {
 		INIT_LIST_HEAD(&ins_list);
@@ -6791,7 +6782,7 @@ struct extent_map *btrfs_get_extent(struct inode *inode, struct page *page,
 		 * Chances are we'll be called again, so go ahead and do
 		 * readahead
 		 */
-		path->reada = 1;
+		path->reada = READA_FORWARD;
 	}
 	ret = btrfs_lookup_file_extent(trans, root, path,
@@ -8563,15 +8554,28 @@ int btrfs_readpage(struct file *file, struct page *page)
 static int btrfs_writepage(struct page *page, struct writeback_control *wbc)
 {
 	struct extent_io_tree *tree;
+	struct inode *inode = page->mapping->host;
+	int ret;
 	if (current->flags & PF_MEMALLOC) {
 		redirty_page_for_writepage(wbc, page);
 		unlock_page(page);
 		return 0;
 	}
+
+	/*
+	 * If we are under memory pressure we will call this directly from the
+	 * VM, we need to make sure we have the inode referenced for the ordered
+	 * extent. If not just return like we didn't do anything.
+	 */
+	if (!igrab(inode)) {
+		redirty_page_for_writepage(wbc, page);
+		return AOP_WRITEPAGE_ACTIVATE;
+	}
 	tree = &BTRFS_I(page->mapping->host)->io_tree;
-	return extent_write_full_page(tree, page, btrfs_get_extent, wbc);
+	ret = extent_write_full_page(tree, page, btrfs_get_extent, wbc);
+	btrfs_add_delayed_iput(inode);
+	return ret;
 }
 static int btrfs_writepages(struct address_space *mapping,
@@ -9053,6 +9057,7 @@ struct inode *btrfs_alloc_inode(struct super_block *sb)
 	ei->dir_index = 0;
 	ei->last_unlink_trans = 0;
 	ei->last_log_commit = 0;
+	ei->delayed_iput_count = 0;
 	spin_lock_init(&ei->lock);
 	ei->outstanding_extents = 0;
@@ -9077,6 +9082,7 @@ struct inode *btrfs_alloc_inode(struct super_block *sb)
 	mutex_init(&ei->delalloc_mutex);
 	btrfs_ordered_inode_tree_init(&ei->ordered_tree);
 	INIT_LIST_HEAD(&ei->delalloc_inodes);
+	INIT_LIST_HEAD(&ei->delayed_iput);
 	RB_CLEAR_NODE(&ei->rb_node);
 	return inode;
@@ -9181,8 +9187,6 @@ void btrfs_destroy_cachep(void)
 		kmem_cache_destroy(btrfs_path_cachep);
 	if (btrfs_free_space_cachep)
 		kmem_cache_destroy(btrfs_free_space_cachep);
-	if (btrfs_delalloc_work_cachep)
-		kmem_cache_destroy(btrfs_delalloc_work_cachep);
 }
 int btrfs_init_cachep(void)
@@ -9217,13 +9221,6 @@ int btrfs_init_cachep(void)
 	if (!btrfs_free_space_cachep)
 		goto fail;
-	btrfs_delalloc_work_cachep = kmem_cache_create("btrfs_delalloc_work",
-			sizeof(struct btrfs_delalloc_work), 0,
-			SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD,
-			NULL);
-	if (!btrfs_delalloc_work_cachep)
-		goto fail;
-
 	return 0;
 fail:
 	btrfs_destroy_cachep();
@@ -9464,7 +9461,7 @@ struct btrfs_delalloc_work *btrfs_alloc_delalloc_work(struct inode *inode,
 {
 	struct btrfs_delalloc_work *work;
-	work = kmem_cache_zalloc(btrfs_delalloc_work_cachep, GFP_NOFS);
+	work = kmalloc(sizeof(*work), GFP_NOFS);
 	if (!work)
 		return NULL;
@@ -9482,7 +9479,7 @@ struct btrfs_delalloc_work *btrfs_alloc_delalloc_work(struct inode *inode,
 void btrfs_wait_and_free_delalloc_work(struct btrfs_delalloc_work *work)
 {
 	wait_for_completion(&work->completion);
-	kmem_cache_free(btrfs_delalloc_work_cachep, work);
+	kfree(work);
 }
 /*
@@ -10047,7 +10044,7 @@ static const struct file_operations btrfs_dir_file_operations = {
 	.fsync		= btrfs_sync_file,
 };
-static struct extent_io_ops btrfs_extent_io_ops = {
+static const struct extent_io_ops btrfs_extent_io_ops = {
 	.fill_delalloc = run_delalloc_range,
 	.submit_bio_hook = btrfs_submit_bio_hook,
 	.merge_bio_hook = btrfs_merge_bio_hook,
......
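Aside: dropping `btrfs_delalloc_work_cachep` trades a dedicated slab cache for plain `kmalloc`/`kfree`, presumably because the allocation is too infrequent and short-lived for a private cache to pay off. One subtlety worth flagging (sketch, names from the diff above):

```c
/* Before: kmem_cache_zalloc(btrfs_delalloc_work_cachep, GFP_NOFS)
 * After:  plain kmalloc -- the switch away from a zeroing allocator
 * means every field must now be initialized explicitly before use. */
struct btrfs_delalloc_work *work;

work = kmalloc(sizeof(*work), GFP_NOFS);
if (!work)
	return NULL;
/* ... init_completion(&work->completion), list heads, etc. ... */
```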
fs/btrfs/ioctl.c
@@ -655,22 +655,28 @@ static int create_snapshot(struct btrfs_root *root, struct inode *dir,
 	if (!test_bit(BTRFS_ROOT_REF_COWS, &root->state))
 		return -EINVAL;
+	pending_snapshot = kzalloc(sizeof(*pending_snapshot), GFP_NOFS);
+	if (!pending_snapshot)
+		return -ENOMEM;
+
+	pending_snapshot->root_item = kzalloc(sizeof(struct btrfs_root_item),
+			GFP_NOFS);
+	pending_snapshot->path = btrfs_alloc_path();
+	if (!pending_snapshot->root_item || !pending_snapshot->path) {
+		ret = -ENOMEM;
+		goto free_pending;
+	}
+
 	atomic_inc(&root->will_be_snapshoted);
 	smp_mb__after_atomic();
 	btrfs_wait_for_no_snapshoting_writes(root);
 	ret = btrfs_start_delalloc_inodes(root, 0);
 	if (ret)
-		goto out;
+		goto dec_and_free;
 	btrfs_wait_ordered_extents(root, -1);
-	pending_snapshot = kzalloc(sizeof(*pending_snapshot), GFP_NOFS);
-	if (!pending_snapshot) {
-		ret = -ENOMEM;
-		goto out;
-	}
-
 	btrfs_init_block_rsv(&pending_snapshot->block_rsv,
 			     BTRFS_BLOCK_RSV_TEMP);
 	/*
@@ -686,7 +692,7 @@ static int create_snapshot(struct btrfs_root *root, struct inode *dir,
 					&pending_snapshot->qgroup_reserved,
 					false);
 	if (ret)
-		goto free;
+		goto dec_and_free;
 	pending_snapshot->dentry = dentry;
 	pending_snapshot->root = root;
@@ -737,11 +743,14 @@ static int create_snapshot(struct btrfs_root *root, struct inode *dir,
 	btrfs_subvolume_release_metadata(BTRFS_I(dir)->root,
 					 &pending_snapshot->block_rsv,
 					 pending_snapshot->qgroup_reserved);
-free:
-	kfree(pending_snapshot);
-out:
+dec_and_free:
 	if (atomic_dec_and_test(&root->will_be_snapshoted))
 		wake_up_atomic_t(&root->will_be_snapshoted);
+free_pending:
+	kfree(pending_snapshot->root_item);
+	btrfs_free_path(pending_snapshot->path);
+	kfree(pending_snapshot);
+
 	return ret;
 }
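Aside: this reordering pairs with the `create_pending_snapshot()` change further down. The ioctl path now preallocates the root item and path while -ENOMEM is still easy to report, and the transaction-commit path merely asserts that they exist, since allocation failures during commit are much harder to unwind. A distilled sketch with hypothetical names:

```c
/* Distilled sketch of the pattern: allocate in the caller, where
 * failure is cheap; the commit phase only consumes, never allocates. */
struct pending_work {
	struct btrfs_root_item *root_item;
	struct btrfs_path *path;
};

static int prepare(struct pending_work *w)	/* ioctl context */
{
	w->root_item = kzalloc(sizeof(*w->root_item), GFP_NOFS);
	w->path = btrfs_alloc_path();
	if (!w->root_item || !w->path) {
		kfree(w->root_item);		/* kfree(NULL) is a no-op */
		btrfs_free_path(w->path);	/* NULL-safe as well */
		return -ENOMEM;
	}
	return 0;
}

static void consume(struct pending_work *w)	/* transaction commit */
{
	ASSERT(w->root_item && w->path);	/* preallocated: cannot fail here */
	/* ... use them, then free and NULL the pointers ... */
}
```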
@@ -3478,7 +3487,7 @@ static int btrfs_clone(struct inode *src, struct inode *inode,
 		return ret;
 	}
-	path->reada = 2;
+	path->reada = READA_FORWARD;
 	/* clone data */
 	key.objectid = btrfs_ino(src);
 	key.type = BTRFS_EXTENT_DATA_KEY;
@@ -5286,7 +5295,7 @@ static int btrfs_ioctl_set_fslabel(struct file *file, void __user *arg)
 static int btrfs_ioctl_get_supported_features(struct file *file,
 					      void __user *arg)
 {
-	static struct btrfs_ioctl_feature_flags features[3] = {
+	static const struct btrfs_ioctl_feature_flags features[3] = {
 		INIT_FEATURE_FLAGS(SUPP),
 		INIT_FEATURE_FLAGS(SAFE_SET),
 		INIT_FEATURE_FLAGS(SAFE_CLEAR)
......
fs/btrfs/relocation.c
@@ -708,8 +708,8 @@ struct backref_node *build_backref_tree(struct reloc_control *rc,
 		err = -ENOMEM;
 		goto out;
 	}
-	path1->reada = 1;
-	path2->reada = 2;
+	path1->reada = READA_FORWARD;
+	path2->reada = READA_FORWARD;
 	node = alloc_backref_node(cache);
 	if (!node) {
@@ -2130,7 +2130,7 @@ static noinline_for_stack int merge_reloc_root(struct reloc_control *rc,
 	path = btrfs_alloc_path();
 	if (!path)
 		return -ENOMEM;
-	path->reada = 1;
+	path->reada = READA_FORWARD;
 	reloc_root = root->reloc_root;
 	root_item = &reloc_root->root_item;
@@ -3527,7 +3527,7 @@ static int find_data_references(struct reloc_control *rc,
 	path = btrfs_alloc_path();
 	if (!path)
 		return -ENOMEM;
-	path->reada = 1;
+	path->reada = READA_FORWARD;
 	root = read_fs_root(rc->extent_root->fs_info, ref_root);
 	if (IS_ERR(root)) {
@@ -3917,7 +3917,7 @@ static noinline_for_stack int relocate_block_group(struct reloc_control *rc)
 	path = btrfs_alloc_path();
 	if (!path)
 		return -ENOMEM;
-	path->reada = 1;
+	path->reada = READA_FORWARD;
 	ret = prepare_to_relocate(rc);
 	if (ret) {
@@ -4343,7 +4343,7 @@ int btrfs_recover_relocation(struct btrfs_root *root)
 	path = btrfs_alloc_path();
 	if (!path)
 		return -ENOMEM;
-	path->reada = -1;
+	path->reada = READA_BACK;
 	key.objectid = BTRFS_TREE_RELOC_OBJECTID;
 	key.type = BTRFS_ROOT_ITEM_KEY;
......
fs/btrfs/scrub.c
@@ -3507,7 +3507,7 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx,
 	if (!path)
 		return -ENOMEM;
-	path->reada = 2;
+	path->reada = READA_FORWARD;
 	path->search_commit_root = 1;
 	path->skip_locking = 1;
@@ -3735,27 +3735,27 @@ static noinline_for_stack int scrub_workers_get(struct btrfs_fs_info *fs_info,
 	if (fs_info->scrub_workers_refcnt == 0) {
 		if (is_dev_replace)
 			fs_info->scrub_workers =
-				btrfs_alloc_workqueue("btrfs-scrub", flags,
+				btrfs_alloc_workqueue("scrub", flags,
 						      1, 4);
 		else
 			fs_info->scrub_workers =
-				btrfs_alloc_workqueue("btrfs-scrub", flags,
+				btrfs_alloc_workqueue("scrub", flags,
 						      max_active, 4);
 		if (!fs_info->scrub_workers)
 			goto fail_scrub_workers;
 		fs_info->scrub_wr_completion_workers =
-			btrfs_alloc_workqueue("btrfs-scrubwrc", flags,
+			btrfs_alloc_workqueue("scrubwrc", flags,
 					      max_active, 2);
 		if (!fs_info->scrub_wr_completion_workers)
 			goto fail_scrub_wr_completion_workers;
 		fs_info->scrub_nocow_workers =
-			btrfs_alloc_workqueue("btrfs-scrubnc", flags, 1, 0);
+			btrfs_alloc_workqueue("scrubnc", flags, 1, 0);
 		if (!fs_info->scrub_nocow_workers)
 			goto fail_scrub_nocow_workers;
 		fs_info->scrub_parity_workers =
-			btrfs_alloc_workqueue("btrfs-scrubparity", flags,
+			btrfs_alloc_workqueue("scrubparity", flags,
 					      max_active, 2);
 		if (!fs_info->scrub_parity_workers)
 			goto fail_scrub_parity_workers;
......
fs/btrfs/super.c
@@ -310,7 +310,7 @@ enum {
 	Opt_err,
 };
-static match_table_t tokens = {
+static const match_table_t tokens = {
 	{Opt_degraded, "degraded"},
 	{Opt_subvol, "subvol=%s"},
 	{Opt_subvolid, "subvolid=%s"},
@@ -1991,6 +1991,8 @@ static int btrfs_calc_avail_data_space(struct btrfs_root *root, u64 *free_bytes)
  * there are other factors that may change the result (like a new metadata
  * chunk).
  *
+ * If metadata is exhausted, f_bavail will be 0.
+ *
  * FIXME: not accurate for mixed block groups, total and free/used are ok,
  * available appears slightly larger.
  */
@@ -2002,11 +2004,13 @@ static int btrfs_statfs(struct dentry *dentry, struct kstatfs *buf)
 	struct btrfs_space_info *found;
 	u64 total_used = 0;
 	u64 total_free_data = 0;
+	u64 total_free_meta = 0;
 	int bits = dentry->d_sb->s_blocksize_bits;
 	__be32 *fsid = (__be32 *)fs_info->fsid;
 	unsigned factor = 1;
 	struct btrfs_block_rsv *block_rsv = &fs_info->global_block_rsv;
 	int ret;
+	u64 thresh = 0;
 	/*
 	 * holding chunk_muext to avoid allocating new chunks, holding
@@ -2032,6 +2036,8 @@ static int btrfs_statfs(struct dentry *dentry, struct kstatfs *buf)
 				}
 			}
 		}
+		if (found->flags & BTRFS_BLOCK_GROUP_METADATA)
+			total_free_meta += found->disk_total - found->disk_used;
 		total_used += found->disk_used;
 	}
@@ -2054,6 +2060,24 @@ static int btrfs_statfs(struct dentry *dentry, struct kstatfs *buf)
 	buf->f_bavail += div_u64(total_free_data, factor);
 	buf->f_bavail = buf->f_bavail >> bits;
+	/*
+	 * We calculate the remaining metadata space minus global reserve. If
+	 * this is (supposedly) smaller than zero, there's no space. But this
+	 * does not hold in practice, the exhausted state happens where's still
+	 * some positive delta. So we apply some guesswork and compare the
+	 * delta to a 4M threshold. (Practically observed delta was ~2M.)
+	 *
+	 * We probably cannot calculate the exact threshold value because this
+	 * depends on the internal reservations requested by various
+	 * operations, so some operations that consume a few metadata will
+	 * succeed even if the Avail is zero. But this is better than the other
+	 * way around.
+	 */
+	thresh = 4 * 1024 * 1024;
+
+	if (total_free_meta - thresh < block_rsv->size)
+		buf->f_bavail = 0;
+
 	buf->f_type = BTRFS_SUPER_MAGIC;
 	buf->f_bsize = dentry->d_sb->s_blocksize;
 	buf->f_namelen = BTRFS_NAME_LEN;
......
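Aside: a worked example of the heuristic above, with numbers invented for illustration. With 6 MiB of free metadata space, a 3 MiB global reserve and the fixed 4 MiB threshold:

```c
/* Illustrative values, not from the patch: */
u64 total_free_meta = 6 * 1024 * 1024;	/* unused metadata space */
u64 thresh          = 4 * 1024 * 1024;	/* fixed fudge factor */
u64 rsv_size        = 3 * 1024 * 1024;	/* global block reserve */

/* 6M - 4M = 2M, and 2M < 3M, so f_bavail is forced to 0: statfs
 * reports "no space" slightly before metadata is truly exhausted.
 * Note that the u64 subtraction wraps if total_free_meta < thresh,
 * so as written the check quietly stops firing once free metadata
 * drops below the 4M threshold itself. */
if (total_free_meta - thresh < rsv_size)
	buf->f_bavail = 0;
```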
fs/btrfs/tests/free-space-tests.c
@@ -410,9 +410,11 @@ test_steal_space_from_bitmap_to_extent(struct btrfs_block_group_cache *cache)
 	int ret;
 	u64 offset;
 	u64 max_extent_size;
-	bool (*use_bitmap_op)(struct btrfs_free_space_ctl *,
-			      struct btrfs_free_space *);
+	const struct btrfs_free_space_op test_free_space_ops = {
+		.recalc_thresholds = cache->free_space_ctl->op->recalc_thresholds,
+		.use_bitmap = test_use_bitmap,
+	};
+	const struct btrfs_free_space_op *orig_free_space_ops;
 	test_msg("Running space stealing from bitmap to extent\n");
@@ -434,8 +436,8 @@ test_steal_space_from_bitmap_to_extent(struct btrfs_block_group_cache *cache)
 	 * that forces use of bitmaps as soon as we have at least 1
 	 * extent entry.
 	 */
-	use_bitmap_op = cache->free_space_ctl->op->use_bitmap;
-	cache->free_space_ctl->op->use_bitmap = test_use_bitmap;
+	orig_free_space_ops = cache->free_space_ctl->op;
+	cache->free_space_ctl->op = &test_free_space_ops;
 	/*
 	 * Extent entry covering free space range [128Mb - 256Kb, 128Mb - 128Kb[
@@ -842,7 +844,7 @@ test_steal_space_from_bitmap_to_extent(struct btrfs_block_group_cache *cache)
 	if (ret)
 		return ret;
-	cache->free_space_ctl->op->use_bitmap = use_bitmap_op;
+	cache->free_space_ctl->op = orig_free_space_ops;
 	__btrfs_remove_free_space_cache(cache->free_space_ctl);
 	return 0;
......
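Aside: this hunk shows the knock-on effect of making `btrfs_free_space_ctl::op` point to const. The test used to patch a single function pointer through the live ops table; with a const table you swap the whole table pointer instead. A self-contained sketch of the idiom:

```c
struct ops {
	int (*use_bitmap)(void);
};

static int real_use_bitmap(void) { return 0; }
static int test_use_bitmap(void) { return 1; }

static const struct ops real_ops = { .use_bitmap = real_use_bitmap };
static const struct ops test_ops = { .use_bitmap = test_use_bitmap };

static int run_test(const struct ops **slot)
{
	const struct ops *orig = *slot;	/* save the original table */
	int ret;

	*slot = &test_ops;		/* swap in the instrumented table */
	ret = (*slot)->use_bitmap();
	*slot = orig;			/* restore */

	/* The old approach no longer compiles:
	 *   (*slot)->use_bitmap = test_use_bitmap;
	 * error: assignment of member 'use_bitmap' in read-only object */
	return ret;
}

int main(void)
{
	const struct ops *slot = &real_ops;
	return run_test(&slot);
}
```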
fs/btrfs/transaction.c
@@ -1336,17 +1336,11 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
 	u64 root_flags;
 	uuid_le new_uuid;
-	path = btrfs_alloc_path();
-	if (!path) {
-		pending->error = -ENOMEM;
-		return 0;
-	}
+	ASSERT(pending->path);
+	path = pending->path;
-	new_root_item = kmalloc(sizeof(*new_root_item), GFP_NOFS);
-	if (!new_root_item) {
-		pending->error = -ENOMEM;
-		goto root_item_alloc_fail;
-	}
+	ASSERT(pending->root_item);
+	new_root_item = pending->root_item;
 	pending->error = btrfs_find_free_objectid(tree_root, &objectid);
 	if (pending->error)
@@ -1579,8 +1573,10 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
 	btrfs_clear_skip_qgroup(trans);
 no_free_objectid:
 	kfree(new_root_item);
-root_item_alloc_fail:
+	pending->root_item = NULL;
 	btrfs_free_path(path);
+	pending->path = NULL;
+
 	return ret;
 }
......
fs/btrfs/transaction.h
@@ -137,8 +137,10 @@ struct btrfs_pending_snapshot {
 	struct dentry *dentry;
 	struct inode *dir;
 	struct btrfs_root *root;
+	struct btrfs_root_item *root_item;
 	struct btrfs_root *snap;
 	struct btrfs_qgroup_inherit *inherit;
+	struct btrfs_path *path;
 	/* block reservation for the operation */
 	struct btrfs_block_rsv block_rsv;
 	u64 qgroup_reserved;
......
fs/btrfs/volumes.c
@@ -1103,7 +1103,7 @@ int btrfs_account_dev_extents_size(struct btrfs_device *device, u64 start,
 	path = btrfs_alloc_path();
 	if (!path)
 		return -ENOMEM;
-	path->reada = 2;
+	path->reada = READA_FORWARD;
 	key.objectid = device->devid;
 	key.offset = start;
@@ -1272,7 +1272,7 @@ int find_free_dev_extent_start(struct btrfs_transaction *transaction,
 		goto out;
 	}
-	path->reada = 2;
+	path->reada = READA_FORWARD;
 	path->search_commit_root = 1;
 	path->skip_locking = 1;
@@ -3724,14 +3724,6 @@ int btrfs_balance(struct btrfs_balance_control *bctl,
 		goto out;
 	}
-	/* allow dup'ed data chunks only in mixed mode */
-	if (!mixed && (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
-	    (bctl->data.target & BTRFS_BLOCK_GROUP_DUP)) {
-		btrfs_err(fs_info, "dup for data is not allowed");
-		ret = -EINVAL;
-		goto out;
-	}
-
 	/* allow to reduce meta or sys integrity only if force set */
 	allowed = BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1 |
 			BTRFS_BLOCK_GROUP_RAID10 |
@@ -3757,6 +3749,13 @@ int btrfs_balance(struct btrfs_balance_control *bctl,
 		}
 	} while (read_seqretry(&fs_info->profiles_lock, seq));
+	if (btrfs_get_num_tolerated_disk_barrier_failures(bctl->meta.target) <
+		btrfs_get_num_tolerated_disk_barrier_failures(bctl->data.target)) {
+		btrfs_warn(fs_info,
+	"metatdata profile 0x%llx has lower redundancy than data profile 0x%llx",
+			bctl->meta.target, bctl->data.target);
+	}
+
 	if (bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) {
 		fs_info->num_tolerated_disk_barrier_failures = min(
 			btrfs_calc_num_tolerated_disk_barrier_failures(fs_info),
@@ -4269,7 +4268,7 @@ int btrfs_shrink_device(struct btrfs_device *device, u64 new_size)
 	if (!path)
 		return -ENOMEM;
-	path->reada = 2;
+	path->reada = READA_FORWARD;
 	lock_chunks(root);
@@ -6526,6 +6525,14 @@ int btrfs_read_sys_array(struct btrfs_root *root)
 				goto out_short_read;
 			num_stripes = btrfs_chunk_num_stripes(sb, chunk);
+			if (!num_stripes) {
+				printk(KERN_ERR
+	    "BTRFS: invalid number of stripes %u in sys_array at offset %u\n",
+					num_stripes, cur_offset);
+				ret = -EIO;
+				break;
+			}
+
 			len = btrfs_chunk_item_size(num_stripes);
 			if (cur_offset + len > array_size)
 				goto out_short_read;
@@ -6534,6 +6541,9 @@ int btrfs_read_sys_array(struct btrfs_root *root)
 			if (ret)
 				break;
 		} else {
+			printk(KERN_ERR
+		"BTRFS: unexpected item type %u in sys_array at offset %u\n",
+				  (u32)key.type, cur_offset);
 			ret = -EIO;
 			break;
 		}
......
fs/btrfs/xattr.c
@@ -283,7 +283,7 @@ ssize_t btrfs_listxattr(struct dentry *dentry, char *buffer, size_t size)
 	path = btrfs_alloc_path();
 	if (!path)
 		return -ENOMEM;
-	path->reada = 2;
+	path->reada = READA_FORWARD;
 	/* search for our xattrs */
 	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
......