Commit c5cb6a05 authored by Chris Mason
@@ -918,7 +918,8 @@ static int find_parent_nodes(struct btrfs_trans_handle *trans,
                         ref->parent, bsz, 0);
             if (!eb || !extent_buffer_uptodate(eb)) {
                 free_extent_buffer(eb);
-                return -EIO;
+                ret = -EIO;
+                goto out;
             }
             ret = find_extent_in_eb(eb, bytenr,
                         *extent_item_pos, &eie);
......
@@ -951,10 +951,12 @@ static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans,
         BUG_ON(ret); /* -ENOMEM */
     }
     if (new_flags != 0) {
+        int level = btrfs_header_level(buf);
+
         ret = btrfs_set_disk_extent_flags(trans, root,
                           buf->start,
                           buf->len,
-                          new_flags, 0);
+                          new_flags, level, 0);
         if (ret)
             return ret;
     }
......
@@ -88,12 +88,12 @@ struct btrfs_ordered_sum;
 /* holds checksums of all the data extents */
 #define BTRFS_CSUM_TREE_OBJECTID 7ULL

-/* for storing balance parameters in the root tree */
-#define BTRFS_BALANCE_OBJECTID -4ULL
-
 /* holds quota configuration and tracking */
 #define BTRFS_QUOTA_TREE_OBJECTID 8ULL

+/* for storing balance parameters in the root tree */
+#define BTRFS_BALANCE_OBJECTID -4ULL
+
 /* orhpan objectid for tracking unlinked/truncated files */
 #define BTRFS_ORPHAN_OBJECTID -5ULL
@@ -3075,7 +3075,7 @@ int btrfs_dec_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
 int btrfs_set_disk_extent_flags(struct btrfs_trans_handle *trans,
                 struct btrfs_root *root,
                 u64 bytenr, u64 num_bytes, u64 flags,
-                int is_data);
+                int level, int is_data);
 int btrfs_free_extent(struct btrfs_trans_handle *trans,
               struct btrfs_root *root,
               u64 bytenr, u64 num_bytes, u64 parent, u64 root_objectid,
......
@@ -60,6 +60,7 @@ struct btrfs_delayed_ref_node {
 struct btrfs_delayed_extent_op {
     struct btrfs_disk_key key;
     u64 flags_to_set;
+    int level;
     unsigned int update_key:1;
     unsigned int update_flags:1;
     unsigned int is_data:1;
......
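The ctree.h and delayed-ref.h hunks above plumb a tree level into struct btrfs_delayed_extent_op so the extent-tree.c hunks further down can build a METADATA_ITEM key from the op alone. A minimal user-space mock of that idea (all types, names and key values below are invented stand-ins for illustration, not the kernel's definitions):

#include <stdio.h>

typedef unsigned long long u64;

/* invented stand-ins for the structures this commit touches */
struct mock_key { u64 objectid; unsigned int type; u64 offset; };

struct mock_delayed_extent_op {
    u64 flags_to_set;
    int level;                  /* the field added by this commit */
    unsigned int update_key:1;
    unsigned int update_flags:1;
    unsigned int is_data:1;
};

enum { MOCK_EXTENT_ITEM_KEY = 168, MOCK_METADATA_ITEM_KEY = 169 };

/* mirrors the simplified key construction in run_delayed_extent_op() */
static struct mock_key build_key(u64 bytenr, u64 num_bytes,
                                 const struct mock_delayed_extent_op *op)
{
    struct mock_key key = { .objectid = bytenr };

    if (!op->is_data) {         /* metadata: key offset carries the level */
        key.type = MOCK_METADATA_ITEM_KEY;
        key.offset = (u64)op->level;
    } else {                    /* data: key offset carries the extent size */
        key.type = MOCK_EXTENT_ITEM_KEY;
        key.offset = num_bytes;
    }
    return key;
}

int main(void)
{
    struct mock_delayed_extent_op op = { .level = 2, .update_flags = 1 };
    struct mock_key key = build_key(12582912ULL, 16384ULL, &op);

    printf("key = (%llu, %u, %llu)\n", key.objectid, key.type, key.offset);
    return 0;
}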
@@ -313,6 +313,11 @@ int btrfs_dev_replace_start(struct btrfs_root *root,
     struct btrfs_device *tgt_device = NULL;
     struct btrfs_device *src_device = NULL;

+    if (btrfs_fs_incompat(fs_info, RAID56)) {
+        pr_warn("btrfs: dev_replace cannot yet handle RAID5/RAID6\n");
+        return -EINVAL;
+    }
+
     switch (args->start.cont_reading_from_srcdev_mode) {
     case BTRFS_IOCTL_DEV_REPLACE_CONT_READING_FROM_SRCDEV_MODE_ALWAYS:
     case BTRFS_IOCTL_DEV_REPLACE_CONT_READING_FROM_SRCDEV_MODE_AVOID:
......
@@ -152,7 +152,7 @@ static struct btrfs_lockdep_keyset {
     { .id = BTRFS_DEV_TREE_OBJECTID,        .name_stem = "dev"    },
     { .id = BTRFS_FS_TREE_OBJECTID,         .name_stem = "fs"     },
     { .id = BTRFS_CSUM_TREE_OBJECTID,       .name_stem = "csum"   },
-    { .id = BTRFS_ORPHAN_OBJECTID,          .name_stem = "orphan" },
+    { .id = BTRFS_QUOTA_TREE_OBJECTID,      .name_stem = "quota"  },
     { .id = BTRFS_TREE_LOG_OBJECTID,        .name_stem = "log"    },
     { .id = BTRFS_TREE_RELOC_OBJECTID,      .name_stem = "treloc" },
     { .id = BTRFS_DATA_RELOC_TREE_OBJECTID, .name_stem = "dreloc" },
@@ -1513,7 +1513,6 @@ struct btrfs_root *btrfs_read_fs_root_no_radix(struct btrfs_root *tree_root,
     }
     root->commit_root = btrfs_root_node(root);
-    BUG_ON(!root->node); /* -ENOMEM */
 out:
     if (location->objectid != BTRFS_TREE_LOG_OBJECTID) {
         root->ref_cows = 1;
@@ -1988,30 +1987,33 @@ static void free_root_pointers(struct btrfs_fs_info *info, int chunk_root)
 {
     free_extent_buffer(info->tree_root->node);
     free_extent_buffer(info->tree_root->commit_root);
-    free_extent_buffer(info->dev_root->node);
-    free_extent_buffer(info->dev_root->commit_root);
-    free_extent_buffer(info->extent_root->node);
-    free_extent_buffer(info->extent_root->commit_root);
-    free_extent_buffer(info->csum_root->node);
-    free_extent_buffer(info->csum_root->commit_root);
-    if (info->quota_root) {
-        free_extent_buffer(info->quota_root->node);
-        free_extent_buffer(info->quota_root->commit_root);
-    }
     info->tree_root->node = NULL;
     info->tree_root->commit_root = NULL;
+
+    if (info->dev_root) {
+        free_extent_buffer(info->dev_root->node);
+        free_extent_buffer(info->dev_root->commit_root);
         info->dev_root->node = NULL;
         info->dev_root->commit_root = NULL;
+    }
+    if (info->extent_root) {
+        free_extent_buffer(info->extent_root->node);
+        free_extent_buffer(info->extent_root->commit_root);
         info->extent_root->node = NULL;
         info->extent_root->commit_root = NULL;
+    }
+    if (info->csum_root) {
+        free_extent_buffer(info->csum_root->node);
+        free_extent_buffer(info->csum_root->commit_root);
         info->csum_root->node = NULL;
         info->csum_root->commit_root = NULL;
+    }
     if (info->quota_root) {
+        free_extent_buffer(info->quota_root->node);
+        free_extent_buffer(info->quota_root->commit_root);
         info->quota_root->node = NULL;
         info->quota_root->commit_root = NULL;
     }
     if (chunk_root) {
         free_extent_buffer(info->chunk_root->node);
         free_extent_buffer(info->chunk_root->commit_root);
@@ -3659,8 +3661,11 @@ static void btrfs_destroy_ordered_operations(struct btrfs_transaction *t,
                      ordered_operations);
         list_del_init(&btrfs_inode->ordered_operations);
+        spin_unlock(&root->fs_info->ordered_extent_lock);
         btrfs_invalidate_inodes(btrfs_inode->root);
+        spin_lock(&root->fs_info->ordered_extent_lock);
     }
     spin_unlock(&root->fs_info->ordered_extent_lock);
@@ -3782,8 +3787,11 @@ static void btrfs_destroy_delalloc_inodes(struct btrfs_root *root)
         list_del_init(&btrfs_inode->delalloc_inodes);
         clear_bit(BTRFS_INODE_IN_DELALLOC_LIST,
               &btrfs_inode->runtime_flags);
+        spin_unlock(&root->fs_info->delalloc_lock);
         btrfs_invalidate_inodes(btrfs_inode->root);
+        spin_lock(&root->fs_info->delalloc_lock);
     }
     spin_unlock(&root->fs_info->delalloc_lock);
@@ -3808,7 +3816,7 @@ static int btrfs_destroy_marked_extents(struct btrfs_root *root,
     while (start <= end) {
         eb = btrfs_find_tree_block(root, start,
                        root->leafsize);
-        start += eb->len;
+        start += root->leafsize;
         if (!eb)
             continue;
         wait_on_extent_buffer_writeback(eb);
......
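The btrfs_destroy_ordered_operations() and btrfs_destroy_delalloc_inodes() hunks both release the fs_info spinlock around btrfs_invalidate_inodes() and re-take it afterwards; the entry was already detached from the list under the lock, so only the list walk itself needs protection. A user-space analogue of that pattern, with a pthread mutex standing in for the spinlock and invented names (compile with -lpthread):

#include <pthread.h>
#include <stdio.h>

struct entry { struct entry *next; int id; };

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static struct entry e2 = { NULL, 2 };
static struct entry e1 = { &e2, 1 };
static struct entry *head = &e1;

/* stands in for btrfs_invalidate_inodes(): must not run under list_lock */
static void heavy_operation(struct entry *e)
{
    printf("processing entry %d\n", e->id);
}

int main(void)
{
    pthread_mutex_lock(&list_lock);
    while (head) {
        struct entry *e = head;

        head = e->next;                  /* detach while the lock is held */
        pthread_mutex_unlock(&list_lock);
        heavy_operation(e);              /* lock dropped across the call */
        pthread_mutex_lock(&list_lock);
    }
    pthread_mutex_unlock(&list_lock);
    return 0;
}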
@@ -2070,8 +2070,7 @@ static int run_delayed_extent_op(struct btrfs_trans_handle *trans,
     u32 item_size;
     int ret;
     int err = 0;
-    int metadata = (node->type == BTRFS_TREE_BLOCK_REF_KEY ||
-            node->type == BTRFS_SHARED_BLOCK_REF_KEY);
+    int metadata = !extent_op->is_data;

     if (trans->aborted)
         return 0;
@@ -2086,11 +2085,8 @@ static int run_delayed_extent_op(struct btrfs_trans_handle *trans,
     key.objectid = node->bytenr;

     if (metadata) {
-        struct btrfs_delayed_tree_ref *tree_ref;
-
-        tree_ref = btrfs_delayed_node_to_tree_ref(node);
         key.type = BTRFS_METADATA_ITEM_KEY;
-        key.offset = tree_ref->level;
+        key.offset = extent_op->level;
     } else {
         key.type = BTRFS_EXTENT_ITEM_KEY;
         key.offset = node->num_bytes;
@@ -2719,7 +2715,7 @@ int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
 int btrfs_set_disk_extent_flags(struct btrfs_trans_handle *trans,
                 struct btrfs_root *root,
                 u64 bytenr, u64 num_bytes, u64 flags,
-                int is_data)
+                int level, int is_data)
 {
     struct btrfs_delayed_extent_op *extent_op;
     int ret;
@@ -2732,6 +2728,7 @@ int btrfs_set_disk_extent_flags(struct btrfs_trans_handle *trans,
     extent_op->update_flags = 1;
     extent_op->update_key = 0;
     extent_op->is_data = is_data ? 1 : 0;
+    extent_op->level = level;

     ret = btrfs_add_delayed_extent_op(root->fs_info, trans, bytenr,
                       num_bytes, extent_op);
@@ -3109,6 +3106,11 @@ static int cache_save_setup(struct btrfs_block_group_cache *block_group,
     WARN_ON(ret);

     if (i_size_read(inode) > 0) {
+        ret = btrfs_check_trunc_cache_free_space(root,
+                    &root->fs_info->global_block_rsv);
+        if (ret)
+            goto out_put;
+
         ret = btrfs_truncate_free_space_cache(root, trans, path,
                               inode);
         if (ret)
@@ -4562,6 +4564,8 @@ static void init_global_block_rsv(struct btrfs_fs_info *fs_info)
     fs_info->csum_root->block_rsv = &fs_info->global_block_rsv;
     fs_info->dev_root->block_rsv = &fs_info->global_block_rsv;
     fs_info->tree_root->block_rsv = &fs_info->global_block_rsv;
+    if (fs_info->quota_root)
+        fs_info->quota_root->block_rsv = &fs_info->global_block_rsv;
     fs_info->chunk_root->block_rsv = &fs_info->chunk_block_rsv;

     update_global_block_rsv(fs_info);
@@ -6651,31 +6655,26 @@ use_block_rsv(struct btrfs_trans_handle *trans,
     struct btrfs_block_rsv *block_rsv;
     struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
     int ret;
+    bool global_updated = false;

     block_rsv = get_block_rsv(trans, root);

-    if (block_rsv->size == 0) {
-        ret = reserve_metadata_bytes(root, block_rsv, blocksize,
-                         BTRFS_RESERVE_NO_FLUSH);
-        /*
-         * If we couldn't reserve metadata bytes try and use some from
-         * the global reserve.
-         */
-        if (ret && block_rsv != global_rsv) {
-            ret = block_rsv_use_bytes(global_rsv, blocksize);
-            if (!ret)
-                return global_rsv;
-            return ERR_PTR(ret);
-        } else if (ret) {
-            return ERR_PTR(ret);
-        }
-        return block_rsv;
-    }
-
-    ret = block_rsv_use_bytes(block_rsv, blocksize);
+    if (unlikely(block_rsv->size == 0))
+        goto try_reserve;
+again:
+    ret = block_rsv_use_bytes(block_rsv, blocksize);
     if (!ret)
         return block_rsv;
-    if (ret && !block_rsv->failfast) {
+
+    if (block_rsv->failfast)
+        return ERR_PTR(ret);
+
+    if (block_rsv->type == BTRFS_BLOCK_RSV_GLOBAL && !global_updated) {
+        global_updated = true;
+        update_global_block_rsv(root->fs_info);
+        goto again;
+    }
+
     if (btrfs_test_opt(root, ENOSPC_DEBUG)) {
         static DEFINE_RATELIMIT_STATE(_rs,
                 DEFAULT_RATELIMIT_INTERVAL * 10,
@@ -6684,18 +6683,23 @@ use_block_rsv(struct btrfs_trans_handle *trans,
             WARN(1, KERN_DEBUG
                 "btrfs: block rsv returned %d\n", ret);
         }
+try_reserve:
     ret = reserve_metadata_bytes(root, block_rsv, blocksize,
                      BTRFS_RESERVE_NO_FLUSH);
-    if (!ret) {
+    if (!ret)
         return block_rsv;
-    } else if (ret && block_rsv != global_rsv) {
+    /*
+     * If we couldn't reserve metadata bytes try and use some from
+     * the global reserve if its space type is the same as the global
+     * reservation.
+     */
+    if (block_rsv->type != BTRFS_BLOCK_RSV_GLOBAL &&
+        block_rsv->space_info == global_rsv->space_info) {
         ret = block_rsv_use_bytes(global_rsv, blocksize);
         if (!ret)
             return global_rsv;
     }
-    }
-
-    return ERR_PTR(-ENOSPC);
+    return ERR_PTR(ret);
 }

 static void unuse_block_rsv(struct btrfs_fs_info *fs_info,
@@ -6763,6 +6767,7 @@ struct extent_buffer *btrfs_alloc_free_block(struct btrfs_trans_handle *trans,
         extent_op->update_key = 1;
         extent_op->update_flags = 1;
         extent_op->is_data = 0;
+        extent_op->level = level;

         ret = btrfs_add_delayed_tree_ref(root->fs_info, trans,
                          ins.objectid,
@@ -6934,7 +6939,8 @@ static noinline int walk_down_proc(struct btrfs_trans_handle *trans,
         ret = btrfs_dec_ref(trans, root, eb, 0, wc->for_reloc);
         BUG_ON(ret); /* -ENOMEM */
         ret = btrfs_set_disk_extent_flags(trans, root, eb->start,
-                          eb->len, flag, 0);
+                          eb->len, flag,
+                          btrfs_header_level(eb), 0);
         BUG_ON(ret); /* -ENOMEM */
         wc->flags[level] |= flag;
     }
......
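The rewritten use_block_rsv() tries its own reserve first, refreshes the global reserve once if that happens to be the reserve in use, then falls back to an explicit reservation, and only as a last resort borrows from the global reserve. A stand-alone sketch of that control flow (plain C with invented stub types; the failfast and space_info checks are collapsed into simple flags, so this is an approximation rather than the kernel routine):

#include <stdbool.h>
#include <stdio.h>

#define MOCK_ENOSPC 28

struct rsv { long size, reserved; bool is_global; };

static int rsv_use_bytes(struct rsv *r, long n)
{
    if (r->reserved < n)
        return -MOCK_ENOSPC;
    r->reserved -= n;
    return 0;
}

/* stand-in for reserve_metadata_bytes(): pretend the reservation always fails */
static int reserve_metadata(struct rsv *r, long n)
{
    (void)r;
    (void)n;
    return -MOCK_ENOSPC;
}

static struct rsv *pick_reserve(struct rsv *block_rsv, struct rsv *global_rsv,
                                long blocksize)
{
    bool global_updated = false;
    int ret;

    if (block_rsv->size == 0)
        goto try_reserve;
again:
    ret = rsv_use_bytes(block_rsv, blocksize);
    if (!ret)
        return block_rsv;
    if (block_rsv->is_global && !global_updated) {
        global_updated = true;
        block_rsv->reserved = block_rsv->size;  /* update_global_block_rsv() stand-in */
        goto again;
    }
try_reserve:
    ret = reserve_metadata(block_rsv, blocksize);
    if (!ret)
        return block_rsv;
    /* last resort: borrow from the global reserve */
    if (!block_rsv->is_global) {
        ret = rsv_use_bytes(global_rsv, blocksize);
        if (!ret)
            return global_rsv;
    }
    return NULL;                                /* ERR_PTR(ret) in the kernel */
}

int main(void)
{
    struct rsv global = { 8192, 8192, true };
    struct rsv local  = { 4096, 0, false };

    printf("fell back to global reserve: %s\n",
           pick_reserve(&local, &global, 4096) == &global ? "yes" : "no");
    return 0;
}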
@@ -1960,28 +1960,6 @@ static void check_page_uptodate(struct extent_io_tree *tree, struct page *page)
         SetPageUptodate(page);
 }

-/*
- * helper function to unlock a page if all the extents in the tree
- * for that page are unlocked
- */
-static void check_page_locked(struct extent_io_tree *tree, struct page *page)
-{
-    u64 start = page_offset(page);
-    u64 end = start + PAGE_CACHE_SIZE - 1;
-    if (!test_range_bit(tree, start, end, EXTENT_LOCKED, 0, NULL))
-        unlock_page(page);
-}
-
-/*
- * helper function to end page writeback if all the extents
- * in the tree for that page are done with writeback
- */
-static void check_page_writeback(struct extent_io_tree *tree,
-                 struct page *page)
-{
-    end_page_writeback(page);
-}
-
 /*
  * When IO fails, either with EIO or csum verification fails, we
  * try other mirrors that might have a good copy of the data. This
@@ -2411,19 +2389,24 @@ static void end_bio_extent_writepage(struct bio *bio, int err)
     struct extent_io_tree *tree;
     u64 start;
     u64 end;
-    int whole_page;

     do {
         struct page *page = bvec->bv_page;
         tree = &BTRFS_I(page->mapping->host)->io_tree;

-        start = page_offset(page) + bvec->bv_offset;
-        end = start + bvec->bv_len - 1;
+        /* We always issue full-page reads, but if some block
+         * in a page fails to read, blk_update_request() will
+         * advance bv_offset and adjust bv_len to compensate.
+         * Print a warning for nonzero offsets, and an error
+         * if they don't add up to a full page. */
+        if (bvec->bv_offset || bvec->bv_len != PAGE_CACHE_SIZE)
+            printk("%s page write in btrfs with offset %u and length %u\n",
+                   bvec->bv_offset + bvec->bv_len != PAGE_CACHE_SIZE
+                   ? KERN_ERR "partial" : KERN_INFO "incomplete",
+                   bvec->bv_offset, bvec->bv_len);

-        if (bvec->bv_offset == 0 && bvec->bv_len == PAGE_CACHE_SIZE)
-            whole_page = 1;
-        else
-            whole_page = 0;
+        start = page_offset(page);
+        end = start + bvec->bv_offset + bvec->bv_len - 1;

         if (--bvec >= bio->bi_io_vec)
             prefetchw(&bvec->bv_page->flags);
@@ -2431,10 +2414,7 @@ static void end_bio_extent_writepage(struct bio *bio, int err)
         if (end_extent_writepage(page, err, start, end))
             continue;

-        if (whole_page)
             end_page_writeback(page);
-        else
-            check_page_writeback(tree, page);
     } while (bvec >= bio->bi_io_vec);

     bio_put(bio);
@@ -2459,7 +2439,6 @@ static void end_bio_extent_readpage(struct bio *bio, int err)
     struct extent_io_tree *tree;
     u64 start;
     u64 end;
-    int whole_page;
     int mirror;
     int ret;

@@ -2477,13 +2456,19 @@ static void end_bio_extent_readpage(struct bio *bio, int err)
                  io_bio->mirror_num);
         tree = &BTRFS_I(page->mapping->host)->io_tree;

-        start = page_offset(page) + bvec->bv_offset;
-        end = start + bvec->bv_len - 1;
+        /* We always issue full-page reads, but if some block
+         * in a page fails to read, blk_update_request() will
+         * advance bv_offset and adjust bv_len to compensate.
+         * Print a warning for nonzero offsets, and an error
+         * if they don't add up to a full page. */
+        if (bvec->bv_offset || bvec->bv_len != PAGE_CACHE_SIZE)
+            printk("%s page read in btrfs with offset %u and length %u\n",
+                   bvec->bv_offset + bvec->bv_len != PAGE_CACHE_SIZE
+                   ? KERN_ERR "partial" : KERN_INFO "incomplete",
+                   bvec->bv_offset, bvec->bv_len);

-        if (bvec->bv_offset == 0 && bvec->bv_len == PAGE_CACHE_SIZE)
-            whole_page = 1;
-        else
-            whole_page = 0;
+        start = page_offset(page);
+        end = start + bvec->bv_offset + bvec->bv_len - 1;

         if (++bvec <= bvec_end)
             prefetchw(&bvec->bv_page->flags);
@@ -2542,7 +2527,6 @@ static void end_bio_extent_readpage(struct bio *bio, int err)
         }
         unlock_extent_cached(tree, start, end, &cached, GFP_ATOMIC);

-        if (whole_page) {
         if (uptodate) {
             SetPageUptodate(page);
         } else {
@@ -2550,15 +2534,6 @@ static void end_bio_extent_readpage(struct bio *bio, int err)
             SetPageError(page);
         }
         unlock_page(page);
-        } else {
-            if (uptodate) {
-                check_page_uptodate(tree, page);
-            } else {
-                ClearPageUptodate(page);
-                SetPageError(page);
-            }
-            check_page_locked(tree, page);
-        }
     } while (bvec <= bvec_end);

     bio_put(bio);
@@ -4022,7 +3997,7 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
         last_for_get_extent = isize;
     }

-    lock_extent_bits(&BTRFS_I(inode)->io_tree, start, start + len, 0,
+    lock_extent_bits(&BTRFS_I(inode)->io_tree, start, start + len - 1, 0,
              &cached_state);

     em = get_extent_skip_holes(inode, start, last_for_get_extent,
@@ -4109,7 +4084,7 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
 out_free:
     free_extent_map(em);
 out:
-    unlock_extent_cached(&BTRFS_I(inode)->io_tree, start, start + len,
+    unlock_extent_cached(&BTRFS_I(inode)->io_tree, start, start + len - 1,
                  &cached_state, GFP_NOFS);
     return ret;
 }
......
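The end_bio_extent_writepage()/end_bio_extent_readpage() hunks drop the whole_page bookkeeping and recompute the range from the page itself, relying on the fact that btrfs always issues full-page I/O: even after blk_update_request() has advanced bv_offset, bv_offset + bv_len still reaches the end of the page. A quick user-space check of that arithmetic (PAGE_CACHE_SIZE assumed to be 4096 here):

#include <stdio.h>

#define PAGE_CACHE_SIZE 4096ULL

int main(void)
{
    /* page 8 of a file, with a bvec that blk_update_request() has advanced */
    unsigned long long page_offset = 8 * PAGE_CACHE_SIZE;
    unsigned int bv_offset = 1024, bv_len = 3072;

    unsigned long long old_start = page_offset + bv_offset;
    unsigned long long old_end   = old_start + bv_len - 1;
    unsigned long long new_start = page_offset;
    unsigned long long new_end   = new_start + bv_offset + bv_len - 1;

    printf("old range: [%llu, %llu]\n", old_start, old_end);
    printf("new range: [%llu, %llu], ends on a page boundary: %d\n",
           new_start, new_end, (new_end + 1) % PAGE_CACHE_SIZE == 0);
    return 0;
}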
@@ -197,30 +197,32 @@ int create_free_space_inode(struct btrfs_root *root,
                       block_group->key.objectid);
 }

-int btrfs_truncate_free_space_cache(struct btrfs_root *root,
-                    struct btrfs_trans_handle *trans,
-                    struct btrfs_path *path,
-                    struct inode *inode)
+int btrfs_check_trunc_cache_free_space(struct btrfs_root *root,
+                       struct btrfs_block_rsv *rsv)
 {
-    struct btrfs_block_rsv *rsv;
     u64 needed_bytes;
-    loff_t oldsize;
-    int ret = 0;
-
-    rsv = trans->block_rsv;
-    trans->block_rsv = &root->fs_info->global_block_rsv;
+    int ret;

     /* 1 for slack space, 1 for updating the inode */
     needed_bytes = btrfs_calc_trunc_metadata_size(root, 1) +
         btrfs_calc_trans_metadata_size(root, 1);

-    spin_lock(&trans->block_rsv->lock);
-    if (trans->block_rsv->reserved < needed_bytes) {
-        spin_unlock(&trans->block_rsv->lock);
-        trans->block_rsv = rsv;
-        return -ENOSPC;
-    }
-    spin_unlock(&trans->block_rsv->lock);
+    spin_lock(&rsv->lock);
+    if (rsv->reserved < needed_bytes)
+        ret = -ENOSPC;
+    else
+        ret = 0;
+    spin_unlock(&rsv->lock);
+    return 0;
+}
+
+int btrfs_truncate_free_space_cache(struct btrfs_root *root,
+                    struct btrfs_trans_handle *trans,
+                    struct btrfs_path *path,
+                    struct inode *inode)
+{
+    loff_t oldsize;
+    int ret = 0;

     oldsize = i_size_read(inode);
     btrfs_i_size_write(inode, 0);
@@ -232,9 +234,7 @@ int btrfs_truncate_free_space_cache(struct btrfs_root *root,
      */
     ret = btrfs_truncate_inode_items(trans, root, inode,
                      0, BTRFS_EXTENT_DATA_KEY);
-
     if (ret) {
-        trans->block_rsv = rsv;
         btrfs_abort_transaction(trans, root, ret);
         return ret;
     }
@@ -242,7 +242,6 @@ int btrfs_truncate_free_space_cache(struct btrfs_root *root,
     ret = btrfs_update_inode(trans, root, inode);
     if (ret)
         btrfs_abort_transaction(trans, root, ret);
-    trans->block_rsv = rsv;

     return ret;
 }
@@ -920,10 +919,8 @@ static int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
     /* Make sure we can fit our crcs into the first page */
     if (io_ctl.check_crcs &&
-        (io_ctl.num_pages * sizeof(u32)) >= PAGE_CACHE_SIZE) {
-        WARN_ON(1);
+        (io_ctl.num_pages * sizeof(u32)) >= PAGE_CACHE_SIZE)
         goto out_nospc;
-    }

     io_ctl_set_generation(&io_ctl, trans->transid);
......
@@ -54,6 +54,8 @@ int create_free_space_inode(struct btrfs_root *root,
               struct btrfs_block_group_cache *block_group,
               struct btrfs_path *path);
+int btrfs_check_trunc_cache_free_space(struct btrfs_root *root,
+                       struct btrfs_block_rsv *rsv);
 int btrfs_truncate_free_space_cache(struct btrfs_root *root,
                     struct btrfs_trans_handle *trans,
                     struct btrfs_path *path,
......
@@ -429,11 +429,12 @@ int btrfs_save_ino_cache(struct btrfs_root *root,
     num_bytes = trans->bytes_reserved;
     /*
      * 1 item for inode item insertion if need
-     * 3 items for inode item update (in the worst case)
+     * 4 items for inode item update (in the worst case)
+     * 1 items for slack space if we need do truncation
      * 1 item for free space object
      * 3 items for pre-allocation
      */
-    trans->bytes_reserved = btrfs_calc_trans_metadata_size(root, 8);
+    trans->bytes_reserved = btrfs_calc_trans_metadata_size(root, 10);
     ret = btrfs_block_rsv_add(root, trans->block_rsv,
                   trans->bytes_reserved,
                   BTRFS_RESERVE_NO_FLUSH);
@@ -468,6 +469,7 @@ int btrfs_save_ino_cache(struct btrfs_root *root,
     if (i_size_read(inode) > 0) {
         ret = btrfs_truncate_free_space_cache(root, trans, path, inode);
         if (ret) {
+            if (ret != -ENOSPC)
                 btrfs_abort_transaction(trans, root, ret);
             goto out_put;
         }
......
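The bump from 8 to 10 reserved items in btrfs_save_ino_cache() simply matches the updated comment; a trivial check of the arithmetic (plain C, no kernel code involved):

#include <stdio.h>

int main(void)
{
    int inode_insert   = 1; /* 1 item for inode item insertion if needed */
    int inode_update   = 4; /* 4 items for inode item update (worst case) */
    int slack          = 1; /* 1 item of slack space for truncation */
    int free_space_obj = 1; /* 1 item for the free space object */
    int prealloc       = 3; /* 3 items for pre-allocation */

    printf("items to reserve: %d\n",
           inode_insert + inode_update + slack + free_space_obj + prealloc);
    return 0;
}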
@@ -714,8 +714,10 @@ static noinline int submit_compressed_extents(struct inode *inode,
                     async_extent->ram_size - 1, 0);

         em = alloc_extent_map();
-        if (!em)
+        if (!em) {
+            ret = -ENOMEM;
             goto out_free_reserve;
+        }
         em->start = async_extent->start;
         em->len = async_extent->ram_size;
         em->orig_start = em->start;
@@ -922,8 +924,10 @@ static noinline int __cow_file_range(struct btrfs_trans_handle *trans,
         }

         em = alloc_extent_map();
-        if (!em)
+        if (!em) {
+            ret = -ENOMEM;
             goto out_reserve;
+        }
         em->start = start;
         em->orig_start = em->start;
         ram_size = ins.offset;
@@ -4723,6 +4727,7 @@ void btrfs_evict_inode(struct inode *inode)
     btrfs_end_transaction(trans, root);
     btrfs_btree_balance_dirty(root);
 no_delete:
+    btrfs_remove_delayed_node(inode);
     clear_inode(inode);
     return;
 }
@@ -4838,14 +4843,13 @@ static void inode_tree_add(struct inode *inode)
     struct rb_node **p;
     struct rb_node *parent;
     u64 ino = btrfs_ino(inode);
-again:
-    p = &root->inode_tree.rb_node;
-    parent = NULL;

     if (inode_unhashed(inode))
         return;
-
+again:
+    parent = NULL;
     spin_lock(&root->inode_lock);
+    p = &root->inode_tree.rb_node;
     while (*p) {
         parent = *p;
         entry = rb_entry(parent, struct btrfs_inode, rb_node);
@@ -8000,7 +8004,6 @@ void btrfs_destroy_inode(struct inode *inode)
     inode_tree_del(inode);
     btrfs_drop_extent_cache(inode, 0, (u64)-1, 0);
 free:
-    btrfs_remove_delayed_node(inode);
     call_rcu(&inode->i_rcu, btrfs_i_callback);
 }
......
@@ -1801,7 +1801,11 @@ static noinline int copy_to_sk(struct btrfs_root *root,
         item_off = btrfs_item_ptr_offset(leaf, i);
         item_len = btrfs_item_size_nr(leaf, i);

-        if (item_len > BTRFS_SEARCH_ARGS_BUFSIZE)
+        btrfs_item_key_to_cpu(leaf, key, i);
+        if (!key_in_sk(key, sk))
+            continue;
+
+        if (sizeof(sh) + item_len > BTRFS_SEARCH_ARGS_BUFSIZE)
             item_len = 0;

         if (sizeof(sh) + item_len + *sk_offset >
@@ -1810,10 +1814,6 @@ static noinline int copy_to_sk(struct btrfs_root *root,
             goto overflow;
         }

-        btrfs_item_key_to_cpu(leaf, key, i);
-        if (!key_in_sk(key, sk))
-            continue;
-
         sh.objectid = key->objectid;
         sh.offset = key->offset;
         sh.type = key->type;
......
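In the copy_to_sk() hunks the key filter now runs before any size handling, and the size clamp accounts for the header, so items outside the requested key range can no longer influence the clamping or the overflow exit. A simplified user-space model of the reordered logic (buffer size, header layout and names are invented for illustration):

#include <stdio.h>
#include <string.h>

#define BUFSIZE 64

struct hdr { unsigned int objectid, type, len; };

static int copy_item(char *buf, unsigned int *off, unsigned int objectid,
                     unsigned int type, const char *data, unsigned int len,
                     int key_matches)
{
    struct hdr sh = { objectid, type, len };

    if (!key_matches)                     /* filter first, as the new code does */
        return 0;
    if (sizeof(sh) + len > BUFSIZE) {     /* too big: ship the header only */
        len = 0;
        sh.len = 0;
    }
    if (sizeof(sh) + len + *off > BUFSIZE)
        return -1;                        /* would overflow the result buffer */
    memcpy(buf + *off, &sh, sizeof(sh));
    *off += sizeof(sh);
    memcpy(buf + *off, data, len);
    *off += len;
    return 1;
}

int main(void)
{
    char buf[BUFSIZE];
    unsigned int off = 0;

    printf("copied:   %d\n", copy_item(buf, &off, 256, 1, "hello", 5, 1));
    printf("filtered: %d\n", copy_item(buf, &off, 257, 1, "skip", 4, 0));
    printf("bytes used: %u\n", off);
    return 0;
}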
@@ -1773,7 +1773,7 @@ int replace_path(struct btrfs_trans_handle *trans,
         if (!eb || !extent_buffer_uptodate(eb)) {
             ret = (!eb) ? -ENOMEM : -EIO;
             free_extent_buffer(eb);
-            return ret;
+            break;
         }
         btrfs_tree_lock(eb);
         if (cow) {
@@ -3350,6 +3350,11 @@ static int delete_block_group_cache(struct btrfs_fs_info *fs_info,
     }

 truncate:
+    ret = btrfs_check_trunc_cache_free_space(root,
+                    &fs_info->global_block_rsv);
+    if (ret)
+        goto out;
+
     path = btrfs_alloc_path();
     if (!path) {
         ret = -ENOMEM;
......
@@ -1263,6 +1263,7 @@ static int btrfs_remount(struct super_block *sb, int *flags, char *data)
         btrfs_dev_replace_suspend_for_unmount(fs_info);
         btrfs_scrub_cancel(fs_info);
+        btrfs_pause_balance(fs_info);

         ret = btrfs_commit_super(root);
         if (ret)
......
@@ -3120,14 +3120,13 @@ int btrfs_balance(struct btrfs_balance_control *bctl,
     allowed = BTRFS_AVAIL_ALLOC_BIT_SINGLE;
     if (num_devices == 1)
         allowed |= BTRFS_BLOCK_GROUP_DUP;
-    else if (num_devices < 4)
+    else if (num_devices > 1)
         allowed |= (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID1);
-    else
-        allowed |= (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID1 |
-                BTRFS_BLOCK_GROUP_RAID10 |
-                BTRFS_BLOCK_GROUP_RAID5 |
-                BTRFS_BLOCK_GROUP_RAID6);
+    if (num_devices > 2)
+        allowed |= BTRFS_BLOCK_GROUP_RAID5;
+    if (num_devices > 3)
+        allowed |= (BTRFS_BLOCK_GROUP_RAID10 |
+                BTRFS_BLOCK_GROUP_RAID6);

     if ((bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
         (!alloc_profile_is_valid(bctl->data.target, 1) ||
         (bctl->data.target & ~allowed))) {
......
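The btrfs_balance() hunk rebuilds the allowed-profile mask incrementally from the device count instead of the old two-way split: RAID0/RAID1 need two devices, RAID5 three, RAID10/RAID6 four. A stand-alone sketch of that per-device-count policy (flag values are invented, not the kernel's):

#include <stdio.h>

#define SINGLE  (1u << 0)
#define DUP     (1u << 1)
#define RAID0   (1u << 2)
#define RAID1   (1u << 3)
#define RAID5   (1u << 4)
#define RAID10  (1u << 5)
#define RAID6   (1u << 6)

static unsigned int allowed_profiles(int num_devices)
{
    unsigned int allowed = SINGLE;

    if (num_devices == 1)
        allowed |= DUP;
    else if (num_devices > 1)
        allowed |= RAID0 | RAID1;
    if (num_devices > 2)
        allowed |= RAID5;
    if (num_devices > 3)
        allowed |= RAID10 | RAID6;
    return allowed;
}

int main(void)
{
    for (int n = 1; n <= 4; n++)
        printf("%d device(s): allowed mask 0x%02x\n", n, allowed_profiles(n));
    return 0;
}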