Commit c1f42467 authored by Linus Torvalds

Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mason/linux-btrfs

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mason/linux-btrfs:
  btrfs: rename the option to nospace_cache
  Btrfs: handle bio_add_page failure gracefully in scrub
  Btrfs: fix deadlock caused by the race between relocation
  Btrfs: only map pages if we know we need them when reading the space cache
  Btrfs: fix orphan backref nodes
  Btrfs: Abstract similar code for btrfs_block_rsv_add{, _noflush}
  Btrfs: fix unreleased path in btrfs_orphan_cleanup()
  Btrfs: fix no reserved space for writing out inode cache
  Btrfs: fix nocow when deleting the item
  Btrfs: tweak the delayed inode reservations again
  Btrfs: rework error handling in btrfs_mount()
  Btrfs: close devices on all error paths in open_ctree()
  Btrfs: avoid null dereference and leaks when bailing from open_ctree()
  Btrfs: fix subvol_name leak on error in btrfs_mount()
  Btrfs: fix memory leak in btrfs_parse_early_options()
  Btrfs: fix our reservations for updating an inode when completing io
  Btrfs: fix oops on NULL trans handle in btrfs_truncate
  btrfs: fix double-free 'tree_root' in 'btrfs_mount()'
@@ -147,14 +147,12 @@ struct btrfs_inode {
 	 * the btrfs file release call will add this inode to the
 	 * ordered operations list so that we make sure to flush out any
 	 * new data the application may have written before commit.
-	 *
-	 * yes, its silly to have a single bitflag, but we might grow more
-	 * of these.
 	 */
 	unsigned ordered_data_close:1;
 	unsigned orphan_meta_reserved:1;
 	unsigned dummy_inode:1;
 	unsigned in_defrag:1;
+	unsigned delalloc_meta_reserved:1;

 	/*
 	 * always compress this one file
...
@@ -617,12 +617,14 @@ static void btrfs_delayed_item_release_metadata(struct btrfs_root *root,
 static int btrfs_delayed_inode_reserve_metadata(
 					struct btrfs_trans_handle *trans,
 					struct btrfs_root *root,
+					struct inode *inode,
 					struct btrfs_delayed_node *node)
 {
 	struct btrfs_block_rsv *src_rsv;
 	struct btrfs_block_rsv *dst_rsv;
 	u64 num_bytes;
 	int ret;
+	int release = false;

 	src_rsv = trans->block_rsv;
 	dst_rsv = &root->fs_info->delayed_block_rsv;
@@ -652,12 +654,65 @@ static int btrfs_delayed_inode_reserve_metadata(
 		if (!ret)
 			node->bytes_reserved = num_bytes;
 		return ret;
+	} else if (src_rsv == &root->fs_info->delalloc_block_rsv) {
+		spin_lock(&BTRFS_I(inode)->lock);
+		if (BTRFS_I(inode)->delalloc_meta_reserved) {
+			BTRFS_I(inode)->delalloc_meta_reserved = 0;
+			spin_unlock(&BTRFS_I(inode)->lock);
+			release = true;
+			goto migrate;
+		}
+		spin_unlock(&BTRFS_I(inode)->lock);
+
+		/* Ok we didn't have space pre-reserved.  This shouldn't happen
+		 * too often but it can happen if we do delalloc to an existing
+		 * inode which gets dirtied because of the time update, and then
+		 * isn't touched again until after the transaction commits and
+		 * then we try to write out the data.  First try to be nice and
+		 * reserve something strictly for us.  If not be a pain and try
+		 * to steal from the delalloc block rsv.
+		 */
+		ret = btrfs_block_rsv_add_noflush(root, dst_rsv, num_bytes);
+		if (!ret)
+			goto out;
+
+		ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes);
+		if (!ret)
+			goto out;
+
+		/*
+		 * Ok this is a problem, let's just steal from the global rsv
+		 * since this really shouldn't happen that often.
+		 */
+		WARN_ON(1);
+		ret = btrfs_block_rsv_migrate(&root->fs_info->global_block_rsv,
+					      dst_rsv, num_bytes);
+		goto out;
 	}

+migrate:
 	ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes);
+
+out:
+	/*
+	 * Migrate only takes a reservation, it doesn't touch the size of the
+	 * block_rsv.  This is to simplify people who don't normally have things
+	 * migrated from their block rsv.  If they go to release their
+	 * reservation, that will decrease the size as well, so if migrate
+	 * reduced size we'd end up with a negative size.  But for the
+	 * delalloc_meta_reserved stuff we will only know to drop 1 reservation,
+	 * but we could in fact do this reserve/migrate dance several times
+	 * between the time we did the original reservation and we'd clean it
+	 * up.  So to take care of this, release the space for the meta
+	 * reservation here.  I think it may be time for a documentation page on
+	 * how block rsvs. work.
+	 */
 	if (!ret)
 		node->bytes_reserved = num_bytes;

+	if (release)
+		btrfs_block_rsv_release(root, src_rsv, num_bytes);
+
 	return ret;
 }
@@ -1708,7 +1763,8 @@ int btrfs_delayed_update_inode(struct btrfs_trans_handle *trans,
 		goto release_node;
 	}

-	ret = btrfs_delayed_inode_reserve_metadata(trans, root, delayed_node);
+	ret = btrfs_delayed_inode_reserve_metadata(trans, root, inode,
+						   delayed_node);
 	if (ret)
 		goto release_node;
...
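The long "migrate only takes a reservation" comment in the hunk above is easier to follow with a toy model of the accounting. The sketch below is not kernel code; `toy_rsv`, `toy_reserve`, `toy_migrate`, and `toy_release` are made-up names that only mimic how a block reservation tracks `size` (what we promised to keep reserved) versus `reserved` (what is actually backed by space). Migrate moves only the backing, so the donor's promise has to be trimmed with an explicit release, which is what the new `release` flag does.

```c
#include <assert.h>
#include <stdio.h>

/* Toy model of a btrfs-style block reservation (hypothetical, user space). */
struct toy_rsv {
	long size;      /* bytes we promised to keep reserved */
	long reserved;  /* bytes actually backed by free space */
};

/* Reserve space: both the promise and the backing grow. */
static void toy_reserve(struct toy_rsv *r, long bytes)
{
	r->size += bytes;
	r->reserved += bytes;
}

/* Migrate: move backing bytes only; neither rsv's size changes. */
static void toy_migrate(struct toy_rsv *src, struct toy_rsv *dst, long bytes)
{
	assert(src->reserved >= bytes);
	src->reserved -= bytes;
	dst->reserved += bytes;
}

/* Release: drop the promise and give back any backing beyond it. */
static void toy_release(struct toy_rsv *r, long bytes)
{
	r->size -= bytes;
	if (r->reserved > r->size)
		r->reserved = r->size;
}

int main(void)
{
	struct toy_rsv delalloc = {0, 0}, delayed = {0, 0};

	toy_reserve(&delalloc, 4096);           /* pre-reserved for the inode update */
	toy_migrate(&delalloc, &delayed, 4096); /* delayed code takes the backing */

	/* Without an explicit release the delalloc rsv still promises 4096
	 * bytes it no longer holds; releasing fixes the bookkeeping. */
	toy_release(&delalloc, 4096);

	printf("delalloc size=%ld reserved=%ld, delayed reserved=%ld\n",
	       delalloc.size, delalloc.reserved, delayed.reserved);
	return 0;
}
```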
@@ -1890,31 +1890,32 @@ struct btrfs_root *open_ctree(struct super_block *sb,
 	u64 features;
 	struct btrfs_key location;
 	struct buffer_head *bh;
-	struct btrfs_root *extent_root = kzalloc(sizeof(struct btrfs_root),
-						 GFP_NOFS);
-	struct btrfs_root *csum_root = kzalloc(sizeof(struct btrfs_root),
-					       GFP_NOFS);
+	struct btrfs_super_block *disk_super;
 	struct btrfs_root *tree_root = btrfs_sb(sb);
-	struct btrfs_fs_info *fs_info = NULL;
-	struct btrfs_root *chunk_root = kzalloc(sizeof(struct btrfs_root),
-						GFP_NOFS);
-	struct btrfs_root *dev_root = kzalloc(sizeof(struct btrfs_root),
-					      GFP_NOFS);
+	struct btrfs_fs_info *fs_info = tree_root->fs_info;
+	struct btrfs_root *extent_root;
+	struct btrfs_root *csum_root;
+	struct btrfs_root *chunk_root;
+	struct btrfs_root *dev_root;
 	struct btrfs_root *log_tree_root;
 	int ret;
 	int err = -EINVAL;
 	int num_backups_tried = 0;
 	int backup_index = 0;
-	struct btrfs_super_block *disk_super;

-	if (!extent_root || !tree_root || !tree_root->fs_info ||
-	    !chunk_root || !dev_root || !csum_root) {
+	extent_root = fs_info->extent_root =
+			kzalloc(sizeof(struct btrfs_root), GFP_NOFS);
+	csum_root = fs_info->csum_root =
+			kzalloc(sizeof(struct btrfs_root), GFP_NOFS);
+	chunk_root = fs_info->chunk_root =
+			kzalloc(sizeof(struct btrfs_root), GFP_NOFS);
+	dev_root = fs_info->dev_root =
+			kzalloc(sizeof(struct btrfs_root), GFP_NOFS);
+
+	if (!extent_root || !csum_root || !chunk_root || !dev_root) {
 		err = -ENOMEM;
 		goto fail;
 	}
-	fs_info = tree_root->fs_info;

 	ret = init_srcu_struct(&fs_info->subvol_srcu);
 	if (ret) {
@@ -1954,12 +1955,6 @@ struct btrfs_root *open_ctree(struct super_block *sb,
 	mutex_init(&fs_info->reloc_mutex);

 	init_completion(&fs_info->kobj_unregister);
-	fs_info->tree_root = tree_root;
-	fs_info->extent_root = extent_root;
-	fs_info->csum_root = csum_root;
-	fs_info->chunk_root = chunk_root;
-	fs_info->dev_root = dev_root;
-	fs_info->fs_devices = fs_devices;
 	INIT_LIST_HEAD(&fs_info->dirty_cowonly_roots);
 	INIT_LIST_HEAD(&fs_info->space_info);
 	btrfs_mapping_init(&fs_info->mapping_tree);
@@ -2465,21 +2460,20 @@ struct btrfs_root *open_ctree(struct super_block *sb,
 	btrfs_stop_workers(&fs_info->caching_workers);
 fail_alloc:
 fail_iput:
+	btrfs_mapping_tree_free(&fs_info->mapping_tree);
+
 	invalidate_inode_pages2(fs_info->btree_inode->i_mapping);
 	iput(fs_info->btree_inode);
-
-	btrfs_close_devices(fs_info->fs_devices);
-	btrfs_mapping_tree_free(&fs_info->mapping_tree);
 fail_bdi:
 	bdi_destroy(&fs_info->bdi);
 fail_srcu:
 	cleanup_srcu_struct(&fs_info->subvol_srcu);
 fail:
+	btrfs_close_devices(fs_info->fs_devices);
 	free_fs_info(fs_info);
 	return ERR_PTR(err);

 recovery_tree_root:
 	if (!btrfs_test_opt(tree_root, RECOVERY))
 		goto fail_tree_roots;
...
@@ -3797,16 +3797,16 @@ void btrfs_free_block_rsv(struct btrfs_root *root,
 	kfree(rsv);
 }

-int btrfs_block_rsv_add(struct btrfs_root *root,
-			struct btrfs_block_rsv *block_rsv,
-			u64 num_bytes)
+static inline int __block_rsv_add(struct btrfs_root *root,
+				  struct btrfs_block_rsv *block_rsv,
+				  u64 num_bytes, int flush)
 {
 	int ret;

 	if (num_bytes == 0)
 		return 0;

-	ret = reserve_metadata_bytes(root, block_rsv, num_bytes, 1);
+	ret = reserve_metadata_bytes(root, block_rsv, num_bytes, flush);
 	if (!ret) {
 		block_rsv_add_bytes(block_rsv, num_bytes, 1);
 		return 0;
@@ -3815,22 +3815,18 @@ int btrfs_block_rsv_add(struct btrfs_root *root,
 	return ret;
 }

+int btrfs_block_rsv_add(struct btrfs_root *root,
+			struct btrfs_block_rsv *block_rsv,
+			u64 num_bytes)
+{
+	return __block_rsv_add(root, block_rsv, num_bytes, 1);
+}
+
 int btrfs_block_rsv_add_noflush(struct btrfs_root *root,
 				struct btrfs_block_rsv *block_rsv,
 				u64 num_bytes)
 {
-	int ret;
-
-	if (num_bytes == 0)
-		return 0;
-
-	ret = reserve_metadata_bytes(root, block_rsv, num_bytes, 0);
-	if (!ret) {
-		block_rsv_add_bytes(block_rsv, num_bytes, 1);
-		return 0;
-	}
-
-	return ret;
+	return __block_rsv_add(root, block_rsv, num_bytes, 0);
 }

 int btrfs_block_rsv_check(struct btrfs_root *root,
@@ -4064,23 +4060,30 @@ int btrfs_snap_reserve_metadata(struct btrfs_trans_handle *trans,
  */
 static unsigned drop_outstanding_extent(struct inode *inode)
 {
+	unsigned drop_inode_space = 0;
 	unsigned dropped_extents = 0;

 	BUG_ON(!BTRFS_I(inode)->outstanding_extents);
 	BTRFS_I(inode)->outstanding_extents--;

+	if (BTRFS_I(inode)->outstanding_extents == 0 &&
+	    BTRFS_I(inode)->delalloc_meta_reserved) {
+		drop_inode_space = 1;
+		BTRFS_I(inode)->delalloc_meta_reserved = 0;
+	}
+
 	/*
 	 * If we have more or the same amount of outsanding extents than we have
 	 * reserved then we need to leave the reserved extents count alone.
 	 */
 	if (BTRFS_I(inode)->outstanding_extents >=
 	    BTRFS_I(inode)->reserved_extents)
-		return 0;
+		return drop_inode_space;

 	dropped_extents = BTRFS_I(inode)->reserved_extents -
 		BTRFS_I(inode)->outstanding_extents;
 	BTRFS_I(inode)->reserved_extents -= dropped_extents;
-	return dropped_extents;
+	return dropped_extents + drop_inode_space;
 }

 /*
@@ -4166,9 +4169,18 @@ int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
 		nr_extents = BTRFS_I(inode)->outstanding_extents -
 			BTRFS_I(inode)->reserved_extents;
 		BTRFS_I(inode)->reserved_extents += nr_extents;
+	}

-		to_reserve = btrfs_calc_trans_metadata_size(root, nr_extents);
+	/*
+	 * Add an item to reserve for updating the inode when we complete the
+	 * delalloc io.
+	 */
+	if (!BTRFS_I(inode)->delalloc_meta_reserved) {
+		nr_extents++;
+		BTRFS_I(inode)->delalloc_meta_reserved = 1;
 	}
+
+	to_reserve = btrfs_calc_trans_metadata_size(root, nr_extents);
 	to_reserve += calc_csum_metadata_size(inode, num_bytes, 1);
 	spin_unlock(&BTRFS_I(inode)->lock);
...
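The btrfs_block_rsv_add{,_noflush} change above is a plain "one worker, two thin wrappers" refactor: the only difference between the two entry points is the flush flag they pass down. A minimal stand-alone sketch of the same pattern, with hypothetical names rather than the btrfs API:

```c
#include <stdio.h>

/* Hypothetical stand-in for reserve_metadata_bytes(): the flush flag says
 * whether the caller is willing to wait for space to be freed up. */
static int reserve_bytes(long bytes, int flush)
{
	printf("reserving %ld bytes (%s)\n", bytes,
	       flush ? "may flush" : "no flush");
	return 0; /* pretend it always succeeds */
}

/* One shared helper carries the real logic... */
static inline int rsv_add(long bytes, int flush)
{
	if (bytes == 0)
		return 0;
	return reserve_bytes(bytes, flush);
}

/* ...and the public entry points differ only in the flag they pass. */
static int rsv_add_flush(long bytes)   { return rsv_add(bytes, 1); }
static int rsv_add_noflush(long bytes) { return rsv_add(bytes, 0); }

int main(void)
{
	rsv_add_flush(4096);
	rsv_add_noflush(4096);
	return 0;
}
```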
@@ -537,6 +537,13 @@ static int io_ctl_read_entry(struct io_ctl *io_ctl,
 			    struct btrfs_free_space *entry, u8 *type)
 {
 	struct btrfs_free_space_entry *e;
+	int ret;
+
+	if (!io_ctl->cur) {
+		ret = io_ctl_check_crc(io_ctl, io_ctl->index);
+		if (ret)
+			return ret;
+	}

 	e = io_ctl->cur;
 	entry->offset = le64_to_cpu(e->offset);
@@ -550,10 +557,7 @@ static int io_ctl_read_entry(struct io_ctl *io_ctl,
 	io_ctl_unmap_page(io_ctl);

-	if (io_ctl->index >= io_ctl->num_pages)
-		return 0;
-
-	return io_ctl_check_crc(io_ctl, io_ctl->index);
+	return 0;
 }

 static int io_ctl_read_bitmap(struct io_ctl *io_ctl,
@@ -561,9 +565,6 @@ static int io_ctl_read_bitmap(struct io_ctl *io_ctl,
 {
 	int ret;

-	if (io_ctl->cur && io_ctl->cur != io_ctl->orig)
-		io_ctl_unmap_page(io_ctl);
-
 	ret = io_ctl_check_crc(io_ctl, io_ctl->index);
 	if (ret)
 		return ret;
@@ -699,6 +700,8 @@ int __load_free_space_cache(struct btrfs_root *root, struct inode *inode,
 		num_entries--;
 	}

+	io_ctl_unmap_page(&io_ctl);
+
 	/*
 	 * We add the bitmaps at the end of the entries in order that
 	 * the bitmap entries are added to the cache.
...
@@ -398,6 +398,8 @@ int btrfs_save_ino_cache(struct btrfs_root *root,
 	struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
 	struct btrfs_path *path;
 	struct inode *inode;
+	struct btrfs_block_rsv *rsv;
+	u64 num_bytes;
 	u64 alloc_hint = 0;
 	int ret;
 	int prealloc;
@@ -421,11 +423,26 @@ int btrfs_save_ino_cache(struct btrfs_root *root,
 	if (!path)
 		return -ENOMEM;

+	rsv = trans->block_rsv;
+	trans->block_rsv = &root->fs_info->trans_block_rsv;
+
+	num_bytes = trans->bytes_reserved;
+	/*
+	 * 1 item for inode item insertion if need
+	 * 3 items for inode item update (in the worst case)
+	 * 1 item for free space object
+	 * 3 items for pre-allocation
+	 */
+	trans->bytes_reserved = btrfs_calc_trans_metadata_size(root, 8);
+	ret = btrfs_block_rsv_add_noflush(root, trans->block_rsv,
+					  trans->bytes_reserved);
+	if (ret)
+		goto out;
 again:
 	inode = lookup_free_ino_inode(root, path);
 	if (IS_ERR(inode) && PTR_ERR(inode) != -ENOENT) {
 		ret = PTR_ERR(inode);
-		goto out;
+		goto out_release;
 	}

 	if (IS_ERR(inode)) {
@@ -434,7 +451,7 @@ int btrfs_save_ino_cache(struct btrfs_root *root,
 		ret = create_free_ino_inode(root, trans, path);
 		if (ret)
-			goto out;
+			goto out_release;
 		goto again;
 	}
@@ -477,11 +494,14 @@ int btrfs_save_ino_cache(struct btrfs_root *root,
 	}
 	btrfs_free_reserved_data_space(inode, prealloc);

+	ret = btrfs_write_out_ino_cache(root, trans, path);
 out_put:
 	iput(inode);
+out_release:
+	btrfs_block_rsv_release(root, trans->block_rsv, trans->bytes_reserved);
 out:
-	if (ret == 0)
-		ret = btrfs_write_out_ino_cache(root, trans, path);
+	trans->block_rsv = rsv;
+	trans->bytes_reserved = num_bytes;

 	btrfs_free_path(path);
 	return ret;
...
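The comment in the btrfs_save_ino_cache() hunk adds up a worst case of 8 metadata items before reserving, then releases whatever is left on every exit path. A small sketch of that "sum the worst case, reserve up front" habit; the helper and the per-item cost below are assumptions for illustration, not the btrfs calculation:

```c
#include <stdio.h>

/* Hypothetical per-item metadata cost, playing the role of
 * btrfs_calc_trans_metadata_size(root, nr_items). */
static long metadata_size(int nr_items)
{
	const long per_item = 3 * 4096; /* assume 3 tree levels of 4 KiB nodes */
	return nr_items * per_item;
}

int main(void)
{
	/* Worst case for writing the inode cache, as in the comment:
	 * 1 inode item insertion + 3 inode item updates +
	 * 1 free space object + 3 pre-allocation items = 8 items. */
	int nr_items = 1 + 3 + 1 + 3;
	long bytes = metadata_size(nr_items);

	printf("reserve %ld bytes up front, release the unused part at the end\n",
	       bytes);
	return 0;
}
```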
@@ -93,6 +93,8 @@ static noinline int cow_file_range(struct inode *inode,
 				   struct page *locked_page,
 				   u64 start, u64 end, int *page_started,
 				   unsigned long *nr_written, int unlock);
+static noinline int btrfs_update_inode_fallback(struct btrfs_trans_handle *trans,
+				  struct btrfs_root *root, struct inode *inode);
 static int btrfs_init_inode_security(struct btrfs_trans_handle *trans,
 				     struct inode *inode, struct inode *dir,
@@ -1741,7 +1743,7 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end)
 		trans = btrfs_join_transaction(root);
 		BUG_ON(IS_ERR(trans));
 		trans->block_rsv = &root->fs_info->delalloc_block_rsv;
-		ret = btrfs_update_inode(trans, root, inode);
+		ret = btrfs_update_inode_fallback(trans, root, inode);
 		BUG_ON(ret);
 	}
 	goto out;
@@ -1791,7 +1793,7 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end)
 	ret = btrfs_ordered_update_i_size(inode, 0, ordered_extent);
 	if (!ret || !test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags)) {
-		ret = btrfs_update_inode(trans, root, inode);
+		ret = btrfs_update_inode_fallback(trans, root, inode);
 		BUG_ON(ret);
 	}
 	ret = 0;
@@ -2199,6 +2201,9 @@ int btrfs_orphan_cleanup(struct btrfs_root *root)
 		if (ret)
 			goto out;
 	}
+	/* release the path since we're done with it */
+	btrfs_release_path(path);
+
 	root->orphan_cleanup_state = ORPHAN_CLEANUP_DONE;

 	if (root->orphan_block_rsv)
@@ -2426,7 +2431,7 @@ static void fill_inode_item(struct btrfs_trans_handle *trans,
 /*
  * copy everything in the in-memory inode into the btree.
  */
-noinline int btrfs_update_inode(struct btrfs_trans_handle *trans,
+static noinline int btrfs_update_inode_item(struct btrfs_trans_handle *trans,
 				struct btrfs_root *root, struct inode *inode)
 {
 	struct btrfs_inode_item *inode_item;
@@ -2434,21 +2439,6 @@ noinline int btrfs_update_inode(struct btrfs_trans_handle *trans,
 	struct extent_buffer *leaf;
 	int ret;

-	/*
-	 * If the inode is a free space inode, we can deadlock during commit
-	 * if we put it into the delayed code.
-	 *
-	 * The data relocation inode should also be directly updated
-	 * without delay
-	 */
-	if (!btrfs_is_free_space_inode(root, inode)
-	    && root->root_key.objectid != BTRFS_DATA_RELOC_TREE_OBJECTID) {
-		ret = btrfs_delayed_update_inode(trans, root, inode);
-		if (!ret)
-			btrfs_set_inode_last_trans(trans, inode);
-		return ret;
-	}
-
 	path = btrfs_alloc_path();
 	if (!path)
 		return -ENOMEM;
@@ -2476,6 +2466,43 @@ noinline int btrfs_update_inode(struct btrfs_trans_handle *trans,
 	return ret;
 }

+/*
+ * copy everything in the in-memory inode into the btree.
+ */
+noinline int btrfs_update_inode(struct btrfs_trans_handle *trans,
+				struct btrfs_root *root, struct inode *inode)
+{
+	int ret;
+
+	/*
+	 * If the inode is a free space inode, we can deadlock during commit
+	 * if we put it into the delayed code.
+	 *
+	 * The data relocation inode should also be directly updated
+	 * without delay
+	 */
+	if (!btrfs_is_free_space_inode(root, inode)
+	    && root->root_key.objectid != BTRFS_DATA_RELOC_TREE_OBJECTID) {
+		ret = btrfs_delayed_update_inode(trans, root, inode);
+		if (!ret)
+			btrfs_set_inode_last_trans(trans, inode);
+		return ret;
+	}
+
+	return btrfs_update_inode_item(trans, root, inode);
+}
+
+static noinline int btrfs_update_inode_fallback(struct btrfs_trans_handle *trans,
+				struct btrfs_root *root, struct inode *inode)
+{
+	int ret;
+
+	ret = btrfs_update_inode(trans, root, inode);
+	if (ret == -ENOSPC)
+		return btrfs_update_inode_item(trans, root, inode);
+	return ret;
+}
+
 /*
  * unlink helper that gets used here in inode.c and in the tree logging
  * recovery code. It remove a link in a directory with a given name, and
@@ -5632,7 +5659,7 @@ static void btrfs_endio_direct_write(struct bio *bio, int err)
 	if (test_bit(BTRFS_ORDERED_NOCOW, &ordered->flags)) {
 		ret = btrfs_ordered_update_i_size(inode, 0, ordered);
 		if (!ret)
-			err = btrfs_update_inode(trans, root, inode);
+			err = btrfs_update_inode_fallback(trans, root, inode);
 		goto out;
 	}
@@ -5670,7 +5697,7 @@ static void btrfs_endio_direct_write(struct bio *bio, int err)
 	add_pending_csums(trans, inode, ordered->file_offset, &ordered->list);
 	ret = btrfs_ordered_update_i_size(inode, 0, ordered);
 	if (!ret || !test_bit(BTRFS_ORDERED_PREALLOC, &ordered->flags))
-		btrfs_update_inode(trans, root, inode);
+		btrfs_update_inode_fallback(trans, root, inode);
 	ret = 0;
 out_unlock:
 	unlock_extent_cached(&BTRFS_I(inode)->io_tree, ordered->file_offset,
@@ -6529,14 +6556,16 @@ static int btrfs_truncate(struct inode *inode)
 		ret = btrfs_orphan_del(NULL, inode);
 	}

-	trans->block_rsv = &root->fs_info->trans_block_rsv;
-	ret = btrfs_update_inode(trans, root, inode);
-	if (ret && !err)
-		err = ret;
+	if (trans) {
+		trans->block_rsv = &root->fs_info->trans_block_rsv;
+		ret = btrfs_update_inode(trans, root, inode);
+		if (ret && !err)
+			err = ret;

-	nr = trans->blocks_used;
-	ret = btrfs_end_transaction_throttle(trans, root);
-	btrfs_btree_balance_dirty(root, nr);
+		nr = trans->blocks_used;
+		ret = btrfs_end_transaction_throttle(trans, root);
+		btrfs_btree_balance_dirty(root, nr);
+	}

 out:
 	btrfs_free_block_rsv(root, rsv);
@@ -6605,6 +6634,7 @@ struct inode *btrfs_alloc_inode(struct super_block *sb)
 	ei->orphan_meta_reserved = 0;
 	ei->dummy_inode = 0;
 	ei->in_defrag = 0;
+	ei->delalloc_meta_reserved = 0;
 	ei->force_compress = BTRFS_COMPRESS_NONE;

 	ei->delayed_node = NULL;
...
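btrfs_update_inode_fallback() above is a small "try the preferred path, fall back only on ENOSPC" wrapper: the delayed update is attempted first, and only an out-of-space failure is retried with the direct item update. The same shape in a self-contained sketch, with toy functions rather than the kernel ones:

```c
#include <errno.h>
#include <stdio.h>

/* Toy stand-ins: the delayed path can fail for lack of reserved space,
 * the direct path writes the item immediately. */
static int update_delayed(int have_space) { return have_space ? 0 : -ENOSPC; }
static int update_direct(void)            { return 0; }

/* Prefer the delayed path; only on ENOSPC retry with the direct one.
 * Any other error is passed through untouched. */
static int update_fallback(int have_space)
{
	int ret = update_delayed(have_space);

	if (ret == -ENOSPC)
		return update_direct();
	return ret;
}

int main(void)
{
	printf("with space: %d, without space: %d\n",
	       update_fallback(1), update_fallback(0));
	return 0;
}
```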
@@ -1174,6 +1174,8 @@ static int clone_backref_node(struct btrfs_trans_handle *trans,
 			list_add_tail(&new_edge->list[UPPER],
 				      &new_node->lower);
 		}
+	} else {
+		list_add_tail(&new_node->lower, &cache->leaves);
 	}

 	rb_node = tree_insert(&cache->rb_root, new_node->bytenr,
...
@@ -944,50 +944,18 @@ static int scrub_checksum_super(struct scrub_bio *sbio, void *buffer)
 static int scrub_submit(struct scrub_dev *sdev)
 {
 	struct scrub_bio *sbio;
-	struct bio *bio;
-	int i;

 	if (sdev->curr == -1)
 		return 0;

 	sbio = sdev->bios[sdev->curr];
-
-	bio = bio_alloc(GFP_NOFS, sbio->count);
-	if (!bio)
-		goto nomem;
-
-	bio->bi_private = sbio;
-	bio->bi_end_io = scrub_bio_end_io;
-	bio->bi_bdev = sdev->dev->bdev;
-	bio->bi_sector = sbio->physical >> 9;
-
-	for (i = 0; i < sbio->count; ++i) {
-		struct page *page;
-		int ret;
-
-		page = alloc_page(GFP_NOFS);
-		if (!page)
-			goto nomem;
-
-		ret = bio_add_page(bio, page, PAGE_SIZE, 0);
-		if (!ret) {
-			__free_page(page);
-			goto nomem;
-		}
-	}
-
 	sbio->err = 0;
 	sdev->curr = -1;
 	atomic_inc(&sdev->in_flight);

-	submit_bio(READ, bio);
+	submit_bio(READ, sbio->bio);

 	return 0;
-
-nomem:
-	scrub_free_bio(bio);
-
-	return -ENOMEM;
 }

 static int scrub_page(struct scrub_dev *sdev, u64 logical, u64 len,
@@ -995,6 +963,8 @@ static int scrub_page(struct scrub_dev *sdev, u64 logical, u64 len,
 		      u8 *csum, int force)
 {
 	struct scrub_bio *sbio;
+	struct page *page;
+	int ret;

 again:
 	/*
@@ -1015,12 +985,22 @@ static int scrub_page(struct scrub_dev *sdev, u64 logical, u64 len,
 	}
 	sbio = sdev->bios[sdev->curr];
 	if (sbio->count == 0) {
+		struct bio *bio;
+
 		sbio->physical = physical;
 		sbio->logical = logical;
+		bio = bio_alloc(GFP_NOFS, SCRUB_PAGES_PER_BIO);
+		if (!bio)
+			return -ENOMEM;
+
+		bio->bi_private = sbio;
+		bio->bi_end_io = scrub_bio_end_io;
+		bio->bi_bdev = sdev->dev->bdev;
+		bio->bi_sector = sbio->physical >> 9;
+		sbio->err = 0;
+		sbio->bio = bio;
 	} else if (sbio->physical + sbio->count * PAGE_SIZE != physical ||
 		   sbio->logical + sbio->count * PAGE_SIZE != logical) {
-		int ret;
-
 		ret = scrub_submit(sdev);
 		if (ret)
 			return ret;
@@ -1030,6 +1010,20 @@ static int scrub_page(struct scrub_dev *sdev, u64 logical, u64 len,
 	sbio->spag[sbio->count].generation = gen;
 	sbio->spag[sbio->count].have_csum = 0;
 	sbio->spag[sbio->count].mirror_num = mirror_num;
+
+	page = alloc_page(GFP_NOFS);
+	if (!page)
+		return -ENOMEM;
+
+	ret = bio_add_page(sbio->bio, page, PAGE_SIZE, 0);
+	if (!ret) {
+		__free_page(page);
+		ret = scrub_submit(sdev);
+		if (ret)
+			return ret;
+		goto again;
+	}
+
 	if (csum) {
 		sbio->spag[sbio->count].have_csum = 1;
 		memcpy(sbio->spag[sbio->count].csum, csum, sdev->csum_size);
...
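After the scrub change, pages are added to the in-flight bio one at a time, and a failed bio_add_page() simply forces an early submit and a retry of the same page. Stripped of the kernel types, that is a generic "append to a batch; if the batch refuses, flush and try again" loop, sketched below with made-up structures (not the scrub ones):

```c
#include <stdio.h>

#define BATCH_MAX 4

struct batch {
	int count;
	int items[BATCH_MAX];
};

/* Mimics bio_add_page(): returns 0 when the batch cannot take more. */
static int batch_add(struct batch *b, int item)
{
	if (b->count == BATCH_MAX)
		return 0;
	b->items[b->count++] = item;
	return 1;
}

/* Mimics scrub_submit(): hand off whatever is queued and reset. */
static void batch_submit(struct batch *b)
{
	printf("submitting %d items\n", b->count);
	b->count = 0;
}

static void queue_item(struct batch *b, int item)
{
again:
	if (!batch_add(b, item)) {
		batch_submit(b);   /* flush the full batch... */
		goto again;        /* ...then retry the same item */
	}
}

int main(void)
{
	struct batch b = { 0 };

	for (int i = 0; i < 10; i++)
		queue_item(&b, i);
	if (b.count)
		batch_submit(&b);
	return 0;
}
```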
@@ -197,7 +197,7 @@ static match_table_t tokens = {
 	{Opt_subvolrootid, "subvolrootid=%d"},
 	{Opt_defrag, "autodefrag"},
 	{Opt_inode_cache, "inode_cache"},
-	{Opt_no_space_cache, "no_space_cache"},
+	{Opt_no_space_cache, "nospace_cache"},
 	{Opt_recovery, "recovery"},
 	{Opt_err, NULL},
 };
@@ -448,6 +448,7 @@ static int btrfs_parse_early_options(const char *options, fmode_t flags,
 		token = match_token(p, tokens, args);
 		switch (token) {
 		case Opt_subvol:
+			kfree(*subvol_name);
 			*subvol_name = match_strdup(&args[0]);
 			break;
 		case Opt_subvolid:
@@ -710,7 +711,7 @@ static int btrfs_show_options(struct seq_file *seq, struct vfsmount *vfs)
 	if (btrfs_test_opt(root, SPACE_CACHE))
 		seq_puts(seq, ",space_cache");
 	else
-		seq_puts(seq, ",no_space_cache");
+		seq_puts(seq, ",nospace_cache");
 	if (btrfs_test_opt(root, CLEAR_CACHE))
 		seq_puts(seq, ",clear_cache");
 	if (btrfs_test_opt(root, USER_SUBVOL_RM_ALLOWED))
@@ -890,7 +891,6 @@ static struct dentry *btrfs_mount(struct file_system_type *fs_type, int flags,
 	struct super_block *s;
 	struct dentry *root;
 	struct btrfs_fs_devices *fs_devices = NULL;
-	struct btrfs_root *tree_root = NULL;
 	struct btrfs_fs_info *fs_info = NULL;
 	fmode_t mode = FMODE_READ;
 	char *subvol_name = NULL;
@@ -904,8 +904,10 @@ static struct dentry *btrfs_mount(struct file_system_type *fs_type, int flags,
 	error = btrfs_parse_early_options(data, mode, fs_type,
 					  &subvol_name, &subvol_objectid,
 					  &subvol_rootid, &fs_devices);
-	if (error)
+	if (error) {
+		kfree(subvol_name);
 		return ERR_PTR(error);
+	}

 	if (subvol_name) {
 		root = mount_subvol(subvol_name, flags, device_name, data);
@@ -917,15 +919,6 @@ static struct dentry *btrfs_mount(struct file_system_type *fs_type, int flags,
 	if (error)
 		return ERR_PTR(error);

-	error = btrfs_open_devices(fs_devices, mode, fs_type);
-	if (error)
-		return ERR_PTR(error);
-
-	if (!(flags & MS_RDONLY) && fs_devices->rw_devices == 0) {
-		error = -EACCES;
-		goto error_close_devices;
-	}
-
 	/*
 	 * Setup a dummy root and fs_info for test/set super. This is because
 	 * we don't actually fill this stuff out until open_ctree, but we need
@@ -933,24 +926,36 @@ static struct dentry *btrfs_mount(struct file_system_type *fs_type, int flags,
 	 * then open_ctree will properly initialize everything later.
 	 */
 	fs_info = kzalloc(sizeof(struct btrfs_fs_info), GFP_NOFS);
-	tree_root = kzalloc(sizeof(struct btrfs_root), GFP_NOFS);
-	if (!fs_info || !tree_root) {
+	if (!fs_info)
+		return ERR_PTR(-ENOMEM);
+
+	fs_info->tree_root = kzalloc(sizeof(struct btrfs_root), GFP_NOFS);
+	if (!fs_info->tree_root) {
 		error = -ENOMEM;
-		goto error_close_devices;
+		goto error_fs_info;
 	}
-	fs_info->tree_root = tree_root;
+	fs_info->tree_root->fs_info = fs_info;
 	fs_info->fs_devices = fs_devices;
-	tree_root->fs_info = fs_info;

 	fs_info->super_copy = kzalloc(BTRFS_SUPER_INFO_SIZE, GFP_NOFS);
 	fs_info->super_for_commit = kzalloc(BTRFS_SUPER_INFO_SIZE, GFP_NOFS);
 	if (!fs_info->super_copy || !fs_info->super_for_commit) {
 		error = -ENOMEM;
+		goto error_fs_info;
+	}
+
+	error = btrfs_open_devices(fs_devices, mode, fs_type);
+	if (error)
+		goto error_fs_info;
+
+	if (!(flags & MS_RDONLY) && fs_devices->rw_devices == 0) {
+		error = -EACCES;
 		goto error_close_devices;
 	}

 	bdev = fs_devices->latest_bdev;
-	s = sget(fs_type, btrfs_test_super, btrfs_set_super, tree_root);
+	s = sget(fs_type, btrfs_test_super, btrfs_set_super,
+		 fs_info->tree_root);
 	if (IS_ERR(s)) {
 		error = PTR_ERR(s);
 		goto error_close_devices;
@@ -959,12 +964,12 @@ static struct dentry *btrfs_mount(struct file_system_type *fs_type, int flags,
 	if (s->s_root) {
 		if ((flags ^ s->s_flags) & MS_RDONLY) {
 			deactivate_locked_super(s);
-			return ERR_PTR(-EBUSY);
+			error = -EBUSY;
+			goto error_close_devices;
 		}

 		btrfs_close_devices(fs_devices);
 		free_fs_info(fs_info);
-		kfree(tree_root);
 	} else {
 		char b[BDEVNAME_SIZE];
@@ -991,8 +996,8 @@ static struct dentry *btrfs_mount(struct file_system_type *fs_type, int flags,
 error_close_devices:
 	btrfs_close_devices(fs_devices);
+error_fs_info:
 	free_fs_info(fs_info);
-	kfree(tree_root);
 	return ERR_PTR(error);
 }
...
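The btrfs_mount() rework above boils down to acquiring resources in one order and unwinding them through goto labels in the reverse order: a failure after btrfs_open_devices() lands on error_close_devices, while earlier failures jump straight to error_fs_info and never try to close devices that were never opened. A compact sketch of that idiom with hypothetical resources, not the mount code:

```c
#include <stdio.h>
#include <stdlib.h>

static void *get_info(void)      { return malloc(16); }
static int   open_devices(void)  { return 0; }   /* 0 = success */
static int   setup_super(void)   { return -1; }  /* pretend this step fails */
static void  close_devices(void) { puts("close devices"); }
static void  free_info(void *p)  { puts("free info"); free(p); }

static int mount_like(void)
{
	void *info;
	int error;

	info = get_info();              /* acquired first */
	if (!info)
		return -1;

	error = open_devices();         /* acquired second */
	if (error)
		goto error_free_info;   /* devices never opened: skip closing */

	error = setup_super();          /* a later step fails... */
	if (error)
		goto error_close_devices;

	return 0;

error_close_devices:                    /* unwind in reverse order */
	close_devices();
error_free_info:
	free_info(info);
	return error;
}

int main(void)
{
	printf("mount_like() = %d\n", mount_like());
	return 0;
}
```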
@@ -882,8 +882,8 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
 	btrfs_reloc_pre_snapshot(trans, pending, &to_reserve);

 	if (to_reserve > 0) {
-		ret = btrfs_block_rsv_add(root, &pending->block_rsv,
-					  to_reserve);
+		ret = btrfs_block_rsv_add_noflush(root, &pending->block_rsv,
+						  to_reserve);
 		if (ret) {
 			pending->error = ret;
 			goto fail;
...
@@ -999,7 +999,7 @@ static int btrfs_free_dev_extent(struct btrfs_trans_handle *trans,
 	key.objectid = device->devid;
 	key.offset = start;
 	key.type = BTRFS_DEV_EXTENT_KEY;
-
+again:
 	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
 	if (ret > 0) {
 		ret = btrfs_previous_item(root, path, key.objectid,
@@ -1012,6 +1012,9 @@ static int btrfs_free_dev_extent(struct btrfs_trans_handle *trans,
 					struct btrfs_dev_extent);
 		BUG_ON(found_key.offset > start || found_key.offset +
 		       btrfs_dev_extent_length(leaf, extent) < start);
+		key = found_key;
+		btrfs_release_path(path);
+		goto again;
 	} else if (ret == 0) {
 		leaf = path->nodes[0];
 		extent = btrfs_item_ptr(leaf, path->slots[0],
...