Commit 2101ae42 authored by Linus Torvalds

Merge branch 'for-linus-4.5' of git://git.kernel.org/pub/scm/linux/kernel/git/mason/linux-btrfs

Pull more btrfs updates from Chris Mason:
 "These are mostly fixes that we've been testing, but also we grabbed
  and tested a few small cleanups that had been on the list for a while.

  Zhao Lei's patchset also fixes some early ENOSPC buglets"

* 'for-linus-4.5' of git://git.kernel.org/pub/scm/linux/kernel/git/mason/linux-btrfs: (21 commits)
  btrfs: raid56: Use raid_write_end_io for scrub
  btrfs: Remove unnecessary ClearPageUptodate for raid56
  btrfs: use rbio->nr_pages to reduce calculation
  btrfs: Use unified stripe_page's index calculation
  btrfs: Fix calculation of rbio->dbitmap's size calculation
  btrfs: Fix no_space in write and rm loop
  btrfs: merge functions for wait snapshot creation
  btrfs: delete unused argument in btrfs_copy_from_user
  btrfs: Use direct way to determine raid56 write/recover mode
  btrfs: Small cleanup for get index_srcdev loop
  btrfs: Enhance chunk validation check
  btrfs: Enhance super validation check
  Btrfs: fix deadlock running delayed iputs at transaction commit time
  Btrfs: fix typo in log message when starting a balance
  btrfs: remove duplicate const specifier
  btrfs: initialize the seq counter in struct btrfs_device
  Btrfs: clean up an error code in btrfs_init_space_info()
  btrfs: fix iterator with update error in backref.c
  Btrfs: fix output of compression message in btrfs_parse_options()
  Btrfs: Initialize btrfs_root->highest_objectid when loading tree root and subvolume roots
  ...
@@ -560,13 +560,13 @@ static int __add_missing_keys(struct btrfs_fs_info *fs_info,
  */
 static void __merge_refs(struct list_head *head, int mode)
 {
-    struct __prelim_ref *ref1;
+    struct __prelim_ref *pos1;
-    list_for_each_entry(ref1, head, list) {
-        struct __prelim_ref *ref2 = ref1, *tmp;
+    list_for_each_entry(pos1, head, list) {
+        struct __prelim_ref *pos2 = pos1, *tmp;
-        list_for_each_entry_safe_continue(ref2, tmp, head, list) {
-            struct __prelim_ref *xchg;
+        list_for_each_entry_safe_continue(pos2, tmp, head, list) {
+            struct __prelim_ref *xchg, *ref1 = pos1, *ref2 = pos2;
             struct extent_inode_elem *eie;
             if (!ref_for_same_block(ref1, ref2))
...
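The hunk above is the "fix iterator with update error in backref.c" change: the old body used the loop cursors ref1/ref2 directly, and a later xchg swap of those cursors could derail the list walk. Iterating with pos1/pos2 and swapping only local aliases keeps the cursors intact. A minimal userspace sketch of the idiom (a hypothetical toy list, not the kernel code):

    /* Toy analogue: work on aliases of the cursors, never on the
     * cursors themselves, so swapping cannot break the iteration. */
    #include <stdio.h>

    struct ref { int key; struct ref *next; };

    int main(void)
    {
        struct ref c = { 3, NULL }, b = { 2, &c }, a = { 1, &b };
        struct ref *pos1, *pos2;

        for (pos1 = &a; pos1; pos1 = pos1->next) {
            for (pos2 = pos1->next; pos2; pos2 = pos2->next) {
                /* local aliases; pos1/pos2 stay untouched */
                struct ref *ref1 = pos1, *ref2 = pos2;

                if (ref1->key < ref2->key) {
                    struct ref *xchg = ref1;
                    ref1 = ref2;
                    ref2 = xchg;
                }
                /* ref1 now holds the larger key */
                printf("merge %d into %d\n", ref2->key, ref1->key);
            }
        }
        return 0;
    }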
@@ -1614,7 +1614,7 @@ struct btrfs_fs_info {
     spinlock_t delayed_iput_lock;
     struct list_head delayed_iputs;
-    struct rw_semaphore delayed_iput_sem;
+    struct mutex cleaner_delayed_iput_mutex;
     /* this protects tree_mod_seq_list */
     spinlock_t tree_mod_seq_lock;
@@ -3641,6 +3641,7 @@ int btrfs_delayed_refs_qgroup_accounting(struct btrfs_trans_handle *trans,
 int __get_raid_index(u64 flags);
 int btrfs_start_write_no_snapshoting(struct btrfs_root *root);
 void btrfs_end_write_no_snapshoting(struct btrfs_root *root);
+void btrfs_wait_for_snapshot_creation(struct btrfs_root *root);
 void check_system_chunk(struct btrfs_trans_handle *trans,
             struct btrfs_root *root,
             const u64 type);
...
@@ -614,7 +614,7 @@ static void btrfs_dev_replace_update_device_in_mapping_tree(
         em = lookup_extent_mapping(em_tree, start, (u64)-1);
         if (!em)
             break;
-        map = (struct map_lookup *)em->bdev;
+        map = em->map_lookup;
         for (i = 0; i < map->num_stripes; i++)
             if (srcdev == map->stripes[i].dev)
                 map->stripes[i].dev = tgtdev;
...
@@ -55,6 +55,12 @@
 #include <asm/cpufeature.h>
 #endif
+#define BTRFS_SUPER_FLAG_SUPP    (BTRFS_HEADER_FLAG_WRITTEN |\
+                 BTRFS_HEADER_FLAG_RELOC |\
+                 BTRFS_SUPER_FLAG_ERROR |\
+                 BTRFS_SUPER_FLAG_SEEDING |\
+                 BTRFS_SUPER_FLAG_METADUMP)
 static const struct extent_io_ops btree_extent_io_ops;
 static void end_workqueue_fn(struct btrfs_work *work);
 static void free_fs_root(struct btrfs_root *root);
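BTRFS_SUPER_FLAG_SUPP collects every super/header flag this kernel understands, so any unknown bit survives `flags & ~BTRFS_SUPER_FLAG_SUPP` and can be reported, as btrfs_check_super_valid does further down. A minimal sketch of the mask test (illustrative flag values, not the real btrfs constants):

    /* Sketch of the "unrecognized flag" test with made-up flag bits. */
    #include <stdio.h>
    #include <stdint.h>

    #define FLAG_WRITTEN (1ULL << 0)
    #define FLAG_RELOC   (1ULL << 1)
    #define FLAG_SEEDING (1ULL << 2)
    #define FLAG_SUPP    (FLAG_WRITTEN | FLAG_RELOC | FLAG_SEEDING)

    int main(void)
    {
        uint64_t flags = FLAG_WRITTEN | (1ULL << 40); /* one unknown bit */

        if (flags & ~FLAG_SUPP)
            printf("unrecognized super flag: %llu\n",
                   (unsigned long long)(flags & ~FLAG_SUPP));
        return 0;
    }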
@@ -1583,8 +1589,23 @@ int btrfs_init_fs_root(struct btrfs_root *root)
     ret = get_anon_bdev(&root->anon_dev);
     if (ret)
         goto free_writers;
+    mutex_lock(&root->objectid_mutex);
+    ret = btrfs_find_highest_objectid(root,
+                    &root->highest_objectid);
+    if (ret) {
+        mutex_unlock(&root->objectid_mutex);
+        goto free_root_dev;
+    }
+    ASSERT(root->highest_objectid <= BTRFS_LAST_FREE_OBJECTID);
+    mutex_unlock(&root->objectid_mutex);
     return 0;
+free_root_dev:
+    free_anon_bdev(root->anon_dev);
 free_writers:
     btrfs_free_subvolume_writers(root->subv_writers);
 fail:
@@ -1786,7 +1807,10 @@ static int cleaner_kthread(void *arg)
             goto sleep;
         }
+        mutex_lock(&root->fs_info->cleaner_delayed_iput_mutex);
         btrfs_run_delayed_iputs(root);
+        mutex_unlock(&root->fs_info->cleaner_delayed_iput_mutex);
         again = btrfs_clean_one_deleted_snapshot(root);
         mutex_unlock(&root->fs_info->cleaner_mutex);
@@ -2556,8 +2580,8 @@ int open_ctree(struct super_block *sb,
     mutex_init(&fs_info->delete_unused_bgs_mutex);
     mutex_init(&fs_info->reloc_mutex);
     mutex_init(&fs_info->delalloc_root_mutex);
+    mutex_init(&fs_info->cleaner_delayed_iput_mutex);
     seqlock_init(&fs_info->profiles_lock);
-    init_rwsem(&fs_info->delayed_iput_sem);
     INIT_LIST_HEAD(&fs_info->dirty_cowonly_roots);
     INIT_LIST_HEAD(&fs_info->space_info);
@@ -2742,26 +2766,6 @@ int open_ctree(struct super_block *sb,
         goto fail_alloc;
     }
-    /*
-     * Leafsize and nodesize were always equal, this is only a sanity check.
-     */
-    if (le32_to_cpu(disk_super->__unused_leafsize) !=
-        btrfs_super_nodesize(disk_super)) {
-        printk(KERN_ERR "BTRFS: couldn't mount because metadata "
-               "blocksizes don't match. node %d leaf %d\n",
-               btrfs_super_nodesize(disk_super),
-               le32_to_cpu(disk_super->__unused_leafsize));
-        err = -EINVAL;
-        goto fail_alloc;
-    }
-    if (btrfs_super_nodesize(disk_super) > BTRFS_MAX_METADATA_BLOCKSIZE) {
-        printk(KERN_ERR "BTRFS: couldn't mount because metadata "
-               "blocksize (%d) was too large\n",
-               btrfs_super_nodesize(disk_super));
-        err = -EINVAL;
-        goto fail_alloc;
-    }
     features = btrfs_super_incompat_flags(disk_super);
     features |= BTRFS_FEATURE_INCOMPAT_MIXED_BACKREF;
     if (tree_root->fs_info->compress_type == BTRFS_COMPRESS_LZO)
@@ -2833,17 +2837,6 @@ int open_ctree(struct super_block *sb,
     sb->s_blocksize = sectorsize;
     sb->s_blocksize_bits = blksize_bits(sectorsize);
-    if (btrfs_super_magic(disk_super) != BTRFS_MAGIC) {
-        printk(KERN_ERR "BTRFS: valid FS not found on %s\n", sb->s_id);
-        goto fail_sb_buffer;
-    }
-    if (sectorsize != PAGE_SIZE) {
-        printk(KERN_ERR "BTRFS: incompatible sector size (%lu) "
-               "found on %s\n", (unsigned long)sectorsize, sb->s_id);
-        goto fail_sb_buffer;
-    }
     mutex_lock(&fs_info->chunk_mutex);
     ret = btrfs_read_sys_array(tree_root);
     mutex_unlock(&fs_info->chunk_mutex);
@@ -2915,6 +2908,18 @@ int open_ctree(struct super_block *sb,
     tree_root->commit_root = btrfs_root_node(tree_root);
     btrfs_set_root_refs(&tree_root->root_item, 1);
+    mutex_lock(&tree_root->objectid_mutex);
+    ret = btrfs_find_highest_objectid(tree_root,
+                    &tree_root->highest_objectid);
+    if (ret) {
+        mutex_unlock(&tree_root->objectid_mutex);
+        goto recovery_tree_root;
+    }
+    ASSERT(tree_root->highest_objectid <= BTRFS_LAST_FREE_OBJECTID);
+    mutex_unlock(&tree_root->objectid_mutex);
     ret = btrfs_read_roots(fs_info, tree_root);
     if (ret)
         goto recovery_tree_root;
@@ -4018,8 +4023,17 @@ static int btrfs_check_super_valid(struct btrfs_fs_info *fs_info,
                   int read_only)
 {
     struct btrfs_super_block *sb = fs_info->super_copy;
+    u64 nodesize = btrfs_super_nodesize(sb);
+    u64 sectorsize = btrfs_super_sectorsize(sb);
     int ret = 0;
+    if (btrfs_super_magic(sb) != BTRFS_MAGIC) {
+        printk(KERN_ERR "BTRFS: no valid FS found\n");
+        ret = -EINVAL;
+    }
+    if (btrfs_super_flags(sb) & ~BTRFS_SUPER_FLAG_SUPP)
+        printk(KERN_WARNING "BTRFS: unrecognized super flag: %llu\n",
+                btrfs_super_flags(sb) & ~BTRFS_SUPER_FLAG_SUPP);
     if (btrfs_super_root_level(sb) >= BTRFS_MAX_LEVEL) {
         printk(KERN_ERR "BTRFS: tree_root level too big: %d >= %d\n",
                 btrfs_super_root_level(sb), BTRFS_MAX_LEVEL);
@@ -4037,31 +4051,46 @@ static int btrfs_check_super_valid(struct btrfs_fs_info *fs_info,
     }
     /*
-     * The common minimum, we don't know if we can trust the nodesize/sectorsize
-     * items yet, they'll be verified later. Issue just a warning.
+     * Check sectorsize and nodesize first, other check will need it.
+     * Check all possible sectorsize(4K, 8K, 16K, 32K, 64K) here.
      */
-    if (!IS_ALIGNED(btrfs_super_root(sb), 4096))
+    if (!is_power_of_2(sectorsize) || sectorsize < 4096 ||
+        sectorsize > BTRFS_MAX_METADATA_BLOCKSIZE) {
+        printk(KERN_ERR "BTRFS: invalid sectorsize %llu\n", sectorsize);
+        ret = -EINVAL;
+    }
+    /* Only PAGE SIZE is supported yet */
+    if (sectorsize != PAGE_CACHE_SIZE) {
+        printk(KERN_ERR "BTRFS: sectorsize %llu not supported yet, only support %lu\n",
+                sectorsize, PAGE_CACHE_SIZE);
+        ret = -EINVAL;
+    }
+    if (!is_power_of_2(nodesize) || nodesize < sectorsize ||
+        nodesize > BTRFS_MAX_METADATA_BLOCKSIZE) {
+        printk(KERN_ERR "BTRFS: invalid nodesize %llu\n", nodesize);
+        ret = -EINVAL;
+    }
+    if (nodesize != le32_to_cpu(sb->__unused_leafsize)) {
+        printk(KERN_ERR "BTRFS: invalid leafsize %u, should be %llu\n",
+            le32_to_cpu(sb->__unused_leafsize),
+            nodesize);
+        ret = -EINVAL;
+    }
+    /* Root alignment check */
+    if (!IS_ALIGNED(btrfs_super_root(sb), sectorsize)) {
         printk(KERN_WARNING "BTRFS: tree_root block unaligned: %llu\n",
                 btrfs_super_root(sb));
-    if (!IS_ALIGNED(btrfs_super_chunk_root(sb), 4096))
+        ret = -EINVAL;
+    }
+    if (!IS_ALIGNED(btrfs_super_chunk_root(sb), sectorsize)) {
         printk(KERN_WARNING "BTRFS: chunk_root block unaligned: %llu\n",
                 btrfs_super_chunk_root(sb));
-    if (!IS_ALIGNED(btrfs_super_log_root(sb), 4096))
-        printk(KERN_WARNING "BTRFS: log_root block unaligned: %llu\n",
-                btrfs_super_log_root(sb));
-    /*
-     * Check the lower bound, the alignment and other constraints are
-     * checked later.
-     */
-    if (btrfs_super_nodesize(sb) < 4096) {
-        printk(KERN_ERR "BTRFS: nodesize too small: %u < 4096\n",
-                btrfs_super_nodesize(sb));
-        ret = -EINVAL;
-    }
-    if (btrfs_super_sectorsize(sb) < 4096) {
-        printk(KERN_ERR "BTRFS: sectorsize too small: %u < 4096\n",
-                btrfs_super_sectorsize(sb));
+        ret = -EINVAL;
+    }
+    if (!IS_ALIGNED(btrfs_super_log_root(sb), sectorsize)) {
+        printk(KERN_WARNING "BTRFS: log_root block unaligned: %llu\n",
+                btrfs_super_log_root(sb));
         ret = -EINVAL;
     }
...
@@ -4139,8 +4139,10 @@ int btrfs_alloc_data_chunk_ondemand(struct inode *inode, u64 bytes)
         !atomic_read(&root->fs_info->open_ioctl_trans)) {
         need_commit--;
-        if (need_commit > 0)
+        if (need_commit > 0) {
+            btrfs_start_delalloc_roots(fs_info, 0, -1);
             btrfs_wait_ordered_roots(fs_info, -1);
+        }
         trans = btrfs_join_transaction(root);
         if (IS_ERR(trans))
@@ -4153,11 +4155,12 @@ int btrfs_alloc_data_chunk_ondemand(struct inode *inode, u64 bytes)
             if (ret)
                 return ret;
             /*
-             * make sure that all running delayed iput are
-             * done
+             * The cleaner kthread might still be doing iput
+             * operations. Wait for it to finish so that
+             * more space is released.
              */
-            down_write(&root->fs_info->delayed_iput_sem);
-            up_write(&root->fs_info->delayed_iput_sem);
+            mutex_lock(&root->fs_info->cleaner_delayed_iput_mutex);
+            mutex_unlock(&root->fs_info->cleaner_delayed_iput_mutex);
             goto again;
         } else {
             btrfs_end_transaction(trans, root);
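The empty lock/unlock pair above is a barrier, not a critical section: because the cleaner kthread holds cleaner_delayed_iput_mutex across the whole btrfs_run_delayed_iputs() run (see the cleaner_kthread hunk earlier), acquiring and releasing it blocks the allocator until any in-flight iputs, and the space they release, have completed. A userspace analogue of the idiom (pthreads standing in for the kernel mutex):

    /* Analogue of the "lock then immediately unlock to wait for the
     * worker" idiom; nothing is protected, the pair only synchronizes. */
    #include <pthread.h>
    #include <stdio.h>
    #include <unistd.h>

    static pthread_mutex_t work_mutex = PTHREAD_MUTEX_INITIALIZER;

    static void *worker(void *arg)
    {
        pthread_mutex_lock(&work_mutex);
        sleep(1);                       /* stands in for running delayed iputs */
        puts("worker: done releasing space");
        pthread_mutex_unlock(&work_mutex);
        return NULL;
    }

    int main(void)
    {
        pthread_t t;

        pthread_create(&t, NULL, worker, NULL);
        usleep(100 * 1000);             /* let the worker grab the mutex */

        /* Empty critical section: returns only once the worker is done. */
        pthread_mutex_lock(&work_mutex);
        pthread_mutex_unlock(&work_mutex);
        puts("allocator: safe to retry now");

        pthread_join(t, NULL);
        return 0;
    }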
@@ -10399,7 +10402,7 @@ btrfs_start_trans_remove_block_group(struct btrfs_fs_info *fs_info,
      * more device items and remove one chunk item), but this is done at
      * btrfs_remove_chunk() through a call to check_system_chunk().
      */
-    map = (struct map_lookup *)em->bdev;
+    map = em->map_lookup;
     num_items = 3 + map->num_stripes;
     free_extent_map(em);
@@ -10586,7 +10589,7 @@ int btrfs_init_space_info(struct btrfs_fs_info *fs_info)
     disk_super = fs_info->super_copy;
     if (!btrfs_super_root(disk_super))
-        return 1;
+        return -EINVAL;
     features = btrfs_super_incompat_flags(disk_super);
     if (features & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)
@@ -10816,3 +10819,23 @@ int btrfs_start_write_no_snapshoting(struct btrfs_root *root)
     }
     return 1;
 }
+static int wait_snapshoting_atomic_t(atomic_t *a)
+{
+    schedule();
+    return 0;
+}
+void btrfs_wait_for_snapshot_creation(struct btrfs_root *root)
+{
+    while (true) {
+        int ret;
+        ret = btrfs_start_write_no_snapshoting(root);
+        if (ret)
+            break;
+        wait_on_atomic_t(&root->will_be_snapshoted,
+                 wait_snapshoting_atomic_t,
+                 TASK_UNINTERRUPTIBLE);
+    }
+}
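btrfs_start_write_no_snapshoting() fails while will_be_snapshoted is non-zero, and wait_on_atomic_t() sleeps until that counter drops back to zero; the outer loop then retries, since a new snapshot may begin between the wakeup and the retry. A simplified userspace analogue (C11 atomics and a yield standing in for the kernel wait; the real function also takes a semaphore, elided here):

    /* Retry-until-acquired loop, simplified from the kernel pattern. */
    #include <stdatomic.h>
    #include <sched.h>
    #include <stdbool.h>

    static atomic_int will_be_snapshoted;

    static bool start_write_no_snapshoting(void)
    {
        /* succeed only when no snapshot is pending */
        return atomic_load(&will_be_snapshoted) == 0;
    }

    static void wait_for_snapshot_creation(void)
    {
        while (true) {
            if (start_write_no_snapshoting())
                break;
            /* wait_on_atomic_t: sleep until the counter hits zero */
            while (atomic_load(&will_be_snapshoted) != 0)
                sched_yield();
        }
    }

    int main(void)
    {
        wait_for_snapshot_creation();   /* returns at once: counter is 0 */
        return 0;
    }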
@@ -76,7 +76,7 @@ void free_extent_map(struct extent_map *em)
         WARN_ON(extent_map_in_tree(em));
         WARN_ON(!list_empty(&em->list));
         if (test_bit(EXTENT_FLAG_FS_MAPPING, &em->flags))
-            kfree(em->bdev);
+            kfree(em->map_lookup);
         kmem_cache_free(extent_map_cache, em);
     }
 }
...
@@ -32,7 +32,15 @@ struct extent_map {
     u64 block_len;
     u64 generation;
     unsigned long flags;
-    struct block_device *bdev;
+    union {
+        struct block_device *bdev;
+        /*
+         * used for chunk mappings
+         * flags & EXTENT_FLAG_FS_MAPPING must be set
+         */
+        struct map_lookup *map_lookup;
+    };
     atomic_t refs;
     unsigned int compress_type;
     struct list_head list;
...
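The union replaces the old habit of smuggling a struct map_lookup pointer through em->bdev with a cast; EXTENT_FLAG_FS_MAPPING tells readers (and free_extent_map() above) which member is live. A minimal sketch of the flag-tagged union (simplified types, not the kernel structs):

    /* Flag-tagged union: the flag bit says which member is valid. */
    #include <stdio.h>

    #define EXTENT_FLAG_FS_MAPPING (1UL << 0)

    struct block_device { int dummy; };
    struct map_lookup { int num_stripes; };

    struct extent_map {
        unsigned long flags;
        union {
            struct block_device *bdev;      /* ordinary file extents */
            struct map_lookup *map_lookup;  /* chunk mappings only */
        };
    };

    int main(void)
    {
        struct map_lookup map = { .num_stripes = 2 };
        struct extent_map em = { .flags = EXTENT_FLAG_FS_MAPPING };

        em.map_lookup = &map;   /* no (struct block_device *) cast needed */

        if (em.flags & EXTENT_FLAG_FS_MAPPING)
            printf("chunk map with %d stripes\n",
                   em.map_lookup->num_stripes);
        return 0;
    }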
@@ -406,8 +406,7 @@ int btrfs_run_defrag_inodes(struct btrfs_fs_info *fs_info)
 /* simple helper to fault in pages and copy. This should go away
  * and be replaced with calls into generic code.
  */
-static noinline int btrfs_copy_from_user(loff_t pos, int num_pages,
-                     size_t write_bytes,
+static noinline int btrfs_copy_from_user(loff_t pos, size_t write_bytes,
                      struct page **prepared_pages,
                      struct iov_iter *i)
 {
@@ -1588,8 +1587,7 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file,
             ret = 0;
         }
-        copied = btrfs_copy_from_user(pos, num_pages,
-                       write_bytes, pages, i);
+        copied = btrfs_copy_from_user(pos, write_bytes, pages, i);
         /*
          * if we have trouble faulting in the pages, fall
...
@@ -515,7 +515,7 @@ int btrfs_save_ino_cache(struct btrfs_root *root,
     return ret;
 }
-static int btrfs_find_highest_objectid(struct btrfs_root *root, u64 *objectid)
+int btrfs_find_highest_objectid(struct btrfs_root *root, u64 *objectid)
 {
     struct btrfs_path *path;
     int ret;
@@ -555,13 +555,6 @@ int btrfs_find_free_objectid(struct btrfs_root *root, u64 *objectid)
     int ret;
     mutex_lock(&root->objectid_mutex);
-    if (unlikely(root->highest_objectid < BTRFS_FIRST_FREE_OBJECTID)) {
-        ret = btrfs_find_highest_objectid(root,
-                          &root->highest_objectid);
-        if (ret)
-            goto out;
-    }
     if (unlikely(root->highest_objectid >= BTRFS_LAST_FREE_OBJECTID)) {
         ret = -ENOSPC;
         goto out;
...
@@ -9,5 +9,6 @@ int btrfs_save_ino_cache(struct btrfs_root *root,
              struct btrfs_trans_handle *trans);
 int btrfs_find_free_objectid(struct btrfs_root *root, u64 *objectid);
+int btrfs_find_highest_objectid(struct btrfs_root *root, u64 *objectid);
 #endif
...
@@ -3134,7 +3134,6 @@ void btrfs_run_delayed_iputs(struct btrfs_root *root)
 {
     struct btrfs_fs_info *fs_info = root->fs_info;
-    down_read(&fs_info->delayed_iput_sem);
     spin_lock(&fs_info->delayed_iput_lock);
     while (!list_empty(&fs_info->delayed_iputs)) {
         struct btrfs_inode *inode;
@@ -3153,7 +3152,6 @@ void btrfs_run_delayed_iputs(struct btrfs_root *root)
         spin_lock(&fs_info->delayed_iput_lock);
     }
     spin_unlock(&fs_info->delayed_iput_lock);
-    up_read(&root->fs_info->delayed_iput_sem);
 }
 /*
@@ -4874,26 +4872,6 @@ int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size)
     return err;
 }
-static int wait_snapshoting_atomic_t(atomic_t *a)
-{
-    schedule();
-    return 0;
-}
-static void wait_for_snapshot_creation(struct btrfs_root *root)
-{
-    while (true) {
-        int ret;
-        ret = btrfs_start_write_no_snapshoting(root);
-        if (ret)
-            break;
-        wait_on_atomic_t(&root->will_be_snapshoted,
-                 wait_snapshoting_atomic_t,
-                 TASK_UNINTERRUPTIBLE);
-    }
-}
 static int btrfs_setsize(struct inode *inode, struct iattr *attr)
 {
     struct btrfs_root *root = BTRFS_I(inode)->root;
@@ -4925,7 +4903,7 @@ static int btrfs_setsize(struct inode *inode, struct iattr *attr)
          * truncation, it must capture all writes that happened before
          * this truncation.
          */
-        wait_for_snapshot_creation(root);
+        btrfs_wait_for_snapshot_creation(root);
         ret = btrfs_cont_expand(inode, oldsize, newsize);
         if (ret) {
             btrfs_end_write_no_snapshoting(root);
...
@@ -568,6 +568,10 @@ static noinline int create_subvol(struct inode *dir,
         goto fail;
     }
+    mutex_lock(&new_root->objectid_mutex);
+    new_root->highest_objectid = new_dirid;
+    mutex_unlock(&new_root->objectid_mutex);
     /*
      * insert the directory item
      */
...
@@ -609,13 +609,28 @@ static int rbio_can_merge(struct btrfs_raid_bio *last,
     return 1;
 }
+static int rbio_stripe_page_index(struct btrfs_raid_bio *rbio, int stripe,
+                  int index)
+{
+    return stripe * rbio->stripe_npages + index;
+}
+/*
+ * these are just the pages from the rbio array, not from anything
+ * the FS sent down to us
+ */
+static struct page *rbio_stripe_page(struct btrfs_raid_bio *rbio, int stripe,
+                     int index)
+{
+    return rbio->stripe_pages[rbio_stripe_page_index(rbio, stripe, index)];
+}
 /*
  * helper to index into the pstripe
  */
 static struct page *rbio_pstripe_page(struct btrfs_raid_bio *rbio, int index)
 {
-    index += (rbio->nr_data * rbio->stripe_len) >> PAGE_CACHE_SHIFT;
-    return rbio->stripe_pages[index];
+    return rbio_stripe_page(rbio, rbio->nr_data, index);
 }
@@ -626,10 +641,7 @@ static struct page *rbio_qstripe_page(struct btrfs_raid_bio *rbio, int index)
 {
     if (rbio->nr_data + 1 == rbio->real_stripes)
         return NULL;
-    index += ((rbio->nr_data + 1) * rbio->stripe_len) >>
-        PAGE_CACHE_SHIFT;
-    return rbio->stripe_pages[index];
+    return rbio_stripe_page(rbio, rbio->nr_data + 1, index);
 }
...
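All stripe-page lookups now funnel through rbio_stripe_page_index(), i.e. stripe * stripe_npages + index, instead of repeating byte-shift arithmetic at each call site. A worked example showing the two formulas agree (sample 64K-stripe / 4K-page values):

    /* Worked example of the unified stripe_page index calculation. */
    #include <stdio.h>

    #define PAGE_SHIFT 12                   /* 4K pages */

    int main(void)
    {
        unsigned long stripe_len = 64 * 1024;
        int stripe_npages = stripe_len >> PAGE_SHIFT;   /* 16 */
        int stripe = 2, index = 5;

        int old_idx = ((stripe * stripe_len) >> PAGE_SHIFT) + index;
        int new_idx = stripe * stripe_npages + index;

        printf("old=%d new=%d\n", old_idx, new_idx);    /* both 37 */
        return 0;
    }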
@@ -889,6 +901,7 @@ static void raid_write_end_io(struct bio *bio)
 {
     struct btrfs_raid_bio *rbio = bio->bi_private;
     int err = bio->bi_error;
+    int max_errors;
     if (err)
         fail_bio_stripe(rbio, bio);
@@ -901,7 +914,9 @@ static void raid_write_end_io(struct bio *bio)
     err = 0;
     /* OK, we have read all the stripes we need to. */
-    if (atomic_read(&rbio->error) > rbio->bbio->max_errors)
+    max_errors = (rbio->operation == BTRFS_RBIO_PARITY_SCRUB) ?
+             0 : rbio->bbio->max_errors;
+    if (atomic_read(&rbio->error) > max_errors)
         err = -EIO;
     rbio_orig_end_io(rbio, err);
@@ -947,8 +962,7 @@ static struct page *page_in_rbio(struct btrfs_raid_bio *rbio,
  */
 static unsigned long rbio_nr_pages(unsigned long stripe_len, int nr_stripes)
 {
-    unsigned long nr = stripe_len * nr_stripes;
-    return DIV_ROUND_UP(nr, PAGE_CACHE_SIZE);
+    return DIV_ROUND_UP(stripe_len, PAGE_CACHE_SIZE) * nr_stripes;
 }
@@ -966,8 +980,8 @@ static struct btrfs_raid_bio *alloc_rbio(struct btrfs_root *root,
     void *p;
     rbio = kzalloc(sizeof(*rbio) + num_pages * sizeof(struct page *) * 2 +
-               DIV_ROUND_UP(stripe_npages, BITS_PER_LONG / 8),
-               GFP_NOFS);
+               DIV_ROUND_UP(stripe_npages, BITS_PER_LONG) *
+               sizeof(long), GFP_NOFS);
     if (!rbio)
         return ERR_PTR(-ENOMEM);
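The dbitmap sizing fix above: kernel bitmaps are arrays of long, so the allocation must round up to whole longs. The old expression rounded up to bytes (BITS_PER_LONG / 8 is bytes per long), which can undersize the bitmap and let bitmap operations, which always access full longs, run past the allocation. A worked example with sample numbers:

    /* Old vs. new dbitmap sizing for a bit-per-page bitmap. */
    #include <stdio.h>

    #define BITS_PER_LONG (8 * sizeof(long))
    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    int main(void)
    {
        int stripe_npages = 16;     /* one bit per stripe page */

        size_t old_bytes = DIV_ROUND_UP(stripe_npages, BITS_PER_LONG / 8);
        size_t new_bytes = DIV_ROUND_UP(stripe_npages, BITS_PER_LONG) *
                   sizeof(long);

        /* on 64-bit: old = 2 bytes, new = 8 bytes; bitmap ops read a
         * whole long, so the old size is an out-of-bounds hazard */
        printf("old=%zu new=%zu\n", old_bytes, new_bytes);
        return 0;
    }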
...@@ -1021,18 +1035,17 @@ static int alloc_rbio_pages(struct btrfs_raid_bio *rbio) ...@@ -1021,18 +1035,17 @@ static int alloc_rbio_pages(struct btrfs_raid_bio *rbio)
if (!page) if (!page)
return -ENOMEM; return -ENOMEM;
rbio->stripe_pages[i] = page; rbio->stripe_pages[i] = page;
ClearPageUptodate(page);
} }
return 0; return 0;
} }
/* allocate pages for just the p/q stripes */ /* only allocate pages for p/q stripes */
static int alloc_rbio_parity_pages(struct btrfs_raid_bio *rbio) static int alloc_rbio_parity_pages(struct btrfs_raid_bio *rbio)
{ {
int i; int i;
struct page *page; struct page *page;
i = (rbio->nr_data * rbio->stripe_len) >> PAGE_CACHE_SHIFT; i = rbio_stripe_page_index(rbio, rbio->nr_data, 0);
for (; i < rbio->nr_pages; i++) { for (; i < rbio->nr_pages; i++) {
if (rbio->stripe_pages[i]) if (rbio->stripe_pages[i])
...@@ -1120,18 +1133,6 @@ static void validate_rbio_for_rmw(struct btrfs_raid_bio *rbio) ...@@ -1120,18 +1133,6 @@ static void validate_rbio_for_rmw(struct btrfs_raid_bio *rbio)
} }
} }
/*
* these are just the pages from the rbio array, not from anything
* the FS sent down to us
*/
static struct page *rbio_stripe_page(struct btrfs_raid_bio *rbio, int stripe, int page)
{
int index;
index = stripe * (rbio->stripe_len >> PAGE_CACHE_SHIFT);
index += page;
return rbio->stripe_pages[index];
}
/* /*
* helper function to walk our bio list and populate the bio_pages array with * helper function to walk our bio list and populate the bio_pages array with
* the result. This seems expensive, but it is faster than constantly * the result. This seems expensive, but it is faster than constantly
...@@ -1175,7 +1176,6 @@ static noinline void finish_rmw(struct btrfs_raid_bio *rbio) ...@@ -1175,7 +1176,6 @@ static noinline void finish_rmw(struct btrfs_raid_bio *rbio)
{ {
struct btrfs_bio *bbio = rbio->bbio; struct btrfs_bio *bbio = rbio->bbio;
void *pointers[rbio->real_stripes]; void *pointers[rbio->real_stripes];
int stripe_len = rbio->stripe_len;
int nr_data = rbio->nr_data; int nr_data = rbio->nr_data;
int stripe; int stripe;
int pagenr; int pagenr;
...@@ -1183,7 +1183,6 @@ static noinline void finish_rmw(struct btrfs_raid_bio *rbio) ...@@ -1183,7 +1183,6 @@ static noinline void finish_rmw(struct btrfs_raid_bio *rbio)
int q_stripe = -1; int q_stripe = -1;
struct bio_list bio_list; struct bio_list bio_list;
struct bio *bio; struct bio *bio;
int pages_per_stripe = stripe_len >> PAGE_CACHE_SHIFT;
int ret; int ret;
bio_list_init(&bio_list); bio_list_init(&bio_list);
...@@ -1226,7 +1225,7 @@ static noinline void finish_rmw(struct btrfs_raid_bio *rbio) ...@@ -1226,7 +1225,7 @@ static noinline void finish_rmw(struct btrfs_raid_bio *rbio)
else else
clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags); clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags);
for (pagenr = 0; pagenr < pages_per_stripe; pagenr++) { for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
struct page *p; struct page *p;
/* first collect one page from each data stripe */ /* first collect one page from each data stripe */
for (stripe = 0; stripe < nr_data; stripe++) { for (stripe = 0; stripe < nr_data; stripe++) {
...@@ -1268,7 +1267,7 @@ static noinline void finish_rmw(struct btrfs_raid_bio *rbio) ...@@ -1268,7 +1267,7 @@ static noinline void finish_rmw(struct btrfs_raid_bio *rbio)
* everything else. * everything else.
*/ */
for (stripe = 0; stripe < rbio->real_stripes; stripe++) { for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
for (pagenr = 0; pagenr < pages_per_stripe; pagenr++) { for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
struct page *page; struct page *page;
if (stripe < rbio->nr_data) { if (stripe < rbio->nr_data) {
page = page_in_rbio(rbio, stripe, pagenr, 1); page = page_in_rbio(rbio, stripe, pagenr, 1);
...@@ -1292,7 +1291,7 @@ static noinline void finish_rmw(struct btrfs_raid_bio *rbio) ...@@ -1292,7 +1291,7 @@ static noinline void finish_rmw(struct btrfs_raid_bio *rbio)
if (!bbio->tgtdev_map[stripe]) if (!bbio->tgtdev_map[stripe])
continue; continue;
for (pagenr = 0; pagenr < pages_per_stripe; pagenr++) { for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
struct page *page; struct page *page;
if (stripe < rbio->nr_data) { if (stripe < rbio->nr_data) {
page = page_in_rbio(rbio, stripe, pagenr, 1); page = page_in_rbio(rbio, stripe, pagenr, 1);
...@@ -1506,7 +1505,6 @@ static int raid56_rmw_stripe(struct btrfs_raid_bio *rbio) ...@@ -1506,7 +1505,6 @@ static int raid56_rmw_stripe(struct btrfs_raid_bio *rbio)
int bios_to_read = 0; int bios_to_read = 0;
struct bio_list bio_list; struct bio_list bio_list;
int ret; int ret;
int nr_pages = DIV_ROUND_UP(rbio->stripe_len, PAGE_CACHE_SIZE);
int pagenr; int pagenr;
int stripe; int stripe;
struct bio *bio; struct bio *bio;
...@@ -1525,7 +1523,7 @@ static int raid56_rmw_stripe(struct btrfs_raid_bio *rbio) ...@@ -1525,7 +1523,7 @@ static int raid56_rmw_stripe(struct btrfs_raid_bio *rbio)
* stripe * stripe
*/ */
for (stripe = 0; stripe < rbio->nr_data; stripe++) { for (stripe = 0; stripe < rbio->nr_data; stripe++) {
for (pagenr = 0; pagenr < nr_pages; pagenr++) { for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
struct page *page; struct page *page;
/* /*
* we want to find all the pages missing from * we want to find all the pages missing from
...@@ -1801,7 +1799,6 @@ static void __raid_recover_end_io(struct btrfs_raid_bio *rbio) ...@@ -1801,7 +1799,6 @@ static void __raid_recover_end_io(struct btrfs_raid_bio *rbio)
int pagenr, stripe; int pagenr, stripe;
void **pointers; void **pointers;
int faila = -1, failb = -1; int faila = -1, failb = -1;
int nr_pages = DIV_ROUND_UP(rbio->stripe_len, PAGE_CACHE_SIZE);
struct page *page; struct page *page;
int err; int err;
int i; int i;
...@@ -1824,7 +1821,7 @@ static void __raid_recover_end_io(struct btrfs_raid_bio *rbio) ...@@ -1824,7 +1821,7 @@ static void __raid_recover_end_io(struct btrfs_raid_bio *rbio)
index_rbio_pages(rbio); index_rbio_pages(rbio);
for (pagenr = 0; pagenr < nr_pages; pagenr++) { for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
/* /*
* Now we just use bitmap to mark the horizontal stripes in * Now we just use bitmap to mark the horizontal stripes in
* which we have data when doing parity scrub. * which we have data when doing parity scrub.
...@@ -1935,7 +1932,7 @@ static void __raid_recover_end_io(struct btrfs_raid_bio *rbio) ...@@ -1935,7 +1932,7 @@ static void __raid_recover_end_io(struct btrfs_raid_bio *rbio)
* other endio functions will fiddle the uptodate bits * other endio functions will fiddle the uptodate bits
*/ */
if (rbio->operation == BTRFS_RBIO_WRITE) { if (rbio->operation == BTRFS_RBIO_WRITE) {
for (i = 0; i < nr_pages; i++) { for (i = 0; i < rbio->stripe_npages; i++) {
if (faila != -1) { if (faila != -1) {
page = rbio_stripe_page(rbio, faila, i); page = rbio_stripe_page(rbio, faila, i);
SetPageUptodate(page); SetPageUptodate(page);
...@@ -2031,7 +2028,6 @@ static int __raid56_parity_recover(struct btrfs_raid_bio *rbio) ...@@ -2031,7 +2028,6 @@ static int __raid56_parity_recover(struct btrfs_raid_bio *rbio)
int bios_to_read = 0; int bios_to_read = 0;
struct bio_list bio_list; struct bio_list bio_list;
int ret; int ret;
int nr_pages = DIV_ROUND_UP(rbio->stripe_len, PAGE_CACHE_SIZE);
int pagenr; int pagenr;
int stripe; int stripe;
struct bio *bio; struct bio *bio;
...@@ -2055,7 +2051,7 @@ static int __raid56_parity_recover(struct btrfs_raid_bio *rbio) ...@@ -2055,7 +2051,7 @@ static int __raid56_parity_recover(struct btrfs_raid_bio *rbio)
continue; continue;
} }
for (pagenr = 0; pagenr < nr_pages; pagenr++) { for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
struct page *p; struct page *p;
/* /*
...@@ -2279,37 +2275,11 @@ static int alloc_rbio_essential_pages(struct btrfs_raid_bio *rbio) ...@@ -2279,37 +2275,11 @@ static int alloc_rbio_essential_pages(struct btrfs_raid_bio *rbio)
if (!page) if (!page)
return -ENOMEM; return -ENOMEM;
rbio->stripe_pages[index] = page; rbio->stripe_pages[index] = page;
ClearPageUptodate(page);
} }
} }
return 0; return 0;
} }
/*
* end io function used by finish_rmw. When we finally
* get here, we've written a full stripe
*/
static void raid_write_parity_end_io(struct bio *bio)
{
struct btrfs_raid_bio *rbio = bio->bi_private;
int err = bio->bi_error;
if (bio->bi_error)
fail_bio_stripe(rbio, bio);
bio_put(bio);
if (!atomic_dec_and_test(&rbio->stripes_pending))
return;
err = 0;
if (atomic_read(&rbio->error))
err = -EIO;
rbio_orig_end_io(rbio, err);
}
static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio, static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio,
int need_check) int need_check)
{ {
...@@ -2462,7 +2432,7 @@ static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio, ...@@ -2462,7 +2432,7 @@ static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio,
break; break;
bio->bi_private = rbio; bio->bi_private = rbio;
bio->bi_end_io = raid_write_parity_end_io; bio->bi_end_io = raid_write_end_io;
submit_bio(WRITE, bio); submit_bio(WRITE, bio);
} }
return; return;
......
...@@ -2813,7 +2813,7 @@ static void scrub_parity_check_and_repair(struct scrub_parity *sparity) ...@@ -2813,7 +2813,7 @@ static void scrub_parity_check_and_repair(struct scrub_parity *sparity)
static inline int scrub_calc_parity_bitmap_len(int nsectors) static inline int scrub_calc_parity_bitmap_len(int nsectors)
{ {
return DIV_ROUND_UP(nsectors, BITS_PER_LONG) * (BITS_PER_LONG / 8); return DIV_ROUND_UP(nsectors, BITS_PER_LONG) * sizeof(long);
} }
static void scrub_parity_get(struct scrub_parity *sparity) static void scrub_parity_get(struct scrub_parity *sparity)
...@@ -3458,7 +3458,7 @@ static noinline_for_stack int scrub_chunk(struct scrub_ctx *sctx, ...@@ -3458,7 +3458,7 @@ static noinline_for_stack int scrub_chunk(struct scrub_ctx *sctx,
return ret; return ret;
} }
map = (struct map_lookup *)em->bdev; map = em->map_lookup;
if (em->start != chunk_offset) if (em->start != chunk_offset)
goto out; goto out;
......
...@@ -383,6 +383,9 @@ int btrfs_parse_options(struct btrfs_root *root, char *options) ...@@ -383,6 +383,9 @@ int btrfs_parse_options(struct btrfs_root *root, char *options)
int ret = 0; int ret = 0;
char *compress_type; char *compress_type;
bool compress_force = false; bool compress_force = false;
enum btrfs_compression_type saved_compress_type;
bool saved_compress_force;
int no_compress = 0;
cache_gen = btrfs_super_cache_generation(root->fs_info->super_copy); cache_gen = btrfs_super_cache_generation(root->fs_info->super_copy);
if (btrfs_fs_compat_ro(root->fs_info, FREE_SPACE_TREE)) if (btrfs_fs_compat_ro(root->fs_info, FREE_SPACE_TREE))
...@@ -462,6 +465,10 @@ int btrfs_parse_options(struct btrfs_root *root, char *options) ...@@ -462,6 +465,10 @@ int btrfs_parse_options(struct btrfs_root *root, char *options)
/* Fallthrough */ /* Fallthrough */
case Opt_compress: case Opt_compress:
case Opt_compress_type: case Opt_compress_type:
saved_compress_type = btrfs_test_opt(root, COMPRESS) ?
info->compress_type : BTRFS_COMPRESS_NONE;
saved_compress_force =
btrfs_test_opt(root, FORCE_COMPRESS);
if (token == Opt_compress || if (token == Opt_compress ||
token == Opt_compress_force || token == Opt_compress_force ||
strcmp(args[0].from, "zlib") == 0) { strcmp(args[0].from, "zlib") == 0) {
...@@ -470,6 +477,7 @@ int btrfs_parse_options(struct btrfs_root *root, char *options) ...@@ -470,6 +477,7 @@ int btrfs_parse_options(struct btrfs_root *root, char *options)
btrfs_set_opt(info->mount_opt, COMPRESS); btrfs_set_opt(info->mount_opt, COMPRESS);
btrfs_clear_opt(info->mount_opt, NODATACOW); btrfs_clear_opt(info->mount_opt, NODATACOW);
btrfs_clear_opt(info->mount_opt, NODATASUM); btrfs_clear_opt(info->mount_opt, NODATASUM);
no_compress = 0;
} else if (strcmp(args[0].from, "lzo") == 0) { } else if (strcmp(args[0].from, "lzo") == 0) {
compress_type = "lzo"; compress_type = "lzo";
info->compress_type = BTRFS_COMPRESS_LZO; info->compress_type = BTRFS_COMPRESS_LZO;
...@@ -477,25 +485,21 @@ int btrfs_parse_options(struct btrfs_root *root, char *options) ...@@ -477,25 +485,21 @@ int btrfs_parse_options(struct btrfs_root *root, char *options)
btrfs_clear_opt(info->mount_opt, NODATACOW); btrfs_clear_opt(info->mount_opt, NODATACOW);
btrfs_clear_opt(info->mount_opt, NODATASUM); btrfs_clear_opt(info->mount_opt, NODATASUM);
btrfs_set_fs_incompat(info, COMPRESS_LZO); btrfs_set_fs_incompat(info, COMPRESS_LZO);
no_compress = 0;
} else if (strncmp(args[0].from, "no", 2) == 0) { } else if (strncmp(args[0].from, "no", 2) == 0) {
compress_type = "no"; compress_type = "no";
btrfs_clear_opt(info->mount_opt, COMPRESS); btrfs_clear_opt(info->mount_opt, COMPRESS);
btrfs_clear_opt(info->mount_opt, FORCE_COMPRESS); btrfs_clear_opt(info->mount_opt, FORCE_COMPRESS);
compress_force = false; compress_force = false;
no_compress++;
} else { } else {
ret = -EINVAL; ret = -EINVAL;
goto out; goto out;
} }
if (compress_force) { if (compress_force) {
btrfs_set_and_info(root, FORCE_COMPRESS, btrfs_set_opt(info->mount_opt, FORCE_COMPRESS);
"force %s compression",
compress_type);
} else { } else {
if (!btrfs_test_opt(root, COMPRESS))
btrfs_info(root->fs_info,
"btrfs: use %s compression",
compress_type);
/* /*
* If we remount from compress-force=xxx to * If we remount from compress-force=xxx to
* compress=xxx, we need clear FORCE_COMPRESS * compress=xxx, we need clear FORCE_COMPRESS
...@@ -504,6 +508,17 @@ int btrfs_parse_options(struct btrfs_root *root, char *options) ...@@ -504,6 +508,17 @@ int btrfs_parse_options(struct btrfs_root *root, char *options)
*/ */
btrfs_clear_opt(info->mount_opt, FORCE_COMPRESS); btrfs_clear_opt(info->mount_opt, FORCE_COMPRESS);
} }
if ((btrfs_test_opt(root, COMPRESS) &&
(info->compress_type != saved_compress_type ||
compress_force != saved_compress_force)) ||
(!btrfs_test_opt(root, COMPRESS) &&
no_compress == 1)) {
btrfs_info(root->fs_info,
"%s %s compression",
(compress_force) ? "force" : "use",
compress_type);
}
compress_force = false;
break; break;
case Opt_ssd: case Opt_ssd:
btrfs_set_and_info(root, SSD, btrfs_set_and_info(root, SSD,
......
@@ -108,7 +108,7 @@ const struct btrfs_raid_attr btrfs_raid_array[BTRFS_NR_RAID_TYPES] = {
     },
 };
-const u64 const btrfs_raid_group[BTRFS_NR_RAID_TYPES] = {
+const u64 btrfs_raid_group[BTRFS_NR_RAID_TYPES] = {
     [BTRFS_RAID_RAID10] = BTRFS_BLOCK_GROUP_RAID10,
     [BTRFS_RAID_RAID1] = BTRFS_BLOCK_GROUP_RAID1,
     [BTRFS_RAID_DUP] = BTRFS_BLOCK_GROUP_DUP,
@@ -233,6 +233,7 @@ static struct btrfs_device *__alloc_device(void)
     spin_lock_init(&dev->reada_lock);
     atomic_set(&dev->reada_in_flight, 0);
     atomic_set(&dev->dev_stats_ccnt, 0);
+    btrfs_device_data_ordered_init(dev);
     INIT_RADIX_TREE(&dev->reada_zones, GFP_NOFS & ~__GFP_DIRECT_RECLAIM);
     INIT_RADIX_TREE(&dev->reada_extents, GFP_NOFS & ~__GFP_DIRECT_RECLAIM);
@@ -1183,7 +1184,7 @@ static int contains_pending_extent(struct btrfs_transaction *transaction,
     struct map_lookup *map;
     int i;
-    map = (struct map_lookup *)em->bdev;
+    map = em->map_lookup;
     for (i = 0; i < map->num_stripes; i++) {
         u64 end;
@@ -2755,7 +2756,7 @@ int btrfs_remove_chunk(struct btrfs_trans_handle *trans,
         free_extent_map(em);
         return -EINVAL;
     }
-    map = (struct map_lookup *)em->bdev;
+    map = em->map_lookup;
     lock_chunks(root->fs_info->chunk_root);
     check_system_chunk(trans, extent_root, map->type);
     unlock_chunks(root->fs_info->chunk_root);
@@ -3751,7 +3752,7 @@ int btrfs_balance(struct btrfs_balance_control *bctl,
     if (btrfs_get_num_tolerated_disk_barrier_failures(bctl->meta.target) <
         btrfs_get_num_tolerated_disk_barrier_failures(bctl->data.target)) {
         btrfs_warn(fs_info,
-    "metatdata profile 0x%llx has lower redundancy than data profile 0x%llx",
+    "metadata profile 0x%llx has lower redundancy than data profile 0x%llx",
             bctl->meta.target, bctl->data.target);
     }
@@ -4718,7 +4719,7 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
         goto error;
     }
     set_bit(EXTENT_FLAG_FS_MAPPING, &em->flags);
-    em->bdev = (struct block_device *)map;
+    em->map_lookup = map;
     em->start = start;
     em->len = num_bytes;
     em->block_start = 0;
@@ -4813,7 +4814,7 @@ int btrfs_finish_chunk_alloc(struct btrfs_trans_handle *trans,
         return -EINVAL;
     }
-    map = (struct map_lookup *)em->bdev;
+    map = em->map_lookup;
     item_size = btrfs_chunk_item_size(map->num_stripes);
     stripe_size = em->orig_block_len;
@@ -4968,7 +4969,7 @@ int btrfs_chunk_readonly(struct btrfs_root *root, u64 chunk_offset)
     if (!em)
         return 1;
-    map = (struct map_lookup *)em->bdev;
+    map = em->map_lookup;
     for (i = 0; i < map->num_stripes; i++) {
         if (map->stripes[i].dev->missing) {
             miss_ndevs++;
@@ -5048,7 +5049,7 @@ int btrfs_num_copies(struct btrfs_fs_info *fs_info, u64 logical, u64 len)
         return 1;
     }
-    map = (struct map_lookup *)em->bdev;
+    map = em->map_lookup;
     if (map->type & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1))
         ret = map->num_stripes;
     else if (map->type & BTRFS_BLOCK_GROUP_RAID10)
@@ -5084,7 +5085,7 @@ unsigned long btrfs_full_stripe_len(struct btrfs_root *root,
     BUG_ON(!em);
     BUG_ON(em->start > logical || em->start + em->len < logical);
-    map = (struct map_lookup *)em->bdev;
+    map = em->map_lookup;
     if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK)
         len = map->stripe_len * nr_data_stripes(map);
     free_extent_map(em);
@@ -5105,7 +5106,7 @@ int btrfs_is_parity_mirror(struct btrfs_mapping_tree *map_tree,
     BUG_ON(!em);
     BUG_ON(em->start > logical || em->start + em->len < logical);
-    map = (struct map_lookup *)em->bdev;
+    map = em->map_lookup;
     if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK)
         ret = 1;
     free_extent_map(em);
@@ -5264,7 +5265,7 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
         return -EINVAL;
     }
-    map = (struct map_lookup *)em->bdev;
+    map = em->map_lookup;
     offset = logical - em->start;
     stripe_len = map->stripe_len;
@@ -5378,35 +5379,33 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
          * target drive.
          */
         for (i = 0; i < tmp_num_stripes; i++) {
-            if (tmp_bbio->stripes[i].dev->devid == srcdev_devid) {
-                /*
-                 * In case of DUP, in order to keep it
-                 * simple, only add the mirror with the
-                 * lowest physical address
-                 */
-                if (found &&
-                    physical_of_found <=
-                     tmp_bbio->stripes[i].physical)
-                    continue;
-                index_srcdev = i;
-                found = 1;
-                physical_of_found =
-                    tmp_bbio->stripes[i].physical;
-            }
+            if (tmp_bbio->stripes[i].dev->devid != srcdev_devid)
+                continue;
+            /*
+             * In case of DUP, in order to keep it simple, only add
+             * the mirror with the lowest physical address
+             */
+            if (found &&
+                physical_of_found <= tmp_bbio->stripes[i].physical)
+                continue;
+            index_srcdev = i;
+            found = 1;
+            physical_of_found = tmp_bbio->stripes[i].physical;
         }
-        if (found) {
-            mirror_num = index_srcdev + 1;
-            patch_the_first_stripe_for_dev_replace = 1;
-            physical_to_patch_in_first_stripe = physical_of_found;
-        } else {
+        btrfs_put_bbio(tmp_bbio);
+        if (!found) {
             WARN_ON(1);
             ret = -EIO;
-            btrfs_put_bbio(tmp_bbio);
             goto out;
         }
-        btrfs_put_bbio(tmp_bbio);
+        mirror_num = index_srcdev + 1;
+        patch_the_first_stripe_for_dev_replace = 1;
+        physical_to_patch_in_first_stripe = physical_of_found;
     } else if (mirror_num > map->num_stripes) {
         mirror_num = 0;
     }
@@ -5806,7 +5805,7 @@ int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree,
         free_extent_map(em);
         return -EIO;
     }
-    map = (struct map_lookup *)em->bdev;
+    map = em->map_lookup;
     length = em->len;
     rmap_len = map->stripe_len;
@@ -6069,7 +6068,8 @@ int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio,
     bbio->fs_info = root->fs_info;
     atomic_set(&bbio->stripes_pending, bbio->num_stripes);
-    if (bbio->raid_map) {
+    if ((bbio->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK) &&
+        ((rw & WRITE) || (mirror_num > 1))) {
         /* In this case, map_length has been set to the length of
            a single stripe; not the whole write */
         if (rw & WRITE) {
@@ -6210,6 +6210,7 @@ static int read_one_chunk(struct btrfs_root *root, struct btrfs_key *key,
     struct extent_map *em;
     u64 logical;
     u64 length;
+    u64 stripe_len;
     u64 devid;
     u8 uuid[BTRFS_UUID_SIZE];
     int num_stripes;
@@ -6218,6 +6219,37 @@ static int read_one_chunk(struct btrfs_root *root, struct btrfs_key *key,
     logical = key->offset;
     length = btrfs_chunk_length(leaf, chunk);
+    stripe_len = btrfs_chunk_stripe_len(leaf, chunk);
+    num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
+    /* Validation check */
+    if (!num_stripes) {
+        btrfs_err(root->fs_info, "invalid chunk num_stripes: %u",
+              num_stripes);
+        return -EIO;
+    }
+    if (!IS_ALIGNED(logical, root->sectorsize)) {
+        btrfs_err(root->fs_info,
+              "invalid chunk logical %llu", logical);
+        return -EIO;
+    }
+    if (!length || !IS_ALIGNED(length, root->sectorsize)) {
+        btrfs_err(root->fs_info,
+            "invalid chunk length %llu", length);
+        return -EIO;
+    }
+    if (!is_power_of_2(stripe_len)) {
+        btrfs_err(root->fs_info, "invalid chunk stripe length: %llu",
+              stripe_len);
+        return -EIO;
+    }
+    if (~(BTRFS_BLOCK_GROUP_TYPE_MASK | BTRFS_BLOCK_GROUP_PROFILE_MASK) &
+        btrfs_chunk_type(leaf, chunk)) {
+        btrfs_err(root->fs_info, "unrecognized chunk type: %llu",
+              ~(BTRFS_BLOCK_GROUP_TYPE_MASK |
+                BTRFS_BLOCK_GROUP_PROFILE_MASK) &
+              btrfs_chunk_type(leaf, chunk));
+        return -EIO;
+    }
     read_lock(&map_tree->map_tree.lock);
     em = lookup_extent_mapping(&map_tree->map_tree, logical, 1);
@@ -6234,7 +6266,6 @@ static int read_one_chunk(struct btrfs_root *root, struct btrfs_key *key,
     em = alloc_extent_map();
     if (!em)
         return -ENOMEM;
-    num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
     map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
     if (!map) {
         free_extent_map(em);
@@ -6242,7 +6273,7 @@ static int read_one_chunk(struct btrfs_root *root, struct btrfs_key *key,
     }
     set_bit(EXTENT_FLAG_FS_MAPPING, &em->flags);
-    em->bdev = (struct block_device *)map;
+    em->map_lookup = map;
     em->start = logical;
     em->len = length;
     em->orig_start = 0;
@@ -6944,7 +6975,7 @@ void btrfs_update_commit_device_bytes_used(struct btrfs_root *root,
     /* In order to kick the device replace finish process */
     lock_chunks(root);
     list_for_each_entry(em, &transaction->pending_chunks, list) {
-        map = (struct map_lookup *)em->bdev;
+        map = em->map_lookup;
         for (i = 0; i < map->num_stripes; i++) {
             dev = map->stripes[i].dev;
...
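The chunk validation added to read_one_chunk() rejects, among other things, any chunk whose type sets a bit outside the block-group TYPE and PROFILE masks. A minimal sketch of that mask test (single illustrative bits, not the real btrfs constants):

    /* Sketch of the unrecognized-chunk-type check with made-up masks. */
    #include <stdio.h>
    #include <stdint.h>

    #define BG_DATA      (1ULL << 0)
    #define BG_SYSTEM    (1ULL << 1)
    #define BG_METADATA  (1ULL << 2)
    #define BG_RAID1     (1ULL << 4)
    #define TYPE_MASK    (BG_DATA | BG_SYSTEM | BG_METADATA)
    #define PROFILE_MASK (BG_RAID1)     /* other profiles elided */

    int main(void)
    {
        uint64_t type = BG_DATA | BG_RAID1 | (1ULL << 60); /* bogus bit */

        if (~(TYPE_MASK | PROFILE_MASK) & type)
            printf("unrecognized chunk type: %llu\n",
                   (unsigned long long)(~(TYPE_MASK | PROFILE_MASK) & type));
        return 0;
    }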