Commit a525890c authored by: Linus Torvalds

Merge git://git.kernel.org/pub/scm/linux/kernel/git/mason/btrfs-unstable

* git://git.kernel.org/pub/scm/linux/kernel/git/mason/btrfs-unstable: (23 commits)
  Btrfs: fix extent_buffer leak during tree log replay
  Btrfs: fix oops when btrfs_inherit_iflags called with a NULL dir
  Btrfs: fix -o nodatasum printk spelling
  Btrfs: check duplicate backrefs for both data and metadata
  Btrfs: init worker struct fields before kthread-run
  Btrfs: pin buffers during write_dev_supers
  Btrfs: avoid races between super writeout and device list updates
  Fix btrfs when ACLs are configured out
  Btrfs: fdatasync should skip metadata writeout
  Btrfs: remove crc32c.h and use libcrc32c directly.
  Btrfs: implement FS_IOC_GETFLAGS/SETFLAGS/GETVERSION
  Btrfs: autodetect SSD devices
  Btrfs: add mount -o ssd_spread to spread allocations out
  Btrfs: avoid allocation clusters that are too spread out
  Btrfs: Add mount -o nossd
  Btrfs: avoid IO stalls behind congested devices in a multi-device FS
  Btrfs: don't allow WRITE_SYNC bios to starve out regular writes
  Btrfs: fix metadata dirty throttling limits
  Btrfs: reduce mount -o ssd CPU usage
  Btrfs: balance btree more often
  ...
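
Among the changes listed above, the standard FS_IOC_GETFLAGS/SETFLAGS/GETVERSION ioctls become available on btrfs inodes (see the ioctl.c hunks further down). As a rough illustration only — not part of this merge, and using a made-up mount point and file name — a minimal userspace sketch of driving those ioctls might look like this:

```c
/* Hypothetical userspace sketch: query and update inode flags on a btrfs
 * file via FS_IOC_GETFLAGS/SETFLAGS.  The path below is a placeholder. */
#include <fcntl.h>
#include <linux/fs.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/mnt/btrfs/testfile", O_RDONLY);	/* assumed path */
	int flags;

	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* read the current inode attribute flags */
	if (ioctl(fd, FS_IOC_GETFLAGS, &flags) != 0) {
		perror("FS_IOC_GETFLAGS");
		close(fd);
		return 1;
	}
	printf("flags: %#x\n", (unsigned int)flags);

	/* request NOATIME; requires ownership or CAP_FOWNER */
	flags |= FS_NOATIME_FL;
	if (ioctl(fd, FS_IOC_SETFLAGS, &flags) != 0)
		perror("FS_IOC_SETFLAGS");

	close(fd);
	return 0;
}
```

In the diff below, btrfs_ioctl_setflags() translates flags such as FS_NOATIME_FL into the new BTRFS_INODE_* bits, and btrfs_update_iflags() mirrors them into inode->i_flags.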
......@@ -6,5 +6,5 @@ btrfs-y += super.o ctree.o extent-tree.o print-tree.o root-tree.o dir-item.o \
transaction.o inode.o file.o tree-defrag.o \
extent_map.o sysfs.o struct-funcs.o xattr.o ordered-data.o \
extent_io.o volumes.o async-thread.o ioctl.o locking.o orphan.o \
ref-cache.o export.o tree-log.o acl.o free-space-cache.o zlib.o \
compression.o delayed-ref.o
export.o tree-log.o acl.o free-space-cache.o zlib.o \
compression.o delayed-ref.o relocation.o
......@@ -351,9 +351,4 @@ int btrfs_init_acl(struct inode *inode, struct inode *dir)
return 0;
}
int btrfs_check_acl(struct inode *inode, int mask)
{
return 0;
}
#endif /* CONFIG_FS_POSIX_ACL */
......@@ -294,10 +294,10 @@ int btrfs_start_workers(struct btrfs_workers *workers, int num_workers)
INIT_LIST_HEAD(&worker->worker_list);
spin_lock_init(&worker->lock);
atomic_set(&worker->num_pending, 0);
worker->workers = workers;
worker->task = kthread_run(worker_loop, worker,
"btrfs-%s-%d", workers->name,
workers->num_workers + i);
worker->workers = workers;
if (IS_ERR(worker->task)) {
kfree(worker);
ret = PTR_ERR(worker->task);
......
......@@ -72,6 +72,9 @@ struct btrfs_inode {
*/
struct list_head ordered_operations;
/* node for the red-black tree that links inodes in subvolume root */
struct rb_node rb_node;
/* the space_info for where this inode's data allocations are done */
struct btrfs_space_info *space_info;
......@@ -154,5 +157,4 @@ static inline void btrfs_i_size_write(struct inode *inode, u64 size)
BTRFS_I(inode)->disk_i_size = size;
}
#endif
......@@ -123,7 +123,7 @@ static int check_compressed_csum(struct inode *inode,
u32 csum;
u32 *cb_sum = &cb->sums;
if (btrfs_test_flag(inode, NODATASUM))
if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)
return 0;
for (i = 0; i < cb->nr_pages; i++) {
......@@ -670,7 +670,7 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
*/
atomic_inc(&cb->pending_bios);
if (!btrfs_test_flag(inode, NODATASUM)) {
if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) {
btrfs_lookup_bio_sums(root, inode, comp_bio,
sums);
}
......@@ -697,7 +697,7 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
ret = btrfs_bio_wq_end_io(root->fs_info, comp_bio, 0);
BUG_ON(ret);
if (!btrfs_test_flag(inode, NODATASUM))
if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM))
btrfs_lookup_bio_sums(root, inode, comp_bio, sums);
ret = btrfs_map_bio(root, READ, comp_bio, mirror_num, 0);
......
/*
* Copyright (C) 2008 Oracle. All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public
* License v2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public
* License along with this program; if not, write to the
* Free Software Foundation, Inc., 59 Temple Place - Suite 330,
* Boston, MA 021110-1307, USA.
*/
#ifndef __BTRFS_CRC32C__
#define __BTRFS_CRC32C__
#include <linux/crc32c.h>
/*
* this file used to do more for selecting the HW version of crc32c,
* perhaps it will one day again soon.
*/
#define btrfs_crc32c(seed, data, length) crc32c(seed, data, length)
#endif
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
......@@ -30,9 +30,6 @@ struct btrfs_delayed_ref_node {
/* the starting bytenr of the extent */
u64 bytenr;
/* the parent our backref will point to */
u64 parent;
/* the size of the extent */
u64 num_bytes;
......@@ -50,10 +47,21 @@ struct btrfs_delayed_ref_node {
*/
int ref_mod;
unsigned int action:8;
unsigned int type:8;
/* is this node still in the rbtree? */
unsigned int is_head:1;
unsigned int in_tree:1;
};
struct btrfs_delayed_extent_op {
struct btrfs_disk_key key;
u64 flags_to_set;
unsigned int update_key:1;
unsigned int update_flags:1;
unsigned int is_data:1;
};
/*
* the head refs are used to hold a lock on a given extent, which allows us
* to make sure that only one process is running the delayed refs
......@@ -71,6 +79,7 @@ struct btrfs_delayed_ref_head {
struct list_head cluster;
struct btrfs_delayed_extent_op *extent_op;
/*
* when a new extent is allocated, it is just reserved in memory
* The actual extent isn't inserted into the extent allocation tree
......@@ -84,27 +93,26 @@ struct btrfs_delayed_ref_head {
* the free has happened.
*/
unsigned int must_insert_reserved:1;
unsigned int is_data:1;
};
struct btrfs_delayed_ref {
struct btrfs_delayed_tree_ref {
struct btrfs_delayed_ref_node node;
union {
u64 root;
u64 parent;
};
int level;
};
/* the root objectid our ref will point to */
u64 root;
/* the generation for the backref */
u64 generation;
/* owner_objectid of the backref */
u64 owner_objectid;
/* operation done by this entry in the rbtree */
u8 action;
/* if pin == 1, when the extent is freed it will be pinned until
* transaction commit
*/
unsigned int pin:1;
struct btrfs_delayed_data_ref {
struct btrfs_delayed_ref_node node;
union {
u64 root;
u64 parent;
};
u64 objectid;
u64 offset;
};
struct btrfs_delayed_ref_root {
......@@ -143,17 +151,25 @@ static inline void btrfs_put_delayed_ref(struct btrfs_delayed_ref_node *ref)
}
}
int btrfs_add_delayed_ref(struct btrfs_trans_handle *trans,
u64 bytenr, u64 num_bytes, u64 parent, u64 ref_root,
u64 ref_generation, u64 owner_objectid, int action,
int pin);
int btrfs_add_delayed_tree_ref(struct btrfs_trans_handle *trans,
u64 bytenr, u64 num_bytes, u64 parent,
u64 ref_root, int level, int action,
struct btrfs_delayed_extent_op *extent_op);
int btrfs_add_delayed_data_ref(struct btrfs_trans_handle *trans,
u64 bytenr, u64 num_bytes,
u64 parent, u64 ref_root,
u64 owner, u64 offset, int action,
struct btrfs_delayed_extent_op *extent_op);
int btrfs_add_delayed_extent_op(struct btrfs_trans_handle *trans,
u64 bytenr, u64 num_bytes,
struct btrfs_delayed_extent_op *extent_op);
struct btrfs_delayed_ref_head *
btrfs_find_delayed_ref_head(struct btrfs_trans_handle *trans, u64 bytenr);
int btrfs_delayed_ref_pending(struct btrfs_trans_handle *trans, u64 bytenr);
int btrfs_lookup_extent_ref(struct btrfs_trans_handle *trans,
struct btrfs_root *root, u64 bytenr,
u64 num_bytes, u32 *refs);
int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans,
struct btrfs_root *root, u64 bytenr,
u64 num_bytes, u64 *refs, u64 *flags);
int btrfs_update_delayed_ref(struct btrfs_trans_handle *trans,
u64 bytenr, u64 num_bytes, u64 orig_parent,
u64 parent, u64 orig_ref_root, u64 ref_root,
......@@ -169,18 +185,24 @@ int btrfs_find_ref_cluster(struct btrfs_trans_handle *trans,
*/
static int btrfs_delayed_ref_is_head(struct btrfs_delayed_ref_node *node)
{
return node->parent == (u64)-1;
return node->is_head;
}
/*
* helper functions to cast a node into its container
*/
static inline struct btrfs_delayed_ref *
btrfs_delayed_node_to_ref(struct btrfs_delayed_ref_node *node)
static inline struct btrfs_delayed_tree_ref *
btrfs_delayed_node_to_tree_ref(struct btrfs_delayed_ref_node *node)
{
WARN_ON(btrfs_delayed_ref_is_head(node));
return container_of(node, struct btrfs_delayed_ref, node);
return container_of(node, struct btrfs_delayed_tree_ref, node);
}
static inline struct btrfs_delayed_data_ref *
btrfs_delayed_node_to_data_ref(struct btrfs_delayed_ref_node *node)
{
WARN_ON(btrfs_delayed_ref_is_head(node));
return container_of(node, struct btrfs_delayed_data_ref, node);
}
static inline struct btrfs_delayed_ref_head *
......@@ -188,6 +210,5 @@ btrfs_delayed_node_to_head(struct btrfs_delayed_ref_node *node)
{
WARN_ON(!btrfs_delayed_ref_is_head(node));
return container_of(node, struct btrfs_delayed_ref_head, node);
}
#endif
......@@ -26,8 +26,8 @@
#include <linux/workqueue.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/crc32c.h>
#include "compat.h"
#include "crc32c.h"
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
......@@ -36,7 +36,6 @@
#include "print-tree.h"
#include "async-thread.h"
#include "locking.h"
#include "ref-cache.h"
#include "tree-log.h"
#include "free-space-cache.h"
......@@ -172,7 +171,7 @@ out:
u32 btrfs_csum_data(struct btrfs_root *root, char *data, u32 seed, size_t len)
{
return btrfs_crc32c(seed, data, len);
return crc32c(seed, data, len);
}
void btrfs_csum_final(u32 crc, char *result)
......@@ -884,7 +883,6 @@ static int __setup_root(u32 nodesize, u32 leafsize, u32 sectorsize,
{
root->node = NULL;
root->commit_root = NULL;
root->ref_tree = NULL;
root->sectorsize = sectorsize;
root->nodesize = nodesize;
root->leafsize = leafsize;
......@@ -899,12 +897,14 @@ static int __setup_root(u32 nodesize, u32 leafsize, u32 sectorsize,
root->last_inode_alloc = 0;
root->name = NULL;
root->in_sysfs = 0;
root->inode_tree.rb_node = NULL;
INIT_LIST_HEAD(&root->dirty_list);
INIT_LIST_HEAD(&root->orphan_list);
INIT_LIST_HEAD(&root->dead_list);
INIT_LIST_HEAD(&root->root_list);
spin_lock_init(&root->node_lock);
spin_lock_init(&root->list_lock);
spin_lock_init(&root->inode_lock);
mutex_init(&root->objectid_mutex);
mutex_init(&root->log_mutex);
init_waitqueue_head(&root->log_writer_wait);
......@@ -918,9 +918,6 @@ static int __setup_root(u32 nodesize, u32 leafsize, u32 sectorsize,
extent_io_tree_init(&root->dirty_log_pages,
fs_info->btree_inode->i_mapping, GFP_NOFS);
btrfs_leaf_ref_tree_init(&root->ref_tree_struct);
root->ref_tree = &root->ref_tree_struct;
memset(&root->root_key, 0, sizeof(root->root_key));
memset(&root->root_item, 0, sizeof(root->root_item));
memset(&root->defrag_progress, 0, sizeof(root->defrag_progress));
......@@ -959,6 +956,7 @@ static int find_and_setup_root(struct btrfs_root *tree_root,
blocksize = btrfs_level_size(root, btrfs_root_level(&root->root_item));
root->node = read_tree_block(root, btrfs_root_bytenr(&root->root_item),
blocksize, generation);
root->commit_root = btrfs_root_node(root);
BUG_ON(!root->node);
return 0;
}
......@@ -1025,20 +1023,19 @@ static struct btrfs_root *alloc_log_tree(struct btrfs_trans_handle *trans,
*/
root->ref_cows = 0;
leaf = btrfs_alloc_free_block(trans, root, root->leafsize,
0, BTRFS_TREE_LOG_OBJECTID,
trans->transid, 0, 0, 0);
leaf = btrfs_alloc_free_block(trans, root, root->leafsize, 0,
BTRFS_TREE_LOG_OBJECTID, NULL, 0, 0, 0);
if (IS_ERR(leaf)) {
kfree(root);
return ERR_CAST(leaf);
}
memset_extent_buffer(leaf, 0, 0, sizeof(struct btrfs_header));
btrfs_set_header_bytenr(leaf, leaf->start);
btrfs_set_header_generation(leaf, trans->transid);
btrfs_set_header_backref_rev(leaf, BTRFS_MIXED_BACKREF_REV);
btrfs_set_header_owner(leaf, BTRFS_TREE_LOG_OBJECTID);
root->node = leaf;
btrfs_set_header_nritems(root->node, 0);
btrfs_set_header_level(root->node, 0);
btrfs_set_header_bytenr(root->node, root->node->start);
btrfs_set_header_generation(root->node, trans->transid);
btrfs_set_header_owner(root->node, BTRFS_TREE_LOG_OBJECTID);
write_extent_buffer(root->node, root->fs_info->fsid,
(unsigned long)btrfs_header_fsid(root->node),
......@@ -1081,8 +1078,7 @@ int btrfs_add_log_tree(struct btrfs_trans_handle *trans,
inode_item->nbytes = cpu_to_le64(root->leafsize);
inode_item->mode = cpu_to_le32(S_IFDIR | 0755);
btrfs_set_root_bytenr(&log_root->root_item, log_root->node->start);
btrfs_set_root_generation(&log_root->root_item, trans->transid);
btrfs_set_root_node(&log_root->root_item, log_root->node);
WARN_ON(root->log_root);
root->log_root = log_root;
......@@ -1144,6 +1140,7 @@ out:
blocksize = btrfs_level_size(root, btrfs_root_level(&root->root_item));
root->node = read_tree_block(root, btrfs_root_bytenr(&root->root_item),
blocksize, generation);
root->commit_root = btrfs_root_node(root);
BUG_ON(!root->node);
insert:
if (location->objectid != BTRFS_TREE_LOG_OBJECTID) {
......@@ -1210,7 +1207,7 @@ struct btrfs_root *btrfs_read_fs_root_no_name(struct btrfs_fs_info *fs_info,
}
if (!(fs_info->sb->s_flags & MS_RDONLY)) {
ret = btrfs_find_dead_roots(fs_info->tree_root,
root->root_key.objectid, root);
root->root_key.objectid);
BUG_ON(ret);
btrfs_orphan_cleanup(root);
}
......@@ -1569,8 +1566,6 @@ struct btrfs_root *open_ctree(struct super_block *sb,
atomic_set(&fs_info->async_delalloc_pages, 0);
atomic_set(&fs_info->async_submit_draining, 0);
atomic_set(&fs_info->nr_async_bios, 0);
atomic_set(&fs_info->throttles, 0);
atomic_set(&fs_info->throttle_gen, 0);
fs_info->sb = sb;
fs_info->max_extent = (u64)-1;
fs_info->max_inline = 8192 * 1024;
......@@ -1598,6 +1593,7 @@ struct btrfs_root *open_ctree(struct super_block *sb,
fs_info->btree_inode->i_mapping->a_ops = &btree_aops;
fs_info->btree_inode->i_mapping->backing_dev_info = &fs_info->bdi;
RB_CLEAR_NODE(&BTRFS_I(fs_info->btree_inode)->rb_node);
extent_io_tree_init(&BTRFS_I(fs_info->btree_inode)->io_tree,
fs_info->btree_inode->i_mapping,
GFP_NOFS);
......@@ -1613,10 +1609,6 @@ struct btrfs_root *open_ctree(struct super_block *sb,
fs_info->btree_inode->i_mapping, GFP_NOFS);
fs_info->do_barriers = 1;
INIT_LIST_HEAD(&fs_info->dead_reloc_roots);
btrfs_leaf_ref_tree_init(&fs_info->reloc_ref_tree);
btrfs_leaf_ref_tree_init(&fs_info->shared_ref_tree);
BTRFS_I(fs_info->btree_inode)->root = tree_root;
memset(&BTRFS_I(fs_info->btree_inode)->location, 0,
sizeof(struct btrfs_key));
......@@ -1674,6 +1666,12 @@ struct btrfs_root *open_ctree(struct super_block *sb,
goto fail_iput;
}
features = btrfs_super_incompat_flags(disk_super);
if (!(features & BTRFS_FEATURE_INCOMPAT_MIXED_BACKREF)) {
features |= BTRFS_FEATURE_INCOMPAT_MIXED_BACKREF;
btrfs_set_super_incompat_flags(disk_super, features);
}
features = btrfs_super_compat_ro_flags(disk_super) &
~BTRFS_FEATURE_COMPAT_RO_SUPP;
if (!(sb->s_flags & MS_RDONLY) && features) {
......@@ -1771,7 +1769,7 @@ struct btrfs_root *open_ctree(struct super_block *sb,
if (ret) {
printk(KERN_WARNING "btrfs: failed to read the system "
"array on %s\n", sb->s_id);
goto fail_sys_array;
goto fail_sb_buffer;
}
blocksize = btrfs_level_size(tree_root,
......@@ -1785,6 +1783,8 @@ struct btrfs_root *open_ctree(struct super_block *sb,
btrfs_super_chunk_root(disk_super),
blocksize, generation);
BUG_ON(!chunk_root->node);
btrfs_set_root_node(&chunk_root->root_item, chunk_root->node);
chunk_root->commit_root = btrfs_root_node(chunk_root);
read_extent_buffer(chunk_root->node, fs_info->chunk_tree_uuid,
(unsigned long)btrfs_header_chunk_tree_uuid(chunk_root->node),
......@@ -1810,7 +1810,8 @@ struct btrfs_root *open_ctree(struct super_block *sb,
blocksize, generation);
if (!tree_root->node)
goto fail_chunk_root;
btrfs_set_root_node(&tree_root->root_item, tree_root->node);
tree_root->commit_root = btrfs_root_node(tree_root);
ret = find_and_setup_root(tree_root, fs_info,
BTRFS_EXTENT_TREE_OBJECTID, extent_root);
......@@ -1820,14 +1821,14 @@ struct btrfs_root *open_ctree(struct super_block *sb,
ret = find_and_setup_root(tree_root, fs_info,
BTRFS_DEV_TREE_OBJECTID, dev_root);
dev_root->track_dirty = 1;
if (ret)
goto fail_extent_root;
dev_root->track_dirty = 1;
ret = find_and_setup_root(tree_root, fs_info,
BTRFS_CSUM_TREE_OBJECTID, csum_root);
if (ret)
goto fail_extent_root;
goto fail_dev_root;
csum_root->track_dirty = 1;
......@@ -1849,6 +1850,14 @@ struct btrfs_root *open_ctree(struct super_block *sb,
if (IS_ERR(fs_info->transaction_kthread))
goto fail_cleaner;
if (!btrfs_test_opt(tree_root, SSD) &&
!btrfs_test_opt(tree_root, NOSSD) &&
!fs_info->fs_devices->rotating) {
printk(KERN_INFO "Btrfs detected SSD devices, enabling SSD "
"mode\n");
btrfs_set_opt(fs_info->mount_opt, SSD);
}
if (btrfs_super_log_root(disk_super) != 0) {
u64 bytenr = btrfs_super_log_root(disk_super);
......@@ -1881,7 +1890,7 @@ struct btrfs_root *open_ctree(struct super_block *sb,
}
if (!(sb->s_flags & MS_RDONLY)) {
ret = btrfs_cleanup_reloc_trees(tree_root);
ret = btrfs_recover_relocation(tree_root);
BUG_ON(ret);
}
......@@ -1892,6 +1901,7 @@ struct btrfs_root *open_ctree(struct super_block *sb,
fs_info->fs_root = btrfs_read_fs_root_no_name(fs_info, &location);
if (!fs_info->fs_root)
goto fail_trans_kthread;
return tree_root;
fail_trans_kthread:
......@@ -1908,14 +1918,19 @@ fail_cleaner:
fail_csum_root:
free_extent_buffer(csum_root->node);
free_extent_buffer(csum_root->commit_root);
fail_dev_root:
free_extent_buffer(dev_root->node);
free_extent_buffer(dev_root->commit_root);
fail_extent_root:
free_extent_buffer(extent_root->node);
free_extent_buffer(extent_root->commit_root);
fail_tree_root:
free_extent_buffer(tree_root->node);
free_extent_buffer(tree_root->commit_root);
fail_chunk_root:
free_extent_buffer(chunk_root->node);
fail_sys_array:
free_extent_buffer(dev_root->node);
free_extent_buffer(chunk_root->commit_root);
fail_sb_buffer:
btrfs_stop_workers(&fs_info->fixup_workers);
btrfs_stop_workers(&fs_info->delalloc_workers);
......@@ -2005,6 +2020,17 @@ struct buffer_head *btrfs_read_dev_super(struct block_device *bdev)
return latest;
}
/*
* this should be called twice, once with wait == 0 and
* once with wait == 1. When wait == 0 is done, all the buffer heads
* we write are pinned.
*
* They are released when wait == 1 is done.
* max_mirrors must be the same for both runs, and it indicates how
* many supers on this one device should be written.
*
* max_mirrors == 0 means to write them all.
*/
static int write_dev_supers(struct btrfs_device *device,
struct btrfs_super_block *sb,
int do_barriers, int wait, int max_mirrors)
......@@ -2040,12 +2066,16 @@ static int write_dev_supers(struct btrfs_device *device,
bh = __find_get_block(device->bdev, bytenr / 4096,
BTRFS_SUPER_INFO_SIZE);
BUG_ON(!bh);
brelse(bh);
wait_on_buffer(bh);
if (buffer_uptodate(bh)) {
brelse(bh);
continue;
}
if (!buffer_uptodate(bh))
errors++;
/* drop our reference */
brelse(bh);
/* drop the reference from the wait == 0 run */
brelse(bh);
continue;
} else {
btrfs_set_super_bytenr(sb, bytenr);
......@@ -2056,12 +2086,18 @@ static int write_dev_supers(struct btrfs_device *device,
BTRFS_CSUM_SIZE);
btrfs_csum_final(crc, sb->csum);
/*
* one reference for us, and we leave it for the
* caller
*/
bh = __getblk(device->bdev, bytenr / 4096,
BTRFS_SUPER_INFO_SIZE);
memcpy(bh->b_data, sb, BTRFS_SUPER_INFO_SIZE);
set_buffer_uptodate(bh);
/* one reference for submit_bh */
get_bh(bh);
set_buffer_uptodate(bh);
lock_buffer(bh);
bh->b_end_io = btrfs_end_buffer_write_sync;
}
......@@ -2073,6 +2109,7 @@ static int write_dev_supers(struct btrfs_device *device,
device->name);
set_buffer_uptodate(bh);
device->barriers = 0;
/* one reference for submit_bh */
get_bh(bh);
lock_buffer(bh);
ret = submit_bh(WRITE_SYNC, bh);
......@@ -2081,22 +2118,15 @@ static int write_dev_supers(struct btrfs_device *device,
ret = submit_bh(WRITE_SYNC, bh);
}
if (!ret && wait) {
wait_on_buffer(bh);
if (!buffer_uptodate(bh))
errors++;
} else if (ret) {
if (ret)
errors++;
}
if (wait)
brelse(bh);
}
return errors < i ? 0 : -1;
}
int write_all_supers(struct btrfs_root *root, int max_mirrors)
{
struct list_head *head = &root->fs_info->fs_devices->devices;
struct list_head *head;
struct btrfs_device *dev;
struct btrfs_super_block *sb;
struct btrfs_dev_item *dev_item;
......@@ -2111,6 +2141,9 @@ int write_all_supers(struct btrfs_root *root, int max_mirrors)
sb = &root->fs_info->super_for_commit;
dev_item = &sb->dev_item;
mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
head = &root->fs_info->fs_devices->devices;
list_for_each_entry(dev, head, dev_list) {
if (!dev->bdev) {
total_errors++;
......@@ -2154,6 +2187,7 @@ int write_all_supers(struct btrfs_root *root, int max_mirrors)
if (ret)
total_errors++;
}
mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
if (total_errors > max_errors) {
printk(KERN_ERR "btrfs: %d errors while writing supers\n",
total_errors);
......@@ -2173,6 +2207,7 @@ int write_ctree_super(struct btrfs_trans_handle *trans,
int btrfs_free_fs_root(struct btrfs_fs_info *fs_info, struct btrfs_root *root)
{
WARN_ON(!RB_EMPTY_ROOT(&root->inode_tree));
radix_tree_delete(&fs_info->fs_roots_radix,
(unsigned long)root->root_key.objectid);
if (root->anon_super.s_dev) {
......@@ -2219,10 +2254,12 @@ int btrfs_cleanup_fs_roots(struct btrfs_fs_info *fs_info)
ARRAY_SIZE(gang));
if (!ret)
break;
root_objectid = gang[ret - 1]->root_key.objectid + 1;
for (i = 0; i < ret; i++) {
root_objectid = gang[i]->root_key.objectid;
ret = btrfs_find_dead_roots(fs_info->tree_root,
root_objectid, gang[i]);
root_objectid);
BUG_ON(ret);
btrfs_orphan_cleanup(gang[i]);
}
......@@ -2278,20 +2315,16 @@ int close_ctree(struct btrfs_root *root)
(unsigned long long)fs_info->total_ref_cache_size);
}
if (fs_info->extent_root->node)
free_extent_buffer(fs_info->extent_root->node);
if (fs_info->tree_root->node)
free_extent_buffer(fs_info->tree_root->node);
if (root->fs_info->chunk_root->node)
free_extent_buffer(root->fs_info->chunk_root->node);
if (root->fs_info->dev_root->node)
free_extent_buffer(root->fs_info->dev_root->node);
if (root->fs_info->csum_root->node)
free_extent_buffer(root->fs_info->csum_root->node);
free_extent_buffer(fs_info->extent_root->node);
free_extent_buffer(fs_info->extent_root->commit_root);
free_extent_buffer(fs_info->tree_root->node);
free_extent_buffer(fs_info->tree_root->commit_root);
free_extent_buffer(root->fs_info->chunk_root->node);
free_extent_buffer(root->fs_info->chunk_root->commit_root);
free_extent_buffer(root->fs_info->dev_root->node);
free_extent_buffer(root->fs_info->dev_root->commit_root);
free_extent_buffer(root->fs_info->csum_root->node);
free_extent_buffer(root->fs_info->csum_root->commit_root);
btrfs_free_block_groups(root->fs_info);
......@@ -2373,17 +2406,14 @@ void btrfs_btree_balance_dirty(struct btrfs_root *root, unsigned long nr)
* looks as though older kernels can get into trouble with
* this code, they end up stuck in balance_dirty_pages forever
*/
struct extent_io_tree *tree;
u64 num_dirty;
u64 start = 0;
unsigned long thresh = 32 * 1024 * 1024;
tree = &BTRFS_I(root->fs_info->btree_inode)->io_tree;
if (current->flags & PF_MEMALLOC)
return;
num_dirty = count_range_bits(tree, &start, (u64)-1,
thresh, EXTENT_DIRTY);
num_dirty = root->fs_info->dirty_metadata_bytes;
if (num_dirty > thresh) {
balance_dirty_pages_ratelimited_nr(
root->fs_info->btree_inode->i_mapping, 1);
......
......@@ -78,7 +78,7 @@ static struct dentry *btrfs_get_dentry(struct super_block *sb, u64 objectid,
btrfs_set_key_type(&key, BTRFS_INODE_ITEM_KEY);
key.offset = 0;
inode = btrfs_iget(sb, &key, root, NULL);
inode = btrfs_iget(sb, &key, root);
if (IS_ERR(inode))
return (void *)inode;
......@@ -192,7 +192,7 @@ static struct dentry *btrfs_get_parent(struct dentry *child)
btrfs_set_key_type(&key, BTRFS_INODE_ITEM_KEY);
key.offset = 0;
return d_obtain_alias(btrfs_iget(root->fs_info->sb, &key, root, NULL));
return d_obtain_alias(btrfs_iget(root->fs_info->sb, &key, root));
}
const struct export_operations btrfs_export_ops = {
......
This diff is collapsed.
......@@ -476,6 +476,7 @@ int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
struct extent_state *state;
struct extent_state *prealloc = NULL;
struct rb_node *node;
u64 last_end;
int err;
int set = 0;
......@@ -498,6 +499,7 @@ again:
if (state->start > end)
goto out;
WARN_ON(state->end < start);
last_end = state->end;
/*
* | ---- desired range ---- |
......@@ -524,9 +526,11 @@ again:
if (err)
goto out;
if (state->end <= end) {
start = state->end + 1;
set |= clear_state_bit(tree, state, bits,
wake, delete);
if (last_end == (u64)-1)
goto out;
start = last_end + 1;
} else {
start = state->start;
}
......@@ -552,8 +556,10 @@ again:
goto out;
}
start = state->end + 1;
set |= clear_state_bit(tree, state, bits, wake, delete);
if (last_end == (u64)-1)
goto out;
start = last_end + 1;
goto search_again;
out:
......@@ -707,8 +713,10 @@ again:
goto out;
}
set_state_bits(tree, state, bits);
start = state->end + 1;
merge_state(tree, state);
if (last_end == (u64)-1)
goto out;
start = last_end + 1;
goto search_again;
}
......@@ -742,8 +750,10 @@ again:
goto out;
if (state->end <= end) {
set_state_bits(tree, state, bits);
start = state->end + 1;
merge_state(tree, state);
if (last_end == (u64)-1)
goto out;
start = last_end + 1;
} else {
start = state->start;
}
......
......@@ -291,16 +291,12 @@ noinline int btrfs_drop_extents(struct btrfs_trans_handle *trans,
{
u64 extent_end = 0;
u64 search_start = start;
u64 leaf_start;
u64 ram_bytes = 0;
u64 orig_parent = 0;
u64 disk_bytenr = 0;
u64 orig_locked_end = locked_end;
u8 compression;
u8 encryption;
u16 other_encoding = 0;
u64 root_gen;
u64 root_owner;
struct extent_buffer *leaf;
struct btrfs_file_extent_item *extent;
struct btrfs_path *path;
......@@ -340,9 +336,6 @@ next_slot:
bookend = 0;
found_extent = 0;
found_inline = 0;
leaf_start = 0;
root_gen = 0;
root_owner = 0;
compression = 0;
encryption = 0;
extent = NULL;
......@@ -417,9 +410,6 @@ next_slot:
if (found_extent) {
read_extent_buffer(leaf, &old, (unsigned long)extent,
sizeof(old));
root_gen = btrfs_header_generation(leaf);
root_owner = btrfs_header_owner(leaf);
leaf_start = leaf->start;
}
if (end < extent_end && end >= key.offset) {
......@@ -443,14 +433,14 @@ next_slot:
}
locked_end = extent_end;
}
orig_parent = path->nodes[0]->start;
disk_bytenr = le64_to_cpu(old.disk_bytenr);
if (disk_bytenr != 0) {
ret = btrfs_inc_extent_ref(trans, root,
disk_bytenr,
le64_to_cpu(old.disk_num_bytes),
orig_parent, root->root_key.objectid,
trans->transid, inode->i_ino);
le64_to_cpu(old.disk_num_bytes), 0,
root->root_key.objectid,
key.objectid, key.offset -
le64_to_cpu(old.offset));
BUG_ON(ret);
}
}
......@@ -568,17 +558,6 @@ next_slot:
btrfs_mark_buffer_dirty(path->nodes[0]);
btrfs_set_lock_blocking(path->nodes[0]);
if (disk_bytenr != 0) {
ret = btrfs_update_extent_ref(trans, root,
disk_bytenr,
le64_to_cpu(old.disk_num_bytes),
orig_parent,
leaf->start,
root->root_key.objectid,
trans->transid, ins.objectid);
BUG_ON(ret);
}
path->leave_spinning = 0;
btrfs_release_path(root, path);
if (disk_bytenr != 0)
......@@ -594,8 +573,9 @@ next_slot:
ret = btrfs_free_extent(trans, root,
old_disk_bytenr,
le64_to_cpu(old.disk_num_bytes),
leaf_start, root_owner,
root_gen, key.objectid, 0);
0, root->root_key.objectid,
key.objectid, key.offset -
le64_to_cpu(old.offset));
BUG_ON(ret);
*hint_byte = old_disk_bytenr;
}
......@@ -664,12 +644,11 @@ int btrfs_mark_extent_written(struct btrfs_trans_handle *trans,
u64 bytenr;
u64 num_bytes;
u64 extent_end;
u64 extent_offset;
u64 orig_offset;
u64 other_start;
u64 other_end;
u64 split = start;
u64 locked_end = end;
u64 orig_parent;
int extent_type;
int split_end = 1;
int ret;
......@@ -703,7 +682,7 @@ again:
bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
extent_offset = btrfs_file_extent_offset(leaf, fi);
orig_offset = key.offset - btrfs_file_extent_offset(leaf, fi);
if (key.offset == start)
split = end;
......@@ -711,8 +690,6 @@ again:
if (key.offset == start && extent_end == end) {
int del_nr = 0;
int del_slot = 0;
u64 leaf_owner = btrfs_header_owner(leaf);
u64 leaf_gen = btrfs_header_generation(leaf);
other_start = end;
other_end = 0;
if (extent_mergeable(leaf, path->slots[0] + 1, inode->i_ino,
......@@ -721,8 +698,8 @@ again:
del_slot = path->slots[0] + 1;
del_nr++;
ret = btrfs_free_extent(trans, root, bytenr, num_bytes,
leaf->start, leaf_owner,
leaf_gen, inode->i_ino, 0);
0, root->root_key.objectid,
inode->i_ino, orig_offset);
BUG_ON(ret);
}
other_start = 0;
......@@ -733,8 +710,8 @@ again:
del_slot = path->slots[0];
del_nr++;
ret = btrfs_free_extent(trans, root, bytenr, num_bytes,
leaf->start, leaf_owner,
leaf_gen, inode->i_ino, 0);
0, root->root_key.objectid,
inode->i_ino, orig_offset);
BUG_ON(ret);
}
split_end = 0;
......@@ -768,13 +745,12 @@ again:
locked_end = extent_end;
}
btrfs_set_file_extent_num_bytes(leaf, fi, split - key.offset);
extent_offset += split - key.offset;
} else {
BUG_ON(key.offset != start);
btrfs_set_file_extent_offset(leaf, fi, extent_offset +
split - key.offset);
btrfs_set_file_extent_num_bytes(leaf, fi, extent_end - split);
key.offset = split;
btrfs_set_file_extent_offset(leaf, fi, key.offset -
orig_offset);
btrfs_set_file_extent_num_bytes(leaf, fi, extent_end - split);
btrfs_set_item_key_safe(trans, root, path, &key);
extent_end = split;
}
......@@ -793,7 +769,8 @@ again:
struct btrfs_file_extent_item);
key.offset = split;
btrfs_set_item_key_safe(trans, root, path, &key);
btrfs_set_file_extent_offset(leaf, fi, extent_offset);
btrfs_set_file_extent_offset(leaf, fi, key.offset -
orig_offset);
btrfs_set_file_extent_num_bytes(leaf, fi,
other_end - split);
goto done;
......@@ -815,10 +792,9 @@ again:
btrfs_mark_buffer_dirty(leaf);
orig_parent = leaf->start;
ret = btrfs_inc_extent_ref(trans, root, bytenr, num_bytes,
orig_parent, root->root_key.objectid,
trans->transid, inode->i_ino);
ret = btrfs_inc_extent_ref(trans, root, bytenr, num_bytes, 0,
root->root_key.objectid,
inode->i_ino, orig_offset);
BUG_ON(ret);
btrfs_release_path(root, path);
......@@ -833,20 +809,12 @@ again:
btrfs_set_file_extent_type(leaf, fi, extent_type);
btrfs_set_file_extent_disk_bytenr(leaf, fi, bytenr);
btrfs_set_file_extent_disk_num_bytes(leaf, fi, num_bytes);
btrfs_set_file_extent_offset(leaf, fi, extent_offset);
btrfs_set_file_extent_offset(leaf, fi, key.offset - orig_offset);
btrfs_set_file_extent_num_bytes(leaf, fi, extent_end - key.offset);
btrfs_set_file_extent_ram_bytes(leaf, fi, num_bytes);
btrfs_set_file_extent_compression(leaf, fi, 0);
btrfs_set_file_extent_encryption(leaf, fi, 0);
btrfs_set_file_extent_other_encoding(leaf, fi, 0);
if (orig_parent != leaf->start) {
ret = btrfs_update_extent_ref(trans, root, bytenr, num_bytes,
orig_parent, leaf->start,
root->root_key.objectid,
trans->transid, inode->i_ino);
BUG_ON(ret);
}
done:
btrfs_mark_buffer_dirty(leaf);
......@@ -1189,6 +1157,8 @@ int btrfs_sync_file(struct file *file, struct dentry *dentry, int datasync)
btrfs_wait_ordered_range(inode, 0, (u64)-1);
root->log_batch++;
if (datasync && !(inode->i_state & I_DIRTY_PAGES))
goto out;
/*
* ok we haven't committed the transaction yet, lets do a commit
*/
......
......@@ -579,6 +579,7 @@ out:
* it returns -enospc
*/
int btrfs_find_space_cluster(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct btrfs_block_group_cache *block_group,
struct btrfs_free_cluster *cluster,
u64 offset, u64 bytes, u64 empty_size)
......@@ -595,7 +596,9 @@ int btrfs_find_space_cluster(struct btrfs_trans_handle *trans,
int ret;
/* for metadata, allow allocates with more holes */
if (block_group->flags & BTRFS_BLOCK_GROUP_METADATA) {
if (btrfs_test_opt(root, SSD_SPREAD)) {
min_bytes = bytes + empty_size;
} else if (block_group->flags & BTRFS_BLOCK_GROUP_METADATA) {
/*
* we want to do larger allocations when we are
* flushing out the delayed refs, it helps prevent
......@@ -645,14 +648,15 @@ again:
* we haven't filled the empty size and the window is
* very large. reset and try again
*/
if (next->offset - window_start > (bytes + empty_size) * 2) {
if (next->offset - (last->offset + last->bytes) > 128 * 1024 ||
next->offset - window_start > (bytes + empty_size) * 2) {
entry = next;
window_start = entry->offset;
window_free = entry->bytes;
last = entry;
max_extent = 0;
total_retries++;
if (total_retries % 256 == 0) {
if (total_retries % 64 == 0) {
if (min_bytes >= (bytes + empty_size)) {
ret = -ENOSPC;
goto out;
......
......@@ -31,6 +31,7 @@ void btrfs_dump_free_space(struct btrfs_block_group_cache *block_group,
u64 bytes);
u64 btrfs_block_group_free_space(struct btrfs_block_group_cache *block_group);
int btrfs_find_space_cluster(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct btrfs_block_group_cache *block_group,
struct btrfs_free_cluster *cluster,
u64 offset, u64 bytes, u64 empty_size);
......
......@@ -19,9 +19,9 @@
#ifndef __HASH__
#define __HASH__
#include "crc32c.h"
#include <linux/crc32c.h>
static inline u64 btrfs_name_hash(const char *name, int len)
{
return btrfs_crc32c((u32)~1, name, len);
return crc32c((u32)~1, name, len);
}
#endif
......@@ -48,7 +48,6 @@
#include "ordered-data.h"
#include "xattr.h"
#include "tree-log.h"
#include "ref-cache.h"
#include "compression.h"
#include "locking.h"
......@@ -369,7 +368,7 @@ again:
* inode has not been flagged as nocompress. This flag can
* change at any time if we discover bad compression ratios.
*/
if (!btrfs_test_flag(inode, NOCOMPRESS) &&
if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NOCOMPRESS) &&
btrfs_test_opt(root, COMPRESS)) {
WARN_ON(pages);
pages = kzalloc(sizeof(struct page *) * nr_pages, GFP_NOFS);
......@@ -470,7 +469,7 @@ again:
nr_pages_ret = 0;
/* flag the file so we don't compress in the future */
btrfs_set_flag(inode, NOCOMPRESS);
BTRFS_I(inode)->flags |= BTRFS_INODE_NOCOMPRESS;
}
if (will_compress) {
*num_added += 1;
......@@ -863,7 +862,7 @@ static int cow_file_range_async(struct inode *inode, struct page *locked_page,
async_cow->locked_page = locked_page;
async_cow->start = start;
if (btrfs_test_flag(inode, NOCOMPRESS))
if (BTRFS_I(inode)->flags & BTRFS_INODE_NOCOMPRESS)
cur_end = end;
else
cur_end = min(end, start + 512 * 1024 - 1);
......@@ -944,6 +943,7 @@ static noinline int run_delalloc_nocow(struct inode *inode,
u64 cow_start;
u64 cur_offset;
u64 extent_end;
u64 extent_offset;
u64 disk_bytenr;
u64 num_bytes;
int extent_type;
......@@ -1005,6 +1005,7 @@ next_slot:
if (extent_type == BTRFS_FILE_EXTENT_REG ||
extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
extent_offset = btrfs_file_extent_offset(leaf, fi);
extent_end = found_key.offset +
btrfs_file_extent_num_bytes(leaf, fi);
if (extent_end <= start) {
......@@ -1022,9 +1023,10 @@ next_slot:
if (btrfs_extent_readonly(root, disk_bytenr))
goto out_check;
if (btrfs_cross_ref_exist(trans, root, inode->i_ino,
disk_bytenr))
found_key.offset -
extent_offset, disk_bytenr))
goto out_check;
disk_bytenr += btrfs_file_extent_offset(leaf, fi);
disk_bytenr += extent_offset;
disk_bytenr += cur_offset - found_key.offset;
num_bytes = min(end + 1, extent_end) - cur_offset;
/*
......@@ -1131,10 +1133,10 @@ static int run_delalloc_range(struct inode *inode, struct page *locked_page,
int ret;
struct btrfs_root *root = BTRFS_I(inode)->root;
if (btrfs_test_flag(inode, NODATACOW))
if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW)
ret = run_delalloc_nocow(inode, locked_page, start, end,
page_started, 1, nr_written);
else if (btrfs_test_flag(inode, PREALLOC))
else if (BTRFS_I(inode)->flags & BTRFS_INODE_PREALLOC)
ret = run_delalloc_nocow(inode, locked_page, start, end,
page_started, 0, nr_written);
else if (!btrfs_test_opt(root, COMPRESS))
......@@ -1288,7 +1290,7 @@ static int btrfs_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
int ret = 0;
int skip_sum;
skip_sum = btrfs_test_flag(inode, NODATASUM);
skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;
ret = btrfs_bio_wq_end_io(root->fs_info, bio, 0);
BUG_ON(ret);
......@@ -1489,9 +1491,9 @@ static int insert_reserved_file_extent(struct btrfs_trans_handle *trans,
ins.objectid = disk_bytenr;
ins.offset = disk_num_bytes;
ins.type = BTRFS_EXTENT_ITEM_KEY;
ret = btrfs_alloc_reserved_extent(trans, root, leaf->start,
root->root_key.objectid,
trans->transid, inode->i_ino, &ins);
ret = btrfs_alloc_reserved_file_extent(trans, root,
root->root_key.objectid,
inode->i_ino, file_pos, &ins);
BUG_ON(ret);
btrfs_free_path(path);
......@@ -1788,7 +1790,8 @@ static int btrfs_readpage_end_io_hook(struct page *page, u64 start, u64 end,
ClearPageChecked(page);
goto good;
}
if (btrfs_test_flag(inode, NODATASUM))
if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)
return 0;
if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID &&
......@@ -1956,23 +1959,13 @@ void btrfs_orphan_cleanup(struct btrfs_root *root)
* crossing root thing. we store the inode number in the
* offset of the orphan item.
*/
inode = btrfs_iget_locked(root->fs_info->sb,
found_key.offset, root);
if (!inode)
found_key.objectid = found_key.offset;
found_key.type = BTRFS_INODE_ITEM_KEY;
found_key.offset = 0;
inode = btrfs_iget(root->fs_info->sb, &found_key, root);
if (IS_ERR(inode))
break;
if (inode->i_state & I_NEW) {
BTRFS_I(inode)->root = root;
/* have to set the location manually */
BTRFS_I(inode)->location.objectid = inode->i_ino;
BTRFS_I(inode)->location.type = BTRFS_INODE_ITEM_KEY;
BTRFS_I(inode)->location.offset = 0;
btrfs_read_locked_inode(inode);
unlock_new_inode(inode);
}
/*
* add this inode to the orphan list so btrfs_orphan_del does
* the proper thing when we hit it
......@@ -2069,7 +2062,7 @@ static noinline int acls_after_inode_item(struct extent_buffer *leaf,
/*
* read an inode from the btree into the in-memory inode
*/
void btrfs_read_locked_inode(struct inode *inode)
static void btrfs_read_locked_inode(struct inode *inode)
{
struct btrfs_path *path;
struct extent_buffer *leaf;
......@@ -2164,6 +2157,8 @@ void btrfs_read_locked_inode(struct inode *inode)
init_special_inode(inode, inode->i_mode, rdev);
break;
}
btrfs_update_iflags(inode);
return;
make_bad:
......@@ -2599,9 +2594,8 @@ noinline int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
struct btrfs_file_extent_item *fi;
u64 extent_start = 0;
u64 extent_num_bytes = 0;
u64 extent_offset = 0;
u64 item_end = 0;
u64 root_gen = 0;
u64 root_owner = 0;
int found_extent;
int del_item;
int pending_del_nr = 0;
......@@ -2716,6 +2710,9 @@ search_again:
extent_num_bytes =
btrfs_file_extent_disk_num_bytes(leaf,
fi);
extent_offset = found_key.offset -
btrfs_file_extent_offset(leaf, fi);
/* FIXME blocksize != 4096 */
num_dec = btrfs_file_extent_num_bytes(leaf, fi);
if (extent_start != 0) {
......@@ -2723,8 +2720,6 @@ search_again:
if (root->ref_cows)
inode_sub_bytes(inode, num_dec);
}
root_gen = btrfs_header_generation(leaf);
root_owner = btrfs_header_owner(leaf);
}
} else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
/*
......@@ -2768,12 +2763,12 @@ delete:
} else {
break;
}
if (found_extent) {
if (found_extent && root->ref_cows) {
btrfs_set_path_blocking(path);
ret = btrfs_free_extent(trans, root, extent_start,
extent_num_bytes,
leaf->start, root_owner,
root_gen, inode->i_ino, 0);
extent_num_bytes, 0,
btrfs_header_owner(leaf),
inode->i_ino, extent_offset);
BUG_ON(ret);
}
next:
......@@ -3105,6 +3100,45 @@ static int fixup_tree_root_location(struct btrfs_root *root,
return 0;
}
static void inode_tree_add(struct inode *inode)
{
struct btrfs_root *root = BTRFS_I(inode)->root;
struct btrfs_inode *entry;
struct rb_node **p = &root->inode_tree.rb_node;
struct rb_node *parent = NULL;
spin_lock(&root->inode_lock);
while (*p) {
parent = *p;
entry = rb_entry(parent, struct btrfs_inode, rb_node);
if (inode->i_ino < entry->vfs_inode.i_ino)
p = &(*p)->rb_left;
else if (inode->i_ino > entry->vfs_inode.i_ino)
p = &(*p)->rb_right;
else {
WARN_ON(!(entry->vfs_inode.i_state &
(I_WILL_FREE | I_FREEING | I_CLEAR)));
break;
}
}
rb_link_node(&BTRFS_I(inode)->rb_node, parent, p);
rb_insert_color(&BTRFS_I(inode)->rb_node, &root->inode_tree);
spin_unlock(&root->inode_lock);
}
static void inode_tree_del(struct inode *inode)
{
struct btrfs_root *root = BTRFS_I(inode)->root;
if (!RB_EMPTY_NODE(&BTRFS_I(inode)->rb_node)) {
spin_lock(&root->inode_lock);
rb_erase(&BTRFS_I(inode)->rb_node, &root->inode_tree);
spin_unlock(&root->inode_lock);
RB_CLEAR_NODE(&BTRFS_I(inode)->rb_node);
}
}
static noinline void init_btrfs_i(struct inode *inode)
{
struct btrfs_inode *bi = BTRFS_I(inode);
......@@ -3130,6 +3164,7 @@ static noinline void init_btrfs_i(struct inode *inode)
inode->i_mapping, GFP_NOFS);
INIT_LIST_HEAD(&BTRFS_I(inode)->delalloc_inodes);
INIT_LIST_HEAD(&BTRFS_I(inode)->ordered_operations);
RB_CLEAR_NODE(&BTRFS_I(inode)->rb_node);
btrfs_ordered_inode_tree_init(&BTRFS_I(inode)->ordered_tree);
mutex_init(&BTRFS_I(inode)->extent_mutex);
mutex_init(&BTRFS_I(inode)->log_mutex);
......@@ -3152,26 +3187,9 @@ static int btrfs_find_actor(struct inode *inode, void *opaque)
args->root == BTRFS_I(inode)->root;
}
struct inode *btrfs_ilookup(struct super_block *s, u64 objectid,
struct btrfs_root *root, int wait)
{
struct inode *inode;
struct btrfs_iget_args args;
args.ino = objectid;
args.root = root;
if (wait) {
inode = ilookup5(s, objectid, btrfs_find_actor,
(void *)&args);
} else {
inode = ilookup5_nowait(s, objectid, btrfs_find_actor,
(void *)&args);
}
return inode;
}
struct inode *btrfs_iget_locked(struct super_block *s, u64 objectid,
struct btrfs_root *root)
static struct inode *btrfs_iget_locked(struct super_block *s,
u64 objectid,
struct btrfs_root *root)
{
struct inode *inode;
struct btrfs_iget_args args;
......@@ -3188,24 +3206,21 @@ struct inode *btrfs_iget_locked(struct super_block *s, u64 objectid,
* Returns in *is_new if the inode was read from disk
*/
struct inode *btrfs_iget(struct super_block *s, struct btrfs_key *location,
struct btrfs_root *root, int *is_new)
struct btrfs_root *root)
{
struct inode *inode;
inode = btrfs_iget_locked(s, location->objectid, root);
if (!inode)
return ERR_PTR(-EACCES);
return ERR_PTR(-ENOMEM);
if (inode->i_state & I_NEW) {
BTRFS_I(inode)->root = root;
memcpy(&BTRFS_I(inode)->location, location, sizeof(*location));
btrfs_read_locked_inode(inode);
inode_tree_add(inode);
unlock_new_inode(inode);
if (is_new)
*is_new = 1;
} else {
if (is_new)
*is_new = 0;
}
return inode;
......@@ -3218,7 +3233,7 @@ struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry)
struct btrfs_root *root = bi->root;
struct btrfs_root *sub_root = root;
struct btrfs_key location;
int ret, new;
int ret;
if (dentry->d_name.len > BTRFS_NAME_LEN)
return ERR_PTR(-ENAMETOOLONG);
......@@ -3236,7 +3251,7 @@ struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry)
return ERR_PTR(ret);
if (ret > 0)
return ERR_PTR(-ENOENT);
inode = btrfs_iget(dir->i_sb, &location, sub_root, &new);
inode = btrfs_iget(dir->i_sb, &location, sub_root);
if (IS_ERR(inode))
return ERR_CAST(inode);
}
......@@ -3574,9 +3589,9 @@ static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans,
btrfs_find_block_group(root, 0, alloc_hint, owner);
if ((mode & S_IFREG)) {
if (btrfs_test_opt(root, NODATASUM))
btrfs_set_flag(inode, NODATASUM);
BTRFS_I(inode)->flags |= BTRFS_INODE_NODATASUM;
if (btrfs_test_opt(root, NODATACOW))
btrfs_set_flag(inode, NODATACOW);
BTRFS_I(inode)->flags |= BTRFS_INODE_NODATACOW;
}
key[0].objectid = objectid;
......@@ -3630,7 +3645,10 @@ static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans,
location->offset = 0;
btrfs_set_key_type(location, BTRFS_INODE_ITEM_KEY);
btrfs_inherit_iflags(inode, dir);
insert_inode_hash(inode);
inode_tree_add(inode);
return inode;
fail:
if (dir)
......@@ -4683,6 +4701,7 @@ void btrfs_destroy_inode(struct inode *inode)
btrfs_put_ordered_extent(ordered);
}
}
inode_tree_del(inode);
btrfs_drop_extent_cache(inode, 0, (u64)-1, 0);
kmem_cache_free(btrfs_inode_cachep, BTRFS_I(inode));
}
......@@ -5061,7 +5080,7 @@ static int prealloc_file_range(struct btrfs_trans_handle *trans,
out:
if (cur_offset > start) {
inode->i_ctime = CURRENT_TIME;
btrfs_set_flag(inode, PREALLOC);
BTRFS_I(inode)->flags |= BTRFS_INODE_PREALLOC;
if (!(mode & FALLOC_FL_KEEP_SIZE) &&
cur_offset > i_size_read(inode))
btrfs_i_size_write(inode, cur_offset);
......@@ -5182,7 +5201,7 @@ static int btrfs_set_page_dirty(struct page *page)
static int btrfs_permission(struct inode *inode, int mask)
{
if (btrfs_test_flag(inode, READONLY) && (mask & MAY_WRITE))
if ((BTRFS_I(inode)->flags & BTRFS_INODE_READONLY) && (mask & MAY_WRITE))
return -EACCES;
return generic_permission(inode, mask, btrfs_check_acl);
}
......
......@@ -50,7 +50,177 @@
#include "volumes.h"
#include "locking.h"
/* Mask out flags that are inappropriate for the given type of inode. */
static inline __u32 btrfs_mask_flags(umode_t mode, __u32 flags)
{
if (S_ISDIR(mode))
return flags;
else if (S_ISREG(mode))
return flags & ~FS_DIRSYNC_FL;
else
return flags & (FS_NODUMP_FL | FS_NOATIME_FL);
}
/*
* Export inode flags to the format expected by the FS_IOC_GETFLAGS ioctl.
*/
static unsigned int btrfs_flags_to_ioctl(unsigned int flags)
{
unsigned int iflags = 0;
if (flags & BTRFS_INODE_SYNC)
iflags |= FS_SYNC_FL;
if (flags & BTRFS_INODE_IMMUTABLE)
iflags |= FS_IMMUTABLE_FL;
if (flags & BTRFS_INODE_APPEND)
iflags |= FS_APPEND_FL;
if (flags & BTRFS_INODE_NODUMP)
iflags |= FS_NODUMP_FL;
if (flags & BTRFS_INODE_NOATIME)
iflags |= FS_NOATIME_FL;
if (flags & BTRFS_INODE_DIRSYNC)
iflags |= FS_DIRSYNC_FL;
return iflags;
}
/*
* Update inode->i_flags based on the btrfs internal flags.
*/
void btrfs_update_iflags(struct inode *inode)
{
struct btrfs_inode *ip = BTRFS_I(inode);
inode->i_flags &= ~(S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC);
if (ip->flags & BTRFS_INODE_SYNC)
inode->i_flags |= S_SYNC;
if (ip->flags & BTRFS_INODE_IMMUTABLE)
inode->i_flags |= S_IMMUTABLE;
if (ip->flags & BTRFS_INODE_APPEND)
inode->i_flags |= S_APPEND;
if (ip->flags & BTRFS_INODE_NOATIME)
inode->i_flags |= S_NOATIME;
if (ip->flags & BTRFS_INODE_DIRSYNC)
inode->i_flags |= S_DIRSYNC;
}
/*
* Inherit flags from the parent inode.
*
* Unlike extN we don't have any flags we don't want to inherit currently.
*/
void btrfs_inherit_iflags(struct inode *inode, struct inode *dir)
{
unsigned int flags;
if (!dir)
return;
flags = BTRFS_I(dir)->flags;
if (S_ISREG(inode->i_mode))
flags &= ~BTRFS_INODE_DIRSYNC;
else if (!S_ISDIR(inode->i_mode))
flags &= (BTRFS_INODE_NODUMP | BTRFS_INODE_NOATIME);
BTRFS_I(inode)->flags = flags;
btrfs_update_iflags(inode);
}
static int btrfs_ioctl_getflags(struct file *file, void __user *arg)
{
struct btrfs_inode *ip = BTRFS_I(file->f_path.dentry->d_inode);
unsigned int flags = btrfs_flags_to_ioctl(ip->flags);
if (copy_to_user(arg, &flags, sizeof(flags)))
return -EFAULT;
return 0;
}
static int btrfs_ioctl_setflags(struct file *file, void __user *arg)
{
struct inode *inode = file->f_path.dentry->d_inode;
struct btrfs_inode *ip = BTRFS_I(inode);
struct btrfs_root *root = ip->root;
struct btrfs_trans_handle *trans;
unsigned int flags, oldflags;
int ret;
if (copy_from_user(&flags, arg, sizeof(flags)))
return -EFAULT;
if (flags & ~(FS_IMMUTABLE_FL | FS_APPEND_FL | \
FS_NOATIME_FL | FS_NODUMP_FL | \
FS_SYNC_FL | FS_DIRSYNC_FL))
return -EOPNOTSUPP;
if (!is_owner_or_cap(inode))
return -EACCES;
mutex_lock(&inode->i_mutex);
flags = btrfs_mask_flags(inode->i_mode, flags);
oldflags = btrfs_flags_to_ioctl(ip->flags);
if ((flags ^ oldflags) & (FS_APPEND_FL | FS_IMMUTABLE_FL)) {
if (!capable(CAP_LINUX_IMMUTABLE)) {
ret = -EPERM;
goto out_unlock;
}
}
ret = mnt_want_write(file->f_path.mnt);
if (ret)
goto out_unlock;
if (flags & FS_SYNC_FL)
ip->flags |= BTRFS_INODE_SYNC;
else
ip->flags &= ~BTRFS_INODE_SYNC;
if (flags & FS_IMMUTABLE_FL)
ip->flags |= BTRFS_INODE_IMMUTABLE;
else
ip->flags &= ~BTRFS_INODE_IMMUTABLE;
if (flags & FS_APPEND_FL)
ip->flags |= BTRFS_INODE_APPEND;
else
ip->flags &= ~BTRFS_INODE_APPEND;
if (flags & FS_NODUMP_FL)
ip->flags |= BTRFS_INODE_NODUMP;
else
ip->flags &= ~BTRFS_INODE_NODUMP;
if (flags & FS_NOATIME_FL)
ip->flags |= BTRFS_INODE_NOATIME;
else
ip->flags &= ~BTRFS_INODE_NOATIME;
if (flags & FS_DIRSYNC_FL)
ip->flags |= BTRFS_INODE_DIRSYNC;
else
ip->flags &= ~BTRFS_INODE_DIRSYNC;
trans = btrfs_join_transaction(root, 1);
BUG_ON(!trans);
ret = btrfs_update_inode(trans, root, inode);
BUG_ON(ret);
btrfs_update_iflags(inode);
inode->i_ctime = CURRENT_TIME;
btrfs_end_transaction(trans, root);
mnt_drop_write(file->f_path.mnt);
out_unlock:
mutex_unlock(&inode->i_mutex);
return 0;
}
static int btrfs_ioctl_getversion(struct file *file, int __user *arg)
{
struct inode *inode = file->f_path.dentry->d_inode;
return put_user(inode->i_generation, arg);
}
static noinline int create_subvol(struct btrfs_root *root,
struct dentry *dentry,
......@@ -82,22 +252,25 @@ static noinline int create_subvol(struct btrfs_root *root,
if (ret)
goto fail;
leaf = btrfs_alloc_free_block(trans, root, root->leafsize, 0,
objectid, trans->transid, 0, 0, 0);
leaf = btrfs_alloc_free_block(trans, root, root->leafsize,
0, objectid, NULL, 0, 0, 0);
if (IS_ERR(leaf)) {
ret = PTR_ERR(leaf);
goto fail;
}
btrfs_set_header_nritems(leaf, 0);
btrfs_set_header_level(leaf, 0);
memset_extent_buffer(leaf, 0, 0, sizeof(struct btrfs_header));
btrfs_set_header_bytenr(leaf, leaf->start);
btrfs_set_header_generation(leaf, trans->transid);
btrfs_set_header_backref_rev(leaf, BTRFS_MIXED_BACKREF_REV);
btrfs_set_header_owner(leaf, objectid);
write_extent_buffer(leaf, root->fs_info->fsid,
(unsigned long)btrfs_header_fsid(leaf),
BTRFS_FSID_SIZE);
write_extent_buffer(leaf, root->fs_info->chunk_tree_uuid,
(unsigned long)btrfs_header_chunk_tree_uuid(leaf),
BTRFS_UUID_SIZE);
btrfs_mark_buffer_dirty(leaf);
inode_item = &root_item.inode;
......@@ -125,7 +298,7 @@ static noinline int create_subvol(struct btrfs_root *root,
btrfs_set_root_dirid(&root_item, new_dirid);
key.objectid = objectid;
key.offset = 1;
key.offset = 0;
btrfs_set_key_type(&key, BTRFS_ROOT_ITEM_KEY);
ret = btrfs_insert_root(trans, root->fs_info->tree_root, &key,
&root_item);
......@@ -911,10 +1084,10 @@ static long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,
if (disko) {
inode_add_bytes(inode, datal);
ret = btrfs_inc_extent_ref(trans, root,
disko, diskl, leaf->start,
root->root_key.objectid,
trans->transid,
inode->i_ino);
disko, diskl, 0,
root->root_key.objectid,
inode->i_ino,
new_key.offset - datao);
BUG_ON(ret);
}
} else if (type == BTRFS_FILE_EXTENT_INLINE) {
......@@ -1074,6 +1247,12 @@ long btrfs_ioctl(struct file *file, unsigned int
void __user *argp = (void __user *)arg;
switch (cmd) {
case FS_IOC_GETFLAGS:
return btrfs_ioctl_getflags(file, argp);
case FS_IOC_SETFLAGS:
return btrfs_ioctl_setflags(file, argp);
case FS_IOC_GETVERSION:
return btrfs_ioctl_getversion(file, argp);
case BTRFS_IOC_SNAP_CREATE:
return btrfs_ioctl_snap_create(file, argp, 0);
case BTRFS_IOC_SUBVOL_CREATE:
......
......@@ -45,22 +45,132 @@ static void print_dev_item(struct extent_buffer *eb,
(unsigned long long)btrfs_device_total_bytes(eb, dev_item),
(unsigned long long)btrfs_device_bytes_used(eb, dev_item));
}
static void print_extent_data_ref(struct extent_buffer *eb,
struct btrfs_extent_data_ref *ref)
{
printk(KERN_INFO "\t\textent data backref root %llu "
"objectid %llu offset %llu count %u\n",
(unsigned long long)btrfs_extent_data_ref_root(eb, ref),
(unsigned long long)btrfs_extent_data_ref_objectid(eb, ref),
(unsigned long long)btrfs_extent_data_ref_offset(eb, ref),
btrfs_extent_data_ref_count(eb, ref));
}
static void print_extent_item(struct extent_buffer *eb, int slot)
{
struct btrfs_extent_item *ei;
struct btrfs_extent_inline_ref *iref;
struct btrfs_extent_data_ref *dref;
struct btrfs_shared_data_ref *sref;
struct btrfs_disk_key key;
unsigned long end;
unsigned long ptr;
int type;
u32 item_size = btrfs_item_size_nr(eb, slot);
u64 flags;
u64 offset;
if (item_size < sizeof(*ei)) {
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
struct btrfs_extent_item_v0 *ei0;
BUG_ON(item_size != sizeof(*ei0));
ei0 = btrfs_item_ptr(eb, slot, struct btrfs_extent_item_v0);
printk(KERN_INFO "\t\textent refs %u\n",
btrfs_extent_refs_v0(eb, ei0));
return;
#else
BUG();
#endif
}
ei = btrfs_item_ptr(eb, slot, struct btrfs_extent_item);
flags = btrfs_extent_flags(eb, ei);
printk(KERN_INFO "\t\textent refs %llu gen %llu flags %llu\n",
(unsigned long long)btrfs_extent_refs(eb, ei),
(unsigned long long)btrfs_extent_generation(eb, ei),
(unsigned long long)flags);
if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
struct btrfs_tree_block_info *info;
info = (struct btrfs_tree_block_info *)(ei + 1);
btrfs_tree_block_key(eb, info, &key);
printk(KERN_INFO "\t\ttree block key (%llu %x %llu) "
"level %d\n",
(unsigned long long)btrfs_disk_key_objectid(&key),
key.type,
(unsigned long long)btrfs_disk_key_offset(&key),
btrfs_tree_block_level(eb, info));
iref = (struct btrfs_extent_inline_ref *)(info + 1);
} else {
iref = (struct btrfs_extent_inline_ref *)(ei + 1);
}
ptr = (unsigned long)iref;
end = (unsigned long)ei + item_size;
while (ptr < end) {
iref = (struct btrfs_extent_inline_ref *)ptr;
type = btrfs_extent_inline_ref_type(eb, iref);
offset = btrfs_extent_inline_ref_offset(eb, iref);
switch (type) {
case BTRFS_TREE_BLOCK_REF_KEY:
printk(KERN_INFO "\t\ttree block backref "
"root %llu\n", (unsigned long long)offset);
break;
case BTRFS_SHARED_BLOCK_REF_KEY:
printk(KERN_INFO "\t\tshared block backref "
"parent %llu\n", (unsigned long long)offset);
break;
case BTRFS_EXTENT_DATA_REF_KEY:
dref = (struct btrfs_extent_data_ref *)(&iref->offset);
print_extent_data_ref(eb, dref);
break;
case BTRFS_SHARED_DATA_REF_KEY:
sref = (struct btrfs_shared_data_ref *)(iref + 1);
printk(KERN_INFO "\t\tshared data backref "
"parent %llu count %u\n",
(unsigned long long)offset,
btrfs_shared_data_ref_count(eb, sref));
break;
default:
BUG();
}
ptr += btrfs_extent_inline_ref_size(type);
}
WARN_ON(ptr > end);
}
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
static void print_extent_ref_v0(struct extent_buffer *eb, int slot)
{
struct btrfs_extent_ref_v0 *ref0;
ref0 = btrfs_item_ptr(eb, slot, struct btrfs_extent_ref_v0);
printk("\t\textent back ref root %llu gen %llu "
"owner %llu num_refs %lu\n",
(unsigned long long)btrfs_ref_root_v0(eb, ref0),
(unsigned long long)btrfs_ref_generation_v0(eb, ref0),
(unsigned long long)btrfs_ref_objectid_v0(eb, ref0),
(unsigned long)btrfs_ref_count_v0(eb, ref0));
}
#endif
void btrfs_print_leaf(struct btrfs_root *root, struct extent_buffer *l)
{
int i;
u32 type;
u32 nr = btrfs_header_nritems(l);
struct btrfs_item *item;
struct btrfs_extent_item *ei;
struct btrfs_root_item *ri;
struct btrfs_dir_item *di;
struct btrfs_inode_item *ii;
struct btrfs_block_group_item *bi;
struct btrfs_file_extent_item *fi;
struct btrfs_extent_data_ref *dref;
struct btrfs_shared_data_ref *sref;
struct btrfs_dev_extent *dev_extent;
struct btrfs_key key;
struct btrfs_key found_key;
struct btrfs_extent_ref *ref;
struct btrfs_dev_extent *dev_extent;
u32 type;
printk(KERN_INFO "leaf %llu total ptrs %d free space %d\n",
(unsigned long long)btrfs_header_bytenr(l), nr,
......@@ -100,20 +210,25 @@ void btrfs_print_leaf(struct btrfs_root *root, struct extent_buffer *l)
btrfs_disk_root_refs(l, ri));
break;
case BTRFS_EXTENT_ITEM_KEY:
ei = btrfs_item_ptr(l, i, struct btrfs_extent_item);
printk(KERN_INFO "\t\textent data refs %u\n",
btrfs_extent_refs(l, ei));
break;
case BTRFS_EXTENT_REF_KEY:
ref = btrfs_item_ptr(l, i, struct btrfs_extent_ref);
printk(KERN_INFO "\t\textent back ref root %llu "
"gen %llu owner %llu num_refs %lu\n",
(unsigned long long)btrfs_ref_root(l, ref),
(unsigned long long)btrfs_ref_generation(l, ref),
(unsigned long long)btrfs_ref_objectid(l, ref),
(unsigned long)btrfs_ref_num_refs(l, ref));
print_extent_item(l, i);
break;
case BTRFS_TREE_BLOCK_REF_KEY:
printk(KERN_INFO "\t\ttree block backref\n");
break;
case BTRFS_SHARED_BLOCK_REF_KEY:
printk(KERN_INFO "\t\tshared block backref\n");
break;
case BTRFS_EXTENT_DATA_REF_KEY:
dref = btrfs_item_ptr(l, i,
struct btrfs_extent_data_ref);
print_extent_data_ref(l, dref);
break;
case BTRFS_SHARED_DATA_REF_KEY:
sref = btrfs_item_ptr(l, i,
struct btrfs_shared_data_ref);
printk(KERN_INFO "\t\tshared data backref count %u\n",
btrfs_shared_data_ref_count(l, sref));
break;
case BTRFS_EXTENT_DATA_KEY:
fi = btrfs_item_ptr(l, i,
struct btrfs_file_extent_item);
......@@ -139,6 +254,12 @@ void btrfs_print_leaf(struct btrfs_root *root, struct extent_buffer *l)
(unsigned long long)
btrfs_file_extent_ram_bytes(l, fi));
break;
case BTRFS_EXTENT_REF_V0_KEY:
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
print_extent_ref_v0(l, i);
#else
BUG();
#endif
case BTRFS_BLOCK_GROUP_ITEM_KEY:
bi = btrfs_item_ptr(l, i,
struct btrfs_block_group_item);
......
This diff is collapsed.
......@@ -111,6 +111,15 @@ out:
return ret;
}
int btrfs_set_root_node(struct btrfs_root_item *item,
struct extent_buffer *node)
{
btrfs_set_root_bytenr(item, node->start);
btrfs_set_root_level(item, btrfs_header_level(node));
btrfs_set_root_generation(item, btrfs_header_generation(node));
return 0;
}
/*
* copy the data in 'item' into the btree
*/
......@@ -164,8 +173,7 @@ int btrfs_insert_root(struct btrfs_trans_handle *trans, struct btrfs_root
* offset lower than the latest root. They need to be queued for deletion to
* finish what was happening when we crashed.
*/
int btrfs_find_dead_roots(struct btrfs_root *root, u64 objectid,
struct btrfs_root *latest)
int btrfs_find_dead_roots(struct btrfs_root *root, u64 objectid)
{
struct btrfs_root *dead_root;
struct btrfs_item *item;
......@@ -227,10 +235,7 @@ again:
goto err;
}
if (objectid == BTRFS_TREE_RELOC_OBJECTID)
ret = btrfs_add_dead_reloc_root(dead_root);
else
ret = btrfs_add_dead_root(dead_root, latest);
ret = btrfs_add_dead_root(dead_root);
if (ret)
goto err;
goto again;
......
......@@ -52,7 +52,6 @@
#include "export.h"
#include "compression.h"
static struct super_operations btrfs_super_ops;
static void btrfs_put_super(struct super_block *sb)
......@@ -67,8 +66,8 @@ static void btrfs_put_super(struct super_block *sb)
enum {
Opt_degraded, Opt_subvol, Opt_device, Opt_nodatasum, Opt_nodatacow,
Opt_max_extent, Opt_max_inline, Opt_alloc_start, Opt_nobarrier,
Opt_ssd, Opt_thread_pool, Opt_noacl, Opt_compress, Opt_notreelog,
Opt_ratio, Opt_flushoncommit, Opt_err,
Opt_ssd, Opt_nossd, Opt_ssd_spread, Opt_thread_pool, Opt_noacl,
Opt_compress, Opt_notreelog, Opt_ratio, Opt_flushoncommit, Opt_err,
};
static match_table_t tokens = {
......@@ -84,6 +83,8 @@ static match_table_t tokens = {
{Opt_thread_pool, "thread_pool=%d"},
{Opt_compress, "compress"},
{Opt_ssd, "ssd"},
{Opt_ssd_spread, "ssd_spread"},
{Opt_nossd, "nossd"},
{Opt_noacl, "noacl"},
{Opt_notreelog, "notreelog"},
{Opt_flushoncommit, "flushoncommit"},
......@@ -158,7 +159,7 @@ int btrfs_parse_options(struct btrfs_root *root, char *options)
*/
break;
case Opt_nodatasum:
printk(KERN_INFO "btrfs: setting nodatacsum\n");
printk(KERN_INFO "btrfs: setting nodatasum\n");
btrfs_set_opt(info->mount_opt, NODATASUM);
break;
case Opt_nodatacow:
......@@ -174,6 +175,19 @@ int btrfs_parse_options(struct btrfs_root *root, char *options)
printk(KERN_INFO "btrfs: use ssd allocation scheme\n");
btrfs_set_opt(info->mount_opt, SSD);
break;
case Opt_ssd_spread:
printk(KERN_INFO "btrfs: use spread ssd "
"allocation scheme\n");
btrfs_set_opt(info->mount_opt, SSD);
btrfs_set_opt(info->mount_opt, SSD_SPREAD);
break;
case Opt_nossd:
printk(KERN_INFO "btrfs: not using ssd allocation "
"scheme\n");
btrfs_set_opt(info->mount_opt, NOSSD);
btrfs_clear_opt(info->mount_opt, SSD);
btrfs_clear_opt(info->mount_opt, SSD_SPREAD);
break;
case Opt_nobarrier:
printk(KERN_INFO "btrfs: turning off barriers\n");
btrfs_set_opt(info->mount_opt, NOBARRIER);
......@@ -322,7 +336,7 @@ static int btrfs_fill_super(struct super_block *sb,
struct dentry *root_dentry;
struct btrfs_super_block *disk_super;
struct btrfs_root *tree_root;
struct btrfs_inode *bi;
struct btrfs_key key;
int err;
sb->s_maxbytes = MAX_LFS_FILESIZE;
......@@ -341,23 +355,15 @@ static int btrfs_fill_super(struct super_block *sb,
}
sb->s_fs_info = tree_root;
disk_super = &tree_root->fs_info->super_copy;
inode = btrfs_iget_locked(sb, BTRFS_FIRST_FREE_OBJECTID,
tree_root->fs_info->fs_root);
bi = BTRFS_I(inode);
bi->location.objectid = inode->i_ino;
bi->location.offset = 0;
bi->root = tree_root->fs_info->fs_root;
btrfs_set_key_type(&bi->location, BTRFS_INODE_ITEM_KEY);
if (!inode) {
err = -ENOMEM;
key.objectid = BTRFS_FIRST_FREE_OBJECTID;
key.type = BTRFS_INODE_ITEM_KEY;
key.offset = 0;
inode = btrfs_iget(sb, &key, tree_root->fs_info->fs_root);
if (IS_ERR(inode)) {
err = PTR_ERR(inode);
goto fail_close;
}
if (inode->i_state & I_NEW) {
btrfs_read_locked_inode(inode);
unlock_new_inode(inode);
}
root_dentry = d_alloc_root(inode);
if (!root_dentry) {
......@@ -433,7 +439,11 @@ static int btrfs_show_options(struct seq_file *seq, struct vfsmount *vfs)
seq_printf(seq, ",thread_pool=%d", info->thread_pool_size);
if (btrfs_test_opt(root, COMPRESS))
seq_puts(seq, ",compress");
if (btrfs_test_opt(root, SSD))
if (btrfs_test_opt(root, NOSSD))
seq_puts(seq, ",nossd");
if (btrfs_test_opt(root, SSD_SPREAD))
seq_puts(seq, ",ssd_spread");
else if (btrfs_test_opt(root, SSD))
seq_puts(seq, ",ssd");
if (btrfs_test_opt(root, NOTREELOG))
seq_puts(seq, ",notreelog");
......@@ -584,7 +594,8 @@ static int btrfs_remount(struct super_block *sb, int *flags, char *data)
if (btrfs_super_log_root(&root->fs_info->super_copy) != 0)
return -EINVAL;
ret = btrfs_cleanup_reloc_trees(root);
/* recover relocation */
ret = btrfs_recover_relocation(root);
WARN_ON(ret);
ret = btrfs_cleanup_fs_roots(root->fs_info);
......
此差异已折叠。
......@@ -62,12 +62,6 @@ struct btrfs_pending_snapshot {
struct list_head list;
};
struct btrfs_dirty_root {
struct list_head list;
struct btrfs_root *root;
struct btrfs_root *latest_root;
};
static inline void btrfs_set_trans_block_group(struct btrfs_trans_handle *trans,
struct inode *inode)
{
......@@ -100,7 +94,8 @@ int btrfs_write_and_wait_transaction(struct btrfs_trans_handle *trans,
int btrfs_commit_tree_roots(struct btrfs_trans_handle *trans,
struct btrfs_root *root);
int btrfs_add_dead_root(struct btrfs_root *root, struct btrfs_root *latest);
int btrfs_add_dead_root(struct btrfs_root *root);
int btrfs_drop_dead_root(struct btrfs_root *root);
int btrfs_defrag_root(struct btrfs_root *root, int cacheonly);
int btrfs_clean_old_snapshots(struct btrfs_root *root);
int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
......@@ -108,7 +103,8 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
int btrfs_end_transaction_throttle(struct btrfs_trans_handle *trans,
struct btrfs_root *root);
void btrfs_throttle(struct btrfs_root *root);
int btrfs_record_root_in_trans(struct btrfs_root *root);
int btrfs_record_root_in_trans(struct btrfs_trans_handle *trans,
struct btrfs_root *root);
int btrfs_write_and_wait_marked_extents(struct btrfs_root *root,
struct extent_io_tree *dirty_pages);
#endif
This diff is collapsed.
This diff is collapsed.
......@@ -96,7 +96,12 @@ struct btrfs_fs_devices {
u64 rw_devices;
u64 total_rw_bytes;
struct block_device *latest_bdev;
/* all of the devices in the FS */
/* all of the devices in the FS, protected by a mutex
* so we can safely walk it to write out the supers without
* worrying about add/remove by the multi-device code
*/
struct mutex device_list_mutex;
struct list_head devices;
/* devices not currently being allocated */
......@@ -107,6 +112,11 @@ struct btrfs_fs_devices {
int seeding;
int opened;
/* set when we find or add a device that doesn't have the
* nonrot flag set
*/
int rotating;
};
struct btrfs_bio_stripe {
......