Commit f29135b5 authored by Linus Torvalds

Merge branch 'for-linus-4.9' of git://git.kernel.org/pub/scm/linux/kernel/git/mason/linux-btrfs

Pull btrfs updates from Chris Mason:
 "This is a big variety of fixes and cleanups.

  Liu Bo continues to fixup fuzzer related problems, and some of Josef's
  cleanups are prep for his bigger extent buffer changes (slated for
  v4.10)"

* 'for-linus-4.9' of git://git.kernel.org/pub/scm/linux/kernel/git/mason/linux-btrfs: (39 commits)
  Revert "btrfs: let btrfs_delete_unused_bgs() to clean relocated bgs"
  Btrfs: remove unnecessary btrfs_mark_buffer_dirty in split_leaf
  Btrfs: don't BUG() during drop snapshot
  btrfs: fix btrfs_no_printk stub helper
  Btrfs: memset to avoid stale content in btree leaf
  btrfs: parent_start initialization cleanup
  btrfs: Remove already completed TODO comment
  btrfs: Do not reassign count in btrfs_run_delayed_refs
  btrfs: fix a possible umount deadlock
  Btrfs: fix memory leak in do_walk_down
  btrfs: btrfs_debug should consume fs_info when DEBUG is not defined
  btrfs: convert send's verbose_printk to btrfs_debug
  btrfs: convert pr_* to btrfs_* where possible
  btrfs: convert printk(KERN_* to use pr_* calls
  btrfs: unsplit printed strings
  btrfs: clean the old superblocks before freeing the device
  Btrfs: kill BUG_ON in run_delayed_tree_ref
  Btrfs: don't leak reloc root nodes on error
  btrfs: squash lines for simple wrapper functions
  Btrfs: improve check_node to avoid reading corrupted nodes
  ...
......@@ -17,6 +17,7 @@
*/
#include <linux/vmalloc.h>
#include <linux/rbtree.h>
#include "ctree.h"
#include "disk-io.h"
#include "backref.h"
......@@ -34,6 +35,265 @@ struct extent_inode_elem {
struct extent_inode_elem *next;
};
/*
* ref_root is used as the root of the ref tree that holds a collection
* of unique references.
*/
struct ref_root {
struct rb_root rb_root;
/*
* unique_refs is the number of ref_nodes with a positive count stored in
* the tree. Even if a ref_node with a count greater than one is added,
* unique_refs will only increase by one.
*/
unsigned int unique_refs;
};
/* ref_node is used to store a unique reference in the ref tree. */
struct ref_node {
struct rb_node rb_node;
/* For NORMAL_REF, otherwise all these fields should be set to 0 */
u64 root_id;
u64 object_id;
u64 offset;
/* For SHARED_REF, otherwise parent field should be set to 0 */
u64 parent;
/* Ref to the ref_mod of btrfs_delayed_ref_node */
int ref_mod;
};
/* Dynamically allocate and initialize a ref_root */
static struct ref_root *ref_root_alloc(void)
{
struct ref_root *ref_tree;
ref_tree = kmalloc(sizeof(*ref_tree), GFP_NOFS);
if (!ref_tree)
return NULL;
ref_tree->rb_root = RB_ROOT;
ref_tree->unique_refs = 0;
return ref_tree;
}
/* Free all nodes in the ref tree, and reinit ref_root */
static void ref_root_fini(struct ref_root *ref_tree)
{
struct ref_node *node;
struct rb_node *next;
while ((next = rb_first(&ref_tree->rb_root)) != NULL) {
node = rb_entry(next, struct ref_node, rb_node);
rb_erase(next, &ref_tree->rb_root);
kfree(node);
}
ref_tree->rb_root = RB_ROOT;
ref_tree->unique_refs = 0;
}
static void ref_root_free(struct ref_root *ref_tree)
{
if (!ref_tree)
return;
ref_root_fini(ref_tree);
kfree(ref_tree);
}
/*
* Compare ref_nodes by (root_id, object_id, offset, parent)
*
* The function compares two ref_nodes a and b. It returns an integer less
* than, equal to, or greater than zero if a is found, respectively, to be
* less than, to equal, or to be greater than b.
*/
static int ref_node_cmp(struct ref_node *a, struct ref_node *b)
{
if (a->root_id < b->root_id)
return -1;
else if (a->root_id > b->root_id)
return 1;
if (a->object_id < b->object_id)
return -1;
else if (a->object_id > b->object_id)
return 1;
if (a->offset < b->offset)
return -1;
else if (a->offset > b->offset)
return 1;
if (a->parent < b->parent)
return -1;
else if (a->parent > b->parent)
return 1;
return 0;
}
/*
* Search for a ref_node with (root_id, object_id, offset, parent) in the tree
*
* if found, the pointer to the ref_node will be returned;
* if not found, NULL will be returned, and pos will point to the rb_node
* slot for insertion while pos_parent will point to pos's parent;
*/
static struct ref_node *__ref_tree_search(struct ref_root *ref_tree,
struct rb_node ***pos,
struct rb_node **pos_parent,
u64 root_id, u64 object_id,
u64 offset, u64 parent)
{
struct ref_node *cur = NULL;
struct ref_node entry;
int ret;
entry.root_id = root_id;
entry.object_id = object_id;
entry.offset = offset;
entry.parent = parent;
*pos = &ref_tree->rb_root.rb_node;
while (**pos) {
*pos_parent = **pos;
cur = rb_entry(*pos_parent, struct ref_node, rb_node);
ret = ref_node_cmp(cur, &entry);
if (ret > 0)
*pos = &(**pos)->rb_left;
else if (ret < 0)
*pos = &(**pos)->rb_right;
else
return cur;
}
return NULL;
}
/*
* Insert a ref_node into the ref tree
* @pos used to specify the position to insert
* @pos_parent used to specify pos's parent
*
* On success, return 0;
* if the ref_node already exists, return -EEXIST;
*/
static int ref_tree_insert(struct ref_root *ref_tree, struct rb_node **pos,
struct rb_node *pos_parent, struct ref_node *ins)
{
struct rb_node **p = NULL;
struct rb_node *parent = NULL;
struct ref_node *cur = NULL;
if (!pos) {
cur = __ref_tree_search(ref_tree, &p, &parent, ins->root_id,
ins->object_id, ins->offset,
ins->parent);
if (cur)
return -EEXIST;
} else {
p = pos;
parent = pos_parent;
}
rb_link_node(&ins->rb_node, parent, p);
rb_insert_color(&ins->rb_node, &ref_tree->rb_root);
return 0;
}
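
The search/insert split above follows the standard two-phase rbtree idiom: __ref_tree_search() returns the empty link (pos) and its parent, so ref_tree_insert() can attach the new node with rb_link_node()/rb_insert_color() without re-walking the tree. A minimal userspace sketch of the same pattern, using a plain unbalanced binary search tree in place of the kernel rbtree (all names here are illustrative, not from the commit):

#include <stdlib.h>

struct node { long key; struct node *left, *right; };

/* Phase 1: find the key, or the empty link where it would be inserted. */
static struct node *bst_search(struct node **root, long key,
			       struct node ***pos)
{
	*pos = root;
	while (**pos) {
		struct node *cur = **pos;

		if (key < cur->key)
			*pos = &cur->left;
		else if (key > cur->key)
			*pos = &cur->right;
		else
			return cur;		/* found */
	}
	return NULL;				/* *pos is the insertion slot */
}

/* Phase 2: link the new node at the slot found above, with no re-walk. */
static int bst_insert(struct node **root, struct node *ins)
{
	struct node **pos;

	if (bst_search(root, ins->key, &pos))
		return -1;			/* like -EEXIST above */
	ins->left = ins->right = NULL;
	*pos = ins;
	return 0;
}

int main(void)
{
	struct node *root = NULL;
	struct node a = { .key = 5 }, b = { .key = 3 }, dup = { .key = 5 };

	bst_insert(&root, &a);
	bst_insert(&root, &b);
	return bst_insert(&root, &dup) ? 0 : 1;	/* duplicate is rejected */
}
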
/* Erase and free ref_node, caller should update ref_root->unique_refs */
static void ref_tree_remove(struct ref_root *ref_tree, struct ref_node *node)
{
rb_erase(&node->rb_node, &ref_tree->rb_root);
kfree(node);
}
/*
* Update ref_root->unique_refs
*
* Call __ref_tree_search
* 1. if the ref_node doesn't exist, ref_tree_insert this node and update
*    ref_root->unique_refs:
*    if ref_node->ref_mod > 0, ref_root->unique_refs++;
*    if ref_node->ref_mod < 0, do nothing;
*
* 2. if the ref_node is found, save the original ref_node->ref_mod and
*    update ref_node->ref_mod.
*    if the new ref_node->ref_mod equals 0, call ref_tree_remove
*
*    according to origin_count and new_count, update ref_root->unique_refs:
* +----------------+--------------+-------------+
* |                |new_count <= 0|new_count > 0|
* +----------------+--------------+-------------+
* |origin_count < 0|      0       |      1      |
* +----------------+--------------+-------------+
* |origin_count > 0|     -1       |      0      |
* +----------------+--------------+-------------+
*
* In case of allocation failure, -ENOMEM is returned and the ref_tree stays
* unaltered.
* On success, return 0
*/
static int ref_tree_add(struct ref_root *ref_tree, u64 root_id, u64 object_id,
u64 offset, u64 parent, int count)
{
struct ref_node *node = NULL;
struct rb_node **pos = NULL;
struct rb_node *pos_parent = NULL;
int origin_count;
int ret;
if (!count)
return 0;
node = __ref_tree_search(ref_tree, &pos, &pos_parent, root_id,
object_id, offset, parent);
if (node == NULL) {
node = kmalloc(sizeof(*node), GFP_NOFS);
if (!node)
return -ENOMEM;
node->root_id = root_id;
node->object_id = object_id;
node->offset = offset;
node->parent = parent;
node->ref_mod = count;
ret = ref_tree_insert(ref_tree, pos, pos_parent, node);
ASSERT(!ret);
if (ret) {
kfree(node);
return ret;
}
ref_tree->unique_refs += node->ref_mod > 0 ? 1 : 0;
return 0;
}
origin_count = node->ref_mod;
node->ref_mod += count;
if (node->ref_mod > 0)
ref_tree->unique_refs += origin_count > 0 ? 0 : 1;
else if (node->ref_mod <= 0)
ref_tree->unique_refs += origin_count > 0 ? -1 : 0;
if (!node->ref_mod)
ref_tree_remove(ref_tree, node);
return 0;
}
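
To make the table above concrete, here is a small standalone C sketch (illustrative only, not kernel code) that replays the origin_count/new_count accounting for a single key, merging a few signed counts into one node the way ref_tree_add() does and tracking unique_refs:

#include <stdio.h>

/* Delta applied to unique_refs when a node's count goes from
 * origin_count to new_count -- the table in the comment above. */
static int unique_refs_delta(int origin_count, int new_count)
{
	if (new_count > 0)
		return origin_count > 0 ? 0 : 1;
	return origin_count > 0 ? -1 : 0;
}

int main(void)
{
	int count = 0, unique_refs = 0;
	int mods[] = { 2, -1, -1 };	/* add a ref twice, then drop it twice */
	size_t i;

	for (i = 0; i < sizeof(mods) / sizeof(mods[0]); i++) {
		int origin = count;

		count += mods[i];
		unique_refs += unique_refs_delta(origin, count);
		printf("count %d -> %d, unique_refs = %d\n",
		       origin, count, unique_refs);
	}
	return 0;	/* ends at count 0, unique_refs 0: node would be removed */
}
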
static int check_extent_in_eb(struct btrfs_key *key, struct extent_buffer *eb,
struct btrfs_file_extent_item *fi,
u64 extent_item_pos,
......@@ -390,8 +650,8 @@ static int __resolve_indirect_ref(struct btrfs_fs_info *fs_info,
/* root node has been locked, we can release @subvol_srcu safely here */
srcu_read_unlock(&fs_info->subvol_srcu, index);
pr_debug("search slot in root %llu (level %d, ref count %d) returned "
"%d for key (%llu %u %llu)\n",
btrfs_debug(fs_info,
"search slot in root %llu (level %d, ref count %d) returned %d for key (%llu %u %llu)",
ref->root_id, level, ref->count, ret,
ref->key_for_search.objectid, ref->key_for_search.type,
ref->key_for_search.offset);
......@@ -700,6 +960,7 @@ static int __add_delayed_refs(struct btrfs_delayed_ref_head *head, u64 seq,
static int __add_inline_refs(struct btrfs_fs_info *fs_info,
struct btrfs_path *path, u64 bytenr,
int *info_level, struct list_head *prefs,
struct ref_root *ref_tree,
u64 *total_refs, u64 inum)
{
int ret = 0;
......@@ -767,6 +1028,13 @@ static int __add_inline_refs(struct btrfs_fs_info *fs_info,
count = btrfs_shared_data_ref_count(leaf, sdref);
ret = __add_prelim_ref(prefs, 0, NULL, 0, offset,
bytenr, count, GFP_NOFS);
if (ref_tree) {
if (!ret)
ret = ref_tree_add(ref_tree, 0, 0, 0,
bytenr, count);
if (!ret && ref_tree->unique_refs > 1)
ret = BACKREF_FOUND_SHARED;
}
break;
}
case BTRFS_TREE_BLOCK_REF_KEY:
......@@ -794,6 +1062,15 @@ static int __add_inline_refs(struct btrfs_fs_info *fs_info,
root = btrfs_extent_data_ref_root(leaf, dref);
ret = __add_prelim_ref(prefs, root, &key, 0, 0,
bytenr, count, GFP_NOFS);
if (ref_tree) {
if (!ret)
ret = ref_tree_add(ref_tree, root,
key.objectid,
key.offset, 0,
count);
if (!ret && ref_tree->unique_refs > 1)
ret = BACKREF_FOUND_SHARED;
}
break;
}
default:
......@@ -812,7 +1089,8 @@ static int __add_inline_refs(struct btrfs_fs_info *fs_info,
*/
static int __add_keyed_refs(struct btrfs_fs_info *fs_info,
struct btrfs_path *path, u64 bytenr,
int info_level, struct list_head *prefs, u64 inum)
int info_level, struct list_head *prefs,
struct ref_root *ref_tree, u64 inum)
{
struct btrfs_root *extent_root = fs_info->extent_root;
int ret;
......@@ -855,6 +1133,13 @@ static int __add_keyed_refs(struct btrfs_fs_info *fs_info,
count = btrfs_shared_data_ref_count(leaf, sdref);
ret = __add_prelim_ref(prefs, 0, NULL, 0, key.offset,
bytenr, count, GFP_NOFS);
if (ref_tree) {
if (!ret)
ret = ref_tree_add(ref_tree, 0, 0, 0,
bytenr, count);
if (!ret && ref_tree->unique_refs > 1)
ret = BACKREF_FOUND_SHARED;
}
break;
}
case BTRFS_TREE_BLOCK_REF_KEY:
......@@ -883,6 +1168,15 @@ static int __add_keyed_refs(struct btrfs_fs_info *fs_info,
root = btrfs_extent_data_ref_root(leaf, dref);
ret = __add_prelim_ref(prefs, root, &key, 0, 0,
bytenr, count, GFP_NOFS);
if (ref_tree) {
if (!ret)
ret = ref_tree_add(ref_tree, root,
key.objectid,
key.offset, 0,
count);
if (!ret && ref_tree->unique_refs > 1)
ret = BACKREF_FOUND_SHARED;
}
break;
}
default:
......@@ -909,13 +1203,16 @@ static int __add_keyed_refs(struct btrfs_fs_info *fs_info,
* commit root.
* The special case is for qgroup to search roots in commit_transaction().
*
* If check_shared is set to 1 and any extent has more than one ref item,
* BACKREF_FOUND_SHARED will be returned immediately.
*
* FIXME some caching might speed things up
*/
static int find_parent_nodes(struct btrfs_trans_handle *trans,
struct btrfs_fs_info *fs_info, u64 bytenr,
u64 time_seq, struct ulist *refs,
struct ulist *roots, const u64 *extent_item_pos,
u64 root_objectid, u64 inum)
u64 root_objectid, u64 inum, int check_shared)
{
struct btrfs_key key;
struct btrfs_path *path;
......@@ -927,6 +1224,7 @@ static int find_parent_nodes(struct btrfs_trans_handle *trans,
struct list_head prefs;
struct __prelim_ref *ref;
struct extent_inode_elem *eie = NULL;
struct ref_root *ref_tree = NULL;
u64 total_refs = 0;
INIT_LIST_HEAD(&prefs);
......@@ -958,6 +1256,18 @@ static int find_parent_nodes(struct btrfs_trans_handle *trans,
again:
head = NULL;
if (check_shared) {
if (!ref_tree) {
ref_tree = ref_root_alloc();
if (!ref_tree) {
ret = -ENOMEM;
goto out;
}
} else {
ref_root_fini(ref_tree);
}
}
ret = btrfs_search_slot(trans, fs_info->extent_root, &key, path, 0, 0);
if (ret < 0)
goto out;
......@@ -1002,6 +1312,36 @@ static int find_parent_nodes(struct btrfs_trans_handle *trans,
} else {
spin_unlock(&delayed_refs->lock);
}
if (check_shared && !list_empty(&prefs_delayed)) {
/*
* Add all the delayed refs to the ref_tree and check whether
* multiple ref items were added.
*/
list_for_each_entry(ref, &prefs_delayed, list) {
if (ref->key_for_search.type) {
ret = ref_tree_add(ref_tree,
ref->root_id,
ref->key_for_search.objectid,
ref->key_for_search.offset,
0, ref->count);
if (ret)
goto out;
} else {
ret = ref_tree_add(ref_tree, 0, 0, 0,
ref->parent, ref->count);
if (ret)
goto out;
}
}
if (ref_tree->unique_refs > 1) {
ret = BACKREF_FOUND_SHARED;
goto out;
}
}
}
if (path->slots[0]) {
......@@ -1017,11 +1357,13 @@ static int find_parent_nodes(struct btrfs_trans_handle *trans,
key.type == BTRFS_METADATA_ITEM_KEY)) {
ret = __add_inline_refs(fs_info, path, bytenr,
&info_level, &prefs,
&total_refs, inum);
ref_tree, &total_refs,
inum);
if (ret)
goto out;
ret = __add_keyed_refs(fs_info, path, bytenr,
info_level, &prefs, inum);
info_level, &prefs,
ref_tree, inum);
if (ret)
goto out;
}
......@@ -1106,6 +1448,7 @@ static int find_parent_nodes(struct btrfs_trans_handle *trans,
out:
btrfs_free_path(path);
ref_root_free(ref_tree);
while (!list_empty(&prefs)) {
ref = list_first_entry(&prefs, struct __prelim_ref, list);
list_del(&ref->list);
......@@ -1159,8 +1502,8 @@ static int btrfs_find_all_leafs(struct btrfs_trans_handle *trans,
if (!*leafs)
return -ENOMEM;
ret = find_parent_nodes(trans, fs_info, bytenr,
time_seq, *leafs, NULL, extent_item_pos, 0, 0);
ret = find_parent_nodes(trans, fs_info, bytenr, time_seq,
*leafs, NULL, extent_item_pos, 0, 0, 0);
if (ret < 0 && ret != -ENOENT) {
free_leaf_list(*leafs);
return ret;
......@@ -1202,8 +1545,8 @@ static int __btrfs_find_all_roots(struct btrfs_trans_handle *trans,
ULIST_ITER_INIT(&uiter);
while (1) {
ret = find_parent_nodes(trans, fs_info, bytenr,
time_seq, tmp, *roots, NULL, 0, 0);
ret = find_parent_nodes(trans, fs_info, bytenr, time_seq,
tmp, *roots, NULL, 0, 0, 0);
if (ret < 0 && ret != -ENOENT) {
ulist_free(tmp);
ulist_free(*roots);
......@@ -1273,7 +1616,7 @@ int btrfs_check_shared(struct btrfs_trans_handle *trans,
ULIST_ITER_INIT(&uiter);
while (1) {
ret = find_parent_nodes(trans, fs_info, bytenr, elem.seq, tmp,
roots, NULL, root_objectid, inum);
roots, NULL, root_objectid, inum, 1);
if (ret == BACKREF_FOUND_SHARED) {
/* this is the only condition under which we return 1 */
ret = 1;
......@@ -1492,7 +1835,8 @@ int extent_from_logical(struct btrfs_fs_info *fs_info, u64 logical,
if (found_key->objectid > logical ||
found_key->objectid + size <= logical) {
pr_debug("logical %llu is not within any extent\n", logical);
btrfs_debug(fs_info,
"logical %llu is not within any extent", logical);
return -ENOENT;
}
......@@ -1503,8 +1847,8 @@ int extent_from_logical(struct btrfs_fs_info *fs_info, u64 logical,
ei = btrfs_item_ptr(eb, path->slots[0], struct btrfs_extent_item);
flags = btrfs_extent_flags(eb, ei);
pr_debug("logical %llu is at position %llu within the extent (%llu "
"EXTENT_ITEM %llu) flags %#llx size %u\n",
btrfs_debug(fs_info,
"logical %llu is at position %llu within the extent (%llu EXTENT_ITEM %llu) flags %#llx size %u",
logical, logical - found_key->objectid, found_key->objectid,
found_key->offset, flags, item_size);
......@@ -1625,21 +1969,24 @@ int tree_backref_for_extent(unsigned long *ptr, struct extent_buffer *eb,
return 0;
}
static int iterate_leaf_refs(struct extent_inode_elem *inode_list,
u64 root, u64 extent_item_objectid,
iterate_extent_inodes_t *iterate, void *ctx)
static int iterate_leaf_refs(struct btrfs_fs_info *fs_info,
struct extent_inode_elem *inode_list,
u64 root, u64 extent_item_objectid,
iterate_extent_inodes_t *iterate, void *ctx)
{
struct extent_inode_elem *eie;
int ret = 0;
for (eie = inode_list; eie; eie = eie->next) {
pr_debug("ref for %llu resolved, key (%llu EXTEND_DATA %llu), "
"root %llu\n", extent_item_objectid,
eie->inum, eie->offset, root);
btrfs_debug(fs_info,
"ref for %llu resolved, key (%llu EXTEND_DATA %llu), root %llu",
extent_item_objectid, eie->inum,
eie->offset, root);
ret = iterate(eie->inum, eie->offset, root, ctx);
if (ret) {
pr_debug("stopping iteration for %llu due to ret=%d\n",
extent_item_objectid, ret);
btrfs_debug(fs_info,
"stopping iteration for %llu due to ret=%d",
extent_item_objectid, ret);
break;
}
}
......@@ -1667,7 +2014,7 @@ int iterate_extent_inodes(struct btrfs_fs_info *fs_info,
struct ulist_iterator ref_uiter;
struct ulist_iterator root_uiter;
pr_debug("resolving all inodes for extent %llu\n",
btrfs_debug(fs_info, "resolving all inodes for extent %llu",
extent_item_objectid);
if (!search_commit_root) {
......@@ -1693,10 +2040,12 @@ int iterate_extent_inodes(struct btrfs_fs_info *fs_info,
break;
ULIST_ITER_INIT(&root_uiter);
while (!ret && (root_node = ulist_next(roots, &root_uiter))) {
pr_debug("root %llu references leaf %llu, data list "
"%#llx\n", root_node->val, ref_node->val,
ref_node->aux);
ret = iterate_leaf_refs((struct extent_inode_elem *)
btrfs_debug(fs_info,
"root %llu references leaf %llu, data list %#llx",
root_node->val, ref_node->val,
ref_node->aux);
ret = iterate_leaf_refs(fs_info,
(struct extent_inode_elem *)
(uintptr_t)ref_node->aux,
root_node->val,
extent_item_objectid,
......@@ -1792,9 +2141,9 @@ static int iterate_inode_refs(u64 inum, struct btrfs_root *fs_root,
for (cur = 0; cur < btrfs_item_size(eb, item); cur += len) {
name_len = btrfs_inode_ref_name_len(eb, iref);
/* path must be released before calling iterate()! */
pr_debug("following ref at offset %u for inode %llu in "
"tree %llu\n", cur, found_key.objectid,
fs_root->objectid);
btrfs_debug(fs_root->fs_info,
"following ref at offset %u for inode %llu in tree %llu",
cur, found_key.objectid, fs_root->objectid);
ret = iterate(parent, name_len,
(unsigned long)(iref + 1), eb, ctx);
if (ret)
......
......@@ -44,17 +44,6 @@
#define BTRFS_INODE_IN_DELALLOC_LIST 9
#define BTRFS_INODE_READDIO_NEED_LOCK 10
#define BTRFS_INODE_HAS_PROPS 11
/*
* The following 3 bits are meant only for the btree inode.
* When any of them is set, it means an error happened while writing an
* extent buffer belonging to:
* 1) a non-log btree
* 2) a log btree and first log sub-transaction
* 3) a log btree and second log sub-transaction
*/
#define BTRFS_INODE_BTREE_ERR 12
#define BTRFS_INODE_BTREE_LOG1_ERR 13
#define BTRFS_INODE_BTREE_LOG2_ERR 14
/* in memory btrfs inode */
struct btrfs_inode {
......
This diff has been collapsed.
......@@ -783,8 +783,7 @@ void __init btrfs_init_compress(void)
*/
workspace = btrfs_compress_op[i]->alloc_workspace();
if (IS_ERR(workspace)) {
printk(KERN_WARNING
"BTRFS: cannot preallocate compression workspace, will try later");
pr_warn("BTRFS: cannot preallocate compression workspace, will try later\n");
} else {
atomic_set(&btrfs_comp_ws[i].total_ws, 1);
btrfs_comp_ws[i].free_ws = 1;
......@@ -854,8 +853,7 @@ static struct list_head *find_workspace(int type)
/* no burst */ 1);
if (__ratelimit(&_rs)) {
printk(KERN_WARNING
"no compression workspaces, low memory, retrying");
pr_warn("BTRFS: no compression workspaces, low memory, retrying\n");
}
}
goto again;
......
......@@ -45,9 +45,7 @@ static int tree_mod_log_free_eb(struct btrfs_fs_info *fs_info,
struct btrfs_path *btrfs_alloc_path(void)
{
struct btrfs_path *path;
path = kmem_cache_zalloc(btrfs_path_cachep, GFP_NOFS);
return path;
return kmem_cache_zalloc(btrfs_path_cachep, GFP_NOFS);
}
/*
......@@ -1102,7 +1100,7 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
int level, ret;
int last_ref = 0;
int unlock_orig = 0;
u64 parent_start;
u64 parent_start = 0;
if (*cow_ret == buf)
unlock_orig = 1;
......@@ -1121,13 +1119,8 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
else
btrfs_node_key(buf, &disk_key, 0);
if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
if (parent)
parent_start = parent->start;
else
parent_start = 0;
} else
parent_start = 0;
if ((root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) && parent)
parent_start = parent->start;
cow = btrfs_alloc_tree_block(trans, root, parent_start,
root->root_key.objectid, &disk_key, level,
......@@ -1170,8 +1163,6 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
parent_start = buf->start;
else
parent_start = 0;
extent_buffer_get(cow);
tree_mod_log_set_root_pointer(root, cow, 1);
......@@ -1182,11 +1173,6 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
free_extent_buffer(buf);
add_root_to_dirty_list(root);
} else {
if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
parent_start = parent->start;
else
parent_start = 0;
WARN_ON(trans->transid != btrfs_header_generation(parent));
tree_mod_log_insert_key(root->fs_info, parent, parent_slot,
MOD_LOG_KEY_REPLACE, GFP_NOFS);
......@@ -1729,20 +1715,6 @@ int btrfs_realloc_node(struct btrfs_trans_handle *trans,
return err;
}
/*
* The leaf data grows from end-to-front in the node.
* this returns the address of the start of the last item,
* which is the stop of the leaf data stack
*/
static inline unsigned int leaf_data_end(struct btrfs_root *root,
struct extent_buffer *leaf)
{
u32 nr = btrfs_header_nritems(leaf);
if (nr == 0)
return BTRFS_LEAF_DATA_SIZE(root);
return btrfs_item_offset_nr(leaf, nr - 1);
}
/*
* search for key in the extent_buffer. The items start at offset p,
......@@ -2268,7 +2240,6 @@ static void reada_for_search(struct btrfs_root *root,
u64 search;
u64 target;
u64 nread = 0;
u64 gen;
struct extent_buffer *eb;
u32 nr;
u32 blocksize;
......@@ -2313,7 +2284,6 @@ static void reada_for_search(struct btrfs_root *root,
search = btrfs_node_blockptr(node, nr);
if ((search <= target && target - search <= 65536) ||
(search > target && search - target <= 65536)) {
gen = btrfs_node_ptr_generation(node, nr);
readahead_tree_block(root, search);
nread += blocksize;
}
......@@ -4341,7 +4311,11 @@ static noinline int split_leaf(struct btrfs_trans_handle *trans,
if (path->slots[1] == 0)
fixup_low_keys(fs_info, path, &disk_key, 1);
}
btrfs_mark_buffer_dirty(right);
/*
* We create a new leaf 'right' for the required ins_len and
* we'll do btrfs_mark_buffer_dirty() on this leaf after copying
* the content of ins_len to 'right'.
*/
return ret;
}
......@@ -4772,8 +4746,9 @@ void setup_items_for_insert(struct btrfs_root *root, struct btrfs_path *path,
if (btrfs_leaf_free_space(root, leaf) < total_size) {
btrfs_print_leaf(root, leaf);
btrfs_crit(root->fs_info, "not enough freespace need %u have %d",
total_size, btrfs_leaf_free_space(root, leaf));
btrfs_crit(root->fs_info,
"not enough freespace need %u have %d",
total_size, btrfs_leaf_free_space(root, leaf));
BUG();
}
......@@ -4782,8 +4757,9 @@ void setup_items_for_insert(struct btrfs_root *root, struct btrfs_path *path,
if (old_data < data_end) {
btrfs_print_leaf(root, leaf);
btrfs_crit(root->fs_info, "slot %d old_data %d data_end %d",
slot, old_data, data_end);
btrfs_crit(root->fs_info,
"slot %d old_data %d data_end %d",
slot, old_data, data_end);
BUG_ON(1);
}
/*
......@@ -4793,7 +4769,7 @@ void setup_items_for_insert(struct btrfs_root *root, struct btrfs_path *path,
for (i = slot; i < nritems; i++) {
u32 ioff;
item = btrfs_item_nr( i);
item = btrfs_item_nr(i);
ioff = btrfs_token_item_offset(leaf, item, &token);
btrfs_set_token_item_offset(leaf, item,
ioff - total_data, &token);
......
......@@ -37,6 +37,7 @@
#include <linux/workqueue.h>
#include <linux/security.h>
#include <linux/sizes.h>
#include <linux/dynamic_debug.h>
#include "extent_io.h"
#include "extent_map.h"
#include "async-thread.h"
......@@ -676,9 +677,25 @@ struct btrfs_device;
struct btrfs_fs_devices;
struct btrfs_balance_control;
struct btrfs_delayed_root;
#define BTRFS_FS_BARRIER 1
#define BTRFS_FS_CLOSING_START 2
#define BTRFS_FS_CLOSING_DONE 3
#define BTRFS_FS_LOG_RECOVERING 4
#define BTRFS_FS_OPEN 5
#define BTRFS_FS_QUOTA_ENABLED 6
#define BTRFS_FS_QUOTA_ENABLING 7
#define BTRFS_FS_QUOTA_DISABLING 8
#define BTRFS_FS_UPDATE_UUID_TREE_GEN 9
#define BTRFS_FS_CREATING_FREE_SPACE_TREE 10
#define BTRFS_FS_BTREE_ERR 11
#define BTRFS_FS_LOG1_ERR 12
#define BTRFS_FS_LOG2_ERR 13
struct btrfs_fs_info {
u8 fsid[BTRFS_FSID_SIZE];
u8 chunk_tree_uuid[BTRFS_UUID_SIZE];
unsigned long flags;
struct btrfs_root *extent_root;
struct btrfs_root *tree_root;
struct btrfs_root *chunk_root;
......@@ -907,10 +924,6 @@ struct btrfs_fs_info {
int thread_pool_size;
struct kobject *space_info_kobj;
int do_barriers;
int closing;
int log_root_recovering;
int open;
u64 total_pinned;
......@@ -987,17 +1000,6 @@ struct btrfs_fs_info {
#ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
u32 check_integrity_print_mask;
#endif
/*
* quota information
*/
unsigned int quota_enabled:1;
/*
* quota_enabled only changes state after a commit. This holds the
* next state.
*/
unsigned int pending_quota_state:1;
/* is qgroup tracking in a consistent state? */
u64 qgroup_flags;
......@@ -1061,7 +1063,6 @@ struct btrfs_fs_info {
wait_queue_head_t replace_wait;
struct semaphore uuid_tree_rescan_sem;
unsigned int update_uuid_tree_gen:1;
/* Used to reclaim the metadata space in the background. */
struct work_struct async_reclaim_work;
......@@ -1080,7 +1081,6 @@ struct btrfs_fs_info {
*/
struct list_head pinned_chunks;
int creating_free_space_tree;
/* Used to record internally whether fs has been frozen */
int fs_frozen;
};
......@@ -1435,13 +1435,13 @@ static inline void btrfs_init_map_token (struct btrfs_map_token *token)
#define cpu_to_le8(v) (v)
#define __le8 u8
#define read_eb_member(eb, ptr, type, member, result) ( \
#define read_eb_member(eb, ptr, type, member, result) (\
read_extent_buffer(eb, (char *)(result), \
((unsigned long)(ptr)) + \
offsetof(type, member), \
sizeof(((type *)0)->member)))
#define write_eb_member(eb, ptr, type, member, result) ( \
#define write_eb_member(eb, ptr, type, member, result) (\
write_extent_buffer(eb, (char *)(result), \
((unsigned long)(ptr)) + \
offsetof(type, member), \
......@@ -2293,6 +2293,21 @@ static inline unsigned long btrfs_leaf_data(struct extent_buffer *l)
return offsetof(struct btrfs_leaf, items);
}
/*
* The leaf data grows from end-to-front in the node.
* this returns the address of the start of the last item,
* which is the stop of the leaf data stack
*/
static inline unsigned int leaf_data_end(struct btrfs_root *root,
struct extent_buffer *leaf)
{
u32 nr = btrfs_header_nritems(leaf);
if (nr == 0)
return BTRFS_LEAF_DATA_SIZE(root);
return btrfs_item_offset_nr(leaf, nr - 1);
}
/* struct btrfs_file_extent_item */
BTRFS_SETGET_FUNCS(file_extent_type, struct btrfs_file_extent_item, type, 8);
BTRFS_SETGET_STACK_FUNCS(stack_file_extent_disk_bytenr,
......@@ -2867,10 +2882,14 @@ int btrfs_drop_subtree(struct btrfs_trans_handle *trans,
static inline int btrfs_fs_closing(struct btrfs_fs_info *fs_info)
{
/*
* Get synced with close_ctree()
* Do it this way so we only ever do one test_bit in the normal case.
*/
smp_mb();
return fs_info->closing;
if (test_bit(BTRFS_FS_CLOSING_START, &fs_info->flags)) {
if (test_bit(BTRFS_FS_CLOSING_DONE, &fs_info->flags))
return 2;
return 1;
}
return 0;
}
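
The rewritten helper folds the old fs_info->closing integer (0, 1 or 2) into two flag bits, so the common open-filesystem path costs a single test_bit(). A tiny userspace model of the same three-state encoding (flag values here are illustrative):

#include <stdio.h>

#define FS_CLOSING_START 0x1
#define FS_CLOSING_DONE  0x2

/* Mirrors btrfs_fs_closing(): one flag test in the common (open) case. */
static int fs_closing(unsigned long flags)
{
	if (flags & FS_CLOSING_START)
		return (flags & FS_CLOSING_DONE) ? 2 : 1;
	return 0;
}

int main(void)
{
	printf("%d %d %d\n",
	       fs_closing(0),
	       fs_closing(FS_CLOSING_START),
	       fs_closing(FS_CLOSING_START | FS_CLOSING_DONE));
	return 0;	/* prints: 0 1 2 */
}
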
/*
......@@ -3118,7 +3137,7 @@ int btrfs_start_delalloc_inodes(struct btrfs_root *root, int delay_iput);
int btrfs_start_delalloc_roots(struct btrfs_fs_info *fs_info, int delay_iput,
int nr);
int btrfs_set_extent_delalloc(struct inode *inode, u64 start, u64 end,
struct extent_state **cached_state);
struct extent_state **cached_state, int dedupe);
int btrfs_create_subvol_root(struct btrfs_trans_handle *trans,
struct btrfs_root *new_root,
struct btrfs_root *parent_root,
......@@ -3236,14 +3255,17 @@ int btrfs_parse_options(struct btrfs_root *root, char *options,
unsigned long new_flags);
int btrfs_sync_fs(struct super_block *sb, int wait);
static inline __printf(2, 3)
void btrfs_no_printk(const struct btrfs_fs_info *fs_info, const char *fmt, ...)
{
}
#ifdef CONFIG_PRINTK
__printf(2, 3)
void btrfs_printk(const struct btrfs_fs_info *fs_info, const char *fmt, ...);
#else
static inline __printf(2, 3)
void btrfs_printk(const struct btrfs_fs_info *fs_info, const char *fmt, ...)
{
}
#define btrfs_printk(fs_info, fmt, args...) \
btrfs_no_printk(fs_info, fmt, ##args)
#endif
#define btrfs_emerg(fs_info, fmt, args...) \
......@@ -3314,7 +3336,35 @@ void btrfs_printk(const struct btrfs_fs_info *fs_info, const char *fmt, ...)
btrfs_printk_ratelimited(fs_info, KERN_NOTICE fmt, ##args)
#define btrfs_info_rl(fs_info, fmt, args...) \
btrfs_printk_ratelimited(fs_info, KERN_INFO fmt, ##args)
#ifdef DEBUG
#if defined(CONFIG_DYNAMIC_DEBUG)
#define btrfs_debug(fs_info, fmt, args...) \
do { \
DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt); \
if (unlikely(descriptor.flags & _DPRINTK_FLAGS_PRINT)) \
btrfs_printk(fs_info, KERN_DEBUG fmt, ##args); \
} while (0)
#define btrfs_debug_in_rcu(fs_info, fmt, args...) \
do { \
DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt); \
if (unlikely(descriptor.flags & _DPRINTK_FLAGS_PRINT)) \
btrfs_printk_in_rcu(fs_info, KERN_DEBUG fmt, ##args); \
} while (0)
#define btrfs_debug_rl_in_rcu(fs_info, fmt, args...) \
do { \
DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt); \
if (unlikely(descriptor.flags & _DPRINTK_FLAGS_PRINT)) \
btrfs_printk_rl_in_rcu(fs_info, KERN_DEBUG fmt, \
##args);\
} while (0)
#define btrfs_debug_rl(fs_info, fmt, args...) \
do { \
DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt); \
if (unlikely(descriptor.flags & _DPRINTK_FLAGS_PRINT)) \
btrfs_printk_ratelimited(fs_info, KERN_DEBUG fmt, \
##args); \
} while (0)
#elif defined(DEBUG)
#define btrfs_debug(fs_info, fmt, args...) \
btrfs_printk(fs_info, KERN_DEBUG fmt, ##args)
#define btrfs_debug_in_rcu(fs_info, fmt, args...) \
......@@ -3325,13 +3375,13 @@ void btrfs_printk(const struct btrfs_fs_info *fs_info, const char *fmt, ...)
btrfs_printk_ratelimited(fs_info, KERN_DEBUG fmt, ##args)
#else
#define btrfs_debug(fs_info, fmt, args...) \
no_printk(KERN_DEBUG fmt, ##args)
btrfs_no_printk(fs_info, KERN_DEBUG fmt, ##args)
#define btrfs_debug_in_rcu(fs_info, fmt, args...) \
no_printk(KERN_DEBUG fmt, ##args)
btrfs_no_printk(fs_info, KERN_DEBUG fmt, ##args)
#define btrfs_debug_rl_in_rcu(fs_info, fmt, args...) \
no_printk(KERN_DEBUG fmt, ##args)
btrfs_no_printk(fs_info, KERN_DEBUG fmt, ##args)
#define btrfs_debug_rl(fs_info, fmt, args...) \
no_printk(KERN_DEBUG fmt, ##args)
btrfs_no_printk(fs_info, KERN_DEBUG fmt, ##args)
#endif
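
Conceptually, the CONFIG_DYNAMIC_DEBUG branch above gives each btrfs_debug() call site its own static descriptor whose flags can be flipped at runtime, so a disabled site costs one branch. A simplified userspace model of that idea (a sketch only; the kernel's real dynamic-debug machinery registers descriptors in a dedicated section and toggles them via debugfs):

#include <stdio.h>

/* Roughly what DEFINE_DYNAMIC_DEBUG_METADATA() creates per call site. */
struct ddebug_site { const char *fmt; unsigned int flags; };
#define DD_PRINT 0x1

#define my_debug(site, ...)					\
do {								\
	if ((site)->flags & DD_PRINT)				\
		fprintf(stderr, __VA_ARGS__);			\
} while (0)

int main(void)
{
	static struct ddebug_site site = { "demo", 0 };

	my_debug(&site, "silent by default\n");	/* no output */
	site.flags |= DD_PRINT;			/* the "echo ... +p" step */
	my_debug(&site, "now enabled\n");	/* prints */
	return 0;
}
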
#define btrfs_printk_in_rcu(fs_info, fmt, args...) \
......@@ -3362,7 +3412,7 @@ do { \
__cold
static inline void assfail(char *expr, char *file, int line)
{
pr_err("BTRFS: assertion failed: %s, file: %s, line: %d",
pr_err("assertion failed: %s, file: %s, line: %d\n",
expr, file, line);
BUG();
}
......
......@@ -385,11 +385,8 @@ static struct btrfs_delayed_item *__btrfs_lookup_delayed_insertion_item(
struct btrfs_delayed_node *delayed_node,
struct btrfs_key *key)
{
struct btrfs_delayed_item *item;
item = __btrfs_lookup_delayed_item(&delayed_node->ins_root, key,
return __btrfs_lookup_delayed_item(&delayed_node->ins_root, key,
NULL, NULL);
return item;
}
static int __btrfs_add_delayed_item(struct btrfs_delayed_node *delayed_node,
......@@ -1481,11 +1478,10 @@ int btrfs_insert_delayed_dir_index(struct btrfs_trans_handle *trans,
mutex_lock(&delayed_node->mutex);
ret = __btrfs_add_delayed_insertion_item(delayed_node, delayed_item);
if (unlikely(ret)) {
btrfs_err(root->fs_info, "err add delayed dir index item(name: %.*s) "
"into the insertion tree of the delayed node"
"(root id: %llu, inode id: %llu, errno: %d)",
name_len, name, delayed_node->root->objectid,
delayed_node->inode_id, ret);
btrfs_err(root->fs_info,
"err add delayed dir index item(name: %.*s) into the insertion tree of the delayed node(root id: %llu, inode id: %llu, errno: %d)",
name_len, name, delayed_node->root->objectid,
delayed_node->inode_id, ret);
BUG();
}
mutex_unlock(&delayed_node->mutex);
......@@ -1553,11 +1549,9 @@ int btrfs_delete_delayed_dir_index(struct btrfs_trans_handle *trans,
mutex_lock(&node->mutex);
ret = __btrfs_add_delayed_deletion_item(node, item);
if (unlikely(ret)) {
btrfs_err(root->fs_info, "err add delayed dir index item(index: %llu) "
"into the deletion tree of the delayed node"
"(root id: %llu, inode id: %llu, errno: %d)",
index, node->root->objectid, node->inode_id,
ret);
btrfs_err(root->fs_info,
"err add delayed dir index item(index: %llu) into the deletion tree of the delayed node(root id: %llu, inode id: %llu, errno: %d)",
index, node->root->objectid, node->inode_id, ret);
BUG();
}
mutex_unlock(&node->mutex);
......@@ -1874,7 +1868,8 @@ int btrfs_delayed_delete_inode_ref(struct inode *inode)
* leads to enospc problems. This means we also can't do
* delayed inode refs
*/
if (BTRFS_I(inode)->root->fs_info->log_root_recovering)
if (test_bit(BTRFS_FS_LOG_RECOVERING,
&BTRFS_I(inode)->root->fs_info->flags))
return -EAGAIN;
delayed_node = btrfs_get_or_create_delayed_node(inode);
......
......@@ -322,10 +322,11 @@ int btrfs_check_delayed_seq(struct btrfs_fs_info *fs_info,
elem = list_first_entry(&fs_info->tree_mod_seq_list,
struct seq_list, list);
if (seq >= elem->seq) {
pr_debug("holding back delayed_ref %#x.%x, lowest is %#x.%x (%p)\n",
(u32)(seq >> 32), (u32)seq,
(u32)(elem->seq >> 32), (u32)elem->seq,
delayed_refs);
btrfs_debug(fs_info,
"holding back delayed_ref %#x.%x, lowest is %#x.%x (%p)",
(u32)(seq >> 32), (u32)seq,
(u32)(elem->seq >> 32), (u32)elem->seq,
delayed_refs);
ret = 1;
}
}
......@@ -770,7 +771,8 @@ int btrfs_add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
if (!head_ref)
goto free_ref;
if (fs_info->quota_enabled && is_fstree(ref_root)) {
if (test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) &&
is_fstree(ref_root)) {
record = kmalloc(sizeof(*record), GFP_NOFS);
if (!record)
goto free_head_ref;
......@@ -828,7 +830,8 @@ int btrfs_add_delayed_data_ref(struct btrfs_fs_info *fs_info,
return -ENOMEM;
}
if (fs_info->quota_enabled && is_fstree(ref_root)) {
if (test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) &&
is_fstree(ref_root)) {
record = kmalloc(sizeof(*record), GFP_NOFS);
if (!record) {
kmem_cache_free(btrfs_delayed_data_ref_cachep, ref);
......
......@@ -218,8 +218,9 @@ int btrfs_run_dev_replace(struct btrfs_trans_handle *trans,
}
ret = btrfs_search_slot(trans, dev_root, &key, path, -1, 1);
if (ret < 0) {
btrfs_warn(fs_info, "error %d while searching for dev_replace item!",
ret);
btrfs_warn(fs_info,
"error %d while searching for dev_replace item!",
ret);
goto out;
}
......@@ -238,8 +239,9 @@ int btrfs_run_dev_replace(struct btrfs_trans_handle *trans,
*/
ret = btrfs_del_item(trans, dev_root, path);
if (ret != 0) {
btrfs_warn(fs_info, "delete too small dev_replace item failed %d!",
ret);
btrfs_warn(fs_info,
"delete too small dev_replace item failed %d!",
ret);
goto out;
}
ret = 1;
......@@ -251,8 +253,8 @@ int btrfs_run_dev_replace(struct btrfs_trans_handle *trans,
ret = btrfs_insert_empty_item(trans, dev_root, path,
&key, sizeof(*ptr));
if (ret < 0) {
btrfs_warn(fs_info, "insert dev_replace item failed %d!",
ret);
btrfs_warn(fs_info,
"insert dev_replace item failed %d!", ret);
goto out;
}
}
......@@ -383,7 +385,7 @@ int btrfs_dev_replace_start(struct btrfs_root *root, char *tgtdev_name,
ret = btrfs_sysfs_add_device_link(tgt_device->fs_devices, tgt_device);
if (ret)
btrfs_err(fs_info, "kobj add dev failed %d\n", ret);
btrfs_err(fs_info, "kobj add dev failed %d", ret);
btrfs_wait_ordered_roots(root->fs_info, -1, 0, (u64)-1);
......@@ -772,9 +774,10 @@ int btrfs_resume_dev_replace_async(struct btrfs_fs_info *fs_info)
break;
}
if (!dev_replace->tgtdev || !dev_replace->tgtdev->bdev) {
btrfs_info(fs_info, "cannot continue dev_replace, tgtdev is missing");
btrfs_info(fs_info,
"you may cancel the operation after 'mount -o degraded'");
"cannot continue dev_replace, tgtdev is missing");
btrfs_info(fs_info,
"you may cancel the operation after 'mount -o degraded'");
btrfs_dev_replace_unlock(dev_replace, 1);
return 0;
}
......
......@@ -472,9 +472,10 @@ int verify_dir_item(struct btrfs_root *root,
/* BTRFS_MAX_XATTR_SIZE is the same for all dir items */
if ((btrfs_dir_data_len(leaf, dir_item) +
btrfs_dir_name_len(leaf, dir_item)) > BTRFS_MAX_XATTR_SIZE(root)) {
btrfs_crit(root->fs_info, "invalid dir item name + data len: %u + %u",
(unsigned)btrfs_dir_name_len(leaf, dir_item),
(unsigned)btrfs_dir_data_len(leaf, dir_item));
btrfs_crit(root->fs_info,
"invalid dir item name + data len: %u + %u",
(unsigned)btrfs_dir_name_len(leaf, dir_item),
(unsigned)btrfs_dir_data_len(leaf, dir_item));
return 1;
}
......
......@@ -326,8 +326,7 @@ static int csum_tree_block(struct btrfs_fs_info *fs_info,
read_extent_buffer(buf, &val, 0, csum_size);
btrfs_warn_rl(fs_info,
"%s checksum verify failed on %llu wanted %X found %X "
"level %d",
"%s checksum verify failed on %llu wanted %X found %X level %d",
fs_info->sb->s_id, buf->start,
val, found, btrfs_header_level(buf));
if (result != (char *)&inline_result)
......@@ -402,7 +401,8 @@ static int verify_parent_transid(struct extent_io_tree *io_tree,
* Return 0 if the superblock checksum type matches the checksum value of that
* algorithm. Pass the raw disk superblock data.
*/
static int btrfs_check_super_csum(char *raw_disk_sb)
static int btrfs_check_super_csum(struct btrfs_fs_info *fs_info,
char *raw_disk_sb)
{
struct btrfs_super_block *disk_sb =
(struct btrfs_super_block *)raw_disk_sb;
......@@ -428,7 +428,7 @@ static int btrfs_check_super_csum(char *raw_disk_sb)
}
if (csum_type >= ARRAY_SIZE(btrfs_csum_sizes)) {
printk(KERN_ERR "BTRFS: unsupported checksum algorithm %u\n",
btrfs_err(fs_info, "unsupported checksum algorithm %u",
csum_type);
ret = 1;
}
......@@ -442,7 +442,7 @@ static int btrfs_check_super_csum(char *raw_disk_sb)
*/
static int btree_read_extent_buffer_pages(struct btrfs_root *root,
struct extent_buffer *eb,
u64 start, u64 parent_transid)
u64 parent_transid)
{
struct extent_io_tree *io_tree;
int failed = 0;
......@@ -454,8 +454,7 @@ static int btree_read_extent_buffer_pages(struct btrfs_root *root,
clear_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags);
io_tree = &BTRFS_I(root->fs_info->btree_inode)->io_tree;
while (1) {
ret = read_extent_buffer_pages(io_tree, eb, start,
WAIT_COMPLETE,
ret = read_extent_buffer_pages(io_tree, eb, WAIT_COMPLETE,
btree_get_extent, mirror_num);
if (!ret) {
if (!verify_parent_transid(io_tree, eb,
......@@ -547,9 +546,10 @@ static int check_tree_block_fsid(struct btrfs_fs_info *fs_info,
}
#define CORRUPT(reason, eb, root, slot) \
btrfs_crit(root->fs_info, "corrupt leaf, %s: block=%llu," \
"root=%llu, slot=%d", reason, \
btrfs_header_bytenr(eb), root->objectid, slot)
btrfs_crit(root->fs_info, "corrupt %s, %s: block=%llu," \
" root=%llu, slot=%d", \
btrfs_header_level(eb) == 0 ? "leaf" : "node",\
reason, btrfs_header_bytenr(eb), root->objectid, slot)
static noinline int check_leaf(struct btrfs_root *root,
struct extent_buffer *leaf)
......@@ -636,6 +636,10 @@ static noinline int check_leaf(struct btrfs_root *root,
static int check_node(struct btrfs_root *root, struct extent_buffer *node)
{
unsigned long nr = btrfs_header_nritems(node);
struct btrfs_key key, next_key;
int slot;
u64 bytenr;
int ret = 0;
if (nr == 0 || nr > BTRFS_NODEPTRS_PER_BLOCK(root)) {
btrfs_crit(root->fs_info,
......@@ -643,7 +647,26 @@ static int check_node(struct btrfs_root *root, struct extent_buffer *node)
node->start, root->objectid, nr);
return -EIO;
}
return 0;
for (slot = 0; slot < nr - 1; slot++) {
bytenr = btrfs_node_blockptr(node, slot);
btrfs_node_key_to_cpu(node, &key, slot);
btrfs_node_key_to_cpu(node, &next_key, slot + 1);
if (!bytenr) {
CORRUPT("invalid item slot", node, root, slot);
ret = -EIO;
goto out;
}
if (btrfs_comp_cpu_keys(&key, &next_key) >= 0) {
CORRUPT("bad key order", node, root, slot);
ret = -EIO;
goto out;
}
}
out:
return ret;
}
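
check_node() now also rejects nodes whose keys are not strictly increasing. btrfs keys order lexicographically by (objectid, type, offset), which is what btrfs_comp_cpu_keys() implements; a standalone sketch of that comparison and the adjacent-pair check (simplified types, illustrative names):

#include <stdint.h>
#include <stddef.h>

struct key { uint64_t objectid; uint8_t type; uint64_t offset; };

/* Lexicographic order on (objectid, type, offset). */
static int key_cmp(const struct key *a, const struct key *b)
{
	if (a->objectid != b->objectid)
		return a->objectid < b->objectid ? -1 : 1;
	if (a->type != b->type)
		return a->type < b->type ? -1 : 1;
	if (a->offset != b->offset)
		return a->offset < b->offset ? -1 : 1;
	return 0;
}

/* A node is corrupt if any adjacent key pair is not strictly increasing. */
static int keys_sorted(const struct key *keys, size_t nr)
{
	size_t i;

	for (i = 0; i + 1 < nr; i++)
		if (key_cmp(&keys[i], &keys[i + 1]) >= 0)
			return 0;
	return 1;
}

int main(void)
{
	struct key k[] = { { 1, 0, 0 }, { 1, 0, 5 }, { 2, 0, 0 } };

	return keys_sorted(k, 3) ? 0 : 1;	/* sorted: exits 0 */
}
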
static int btree_readpage_end_io_hook(struct btrfs_io_bio *io_bio,
......@@ -1132,7 +1155,7 @@ void readahead_tree_block(struct btrfs_root *root, u64 bytenr)
if (IS_ERR(buf))
return;
read_extent_buffer_pages(&BTRFS_I(btree_inode)->io_tree,
buf, 0, WAIT_NONE, btree_get_extent, 0);
buf, WAIT_NONE, btree_get_extent, 0);
free_extent_buffer(buf);
}
......@@ -1150,7 +1173,7 @@ int reada_tree_block_flagged(struct btrfs_root *root, u64 bytenr,
set_bit(EXTENT_BUFFER_READAHEAD, &buf->bflags);
ret = read_extent_buffer_pages(io_tree, buf, 0, WAIT_PAGE_LOCK,
ret = read_extent_buffer_pages(io_tree, buf, WAIT_PAGE_LOCK,
btree_get_extent, mirror_num);
if (ret) {
free_extent_buffer(buf);
......@@ -1206,7 +1229,7 @@ struct extent_buffer *read_tree_block(struct btrfs_root *root, u64 bytenr,
if (IS_ERR(buf))
return buf;
ret = btree_read_extent_buffer_pages(root, buf, 0, parent_transid);
ret = btree_read_extent_buffer_pages(root, buf, parent_transid);
if (ret) {
free_extent_buffer(buf);
return ERR_PTR(ret);
......@@ -1839,7 +1862,7 @@ static int cleaner_kthread(void *arg)
* Do not do anything if we might cause open_ctree() to block
* before we have finished mounting the filesystem.
*/
if (!root->fs_info->open)
if (!test_bit(BTRFS_FS_OPEN, &root->fs_info->flags))
goto sleep;
if (!mutex_trylock(&root->fs_info->cleaner_mutex))
......@@ -2332,8 +2355,6 @@ static void btrfs_init_qgroup(struct btrfs_fs_info *fs_info)
fs_info->qgroup_op_tree = RB_ROOT;
INIT_LIST_HEAD(&fs_info->dirty_qgroups);
fs_info->qgroup_seq = 1;
fs_info->quota_enabled = 0;
fs_info->pending_quota_state = 0;
fs_info->qgroup_ulist = NULL;
fs_info->qgroup_rescan_running = false;
mutex_init(&fs_info->qgroup_rescan_lock);
......@@ -2518,8 +2539,7 @@ static int btrfs_read_roots(struct btrfs_fs_info *fs_info,
root = btrfs_read_tree_root(tree_root, &location);
if (!IS_ERR(root)) {
set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
fs_info->quota_enabled = 1;
fs_info->pending_quota_state = 1;
set_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
fs_info->quota_root = root;
}
......@@ -2710,8 +2730,7 @@ int open_ctree(struct super_block *sb,
extent_io_tree_init(&fs_info->freed_extents[1],
fs_info->btree_inode->i_mapping);
fs_info->pinned_extents = &fs_info->freed_extents[0];
fs_info->do_barriers = 1;
set_bit(BTRFS_FS_BARRIER, &fs_info->flags);
mutex_init(&fs_info->ordered_operations_mutex);
mutex_init(&fs_info->tree_log_mutex);
......@@ -2762,7 +2781,7 @@ int open_ctree(struct super_block *sb,
* We want to check superblock checksum, the type is stored inside.
* Pass the whole disk block of size BTRFS_SUPER_INFO_SIZE (4k).
*/
if (btrfs_check_super_csum(bh->b_data)) {
if (btrfs_check_super_csum(fs_info, bh->b_data)) {
btrfs_err(fs_info, "superblock checksum mismatch");
err = -EINVAL;
brelse(bh);
......@@ -3199,10 +3218,9 @@ int open_ctree(struct super_block *sb,
return ret;
}
} else {
fs_info->update_uuid_tree_gen = 1;
set_bit(BTRFS_FS_UPDATE_UUID_TREE_GEN, &fs_info->flags);
}
fs_info->open = 1;
set_bit(BTRFS_FS_OPEN, &fs_info->flags);
/*
* backuproot only affect mount behavior, and if open_ctree succeeded,
......@@ -3607,7 +3625,7 @@ int btrfs_get_num_tolerated_disk_barrier_failures(u64 flags)
}
if (min_tolerated == INT_MAX) {
pr_warn("BTRFS: unknown raid flag: %llu\n", flags);
pr_warn("BTRFS: unknown raid flag: %llu", flags);
min_tolerated = 0;
}
......@@ -3893,8 +3911,7 @@ void close_ctree(struct btrfs_root *root)
struct btrfs_fs_info *fs_info = root->fs_info;
int ret;
fs_info->closing = 1;
smp_mb();
set_bit(BTRFS_FS_CLOSING_START, &fs_info->flags);
/* wait for the qgroup rescan worker to stop */
btrfs_qgroup_wait_for_completion(fs_info, false);
......@@ -3939,8 +3956,7 @@ void close_ctree(struct btrfs_root *root)
kthread_stop(fs_info->transaction_kthread);
kthread_stop(fs_info->cleaner_kthread);
fs_info->closing = 2;
smp_mb();
set_bit(BTRFS_FS_CLOSING_DONE, &fs_info->flags);
btrfs_free_qgroup_config(fs_info);
......@@ -3965,7 +3981,7 @@ void close_ctree(struct btrfs_root *root)
invalidate_inode_pages2(fs_info->btree_inode->i_mapping);
btrfs_stop_all_workers(fs_info);
fs_info->open = 0;
clear_bit(BTRFS_FS_OPEN, &fs_info->flags);
free_root_pointers(fs_info, 1);
iput(fs_info->btree_inode);
......@@ -4036,8 +4052,7 @@ void btrfs_mark_buffer_dirty(struct extent_buffer *buf)
root = BTRFS_I(buf->pages[0]->mapping->host)->root;
btrfs_assert_tree_locked(buf);
if (transid != root->fs_info->generation)
WARN(1, KERN_CRIT "btrfs transid mismatch buffer %llu, "
"found %llu running %llu\n",
WARN(1, KERN_CRIT "btrfs transid mismatch buffer %llu, found %llu running %llu\n",
buf->start, transid, root->fs_info->generation);
was_dirty = set_extent_buffer_dirty(buf);
if (!was_dirty)
......@@ -4088,7 +4103,7 @@ void btrfs_btree_balance_dirty_nodelay(struct btrfs_root *root)
int btrfs_read_buffer(struct extent_buffer *buf, u64 parent_transid)
{
struct btrfs_root *root = BTRFS_I(buf->pages[0]->mapping->host)->root;
return btree_read_extent_buffer_pages(root, buf, 0, parent_transid);
return btree_read_extent_buffer_pages(root, buf, parent_transid);
}
static int btrfs_check_super_valid(struct btrfs_fs_info *fs_info,
......@@ -4100,24 +4115,24 @@ static int btrfs_check_super_valid(struct btrfs_fs_info *fs_info,
int ret = 0;
if (btrfs_super_magic(sb) != BTRFS_MAGIC) {
printk(KERN_ERR "BTRFS: no valid FS found\n");
btrfs_err(fs_info, "no valid FS found");
ret = -EINVAL;
}
if (btrfs_super_flags(sb) & ~BTRFS_SUPER_FLAG_SUPP)
printk(KERN_WARNING "BTRFS: unrecognized super flag: %llu\n",
btrfs_warn(fs_info, "unrecognized super flag: %llu",
btrfs_super_flags(sb) & ~BTRFS_SUPER_FLAG_SUPP);
if (btrfs_super_root_level(sb) >= BTRFS_MAX_LEVEL) {
printk(KERN_ERR "BTRFS: tree_root level too big: %d >= %d\n",
btrfs_err(fs_info, "tree_root level too big: %d >= %d",
btrfs_super_root_level(sb), BTRFS_MAX_LEVEL);
ret = -EINVAL;
}
if (btrfs_super_chunk_root_level(sb) >= BTRFS_MAX_LEVEL) {
printk(KERN_ERR "BTRFS: chunk_root level too big: %d >= %d\n",
btrfs_err(fs_info, "chunk_root level too big: %d >= %d",
btrfs_super_chunk_root_level(sb), BTRFS_MAX_LEVEL);
ret = -EINVAL;
}
if (btrfs_super_log_root_level(sb) >= BTRFS_MAX_LEVEL) {
printk(KERN_ERR "BTRFS: log_root level too big: %d >= %d\n",
btrfs_err(fs_info, "log_root level too big: %d >= %d",
btrfs_super_log_root_level(sb), BTRFS_MAX_LEVEL);
ret = -EINVAL;
}
......@@ -4128,47 +4143,48 @@ static int btrfs_check_super_valid(struct btrfs_fs_info *fs_info,
*/
if (!is_power_of_2(sectorsize) || sectorsize < 4096 ||
sectorsize > BTRFS_MAX_METADATA_BLOCKSIZE) {
printk(KERN_ERR "BTRFS: invalid sectorsize %llu\n", sectorsize);
btrfs_err(fs_info, "invalid sectorsize %llu", sectorsize);
ret = -EINVAL;
}
/* Only PAGE SIZE is supported yet */
if (sectorsize != PAGE_SIZE) {
printk(KERN_ERR "BTRFS: sectorsize %llu not supported yet, only support %lu\n",
sectorsize, PAGE_SIZE);
btrfs_err(fs_info,
"sectorsize %llu not supported yet, only support %lu",
sectorsize, PAGE_SIZE);
ret = -EINVAL;
}
if (!is_power_of_2(nodesize) || nodesize < sectorsize ||
nodesize > BTRFS_MAX_METADATA_BLOCKSIZE) {
printk(KERN_ERR "BTRFS: invalid nodesize %llu\n", nodesize);
btrfs_err(fs_info, "invalid nodesize %llu", nodesize);
ret = -EINVAL;
}
if (nodesize != le32_to_cpu(sb->__unused_leafsize)) {
printk(KERN_ERR "BTRFS: invalid leafsize %u, should be %llu\n",
le32_to_cpu(sb->__unused_leafsize),
nodesize);
btrfs_err(fs_info, "invalid leafsize %u, should be %llu",
le32_to_cpu(sb->__unused_leafsize), nodesize);
ret = -EINVAL;
}
/* Root alignment check */
if (!IS_ALIGNED(btrfs_super_root(sb), sectorsize)) {
printk(KERN_WARNING "BTRFS: tree_root block unaligned: %llu\n",
btrfs_super_root(sb));
btrfs_warn(fs_info, "tree_root block unaligned: %llu",
btrfs_super_root(sb));
ret = -EINVAL;
}
if (!IS_ALIGNED(btrfs_super_chunk_root(sb), sectorsize)) {
printk(KERN_WARNING "BTRFS: chunk_root block unaligned: %llu\n",
btrfs_super_chunk_root(sb));
btrfs_warn(fs_info, "chunk_root block unaligned: %llu",
btrfs_super_chunk_root(sb));
ret = -EINVAL;
}
if (!IS_ALIGNED(btrfs_super_log_root(sb), sectorsize)) {
printk(KERN_WARNING "BTRFS: log_root block unaligned: %llu\n",
btrfs_super_log_root(sb));
btrfs_warn(fs_info, "log_root block unaligned: %llu",
btrfs_super_log_root(sb));
ret = -EINVAL;
}
if (memcmp(fs_info->fsid, sb->dev_item.fsid, BTRFS_UUID_SIZE) != 0) {
printk(KERN_ERR "BTRFS: dev_item UUID does not match fsid: %pU != %pU\n",
fs_info->fsid, sb->dev_item.fsid);
btrfs_err(fs_info,
"dev_item UUID does not match fsid: %pU != %pU",
fs_info->fsid, sb->dev_item.fsid);
ret = -EINVAL;
}
......@@ -4178,25 +4194,25 @@ static int btrfs_check_super_valid(struct btrfs_fs_info *fs_info,
*/
if (btrfs_super_bytes_used(sb) < 6 * btrfs_super_nodesize(sb)) {
btrfs_err(fs_info, "bytes_used is too small %llu",
btrfs_super_bytes_used(sb));
btrfs_super_bytes_used(sb));
ret = -EINVAL;
}
if (!is_power_of_2(btrfs_super_stripesize(sb))) {
btrfs_err(fs_info, "invalid stripesize %u",
btrfs_super_stripesize(sb));
btrfs_super_stripesize(sb));
ret = -EINVAL;
}
if (btrfs_super_num_devices(sb) > (1UL << 31))
printk(KERN_WARNING "BTRFS: suspicious number of devices: %llu\n",
btrfs_super_num_devices(sb));
btrfs_warn(fs_info, "suspicious number of devices: %llu",
btrfs_super_num_devices(sb));
if (btrfs_super_num_devices(sb) == 0) {
printk(KERN_ERR "BTRFS: number of devices is 0\n");
btrfs_err(fs_info, "number of devices is 0");
ret = -EINVAL;
}
if (btrfs_super_bytenr(sb) != BTRFS_SUPER_INFO_OFFSET) {
printk(KERN_ERR "BTRFS: super offset mismatch %llu != %u\n",
btrfs_super_bytenr(sb), BTRFS_SUPER_INFO_OFFSET);
btrfs_err(fs_info, "super offset mismatch %llu != %u",
btrfs_super_bytenr(sb), BTRFS_SUPER_INFO_OFFSET);
ret = -EINVAL;
}
......@@ -4205,17 +4221,17 @@ static int btrfs_check_super_valid(struct btrfs_fs_info *fs_info,
* and one chunk
*/
if (btrfs_super_sys_array_size(sb) > BTRFS_SYSTEM_CHUNK_ARRAY_SIZE) {
printk(KERN_ERR "BTRFS: system chunk array too big %u > %u\n",
btrfs_super_sys_array_size(sb),
BTRFS_SYSTEM_CHUNK_ARRAY_SIZE);
btrfs_err(fs_info, "system chunk array too big %u > %u",
btrfs_super_sys_array_size(sb),
BTRFS_SYSTEM_CHUNK_ARRAY_SIZE);
ret = -EINVAL;
}
if (btrfs_super_sys_array_size(sb) < sizeof(struct btrfs_disk_key)
+ sizeof(struct btrfs_chunk)) {
printk(KERN_ERR "BTRFS: system chunk array too small %u < %zu\n",
btrfs_super_sys_array_size(sb),
sizeof(struct btrfs_disk_key)
+ sizeof(struct btrfs_chunk));
btrfs_err(fs_info, "system chunk array too small %u < %zu",
btrfs_super_sys_array_size(sb),
sizeof(struct btrfs_disk_key)
+ sizeof(struct btrfs_chunk));
ret = -EINVAL;
}
......@@ -4224,14 +4240,16 @@ static int btrfs_check_super_valid(struct btrfs_fs_info *fs_info,
* but it's still possible that it's the one that's wrong.
*/
if (btrfs_super_generation(sb) < btrfs_super_chunk_root_generation(sb))
printk(KERN_WARNING
"BTRFS: suspicious: generation < chunk_root_generation: %llu < %llu\n",
btrfs_super_generation(sb), btrfs_super_chunk_root_generation(sb));
btrfs_warn(fs_info,
"suspicious: generation < chunk_root_generation: %llu < %llu",
btrfs_super_generation(sb),
btrfs_super_chunk_root_generation(sb));
if (btrfs_super_generation(sb) < btrfs_super_cache_generation(sb)
&& btrfs_super_cache_generation(sb) != (u64)-1)
printk(KERN_WARNING
"BTRFS: suspicious: generation < cache_generation: %llu < %llu\n",
btrfs_super_generation(sb), btrfs_super_cache_generation(sb));
btrfs_warn(fs_info,
"suspicious: generation < cache_generation: %llu < %llu",
btrfs_super_generation(sb),
btrfs_super_cache_generation(sb));
return ret;
}
......@@ -4475,9 +4493,80 @@ static int btrfs_destroy_pinned_extent(struct btrfs_root *root,
return 0;
}
static void btrfs_cleanup_bg_io(struct btrfs_block_group_cache *cache)
{
struct inode *inode;
inode = cache->io_ctl.inode;
if (inode) {
invalidate_inode_pages2(inode->i_mapping);
BTRFS_I(inode)->generation = 0;
cache->io_ctl.inode = NULL;
iput(inode);
}
btrfs_put_block_group(cache);
}
void btrfs_cleanup_dirty_bgs(struct btrfs_transaction *cur_trans,
struct btrfs_root *root)
{
struct btrfs_block_group_cache *cache;
spin_lock(&cur_trans->dirty_bgs_lock);
while (!list_empty(&cur_trans->dirty_bgs)) {
cache = list_first_entry(&cur_trans->dirty_bgs,
struct btrfs_block_group_cache,
dirty_list);
if (!cache) {
btrfs_err(root->fs_info,
"orphan block group dirty_bgs list");
spin_unlock(&cur_trans->dirty_bgs_lock);
return;
}
if (!list_empty(&cache->io_list)) {
spin_unlock(&cur_trans->dirty_bgs_lock);
list_del_init(&cache->io_list);
btrfs_cleanup_bg_io(cache);
spin_lock(&cur_trans->dirty_bgs_lock);
}
list_del_init(&cache->dirty_list);
spin_lock(&cache->lock);
cache->disk_cache_state = BTRFS_DC_ERROR;
spin_unlock(&cache->lock);
spin_unlock(&cur_trans->dirty_bgs_lock);
btrfs_put_block_group(cache);
spin_lock(&cur_trans->dirty_bgs_lock);
}
spin_unlock(&cur_trans->dirty_bgs_lock);
while (!list_empty(&cur_trans->io_bgs)) {
cache = list_first_entry(&cur_trans->io_bgs,
struct btrfs_block_group_cache,
io_list);
if (!cache) {
btrfs_err(root->fs_info,
"orphan block group on io_bgs list");
return;
}
list_del_init(&cache->io_list);
spin_lock(&cache->lock);
cache->disk_cache_state = BTRFS_DC_ERROR;
spin_unlock(&cache->lock);
btrfs_cleanup_bg_io(cache);
}
}
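
btrfs_cleanup_dirty_bgs() above uses a common kernel pattern: drain a list under a spinlock, but drop the lock around work that can block, then re-take it and re-read the list head (which may have changed in the meantime). A hedged userspace sketch of that drain loop with a pthread mutex (names are illustrative):

#include <pthread.h>
#include <stdlib.h>

struct item { struct item *next; };

static struct item *head;
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

/* Re-read the head each iteration because the lock is dropped
 * for the (potentially blocking) cleanup work. */
static void drain(void (*cleanup)(struct item *))
{
	pthread_mutex_lock(&lock);
	while (head) {
		struct item *it = head;

		head = it->next;		/* like list_del_init() */
		pthread_mutex_unlock(&lock);	/* drop lock to block */
		cleanup(it);
		pthread_mutex_lock(&lock);	/* re-take before next peek */
	}
	pthread_mutex_unlock(&lock);
}

static void free_item(struct item *it)
{
	free(it);
}

int main(void)
{
	int i;

	for (i = 0; i < 3; i++) {	/* build a small list (checks elided) */
		struct item *it = malloc(sizeof(*it));

		it->next = head;
		head = it;
	}
	drain(free_item);
	return head == NULL ? 0 : 1;
}
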
void btrfs_cleanup_one_transaction(struct btrfs_transaction *cur_trans,
struct btrfs_root *root)
{
btrfs_cleanup_dirty_bgs(cur_trans, root);
ASSERT(list_empty(&cur_trans->dirty_bgs));
ASSERT(list_empty(&cur_trans->io_bgs));
btrfs_destroy_delayed_refs(cur_trans, root);
cur_trans->state = TRANS_STATE_COMMIT_START;
......
......@@ -136,6 +136,8 @@ int btrfs_init_log_root_tree(struct btrfs_trans_handle *trans,
struct btrfs_fs_info *fs_info);
int btrfs_add_log_tree(struct btrfs_trans_handle *trans,
struct btrfs_root *root);
void btrfs_cleanup_dirty_bgs(struct btrfs_transaction *trans,
struct btrfs_root *root);
void btrfs_cleanup_one_transaction(struct btrfs_transaction *trans,
struct btrfs_root *root);
struct btrfs_root *btrfs_create_tree(struct btrfs_trans_handle *trans,
......
......@@ -87,7 +87,8 @@ static int do_chunk_alloc(struct btrfs_trans_handle *trans,
int force);
static int find_next_key(struct btrfs_path *path, int level,
struct btrfs_key *key);
static void dump_space_info(struct btrfs_space_info *info, u64 bytes,
static void dump_space_info(struct btrfs_fs_info *fs_info,
struct btrfs_space_info *info, u64 bytes,
int dump_block_groups);
static int btrfs_add_reserved_bytes(struct btrfs_block_group_cache *cache,
u64 ram_bytes, u64 num_bytes, int delalloc);
......@@ -266,9 +267,8 @@ static int exclude_super_stripes(struct btrfs_root *root,
for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
bytenr = btrfs_sb_offset(i);
ret = btrfs_rmap_block(&root->fs_info->mapping_tree,
cache->key.objectid, bytenr,
0, &logical, &nr, &stripe_len);
ret = btrfs_rmap_block(root->fs_info, cache->key.objectid,
bytenr, 0, &logical, &nr, &stripe_len);
if (ret)
return ret;
......@@ -730,11 +730,7 @@ static int cache_block_group(struct btrfs_block_group_cache *cache,
static struct btrfs_block_group_cache *
btrfs_lookup_first_block_group(struct btrfs_fs_info *info, u64 bytenr)
{
struct btrfs_block_group_cache *cache;
cache = block_group_cache_tree_search(info, bytenr, 0);
return cache;
return block_group_cache_tree_search(info, bytenr, 0);
}
/*
......@@ -744,11 +740,7 @@ struct btrfs_block_group_cache *btrfs_lookup_block_group(
struct btrfs_fs_info *info,
u64 bytenr)
{
struct btrfs_block_group_cache *cache;
cache = block_group_cache_tree_search(info, bytenr, 1);
return cache;
return block_group_cache_tree_search(info, bytenr, 1);
}
static struct btrfs_space_info *__find_space_info(struct btrfs_fs_info *info,
......@@ -2360,7 +2352,13 @@ static int run_delayed_tree_ref(struct btrfs_trans_handle *trans,
ins.type = BTRFS_EXTENT_ITEM_KEY;
}
BUG_ON(node->ref_mod != 1);
if (node->ref_mod != 1) {
btrfs_err(root->fs_info,
"btree block(%llu) has %d references rather than 1: action %d ref_root %llu parent %llu",
node->bytenr, node->ref_mod, node->action, ref_root,
parent);
return -EIO;
}
if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
BUG_ON(!extent_op || !extent_op->update_flags);
ret = alloc_reserved_tree_block(trans, root,
......@@ -2590,7 +2588,9 @@ static noinline int __btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
if (must_insert_reserved)
locked_ref->must_insert_reserved = 1;
locked_ref->processing = 0;
btrfs_debug(fs_info, "run_delayed_extent_op returned %d", ret);
btrfs_debug(fs_info,
"run_delayed_extent_op returned %d",
ret);
btrfs_delayed_ref_unlock(locked_ref);
return ret;
}
......@@ -2650,7 +2650,8 @@ static noinline int __btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
locked_ref->processing = 0;
btrfs_delayed_ref_unlock(locked_ref);
btrfs_put_delayed_ref(ref);
btrfs_debug(fs_info, "run_one_delayed_ref returned %d", ret);
btrfs_debug(fs_info, "run_one_delayed_ref returned %d",
ret);
return ret;
}
......@@ -2940,7 +2941,7 @@ int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
if (trans->aborted)
return 0;
if (root->fs_info->creating_free_space_tree)
if (test_bit(BTRFS_FS_CREATING_FREE_SPACE_TREE, &root->fs_info->flags))
return 0;
if (root == root->fs_info->extent_root)
......@@ -2971,7 +2972,6 @@ int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
spin_unlock(&delayed_refs->lock);
goto out;
}
count = (unsigned long)-1;
while (node) {
head = rb_entry(node, struct btrfs_delayed_ref_head,
......@@ -3694,6 +3694,8 @@ int btrfs_start_dirty_block_groups(struct btrfs_trans_handle *trans,
goto again;
}
spin_unlock(&cur_trans->dirty_bgs_lock);
} else if (ret < 0) {
btrfs_cleanup_dirty_bgs(cur_trans, root);
}
btrfs_free_path(path);
......@@ -4429,7 +4431,7 @@ void check_system_chunk(struct btrfs_trans_handle *trans,
if (left < thresh && btrfs_test_opt(root->fs_info, ENOSPC_DEBUG)) {
btrfs_info(root->fs_info, "left=%llu, need=%llu, flags=%llu",
left, thresh, type);
-		dump_space_info(info, 0, 0);
+		dump_space_info(root->fs_info, info, 0, 0);
}
if (left < thresh) {
......@@ -5186,7 +5188,7 @@ static int __reserve_metadata_bytes(struct btrfs_root *root,
* which means we won't have fs_info->fs_root set, so don't do
* the async reclaim as we will panic.
*/
-	if (!root->fs_info->log_root_recovering &&
+	if (!test_bit(BTRFS_FS_LOG_RECOVERING, &root->fs_info->flags) &&
need_do_async_reclaim(space_info, root, used) &&
!work_busy(&root->fs_info->async_reclaim_work)) {
trace_btrfs_trigger_flush(root->fs_info,
......@@ -5792,7 +5794,7 @@ int btrfs_subvolume_reserve_metadata(struct btrfs_root *root,
int ret;
struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
-	if (root->fs_info->quota_enabled) {
+	if (test_bit(BTRFS_FS_QUOTA_ENABLED, &root->fs_info->flags)) {
/* One for parent inode, two for dir entries */
num_bytes = 3 * root->nodesize;
ret = btrfs_qgroup_reserve_meta(root, num_bytes);
......@@ -5970,7 +5972,7 @@ int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
csum_bytes = BTRFS_I(inode)->csum_bytes;
spin_unlock(&BTRFS_I(inode)->lock);
-	if (root->fs_info->quota_enabled) {
+	if (test_bit(BTRFS_FS_QUOTA_ENABLED, &root->fs_info->flags)) {
ret = btrfs_qgroup_reserve_meta(root,
nr_extents * root->nodesize);
if (ret)
......@@ -6110,8 +6112,6 @@ void btrfs_delalloc_release_metadata(struct inode *inode, u64 num_bytes)
* @start: start range we are writing to
* @len: how long the range we are writing to
*
- * TODO: This function will finally replace old btrfs_delalloc_reserve_space()
- *
* This will do the following things
*
* o reserve space in data space info for num bytes
......@@ -6930,8 +6930,9 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
}
if (ret) {
btrfs_err(info, "umm, got %d back from search, was looking for %llu",
ret, bytenr);
btrfs_err(info,
"umm, got %d back from search, was looking for %llu",
ret, bytenr);
if (ret > 0)
btrfs_print_leaf(extent_root,
path->nodes[0]);
......@@ -6977,7 +6978,8 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
ret = btrfs_search_slot(trans, extent_root, &key, path,
-1, 1);
if (ret) {
btrfs_err(info, "umm, got %d back from search, was looking for %llu",
btrfs_err(info,
"umm, got %d back from search, was looking for %llu",
ret, bytenr);
btrfs_print_leaf(extent_root, path->nodes[0]);
}
......@@ -7004,8 +7006,9 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
refs = btrfs_extent_refs(leaf, ei);
if (refs < refs_to_drop) {
btrfs_err(info, "trying to drop %d refs but we only have %Lu "
"for bytenr %Lu", refs_to_drop, refs, bytenr);
btrfs_err(info,
"trying to drop %d refs but we only have %Lu for bytenr %Lu",
refs_to_drop, refs, bytenr);
ret = -EINVAL;
btrfs_abort_transaction(trans, ret);
goto out;
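Joining a format string that was previously split across several literals, as done above, keeps the printed message greppable in the source even when the line runs past 80 columns. A small illustration in plain C:

#include <stdio.h>

/* Split across adjacent literals: grepping the source for
 * "trying to drop %d refs but we only have" finds nothing,
 * because no single line holds the whole message. */
static void report_split(int want, long have)
{
	fprintf(stderr, "trying to drop %d refs but we only "
			"have %ld\n", want, have);
}

/* Unsplit: the whole format string is one literal, so
 * `grep "trying to drop"` lands directly on the printing site. */
static void report_unsplit(int want, long have)
{
	fprintf(stderr,
		"trying to drop %d refs but we only have %ld\n",
		want, have);
}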
......@@ -7901,23 +7904,24 @@ static noinline int find_free_extent(struct btrfs_root *orig_root,
return ret;
}
-static void dump_space_info(struct btrfs_space_info *info, u64 bytes,
+static void dump_space_info(struct btrfs_fs_info *fs_info,
+			    struct btrfs_space_info *info, u64 bytes,
int dump_block_groups)
{
struct btrfs_block_group_cache *cache;
int index = 0;
spin_lock(&info->lock);
-	printk(KERN_INFO "BTRFS: space_info %llu has %llu free, is %sfull\n",
-	       info->flags,
-	       info->total_bytes - info->bytes_used - info->bytes_pinned -
-	       info->bytes_reserved - info->bytes_readonly -
-	       info->bytes_may_use, (info->full) ? "" : "not ");
-	printk(KERN_INFO "BTRFS: space_info total=%llu, used=%llu, pinned=%llu, "
-	       "reserved=%llu, may_use=%llu, readonly=%llu\n",
-	       info->total_bytes, info->bytes_used, info->bytes_pinned,
-	       info->bytes_reserved, info->bytes_may_use,
-	       info->bytes_readonly);
+	btrfs_info(fs_info, "space_info %llu has %llu free, is %sfull",
+		   info->flags,
+		   info->total_bytes - info->bytes_used - info->bytes_pinned -
+		   info->bytes_reserved - info->bytes_readonly -
+		   info->bytes_may_use, (info->full) ? "" : "not ");
+	btrfs_info(fs_info,
+		"space_info total=%llu, used=%llu, pinned=%llu, reserved=%llu, may_use=%llu, readonly=%llu",
+		   info->total_bytes, info->bytes_used, info->bytes_pinned,
+		   info->bytes_reserved, info->bytes_may_use,
+		   info->bytes_readonly);
spin_unlock(&info->lock);
if (!dump_block_groups)
......@@ -7927,12 +7931,11 @@ static void dump_space_info(struct btrfs_space_info *info, u64 bytes,
again:
list_for_each_entry(cache, &info->block_groups[index], list) {
spin_lock(&cache->lock);
-		printk(KERN_INFO "BTRFS: "
-		       "block group %llu has %llu bytes, "
-		       "%llu used %llu pinned %llu reserved %s\n",
-		       cache->key.objectid, cache->key.offset,
-		       btrfs_block_group_used(&cache->item), cache->pinned,
-		       cache->reserved, cache->ro ? "[readonly]" : "");
+		btrfs_info(fs_info,
+			"block group %llu has %llu bytes, %llu used %llu pinned %llu reserved %s",
+			cache->key.objectid, cache->key.offset,
+			btrfs_block_group_used(&cache->item), cache->pinned,
+			cache->reserved, cache->ro ? "[readonly]" : "");
btrfs_dump_free_space(cache, bytes);
spin_unlock(&cache->lock);
}
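dump_space_info() now takes fs_info so it can report through btrfs_info() rather than bare printk(); the btrfs_* helpers tag each message with the filesystem it concerns, which matters when several btrfs volumes are mounted. A rough user-space analogue of such a wrapper (fs_info_msg() below is an invented name for illustration, not the kernel macro):

#include <stdio.h>

struct fs_info_sketch {
	const char *fsid;	/* identifies the filesystem instance */
};

/* The caller never has to remember to say which filesystem
 * the message is about; the wrapper supplies it. */
#define fs_info_msg(fs, fmt, ...) \
	fprintf(stderr, "BTRFS info (device %s): " fmt "\n", \
		(fs)->fsid, ##__VA_ARGS__)

int main(void)
{
	struct fs_info_sketch fs = { .fsid = "sda1" };

	fs_info_msg(&fs, "space_info %llu has %llu free", 4ULL, 4096ULL);
	return 0;
}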
......@@ -7946,6 +7949,7 @@ int btrfs_reserve_extent(struct btrfs_root *root, u64 ram_bytes,
u64 empty_size, u64 hint_byte,
struct btrfs_key *ins, int is_data, int delalloc)
{
struct btrfs_fs_info *fs_info = root->fs_info;
bool final_tried = num_bytes == min_alloc_size;
u64 flags;
int ret;
......@@ -7956,8 +7960,7 @@ int btrfs_reserve_extent(struct btrfs_root *root, u64 ram_bytes,
ret = find_free_extent(root, ram_bytes, num_bytes, empty_size,
hint_byte, ins, flags, delalloc);
if (!ret && !is_data) {
-		btrfs_dec_block_group_reservations(root->fs_info,
-						   ins->objectid);
+		btrfs_dec_block_group_reservations(fs_info, ins->objectid);
} else if (ret == -ENOSPC) {
if (!final_tried && ins->offset) {
num_bytes = min(num_bytes >> 1, ins->offset);
......@@ -7967,14 +7970,15 @@ int btrfs_reserve_extent(struct btrfs_root *root, u64 ram_bytes,
if (num_bytes == min_alloc_size)
final_tried = true;
goto again;
-		} else if (btrfs_test_opt(root->fs_info, ENOSPC_DEBUG)) {
+		} else if (btrfs_test_opt(fs_info, ENOSPC_DEBUG)) {
			struct btrfs_space_info *sinfo;
-			sinfo = __find_space_info(root->fs_info, flags);
-			btrfs_err(root->fs_info, "allocation failed flags %llu, wanted %llu",
-				  flags, num_bytes);
+			sinfo = __find_space_info(fs_info, flags);
+			btrfs_err(root->fs_info,
+				  "allocation failed flags %llu, wanted %llu",
+				  flags, num_bytes);
			if (sinfo)
-				dump_space_info(sinfo, num_bytes, 1);
+				dump_space_info(fs_info, sinfo, num_bytes, 1);
}
}
......@@ -8462,7 +8466,6 @@ static noinline void reada_walk_down(struct btrfs_trans_handle *trans,
u64 refs;
u64 flags;
u32 nritems;
-	u32 blocksize;
struct btrfs_key key;
struct extent_buffer *eb;
int ret;
......@@ -8480,7 +8483,6 @@ static noinline void reada_walk_down(struct btrfs_trans_handle *trans,
eb = path->nodes[wc->level];
nritems = btrfs_header_nritems(eb);
-	blocksize = root->nodesize;
for (slot = path->slots[wc->level]; slot < nritems; slot++) {
if (nread >= wc->reada_count)
......@@ -8544,7 +8546,7 @@ static int account_leaf_items(struct btrfs_trans_handle *trans,
u64 bytenr, num_bytes;
/* We can be called directly from walk_up_proc() */
-	if (!root->fs_info->quota_enabled)
+	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &root->fs_info->flags))
return 0;
for (i = 0; i < nr; i++) {
......@@ -8653,7 +8655,7 @@ static int account_shared_subtree(struct btrfs_trans_handle *trans,
BUG_ON(root_level < 0 || root_level > BTRFS_MAX_LEVEL);
BUG_ON(root_eb == NULL);
-	if (!root->fs_info->quota_enabled)
+	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &root->fs_info->flags))
return 0;
if (!extent_buffer_uptodate(root_eb)) {
......@@ -8884,14 +8886,13 @@ static noinline int do_walk_down(struct btrfs_trans_handle *trans,
ret = btrfs_lookup_extent_info(trans, root, bytenr, level - 1, 1,
&wc->refs[level - 1],
&wc->flags[level - 1]);
-	if (ret < 0) {
-		btrfs_tree_unlock(next);
-		return ret;
-	}
+	if (ret < 0)
+		goto out_unlock;
	if (unlikely(wc->refs[level - 1] == 0)) {
		btrfs_err(root->fs_info, "Missing references.");
-		BUG();
+		ret = -EIO;
+		goto out_unlock;
	}
*lookup_info = 0;
......@@ -8943,7 +8944,12 @@ static noinline int do_walk_down(struct btrfs_trans_handle *trans,
}
level--;
-	BUG_ON(level != btrfs_header_level(next));
+	ASSERT(level == btrfs_header_level(next));
+	if (level != btrfs_header_level(next)) {
+		btrfs_err(root->fs_info, "mismatched level");
+		ret = -EIO;
+		goto out_unlock;
+	}
path->nodes[level] = next;
path->slots[level] = 0;
path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
......@@ -8958,8 +8964,15 @@ static noinline int do_walk_down(struct btrfs_trans_handle *trans,
if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
parent = path->nodes[level]->start;
} else {
-		BUG_ON(root->root_key.objectid !=
-		       btrfs_header_owner(path->nodes[level]));
+		ASSERT(root->root_key.objectid ==
+		       btrfs_header_owner(path->nodes[level]));
+		if (root->root_key.objectid !=
+		    btrfs_header_owner(path->nodes[level])) {
+			btrfs_err(root->fs_info,
+				  "mismatched block owner");
+			ret = -EIO;
+			goto out_unlock;
+		}
parent = 0;
}
......@@ -8968,20 +8981,24 @@ static noinline int do_walk_down(struct btrfs_trans_handle *trans,
generation, level - 1);
if (ret) {
			btrfs_err_rl(root->fs_info,
-				     "Error "
-				     "%d accounting shared subtree. Quota "
-				     "is out of sync, rescan required.",
-				     ret);
+				     "Error %d accounting shared subtree. Quota is out of sync, rescan required.",
+				     ret);
}
}
ret = btrfs_free_extent(trans, root, bytenr, blocksize, parent,
root->root_key.objectid, level - 1, 0);
-		BUG_ON(ret); /* -ENOMEM */
+		if (ret)
+			goto out_unlock;
	}
+	*lookup_info = 1;
+	ret = 1;
+out_unlock:
	btrfs_tree_unlock(next);
	free_extent_buffer(next);
-	*lookup_info = 1;
-	return 1;
+	return ret;
}
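The reworked do_walk_down() funnels every failure through the single out_unlock label, so the extent buffer is unlocked and released exactly once on each path; previously some error branches returned without dropping the reference. The shape, as a self-contained sketch:

#include <errno.h>
#include <stdlib.h>

static int validate(int level, int expected)
{
	return level == expected ? 0 : -EIO;
}

/* Single-exit unwind: every error path jumps to one cleanup label. */
static int walk_down_sketch(int level)
{
	int ret;
	char *buf = malloc(64);	/* stands in for the locked extent buffer */

	if (!buf)
		return -ENOMEM;

	ret = validate(level, 1);
	if (ret)
		goto out_unlock;	/* error: fall through to cleanup */

	ret = 1;	/* the helper's "keep walking" success value */

out_unlock:
	free(buf);	/* cleanup runs exactly once on every path */
	return ret;
}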
/*
......@@ -9061,10 +9078,8 @@ static noinline int walk_up_proc(struct btrfs_trans_handle *trans,
ret = account_leaf_items(trans, root, eb);
if (ret) {
			btrfs_err_rl(root->fs_info,
-				  "error "
-				  "%d accounting leaf items. Quota "
-				  "is out of sync, rescan required.",
-				  ret);
+				  "error %d accounting leaf items. Quota is out of sync, rescan required.",
+				  ret);
}
}
/* make block locked assertion in clean_tree_block happy */
......@@ -9180,9 +9195,10 @@ int btrfs_drop_snapshot(struct btrfs_root *root,
struct btrfs_block_rsv *block_rsv, int update_ref,
int for_reloc)
{
struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_path *path;
struct btrfs_trans_handle *trans;
-	struct btrfs_root *tree_root = root->fs_info->tree_root;
+	struct btrfs_root *tree_root = fs_info->tree_root;
struct btrfs_root_item *root_item = &root->root_item;
struct walk_control *wc;
struct btrfs_key key;
......@@ -9191,7 +9207,7 @@ int btrfs_drop_snapshot(struct btrfs_root *root,
int level;
bool root_dropped = false;
btrfs_debug(root->fs_info, "Drop subvolume %llu", root->objectid);
btrfs_debug(fs_info, "Drop subvolume %llu", root->objectid);
path = btrfs_alloc_path();
if (!path) {
......@@ -9320,7 +9336,8 @@ int btrfs_drop_snapshot(struct btrfs_root *root,
btrfs_end_transaction_throttle(trans, tree_root);
if (!for_reloc && btrfs_need_cleaner_sleep(root)) {
pr_debug("BTRFS: drop snapshot early exit\n");
btrfs_debug(fs_info,
"drop snapshot early exit");
err = -EAGAIN;
goto out_free;
}
......@@ -9386,7 +9403,7 @@ int btrfs_drop_snapshot(struct btrfs_root *root,
if (!for_reloc && root_dropped == false)
btrfs_add_dead_root(root);
if (err && err != -EAGAIN)
-		btrfs_handle_fs_error(root->fs_info, err, NULL);
+		btrfs_handle_fs_error(fs_info, err, NULL);
return err;
}
......@@ -10020,7 +10037,7 @@ int btrfs_free_block_groups(struct btrfs_fs_info *info)
if (WARN_ON(space_info->bytes_pinned > 0 ||
space_info->bytes_reserved > 0 ||
space_info->bytes_may_use > 0))
-			dump_space_info(space_info, 0, 0);
+			dump_space_info(info, space_info, 0, 0);
list_del(&space_info->list);
for (i = 0; i < BTRFS_NR_RAID_TYPES; i++) {
struct kobject *kobj;
......@@ -10069,7 +10086,8 @@ static void __link_block_group(struct btrfs_space_info *space_info,
return;
out_err:
pr_warn("BTRFS: failed to add kobject for block cache. ignoring.\n");
btrfs_warn(cache->fs_info,
"failed to add kobject for block cache, ignoring");
}
static struct btrfs_block_group_cache *
......@@ -10127,6 +10145,11 @@ int btrfs_read_block_groups(struct btrfs_root *root)
struct extent_buffer *leaf;
int need_clear = 0;
u64 cache_gen;
u64 feature;
int mixed;
feature = btrfs_super_incompat_flags(info->super_copy);
mixed = !!(feature & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS);
root = info->extent_root;
key.objectid = 0;
......@@ -10180,6 +10203,15 @@ int btrfs_read_block_groups(struct btrfs_root *root)
btrfs_item_ptr_offset(leaf, path->slots[0]),
sizeof(cache->item));
cache->flags = btrfs_block_group_flags(&cache->item);
if (!mixed &&
((cache->flags & BTRFS_BLOCK_GROUP_METADATA) &&
(cache->flags & BTRFS_BLOCK_GROUP_DATA))) {
btrfs_err(info,
"bg %llu is a mixed block group but filesystem hasn't enabled mixed block groups",
cache->key.objectid);
ret = -EINVAL;
goto error;
}
key.objectid = found_key.objectid + found_key.offset;
btrfs_release_path(path);
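The added check refuses a block group flagged as both DATA and METADATA unless the filesystem advertises the mixed-groups incompat feature, rejecting corrupted or crafted metadata early. The gating logic, reduced to a sketch with invented bit values:

#include <stdbool.h>
#include <stdint.h>

/* Bit values are illustrative, not the on-disk BTRFS constants. */
#define FEAT_MIXED_GROUPS	(1ULL << 0)
#define BG_DATA			(1ULL << 0)
#define BG_METADATA		(1ULL << 1)

static bool bg_flags_valid(uint64_t incompat_features, uint64_t bg_flags)
{
	bool mixed = incompat_features & FEAT_MIXED_GROUPS;

	/* DATA|METADATA in one group is only legal on a mixed-bg fs. */
	if (!mixed && (bg_flags & BG_DATA) && (bg_flags & BG_METADATA))
		return false;
	return true;
}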
......@@ -10789,7 +10821,7 @@ void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info)
struct btrfs_trans_handle *trans;
int ret = 0;
-	if (!fs_info->open)
+	if (!test_bit(BTRFS_FS_OPEN, &fs_info->flags))
return;
spin_lock(&fs_info->unused_bgs_lock);
......
......@@ -20,6 +20,7 @@
#include "locking.h"
#include "rcu-string.h"
#include "backref.h"
#include "transaction.h"
static struct kmem_cache *extent_state_cache;
static struct kmem_cache *extent_buffer_cache;
......@@ -74,8 +75,7 @@ void btrfs_leak_debug_check(void)
while (!list_empty(&buffers)) {
eb = list_entry(buffers.next, struct extent_buffer, leak_list);
-		printk(KERN_ERR "BTRFS: buffer leak start %llu len %lu "
-		       "refs %d\n",
+		pr_err("BTRFS: buffer leak start %llu len %lu refs %d\n",
eb->start, eb->len, atomic_read(&eb->refs));
list_del(&eb->leak_list);
kmem_cache_free(extent_buffer_cache, eb);
......@@ -460,8 +460,7 @@ static int insert_state(struct extent_io_tree *tree,
if (node) {
struct extent_state *found;
found = rb_entry(node, struct extent_state, rb_node);
-		printk(KERN_ERR "BTRFS: found node %llu %llu on insert of "
-		       "%llu %llu\n",
+		pr_err("BTRFS: found node %llu %llu on insert of %llu %llu\n",
found->start, found->end, start, end);
return -EEXIST;
}
......@@ -572,9 +571,8 @@ alloc_extent_state_atomic(struct extent_state *prealloc)
static void extent_io_tree_panic(struct extent_io_tree *tree, int err)
{
-	btrfs_panic(tree_fs_info(tree), err, "Locking error: "
-		    "Extent tree was modified by another "
-		    "thread while locked.");
+	btrfs_panic(tree_fs_info(tree), err,
+		    "Locking error: Extent tree was modified by another thread while locked.");
}
/*
......@@ -1729,7 +1727,7 @@ STATIC u64 find_lock_delalloc_range(struct inode *inode,
}
void extent_clear_unlock_delalloc(struct inode *inode, u64 start, u64 end,
-				 struct page *locked_page,
+				 u64 delalloc_end, struct page *locked_page,
unsigned clear_bits,
unsigned long page_ops)
{
......@@ -2122,8 +2120,9 @@ int clean_io_failure(struct inode *inode, u64 start, struct page *page,
if (failrec->in_validation) {
/* there was no real error, just free the record */
pr_debug("clean_io_failure: freeing dummy error at %llu\n",
failrec->start);
btrfs_debug(fs_info,
"clean_io_failure: freeing dummy error at %llu",
failrec->start);
goto out;
}
if (fs_info->sb->s_flags & MS_RDONLY)
......@@ -2189,6 +2188,7 @@ void btrfs_free_io_failure_record(struct inode *inode, u64 start, u64 end)
int btrfs_get_io_failure_record(struct inode *inode, u64 start, u64 end,
struct io_failure_record **failrec_ret)
{
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct io_failure_record *failrec;
struct extent_map *em;
struct extent_io_tree *failure_tree = &BTRFS_I(inode)->io_failure_tree;
......@@ -2236,8 +2236,9 @@ int btrfs_get_io_failure_record(struct inode *inode, u64 start, u64 end,
em->compress_type);
}
pr_debug("Get IO Failure Record: (new) logical=%llu, start=%llu, len=%llu\n",
logical, start, failrec->len);
btrfs_debug(fs_info,
"Get IO Failure Record: (new) logical=%llu, start=%llu, len=%llu",
logical, start, failrec->len);
failrec->logical = logical;
free_extent_map(em);
......@@ -2255,9 +2256,10 @@ int btrfs_get_io_failure_record(struct inode *inode, u64 start, u64 end,
return ret;
}
} else {
pr_debug("Get IO Failure Record: (found) logical=%llu, start=%llu, len=%llu, validation=%d\n",
failrec->logical, failrec->start, failrec->len,
failrec->in_validation);
btrfs_debug(fs_info,
"Get IO Failure Record: (found) logical=%llu, start=%llu, len=%llu, validation=%d",
failrec->logical, failrec->start, failrec->len,
failrec->in_validation);
/*
* when data can be on disk more than twice, add to failrec here
* (e.g. with a list for failed_mirror) to make
......@@ -2273,18 +2275,19 @@ int btrfs_get_io_failure_record(struct inode *inode, u64 start, u64 end,
int btrfs_check_repairable(struct inode *inode, struct bio *failed_bio,
struct io_failure_record *failrec, int failed_mirror)
{
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
int num_copies;
-	num_copies = btrfs_num_copies(BTRFS_I(inode)->root->fs_info,
-				      failrec->logical, failrec->len);
+	num_copies = btrfs_num_copies(fs_info, failrec->logical, failrec->len);
if (num_copies == 1) {
/*
* we only have a single copy of the data, so don't bother with
* all the retry and error correction code that follows. no
* matter what the error is, it is very likely to persist.
*/
pr_debug("Check Repairable: cannot repair, num_copies=%d, next_mirror %d, failed_mirror %d\n",
num_copies, failrec->this_mirror, failed_mirror);
btrfs_debug(fs_info,
"Check Repairable: cannot repair, num_copies=%d, next_mirror %d, failed_mirror %d",
num_copies, failrec->this_mirror, failed_mirror);
return 0;
}
......@@ -2323,8 +2326,9 @@ int btrfs_check_repairable(struct inode *inode, struct bio *failed_bio,
}
if (failrec->this_mirror > num_copies) {
pr_debug("Check Repairable: (fail) num_copies=%d, next_mirror %d, failed_mirror %d\n",
num_copies, failrec->this_mirror, failed_mirror);
btrfs_debug(fs_info,
"Check Repairable: (fail) num_copies=%d, next_mirror %d, failed_mirror %d",
num_copies, failrec->this_mirror, failed_mirror);
return 0;
}
......@@ -2415,8 +2419,9 @@ static int bio_readpage_error(struct bio *failed_bio, u64 phy_offset,
}
bio_set_op_attrs(bio, REQ_OP_READ, read_mode);
pr_debug("Repair Read Error: submitting new read[%#x] to this_mirror=%d, in_validation=%d\n",
read_mode, failrec->this_mirror, failrec->in_validation);
btrfs_debug(btrfs_sb(inode->i_sb),
"Repair Read Error: submitting new read[%#x] to this_mirror=%d, in_validation=%d",
read_mode, failrec->this_mirror, failrec->in_validation);
ret = tree->ops->submit_bio_hook(inode, bio, failrec->this_mirror,
failrec->bio_flags, 0);
......@@ -2484,8 +2489,7 @@ static void end_bio_extent_writepage(struct bio *bio)
bvec->bv_offset, bvec->bv_len);
else
			btrfs_info(BTRFS_I(page->mapping->host)->root->fs_info,
-				   "incomplete page write in btrfs with offset %u and "
-				   "length %u",
+				   "incomplete page write in btrfs with offset %u and length %u",
bvec->bv_offset, bvec->bv_len);
}
......@@ -2541,10 +2545,12 @@ static void end_bio_extent_readpage(struct bio *bio)
bio_for_each_segment_all(bvec, bio, i) {
struct page *page = bvec->bv_page;
struct inode *inode = page->mapping->host;
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
pr_debug("end_bio_extent_readpage: bi_sector=%llu, err=%d, "
"mirror=%u\n", (u64)bio->bi_iter.bi_sector,
bio->bi_error, io_bio->mirror_num);
btrfs_debug(fs_info,
"end_bio_extent_readpage: bi_sector=%llu, err=%d, mirror=%u",
(u64)bio->bi_iter.bi_sector, bio->bi_error,
io_bio->mirror_num);
tree = &BTRFS_I(inode)->io_tree;
/* We always issue full-page reads, but if some block
......@@ -2554,13 +2560,12 @@ static void end_bio_extent_readpage(struct bio *bio)
* if they don't add up to a full page. */
if (bvec->bv_offset || bvec->bv_len != PAGE_SIZE) {
if (bvec->bv_offset + bvec->bv_len != PAGE_SIZE)
-				btrfs_err(BTRFS_I(page->mapping->host)->root->fs_info,
-					"partial page read in btrfs with offset %u and length %u",
+				btrfs_err(fs_info,
+					"partial page read in btrfs with offset %u and length %u",
					bvec->bv_offset, bvec->bv_len);
			else
-				btrfs_info(BTRFS_I(page->mapping->host)->root->fs_info,
-					"incomplete page read in btrfs with offset %u and "
-					"length %u",
+				btrfs_info(fs_info,
+					"incomplete page read in btrfs with offset %u and length %u",
bvec->bv_offset, bvec->bv_len);
}
......@@ -3624,7 +3629,6 @@ static void end_extent_buffer_writeback(struct extent_buffer *eb)
static void set_btree_ioerr(struct page *page)
{
struct extent_buffer *eb = (struct extent_buffer *)page->private;
-	struct btrfs_inode *btree_ino = BTRFS_I(eb->fs_info->btree_inode);
SetPageError(page);
if (test_and_set_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags))
......@@ -3670,13 +3674,13 @@ static void set_btree_ioerr(struct page *page)
*/
switch (eb->log_index) {
	case -1:
-		set_bit(BTRFS_INODE_BTREE_ERR, &btree_ino->runtime_flags);
+		set_bit(BTRFS_FS_BTREE_ERR, &eb->fs_info->flags);
		break;
	case 0:
-		set_bit(BTRFS_INODE_BTREE_LOG1_ERR, &btree_ino->runtime_flags);
+		set_bit(BTRFS_FS_LOG1_ERR, &eb->fs_info->flags);
		break;
	case 1:
-		set_bit(BTRFS_INODE_BTREE_LOG2_ERR, &btree_ino->runtime_flags);
+		set_bit(BTRFS_FS_LOG2_ERR, &eb->fs_info->flags);
		break;
default:
BUG(); /* unexpected, logic error */
......@@ -3721,8 +3725,10 @@ static noinline_for_stack int write_one_eb(struct extent_buffer *eb,
struct block_device *bdev = fs_info->fs_devices->latest_bdev;
struct extent_io_tree *tree = &BTRFS_I(fs_info->btree_inode)->io_tree;
u64 offset = eb->start;
u32 nritems;
unsigned long i, num_pages;
unsigned long bio_flags = 0;
unsigned long start, end;
int write_flags = (epd->sync_io ? WRITE_SYNC : 0) | REQ_META;
int ret = 0;
......@@ -3732,6 +3738,23 @@ static noinline_for_stack int write_one_eb(struct extent_buffer *eb,
if (btrfs_header_owner(eb) == BTRFS_TREE_LOG_OBJECTID)
bio_flags = EXTENT_BIO_TREE_LOG;
/* set btree blocks beyond nritems with 0 to avoid stale content. */
nritems = btrfs_header_nritems(eb);
if (btrfs_header_level(eb) > 0) {
end = btrfs_node_key_ptr_offset(nritems);
memset_extent_buffer(eb, 0, end, eb->len - end);
} else {
/*
* leaf:
* header 0 1 2 .. N ... data_N .. data_2 data_1 data_0
*/
start = btrfs_item_nr_offset(nritems);
end = btrfs_leaf_data(eb) +
leaf_data_end(fs_info->tree_root, eb);
memset_extent_buffer(eb, 0, start, end - start);
}
for (i = 0; i < num_pages; i++) {
struct page *p = eb->pages[i];
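The newly added memset zeroes every byte of the block that lies outside the live items before the buffer is written, so stale memory contents can never leak into an on-disk btree block. For a leaf the dead region is the gap between the item table (growing up from the header) and the item data (growing down from the end of the block). A sketch with invented sizes:

#include <stdint.h>
#include <string.h>

#define HDR_SIZE	64	/* illustrative header size, not btrfs's */
#define ITEM_SIZE	16	/* illustrative per-item metadata size */

/* Zero the unused middle of a leaf-like block: everything between the
 * end of the item table and the start of the packed item data. */
static void scrub_leaf(uint8_t *blk, int nritems, size_t data_start)
{
	size_t gap_start = HDR_SIZE + (size_t)nritems * ITEM_SIZE;

	memset(blk + gap_start, 0, data_start - gap_start);
}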
......@@ -4487,11 +4510,24 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
flags |= (FIEMAP_EXTENT_DELALLOC |
FIEMAP_EXTENT_UNKNOWN);
} else if (fieinfo->fi_extents_max) {
struct btrfs_trans_handle *trans;
u64 bytenr = em->block_start -
(em->start - em->orig_start);
disko = em->block_start + offset_in_extent;
/*
* We need a trans handle to get delayed refs
*/
trans = btrfs_join_transaction(root);
/*
* It's OK if we can't start a trans we can still check
* from commit_root
*/
if (IS_ERR(trans))
trans = NULL;
/*
* As btrfs supports shared space, this information
* can be exported to userspace tools via
......@@ -4499,9 +4535,11 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
* then we're just getting a count and we can skip the
* lookup stuff.
*/
-			ret = btrfs_check_shared(NULL, root->fs_info,
+			ret = btrfs_check_shared(trans, root->fs_info,
root->objectid,
btrfs_ino(inode), bytenr);
if (trans)
btrfs_end_transaction(trans, root);
if (ret < 0)
goto out_free;
if (ret)
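The fiemap path now opportunistically joins a transaction so the shared-extent check can also account for delayed refs; if the join fails, a NULL handle is passed and the check falls back to the committed tree. That best-effort pattern, with stand-in functions (join_transaction()/end_transaction() here are placeholders, not the kernel API):

#include <stddef.h>

struct trans_sketch { int id; };

static struct trans_sketch *join_transaction(void)
{
	return NULL;	/* may legitimately fail; the caller must cope */
}

static void end_transaction(struct trans_sketch *t)
{
	(void)t;
}

static int check_shared_sketch(void)
{
	struct trans_sketch *trans = join_transaction();
	int ret = 0;

	/* ... walk backrefs: with a trans, delayed refs are visible;
	 * without one, only the committed state is consulted ... */

	if (trans)
		end_transaction(trans);
	return ret;
}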
......@@ -5173,11 +5211,10 @@ int extent_buffer_uptodate(struct extent_buffer *eb)
}
int read_extent_buffer_pages(struct extent_io_tree *tree,
-			     struct extent_buffer *eb, u64 start, int wait,
+			     struct extent_buffer *eb, int wait,
get_extent_t *get_extent, int mirror_num)
{
unsigned long i;
-	unsigned long start_i;
struct page *page;
int err;
int ret = 0;
......@@ -5191,16 +5228,8 @@ int read_extent_buffer_pages(struct extent_io_tree *tree,
if (test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags))
return 0;
-	if (start) {
-		WARN_ON(start < eb->start);
-		start_i = (start >> PAGE_SHIFT) -
-			(eb->start >> PAGE_SHIFT);
-	} else {
-		start_i = 0;
-	}
	num_pages = num_extent_pages(eb->start, eb->len);
-	for (i = start_i; i < num_pages; i++) {
+	for (i = 0; i < num_pages; i++) {
page = eb->pages[i];
if (wait == WAIT_NONE) {
if (!trylock_page(page))
......@@ -5209,21 +5238,29 @@ int read_extent_buffer_pages(struct extent_io_tree *tree,
lock_page(page);
}
locked_pages++;
}
/*
* We need to firstly lock all pages to make sure that
* the uptodate bit of our pages won't be affected by
* clear_extent_buffer_uptodate().
*/
for (i = 0; i < num_pages; i++) {
page = eb->pages[i];
if (!PageUptodate(page)) {
num_reads++;
all_uptodate = 0;
}
}
if (all_uptodate) {
-		if (start_i == 0)
-			set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
+		set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
goto unlock_exit;
}
clear_bit(EXTENT_BUFFER_READ_ERR, &eb->bflags);
eb->read_mirror = 0;
atomic_set(&eb->io_pages, num_reads);
-	for (i = start_i; i < num_pages; i++) {
+	for (i = 0; i < num_pages; i++) {
page = eb->pages[i];
if (!PageUptodate(page)) {
......@@ -5264,7 +5301,7 @@ int read_extent_buffer_pages(struct extent_io_tree *tree,
if (ret || wait != WAIT_COMPLETE)
return ret;
-	for (i = start_i; i < num_pages; i++) {
+	for (i = 0; i < num_pages; i++) {
page = eb->pages[i];
wait_on_page_locked(page);
if (!PageUptodate(page))
......@@ -5274,12 +5311,10 @@ int read_extent_buffer_pages(struct extent_io_tree *tree,
return ret;
unlock_exit:
-	i = start_i;
	while (locked_pages > 0) {
-		page = eb->pages[i];
-		i++;
-		unlock_page(page);
		locked_pages--;
+		page = eb->pages[locked_pages];
+		unlock_page(page);
}
return ret;
}
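read_extent_buffer_pages() now locks all pages first and only then samples their uptodate bits, closing the race with clear_extent_buffer_uptodate(); dropping the start parameter also means the whole buffer is always processed. The lock-all-then-check shape, as a pthread sketch:

#include <pthread.h>
#include <stdbool.h>

#define NPAGES 4

struct page_sketch {
	pthread_mutex_t lock;
	bool uptodate;
};

/* Phase 1 takes every lock; phase 2 then reads the flags knowing no
 * concurrent writer can change them until the caller unlocks again. */
static int count_stale(struct page_sketch pages[NPAGES])
{
	int i, num_reads = 0;

	for (i = 0; i < NPAGES; i++)
		pthread_mutex_lock(&pages[i].lock);
	for (i = 0; i < NPAGES; i++)
		if (!pages[i].uptodate)
			num_reads++;
	return num_reads;	/* caller submits reads, then unlocks */
}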
......@@ -5382,8 +5417,7 @@ int map_private_extent_buffer(struct extent_buffer *eb, unsigned long start,
}
if (start + min_len > eb->len) {
-		WARN(1, KERN_ERR "btrfs bad mapping eb start %llu len %lu, "
-		     "wanted %lu %lu\n",
+		WARN(1, KERN_ERR "btrfs bad mapping eb start %llu len %lu, wanted %lu %lu\n",
eb->start, eb->len, start, min_len);
return -EINVAL;
}
......@@ -5713,14 +5747,14 @@ void memcpy_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
if (src_offset + len > dst->len) {
		btrfs_err(dst->fs_info,
-			  "memmove bogus src_offset %lu move "
-			  "len %lu dst len %lu", src_offset, len, dst->len);
+			  "memmove bogus src_offset %lu move len %lu dst len %lu",
+			  src_offset, len, dst->len);
BUG_ON(1);
}
if (dst_offset + len > dst->len) {
		btrfs_err(dst->fs_info,
-			  "memmove bogus dst_offset %lu move "
-			  "len %lu dst len %lu", dst_offset, len, dst->len);
+			  "memmove bogus dst_offset %lu move len %lu dst len %lu",
+			  dst_offset, len, dst->len);
BUG_ON(1);
}
......@@ -5760,13 +5794,15 @@ void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
unsigned long src_i;
if (src_offset + len > dst->len) {
btrfs_err(dst->fs_info, "memmove bogus src_offset %lu move "
"len %lu len %lu", src_offset, len, dst->len);
btrfs_err(dst->fs_info,
"memmove bogus src_offset %lu move len %lu len %lu",
src_offset, len, dst->len);
BUG_ON(1);
}
if (dst_offset + len > dst->len) {
btrfs_err(dst->fs_info, "memmove bogus dst_offset %lu move "
"len %lu len %lu", dst_offset, len, dst->len);
btrfs_err(dst->fs_info,
"memmove bogus dst_offset %lu move len %lu len %lu",
dst_offset, len, dst->len);
BUG_ON(1);
}
if (dst_offset < src_offset) {
......
......@@ -359,7 +359,7 @@ void free_extent_buffer_stale(struct extent_buffer *eb);
#define WAIT_COMPLETE 1
#define WAIT_PAGE_LOCK 2
int read_extent_buffer_pages(struct extent_io_tree *tree,
-			     struct extent_buffer *eb, u64 start, int wait,
+			     struct extent_buffer *eb, int wait,
get_extent_t *get_extent, int mirror_num);
void wait_on_extent_buffer_writeback(struct extent_buffer *eb);
......@@ -413,7 +413,7 @@ int map_private_extent_buffer(struct extent_buffer *eb, unsigned long offset,
void extent_range_clear_dirty_for_io(struct inode *inode, u64 start, u64 end);
void extent_range_redirty_for_io(struct inode *inode, u64 start, u64 end);
void extent_clear_unlock_delalloc(struct inode *inode, u64 start, u64 end,
-				 struct page *locked_page,
+				 u64 delalloc_end, struct page *locked_page,
unsigned bits_to_clear,
unsigned long page_ops);
struct bio *
......
......@@ -503,7 +503,7 @@ int btrfs_dirty_pages(struct btrfs_root *root, struct inode *inode,
end_of_last_block = start_pos + num_bytes - 1;
err = btrfs_set_extent_delalloc(inode, start_pos, end_of_last_block,
-					cached);
+					cached, 0);
if (err)
return err;
......@@ -1110,13 +1110,25 @@ int btrfs_mark_extent_written(struct btrfs_trans_handle *trans,
leaf = path->nodes[0];
btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
-	BUG_ON(key.objectid != ino || key.type != BTRFS_EXTENT_DATA_KEY);
+	if (key.objectid != ino ||
+	    key.type != BTRFS_EXTENT_DATA_KEY) {
+		ret = -EINVAL;
+		btrfs_abort_transaction(trans, ret);
+		goto out;
+	}
fi = btrfs_item_ptr(leaf, path->slots[0],
struct btrfs_file_extent_item);
-	BUG_ON(btrfs_file_extent_type(leaf, fi) !=
-	       BTRFS_FILE_EXTENT_PREALLOC);
+	if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_PREALLOC) {
+		ret = -EINVAL;
+		btrfs_abort_transaction(trans, ret);
+		goto out;
+	}
extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
-	BUG_ON(key.offset > start || extent_end < end);
+	if (key.offset > start || extent_end < end) {
+		ret = -EINVAL;
+		btrfs_abort_transaction(trans, ret);
+		goto out;
+	}
bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
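Here and in the following hunks, hard assertions give way to btrfs_abort_transaction() plus an error return: the transaction is poisoned and the filesystem goes read-only rather than halting the machine. A reduced sketch of that contract (abort_trans() is an illustrative stand-in):

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

struct trans_sketch {
	bool aborted;
};

static void abort_trans(struct trans_sketch *t, int err)
{
	t->aborted = true;	/* poison the transaction, don't crash */
	fprintf(stderr, "transaction aborted: error %d\n", err);
}

static int mark_extent_written_sketch(struct trans_sketch *t, bool key_ok)
{
	int ret = 0;

	if (!key_ok) {		/* was: BUG_ON(!key_ok) */
		ret = -EINVAL;
		abort_trans(t, ret);
	}
	return ret;
}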
......@@ -1213,12 +1225,19 @@ int btrfs_mark_extent_written(struct btrfs_trans_handle *trans,
ret = btrfs_inc_extent_ref(trans, root, bytenr, num_bytes, 0,
root->root_key.objectid,
ino, orig_offset);
-		BUG_ON(ret); /* -ENOMEM */
+		if (ret) {
+			btrfs_abort_transaction(trans, ret);
+			goto out;
+		}
if (split == start) {
key.offset = start;
} else {
-			BUG_ON(start != key.offset);
+			if (start != key.offset) {
+				ret = -EINVAL;
+				btrfs_abort_transaction(trans, ret);
+				goto out;
+			}
path->slots[0]--;
extent_end = end;
}
......@@ -1240,7 +1259,10 @@ int btrfs_mark_extent_written(struct btrfs_trans_handle *trans,
ret = btrfs_free_extent(trans, root, bytenr, num_bytes,
0, root->root_key.objectid,
ino, orig_offset);
-		BUG_ON(ret); /* -ENOMEM */
+		if (ret) {
+			btrfs_abort_transaction(trans, ret);
+			goto out;
+		}
}
other_start = 0;
other_end = start;
......@@ -1257,7 +1279,10 @@ int btrfs_mark_extent_written(struct btrfs_trans_handle *trans,
ret = btrfs_free_extent(trans, root, bytenr, num_bytes,
0, root->root_key.objectid,
ino, orig_offset);
-		BUG_ON(ret); /* -ENOMEM */
+		if (ret) {
+			btrfs_abort_transaction(trans, ret);
+			goto out;
+		}
}
if (del_nr == 0) {
fi = btrfs_item_ptr(leaf, path->slots[0],
......
......@@ -716,8 +716,7 @@ static int __load_free_space_cache(struct btrfs_root *root, struct inode *inode,
if (BTRFS_I(inode)->generation != generation) {
		btrfs_err(root->fs_info,
-			"free space inode generation (%llu) "
-			"did not match free space cache generation (%llu)",
+			"free space inode generation (%llu) did not match free space cache generation (%llu)",
BTRFS_I(inode)->generation, generation);
return 0;
}
......@@ -879,8 +878,9 @@ int load_free_space_cache(struct btrfs_fs_info *fs_info,
if (!matched) {
__btrfs_remove_free_space_cache(ctl);
btrfs_warn(fs_info, "block group %llu has wrong amount of free space",
block_group->key.objectid);
btrfs_warn(fs_info,
"block group %llu has wrong amount of free space",
block_group->key.objectid);
ret = -1;
}
out:
......@@ -891,8 +891,9 @@ int load_free_space_cache(struct btrfs_fs_info *fs_info,
spin_unlock(&block_group->lock);
ret = 0;
btrfs_warn(fs_info, "failed to load free space cache for block group %llu, rebuilding it now",
block_group->key.objectid);
btrfs_warn(fs_info,
"failed to load free space cache for block group %llu, rebuilding it now",
block_group->key.objectid);
}
iput(inode);
......@@ -2298,7 +2299,8 @@ static void steal_from_bitmap(struct btrfs_free_space_ctl *ctl,
}
}
-int __btrfs_add_free_space(struct btrfs_free_space_ctl *ctl,
+int __btrfs_add_free_space(struct btrfs_fs_info *fs_info,
+			   struct btrfs_free_space_ctl *ctl,
u64 offset, u64 bytes)
{
struct btrfs_free_space *info;
......@@ -2345,7 +2347,7 @@ int __btrfs_add_free_space(struct btrfs_free_space_ctl *ctl,
spin_unlock(&ctl->tree_lock);
if (ret) {
-		printk(KERN_CRIT "BTRFS: unable to add free space :%d\n", ret);
+		btrfs_crit(fs_info, "unable to add free space :%d", ret);
ASSERT(ret != -EEXIST);
}
......@@ -2621,7 +2623,8 @@ u64 btrfs_find_space_for_alloc(struct btrfs_block_group_cache *block_group,
spin_unlock(&ctl->tree_lock);
if (align_gap_len)
-		__btrfs_add_free_space(ctl, align_gap, align_gap_len);
+		__btrfs_add_free_space(block_group->fs_info, ctl,
+				       align_gap, align_gap_len);
return ret;
}
......
......@@ -89,13 +89,15 @@ int btrfs_write_out_ino_cache(struct btrfs_root *root,
struct inode *inode);
void btrfs_init_free_space_ctl(struct btrfs_block_group_cache *block_group);
-int __btrfs_add_free_space(struct btrfs_free_space_ctl *ctl,
+int __btrfs_add_free_space(struct btrfs_fs_info *fs_info,
+			   struct btrfs_free_space_ctl *ctl,
u64 bytenr, u64 size);
static inline int
btrfs_add_free_space(struct btrfs_block_group_cache *block_group,
u64 bytenr, u64 size)
{
-	return __btrfs_add_free_space(block_group->free_space_ctl,
+	return __btrfs_add_free_space(block_group->fs_info,
+				      block_group->free_space_ctl,
bytenr, size);
}
int btrfs_remove_free_space(struct btrfs_block_group_cache *block_group,
......
......@@ -107,7 +107,7 @@ search_free_space_info(struct btrfs_trans_handle *trans,
if (ret < 0)
return ERR_PTR(ret);
if (ret != 0) {
btrfs_warn(fs_info, "missing free space info for %llu\n",
btrfs_warn(fs_info, "missing free space info for %llu",
block_group->key.objectid);
ASSERT(0);
return ERR_PTR(-ENOENT);
......@@ -261,7 +261,8 @@ int convert_free_space_to_bitmaps(struct btrfs_trans_handle *trans,
btrfs_release_path(path);
if (extent_count != expected_extent_count) {
btrfs_err(fs_info, "incorrect extent count for %llu; counted %u, expected %u",
btrfs_err(fs_info,
"incorrect extent count for %llu; counted %u, expected %u",
block_group->key.objectid, extent_count,
expected_extent_count);
ASSERT(0);
......@@ -442,7 +443,8 @@ int convert_free_space_to_extents(struct btrfs_trans_handle *trans,
}
if (extent_count != expected_extent_count) {
btrfs_err(fs_info, "incorrect extent count for %llu; counted %u, expected %u",
btrfs_err(fs_info,
"incorrect extent count for %llu; counted %u, expected %u",
block_group->key.objectid, extent_count,
expected_extent_count);
ASSERT(0);
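Both conversion directions validate that the number of extents they produced matches what the free space info item recorded. Counting the extents represented by a bitmap means counting runs of consecutive set bits, as in this sketch:

#include <stdint.h>

/* Count contiguous runs of set bits: the extent count a free-space
 * bitmap should yield when converted back to extent records. */
static unsigned int count_extents(const uint8_t *bitmap, unsigned int nbits)
{
	unsigned int i, count = 0;
	int prev = 0;

	for (i = 0; i < nbits; i++) {
		int bit = (bitmap[i / 8] >> (i % 8)) & 1;

		if (bit && !prev)
			count++;	/* a new extent starts here */
		prev = bit;
	}
	return count;
}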
......@@ -1163,7 +1165,7 @@ int btrfs_create_free_space_tree(struct btrfs_fs_info *fs_info)
if (IS_ERR(trans))
return PTR_ERR(trans);
-	fs_info->creating_free_space_tree = 1;
+	set_bit(BTRFS_FS_CREATING_FREE_SPACE_TREE, &fs_info->flags);
free_space_root = btrfs_create_tree(trans, fs_info,
BTRFS_FREE_SPACE_TREE_OBJECTID);
if (IS_ERR(free_space_root)) {
......@@ -1183,7 +1185,7 @@ int btrfs_create_free_space_tree(struct btrfs_fs_info *fs_info)
}
btrfs_set_fs_compat_ro(fs_info, FREE_SPACE_TREE);
-	fs_info->creating_free_space_tree = 0;
+	clear_bit(BTRFS_FS_CREATING_FREE_SPACE_TREE, &fs_info->flags);
ret = btrfs_commit_transaction(trans, tree_root);
if (ret)
......@@ -1192,7 +1194,7 @@ int btrfs_create_free_space_tree(struct btrfs_fs_info *fs_info)
return 0;
abort:
-	fs_info->creating_free_space_tree = 0;
+	clear_bit(BTRFS_FS_CREATING_FREE_SPACE_TREE, &fs_info->flags);
btrfs_abort_transaction(trans, ret);
btrfs_end_transaction(trans, tree_root);
return ret;
......@@ -1480,7 +1482,8 @@ static int load_free_space_bitmaps(struct btrfs_caching_control *caching_ctl,
}
if (extent_count != expected_extent_count) {
btrfs_err(fs_info, "incorrect extent count for %llu; counted %u, expected %u",
btrfs_err(fs_info,
"incorrect extent count for %llu; counted %u, expected %u",
block_group->key.objectid, extent_count,
expected_extent_count);
ASSERT(0);
......@@ -1542,7 +1545,8 @@ static int load_free_space_extents(struct btrfs_caching_control *caching_ctl,
}
if (extent_count != expected_extent_count) {
btrfs_err(fs_info, "incorrect extent count for %llu; counted %u, expected %u",
btrfs_err(fs_info,
"incorrect extent count for %llu; counted %u, expected %u",
block_group->key.objectid, extent_count,
expected_extent_count);
ASSERT(0);
......
......@@ -104,7 +104,7 @@ static int caching_kthread(void *data)
break;
if (last != (u64)-1 && last + 1 != key.objectid) {
-			__btrfs_add_free_space(ctl, last + 1,
+			__btrfs_add_free_space(fs_info, ctl, last + 1,
key.objectid - last - 1);
wake_up(&root->ino_cache_wait);
}
......@@ -115,7 +115,7 @@ static int caching_kthread(void *data)
}
if (last < root->highest_objectid - 1) {
-		__btrfs_add_free_space(ctl, last + 1,
+		__btrfs_add_free_space(fs_info, ctl, last + 1,
root->highest_objectid - last - 1);
}
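caching_kthread() feeds the inode-number allocator by recording every gap between consecutive object ids, plus the tail below the highest allocated id. The gap scan in isolation (add_free_range() stands in for __btrfs_add_free_space()):

#include <stdio.h>

static void add_free_range(unsigned long long start, unsigned long long len)
{
	printf("free ino range: %llu (+%llu)\n", start, len);
}

/* Scan a sorted list of allocated ids and report the holes. */
static void scan_free_inos(const unsigned long long *ids, int n,
			   unsigned long long highest)
{
	unsigned long long last = (unsigned long long)-1;
	int i;

	for (i = 0; i < n; i++) {
		if (last != (unsigned long long)-1 && last + 1 != ids[i])
			add_free_range(last + 1, ids[i] - last - 1);
		last = ids[i];
	}
	if (last < highest - 1)
		add_free_range(last + 1, highest - last - 1);
}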
......@@ -136,12 +136,13 @@ static int caching_kthread(void *data)
static void start_caching(struct btrfs_root *root)
{
struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
struct task_struct *tsk;
int ret;
u64 objectid;
-	if (!btrfs_test_opt(root->fs_info, INODE_MAP_CACHE))
+	if (!btrfs_test_opt(fs_info, INODE_MAP_CACHE))
return;
spin_lock(&root->ino_cache_lock);
......@@ -153,7 +154,7 @@ static void start_caching(struct btrfs_root *root)
root->ino_cache_state = BTRFS_CACHE_STARTED;
spin_unlock(&root->ino_cache_lock);
-	ret = load_free_ino_cache(root->fs_info, root);
+	ret = load_free_ino_cache(fs_info, root);
if (ret == 1) {
spin_lock(&root->ino_cache_lock);
root->ino_cache_state = BTRFS_CACHE_FINISHED;
......@@ -170,15 +171,15 @@ static void start_caching(struct btrfs_root *root)
*/
ret = btrfs_find_free_objectid(root, &objectid);
if (!ret && objectid <= BTRFS_LAST_FREE_OBJECTID) {
-		__btrfs_add_free_space(ctl, objectid,
+		__btrfs_add_free_space(fs_info, ctl, objectid,
BTRFS_LAST_FREE_OBJECTID - objectid + 1);
}
tsk = kthread_run(caching_kthread, root, "btrfs-ino-cache-%llu",
root->root_key.objectid);
if (IS_ERR(tsk)) {
btrfs_warn(root->fs_info, "failed to start inode caching task");
btrfs_clear_pending_and_info(root->fs_info, INODE_MAP_CACHE,
btrfs_warn(fs_info, "failed to start inode caching task");
btrfs_clear_pending_and_info(fs_info, INODE_MAP_CACHE,
"disabling inode map caching");
}
}
......@@ -209,28 +210,29 @@ int btrfs_find_free_ino(struct btrfs_root *root, u64 *objectid)
void btrfs_return_ino(struct btrfs_root *root, u64 objectid)
{
struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_free_space_ctl *pinned = root->free_ino_pinned;
-	if (!btrfs_test_opt(root->fs_info, INODE_MAP_CACHE))
+	if (!btrfs_test_opt(fs_info, INODE_MAP_CACHE))
return;
again:
if (root->ino_cache_state == BTRFS_CACHE_FINISHED) {
-		__btrfs_add_free_space(pinned, objectid, 1);
+		__btrfs_add_free_space(fs_info, pinned, objectid, 1);
	} else {
-		down_write(&root->fs_info->commit_root_sem);
+		down_write(&fs_info->commit_root_sem);
spin_lock(&root->ino_cache_lock);
if (root->ino_cache_state == BTRFS_CACHE_FINISHED) {
spin_unlock(&root->ino_cache_lock);
-			up_write(&root->fs_info->commit_root_sem);
+			up_write(&fs_info->commit_root_sem);
goto again;
}
spin_unlock(&root->ino_cache_lock);
start_caching(root);
-		__btrfs_add_free_space(pinned, objectid, 1);
+		__btrfs_add_free_space(fs_info, pinned, objectid, 1);
-		up_write(&root->fs_info->commit_root_sem);
+		up_write(&fs_info->commit_root_sem);
}
}
......@@ -277,7 +279,8 @@ void btrfs_unpin_free_ino(struct btrfs_root *root)
rb_erase(&info->offset_index, rbroot);
spin_unlock(rbroot_lock);
if (add_to_ctl)
-			__btrfs_add_free_space(ctl, info->offset, count);
+			__btrfs_add_free_space(root->fs_info, ctl,
+					       info->offset, count);
kmem_cache_free(btrfs_free_space_cachep, info);
}
}
......
......@@ -560,8 +560,9 @@ static noinline void compress_file_range(struct inode *inode,
* we don't need to create any more async work items.
* Unlock and free up our temp pages.
*/
-		extent_clear_unlock_delalloc(inode, start, end, NULL,
-					     clear_flags, PAGE_UNLOCK |
+		extent_clear_unlock_delalloc(inode, start, end, end,
+					     NULL, clear_flags,
+					     PAGE_UNLOCK |
PAGE_CLEAR_DIRTY |
PAGE_SET_WRITEBACK |
page_error_op |
......@@ -835,6 +836,8 @@ static noinline void submit_compressed_extents(struct inode *inode,
* clear dirty, set writeback and unlock the pages.
*/
extent_clear_unlock_delalloc(inode, async_extent->start,
async_extent->start +
async_extent->ram_size - 1,
async_extent->start +
async_extent->ram_size - 1,
NULL, EXTENT_LOCKED | EXTENT_DELALLOC,
......@@ -856,7 +859,8 @@ static noinline void submit_compressed_extents(struct inode *inode,
tree->ops->writepage_end_io_hook(p, start, end,
NULL, 0);
p->mapping = NULL;
-			extent_clear_unlock_delalloc(inode, start, end, NULL, 0,
+			extent_clear_unlock_delalloc(inode, start, end, end,
+						     NULL, 0,
PAGE_END_WRITEBACK |
PAGE_SET_ERROR);
free_async_extent_pages(async_extent);
......@@ -871,6 +875,8 @@ static noinline void submit_compressed_extents(struct inode *inode,
btrfs_free_reserved_extent(root, ins.objectid, ins.offset, 1);
out_free:
extent_clear_unlock_delalloc(inode, async_extent->start,
async_extent->start +
async_extent->ram_size - 1,
async_extent->start +
async_extent->ram_size - 1,
NULL, EXTENT_LOCKED | EXTENT_DELALLOC |
......@@ -966,7 +972,8 @@ static noinline int cow_file_range(struct inode *inode,
ret = cow_file_range_inline(root, inode, start, end, 0, 0,
NULL);
if (ret == 0) {
-			extent_clear_unlock_delalloc(inode, start, end, NULL,
+			extent_clear_unlock_delalloc(inode, start, end,
+						     delalloc_end, NULL,
EXTENT_LOCKED | EXTENT_DELALLOC |
EXTENT_DEFRAG, PAGE_UNLOCK |
PAGE_CLEAR_DIRTY | PAGE_SET_WRITEBACK |
......@@ -1062,7 +1069,8 @@ static noinline int cow_file_range(struct inode *inode,
op |= PAGE_SET_PRIVATE2;
extent_clear_unlock_delalloc(inode, start,
-					     start + ram_size - 1, locked_page,
+					     start + ram_size - 1,
+					     delalloc_end, locked_page,
EXTENT_LOCKED | EXTENT_DELALLOC,
op);
disk_num_bytes -= cur_alloc_size;
......@@ -1079,7 +1087,8 @@ static noinline int cow_file_range(struct inode *inode,
btrfs_dec_block_group_reservations(root->fs_info, ins.objectid);
btrfs_free_reserved_extent(root, ins.objectid, ins.offset, 1);
out_unlock:
-	extent_clear_unlock_delalloc(inode, start, end, locked_page,
+	extent_clear_unlock_delalloc(inode, start, end, delalloc_end,
+				     locked_page,
EXTENT_LOCKED | EXTENT_DO_ACCOUNTING |
EXTENT_DELALLOC | EXTENT_DEFRAG,
PAGE_UNLOCK | PAGE_CLEAR_DIRTY |
......@@ -1258,7 +1267,8 @@ static noinline int run_delalloc_nocow(struct inode *inode,
path = btrfs_alloc_path();
if (!path) {
-		extent_clear_unlock_delalloc(inode, start, end, locked_page,
+		extent_clear_unlock_delalloc(inode, start, end, end,
+					     locked_page,
EXTENT_LOCKED | EXTENT_DELALLOC |
EXTENT_DO_ACCOUNTING |
EXTENT_DEFRAG, PAGE_UNLOCK |
......@@ -1276,7 +1286,8 @@ static noinline int run_delalloc_nocow(struct inode *inode,
trans = btrfs_join_transaction(root);
if (IS_ERR(trans)) {
-		extent_clear_unlock_delalloc(inode, start, end, locked_page,
+		extent_clear_unlock_delalloc(inode, start, end, end,
+					     locked_page,
EXTENT_LOCKED | EXTENT_DELALLOC |
EXTENT_DO_ACCOUNTING |
EXTENT_DEFRAG, PAGE_UNLOCK |
......@@ -1490,7 +1501,7 @@ static noinline int run_delalloc_nocow(struct inode *inode,
}
extent_clear_unlock_delalloc(inode, cur_offset,
-					     cur_offset + num_bytes - 1,
+					     cur_offset + num_bytes - 1, end,
locked_page, EXTENT_LOCKED |
EXTENT_DELALLOC |
EXTENT_CLEAR_DATA_RESV,
......@@ -1522,7 +1533,7 @@ static noinline int run_delalloc_nocow(struct inode *inode,
ret = err;
if (ret && cur_offset < end)
-		extent_clear_unlock_delalloc(inode, cur_offset, end,
+		extent_clear_unlock_delalloc(inode, cur_offset, end, end,
locked_page, EXTENT_LOCKED |
EXTENT_DELALLOC | EXTENT_DEFRAG |
EXTENT_DO_ACCOUNTING, PAGE_UNLOCK |
......@@ -1988,7 +1999,7 @@ static noinline int add_pending_csums(struct btrfs_trans_handle *trans,
}
int btrfs_set_extent_delalloc(struct inode *inode, u64 start, u64 end,
-			      struct extent_state **cached_state)
+			      struct extent_state **cached_state, int dedupe)
{
WARN_ON((end & (PAGE_SIZE - 1)) == 0);
return set_extent_delalloc(&BTRFS_I(inode)->io_tree, start, end,
......@@ -2052,7 +2063,8 @@ static void btrfs_writepage_fixup_worker(struct btrfs_work *work)
goto out;
}
-	btrfs_set_extent_delalloc(inode, page_start, page_end, &cached_state);
+	btrfs_set_extent_delalloc(inode, page_start, page_end, &cached_state,
+				  0);
ClearPageChecked(page);
set_page_dirty(page);
out:
......@@ -2309,7 +2321,7 @@ static noinline int record_one_backref(u64 inum, u64 offset, u64 root_id,
if (PTR_ERR(root) == -ENOENT)
return 0;
WARN_ON(1);
pr_debug("inum=%llu, offset=%llu, root_id=%llu\n",
btrfs_debug(fs_info, "inum=%llu, offset=%llu, root_id=%llu",
inum, offset, root_id);
return PTR_ERR(root);
}
......@@ -3936,7 +3948,7 @@ noinline int btrfs_update_inode(struct btrfs_trans_handle *trans,
*/
if (!btrfs_is_free_space_inode(inode)
&& root->root_key.objectid != BTRFS_DATA_RELOC_TREE_OBJECTID
-	    && !root->fs_info->log_root_recovering) {
+	    && !test_bit(BTRFS_FS_LOG_RECOVERING, &root->fs_info->flags)) {
btrfs_update_root_times(trans, root);
ret = btrfs_delayed_update_inode(trans, root, inode);
......@@ -4757,7 +4769,7 @@ int btrfs_truncate_block(struct inode *inode, loff_t from, loff_t len,
0, 0, &cached_state, GFP_NOFS);
ret = btrfs_set_extent_delalloc(inode, block_start, block_end,
-					&cached_state);
+					&cached_state, 0);
if (ret) {
unlock_extent_cached(io_tree, block_start, block_end,
&cached_state, GFP_NOFS);
......@@ -5223,7 +5235,7 @@ void btrfs_evict_inode(struct inode *inode)
btrfs_free_io_failure_record(inode, 0, (u64)-1);
-	if (root->fs_info->log_root_recovering) {
+	if (test_bit(BTRFS_FS_LOG_RECOVERING, &root->fs_info->flags)) {
BUG_ON(test_bit(BTRFS_INODE_HAS_ORPHAN_ITEM,
&BTRFS_I(inode)->runtime_flags));
goto no_delete;
......@@ -7012,8 +7024,9 @@ struct extent_map *btrfs_get_extent(struct inode *inode, struct page *page,
insert:
btrfs_release_path(path);
if (em->start > start || extent_map_end(em) <= start) {
btrfs_err(root->fs_info, "bad extent! em: [%llu %llu] passed [%llu %llu]",
em->start, em->len, start, len);
btrfs_err(root->fs_info,
"bad extent! em: [%llu %llu] passed [%llu %llu]",
em->start, em->len, start, len);
err = -EIO;
goto out;
}
......@@ -7865,18 +7878,19 @@ static int btrfs_check_dio_repairable(struct inode *inode,
struct io_failure_record *failrec,
int failed_mirror)
{
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
int num_copies;
-	num_copies = btrfs_num_copies(BTRFS_I(inode)->root->fs_info,
-				      failrec->logical, failrec->len);
+	num_copies = btrfs_num_copies(fs_info, failrec->logical, failrec->len);
if (num_copies == 1) {
/*
* we only have a single copy of the data, so don't bother with
* all the retry and error correction code that follows. no
* matter what the error is, it is very likely to persist.
*/
pr_debug("Check DIO Repairable: cannot repair, num_copies=%d, next_mirror %d, failed_mirror %d\n",
num_copies, failrec->this_mirror, failed_mirror);
btrfs_debug(fs_info,
"Check DIO Repairable: cannot repair, num_copies=%d, next_mirror %d, failed_mirror %d",
num_copies, failrec->this_mirror, failed_mirror);
return 0;
}
......@@ -7886,8 +7900,9 @@ static int btrfs_check_dio_repairable(struct inode *inode,
failrec->this_mirror++;
if (failrec->this_mirror > num_copies) {
pr_debug("Check DIO Repairable: (fail) num_copies=%d, next_mirror %d, failed_mirror %d\n",
num_copies, failrec->this_mirror, failed_mirror);
btrfs_debug(fs_info,
"Check DIO Repairable: (fail) num_copies=%d, next_mirror %d, failed_mirror %d",
num_copies, failrec->this_mirror, failed_mirror);
return 0;
}
......@@ -9055,7 +9070,7 @@ int btrfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
0, 0, &cached_state, GFP_NOFS);
ret = btrfs_set_extent_delalloc(inode, page_start, end,
-					&cached_state);
+					&cached_state, 0);
if (ret) {
unlock_extent_cached(io_tree, page_start, page_end,
&cached_state, GFP_NOFS);
......@@ -9377,8 +9392,9 @@ void btrfs_destroy_inode(struct inode *inode)
if (!ordered)
break;
else {
btrfs_err(root->fs_info, "found ordered extent %llu %llu on inode cleanup",
ordered->file_offset, ordered->len);
btrfs_err(root->fs_info,
"found ordered extent %llu %llu on inode cleanup",
ordered->file_offset, ordered->len);
btrfs_remove_ordered_extent(inode, ordered);
btrfs_put_ordered_extent(ordered);
btrfs_put_ordered_extent(ordered);
......
......@@ -1903,8 +1903,9 @@ static noinline int may_destroy_subvol(struct btrfs_root *root)
btrfs_dir_item_key_to_cpu(path->nodes[0], di, &key);
if (key.objectid == root->root_key.objectid) {
ret = -EPERM;
btrfs_err(root->fs_info, "deleting default subvolume "
"%llu is not allowed", key.objectid);
btrfs_err(root->fs_info,
"deleting default subvolume %llu is not allowed",
key.objectid);
goto out;
}
btrfs_release_path(path);
......@@ -4097,8 +4098,8 @@ static long btrfs_ioctl_default_subvol(struct file *file, void __user *argp)
if (IS_ERR_OR_NULL(di)) {
btrfs_free_path(path);
btrfs_end_transaction(trans, root);
-		btrfs_err(new_root->fs_info, "Umm, you don't have the default dir"
-			  "item, this isn't going to work");
+		btrfs_err(new_root->fs_info,
+			  "Umm, you don't have the default diritem, this isn't going to work");
ret = -ENOENT;
goto out;
}
......@@ -5307,8 +5308,9 @@ static int btrfs_ioctl_set_fslabel(struct file *file, void __user *arg)
return -EFAULT;
if (strnlen(label, BTRFS_LABEL_SIZE) == BTRFS_LABEL_SIZE) {
btrfs_err(root->fs_info, "unable to set label with more than %d bytes",
BTRFS_LABEL_SIZE - 1);
btrfs_err(root->fs_info,
"unable to set label with more than %d bytes",
BTRFS_LABEL_SIZE - 1);
return -EINVAL;
}
......
......@@ -141,7 +141,7 @@ static int lzo_compress_pages(struct list_head *ws,
ret = lzo1x_1_compress(data_in, in_len, workspace->cbuf,
&out_len, workspace->mem);
if (ret != LZO_E_OK) {
-		printk(KERN_DEBUG "BTRFS: deflate in loop returned %d\n",
+		pr_debug("BTRFS: deflate in loop returned %d\n",
ret);
ret = -EIO;
goto out;
......@@ -356,7 +356,7 @@ static int lzo_decompress_biovec(struct list_head *ws,
if (need_unmap)
kunmap(pages_in[page_in_index - 1]);
if (ret != LZO_E_OK) {
-			printk(KERN_WARNING "BTRFS: decompress failed\n");
+			pr_warn("BTRFS: decompress failed\n");
ret = -EIO;
break;
}
......@@ -402,7 +402,7 @@ static int lzo_decompress(struct list_head *ws, unsigned char *data_in,
out_len = PAGE_SIZE;
ret = lzo1x_decompress_safe(data_in, in_len, workspace->buf, &out_len);
if (ret != LZO_E_OK) {
-		printk(KERN_WARNING "BTRFS: decompress failed!\n");
+		pr_warn("BTRFS: decompress failed!\n");
ret = -EIO;
goto out;
}
......
......@@ -67,8 +67,8 @@ static void ordered_data_tree_panic(struct inode *inode, int errno,
u64 offset)
{
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
-	btrfs_panic(fs_info, errno, "Inconsistency in ordered tree at offset "
-		    "%llu", offset);
+	btrfs_panic(fs_info, errno,
+		    "Inconsistency in ordered tree at offset %llu", offset);
}
/*
......
......@@ -24,12 +24,11 @@ static void print_chunk(struct extent_buffer *eb, struct btrfs_chunk *chunk)
{
int num_stripes = btrfs_chunk_num_stripes(eb, chunk);
int i;
-	printk(KERN_INFO "\t\tchunk length %llu owner %llu type %llu "
-	       "num_stripes %d\n",
+	pr_info("\t\tchunk length %llu owner %llu type %llu num_stripes %d\n",
btrfs_chunk_length(eb, chunk), btrfs_chunk_owner(eb, chunk),
btrfs_chunk_type(eb, chunk), num_stripes);
for (i = 0 ; i < num_stripes ; i++) {
-		printk(KERN_INFO "\t\t\tstripe %d devid %llu offset %llu\n", i,
+		pr_info("\t\t\tstripe %d devid %llu offset %llu\n", i,
btrfs_stripe_devid_nr(eb, chunk, i),
btrfs_stripe_offset_nr(eb, chunk, i));
}
......@@ -37,8 +36,7 @@ static void print_chunk(struct extent_buffer *eb, struct btrfs_chunk *chunk)
static void print_dev_item(struct extent_buffer *eb,
struct btrfs_dev_item *dev_item)
{
-	printk(KERN_INFO "\t\tdev item devid %llu "
-	       "total_bytes %llu bytes used %llu\n",
+	pr_info("\t\tdev item devid %llu total_bytes %llu bytes used %llu\n",
btrfs_device_id(eb, dev_item),
btrfs_device_total_bytes(eb, dev_item),
btrfs_device_bytes_used(eb, dev_item));
......@@ -46,8 +44,7 @@ static void print_dev_item(struct extent_buffer *eb,
static void print_extent_data_ref(struct extent_buffer *eb,
struct btrfs_extent_data_ref *ref)
{
-	printk(KERN_INFO "\t\textent data backref root %llu "
-	       "objectid %llu offset %llu count %u\n",
+	pr_info("\t\textent data backref root %llu objectid %llu offset %llu count %u\n",
btrfs_extent_data_ref_root(eb, ref),
btrfs_extent_data_ref_objectid(eb, ref),
btrfs_extent_data_ref_offset(eb, ref),
......@@ -72,7 +69,7 @@ static void print_extent_item(struct extent_buffer *eb, int slot, int type)
struct btrfs_extent_item_v0 *ei0;
BUG_ON(item_size != sizeof(*ei0));
ei0 = btrfs_item_ptr(eb, slot, struct btrfs_extent_item_v0);
-		printk(KERN_INFO "\t\textent refs %u\n",
+		pr_info("\t\textent refs %u\n",
btrfs_extent_refs_v0(eb, ei0));
return;
#else
......@@ -83,7 +80,7 @@ static void print_extent_item(struct extent_buffer *eb, int slot, int type)
ei = btrfs_item_ptr(eb, slot, struct btrfs_extent_item);
flags = btrfs_extent_flags(eb, ei);
-	printk(KERN_INFO "\t\textent refs %llu gen %llu flags %llu\n",
+	pr_info("\t\textent refs %llu gen %llu flags %llu\n",
btrfs_extent_refs(eb, ei), btrfs_extent_generation(eb, ei),
flags);
......@@ -92,8 +89,7 @@ static void print_extent_item(struct extent_buffer *eb, int slot, int type)
struct btrfs_tree_block_info *info;
info = (struct btrfs_tree_block_info *)(ei + 1);
btrfs_tree_block_key(eb, info, &key);
-		printk(KERN_INFO "\t\ttree block key (%llu %u %llu) "
-		       "level %d\n",
+		pr_info("\t\ttree block key (%llu %u %llu) level %d\n",
btrfs_disk_key_objectid(&key), key.type,
btrfs_disk_key_offset(&key),
btrfs_tree_block_level(eb, info));
......@@ -110,12 +106,10 @@ static void print_extent_item(struct extent_buffer *eb, int slot, int type)
offset = btrfs_extent_inline_ref_offset(eb, iref);
switch (type) {
case BTRFS_TREE_BLOCK_REF_KEY:
-			printk(KERN_INFO "\t\ttree block backref "
-			       "root %llu\n", offset);
+			pr_info("\t\ttree block backref root %llu\n", offset);
break;
case BTRFS_SHARED_BLOCK_REF_KEY:
-			printk(KERN_INFO "\t\tshared block backref "
-			       "parent %llu\n", offset);
+			pr_info("\t\tshared block backref parent %llu\n", offset);
break;
case BTRFS_EXTENT_DATA_REF_KEY:
dref = (struct btrfs_extent_data_ref *)(&iref->offset);
......@@ -123,8 +117,7 @@ static void print_extent_item(struct extent_buffer *eb, int slot, int type)
break;
case BTRFS_SHARED_DATA_REF_KEY:
sref = (struct btrfs_shared_data_ref *)(iref + 1);
-			printk(KERN_INFO "\t\tshared data backref "
-			       "parent %llu count %u\n",
+			pr_info("\t\tshared data backref parent %llu count %u\n",
offset, btrfs_shared_data_ref_count(eb, sref));
break;
default:
......@@ -141,8 +134,7 @@ static void print_extent_ref_v0(struct extent_buffer *eb, int slot)
struct btrfs_extent_ref_v0 *ref0;
ref0 = btrfs_item_ptr(eb, slot, struct btrfs_extent_ref_v0);
-	printk("\t\textent back ref root %llu gen %llu "
-	       "owner %llu num_refs %lu\n",
+	printk("\t\textent back ref root %llu gen %llu owner %llu num_refs %lu\n",
btrfs_ref_root_v0(eb, ref0),
btrfs_ref_generation_v0(eb, ref0),
btrfs_ref_objectid_v0(eb, ref0),
......@@ -162,7 +154,7 @@ static void print_uuid_item(struct extent_buffer *l, unsigned long offset,
__le64 subvol_id;
read_extent_buffer(l, &subvol_id, offset, sizeof(subvol_id));
-		printk(KERN_INFO "\t\tsubvol_id %llu\n",
+		pr_info("\t\tsubvol_id %llu\n",
(unsigned long long)le64_to_cpu(subvol_id));
item_size -= sizeof(u64);
offset += sizeof(u64);
......@@ -196,15 +188,13 @@ void btrfs_print_leaf(struct btrfs_root *root, struct extent_buffer *l)
item = btrfs_item_nr(i);
btrfs_item_key_to_cpu(l, &key, i);
type = key.type;
-		printk(KERN_INFO "\titem %d key (%llu %u %llu) itemoff %d "
-		       "itemsize %d\n",
+		pr_info("\titem %d key (%llu %u %llu) itemoff %d itemsize %d\n",
i, key.objectid, type, key.offset,
btrfs_item_offset(l, item), btrfs_item_size(l, item));
switch (type) {
case BTRFS_INODE_ITEM_KEY:
ii = btrfs_item_ptr(l, i, struct btrfs_inode_item);
-			printk(KERN_INFO "\t\tinode generation %llu size %llu "
-			       "mode %o\n",
+			pr_info("\t\tinode generation %llu size %llu mode %o\n",
btrfs_inode_generation(l, ii),
btrfs_inode_size(l, ii),
btrfs_inode_mode(l, ii));
......@@ -212,13 +202,13 @@ void btrfs_print_leaf(struct btrfs_root *root, struct extent_buffer *l)
case BTRFS_DIR_ITEM_KEY:
di = btrfs_item_ptr(l, i, struct btrfs_dir_item);
btrfs_dir_item_key_to_cpu(l, di, &found_key);
-			printk(KERN_INFO "\t\tdir oid %llu type %u\n",
+			pr_info("\t\tdir oid %llu type %u\n",
found_key.objectid,
btrfs_dir_type(l, di));
break;
case BTRFS_ROOT_ITEM_KEY:
ri = btrfs_item_ptr(l, i, struct btrfs_root_item);
-			printk(KERN_INFO "\t\troot data bytenr %llu refs %u\n",
+			pr_info("\t\troot data bytenr %llu refs %u\n",
btrfs_disk_root_bytenr(l, ri),
btrfs_disk_root_refs(l, ri));
break;
......@@ -227,10 +217,10 @@ void btrfs_print_leaf(struct btrfs_root *root, struct extent_buffer *l)
print_extent_item(l, i, type);
break;
case BTRFS_TREE_BLOCK_REF_KEY:
-			printk(KERN_INFO "\t\ttree block backref\n");
+			pr_info("\t\ttree block backref\n");
break;
case BTRFS_SHARED_BLOCK_REF_KEY:
-			printk(KERN_INFO "\t\tshared block backref\n");
+			pr_info("\t\tshared block backref\n");
break;
case BTRFS_EXTENT_DATA_REF_KEY:
dref = btrfs_item_ptr(l, i,
......@@ -240,7 +230,7 @@ void btrfs_print_leaf(struct btrfs_root *root, struct extent_buffer *l)
case BTRFS_SHARED_DATA_REF_KEY:
sref = btrfs_item_ptr(l, i,
struct btrfs_shared_data_ref);
printk(KERN_INFO "\t\tshared data backref count %u\n",
pr_info("\t\tshared data backref count %u\n",
btrfs_shared_data_ref_count(l, sref));
break;
case BTRFS_EXTENT_DATA_KEY:
......@@ -248,17 +238,14 @@ void btrfs_print_leaf(struct btrfs_root *root, struct extent_buffer *l)
struct btrfs_file_extent_item);
if (btrfs_file_extent_type(l, fi) ==
BTRFS_FILE_EXTENT_INLINE) {
printk(KERN_INFO "\t\tinline extent data "
"size %u\n",
pr_info("\t\tinline extent data size %u\n",
btrfs_file_extent_inline_len(l, i, fi));
break;
}
printk(KERN_INFO "\t\textent data disk bytenr %llu "
"nr %llu\n",
pr_info("\t\textent data disk bytenr %llu nr %llu\n",
btrfs_file_extent_disk_bytenr(l, fi),
btrfs_file_extent_disk_num_bytes(l, fi));
printk(KERN_INFO "\t\textent data offset %llu "
"nr %llu ram %llu\n",
pr_info("\t\textent data offset %llu nr %llu ram %llu\n",
btrfs_file_extent_offset(l, fi),
btrfs_file_extent_num_bytes(l, fi),
btrfs_file_extent_ram_bytes(l, fi));
......@@ -273,7 +260,7 @@ void btrfs_print_leaf(struct btrfs_root *root, struct extent_buffer *l)
case BTRFS_BLOCK_GROUP_ITEM_KEY:
bi = btrfs_item_ptr(l, i,
struct btrfs_block_group_item);
printk(KERN_INFO "\t\tblock group used %llu\n",
pr_info("\t\tblock group used %llu\n",
btrfs_disk_block_group_used(l, bi));
break;
case BTRFS_CHUNK_ITEM_KEY:
......@@ -287,38 +274,36 @@ void btrfs_print_leaf(struct btrfs_root *root, struct extent_buffer *l)
case BTRFS_DEV_EXTENT_KEY:
dev_extent = btrfs_item_ptr(l, i,
struct btrfs_dev_extent);
printk(KERN_INFO "\t\tdev extent chunk_tree %llu\n"
"\t\tchunk objectid %llu chunk offset %llu "
"length %llu\n",
pr_info("\t\tdev extent chunk_tree %llu\n\t\tchunk objectid %llu chunk offset %llu length %llu\n",
btrfs_dev_extent_chunk_tree(l, dev_extent),
btrfs_dev_extent_chunk_objectid(l, dev_extent),
btrfs_dev_extent_chunk_offset(l, dev_extent),
btrfs_dev_extent_length(l, dev_extent));
break;
case BTRFS_PERSISTENT_ITEM_KEY:
printk(KERN_INFO "\t\tpersistent item objectid %llu offset %llu\n",
pr_info("\t\tpersistent item objectid %llu offset %llu\n",
key.objectid, key.offset);
switch (key.objectid) {
case BTRFS_DEV_STATS_OBJECTID:
printk(KERN_INFO "\t\tdevice stats\n");
pr_info("\t\tdevice stats\n");
break;
default:
printk(KERN_INFO "\t\tunknown persistent item\n");
pr_info("\t\tunknown persistent item\n");
}
break;
case BTRFS_TEMPORARY_ITEM_KEY:
printk(KERN_INFO "\t\ttemporary item objectid %llu offset %llu\n",
pr_info("\t\ttemporary item objectid %llu offset %llu\n",
key.objectid, key.offset);
switch (key.objectid) {
case BTRFS_BALANCE_OBJECTID:
printk(KERN_INFO "\t\tbalance status\n");
pr_info("\t\tbalance status\n");
break;
default:
printk(KERN_INFO "\t\tunknown temporary item\n");
pr_info("\t\tunknown temporary item\n");
}
break;
case BTRFS_DEV_REPLACE_KEY:
printk(KERN_INFO "\t\tdev replace\n");
pr_info("\t\tdev replace\n");
break;
case BTRFS_UUID_KEY_SUBVOL:
case BTRFS_UUID_KEY_RECEIVED_SUBVOL:
......@@ -343,12 +328,13 @@ void btrfs_print_tree(struct btrfs_root *root, struct extent_buffer *c)
btrfs_print_leaf(root, c);
return;
}
btrfs_info(root->fs_info, "node %llu level %d total ptrs %d free spc %u",
btrfs_header_bytenr(c), level, nr,
(u32)BTRFS_NODEPTRS_PER_BLOCK(root) - nr);
btrfs_info(root->fs_info,
"node %llu level %d total ptrs %d free spc %u",
btrfs_header_bytenr(c), level, nr,
(u32)BTRFS_NODEPTRS_PER_BLOCK(root) - nr);
for (i = 0; i < nr; i++) {
btrfs_node_key_to_cpu(c, &key, i);
printk(KERN_INFO "\tkey %d (%llu %u %llu) block %llu\n",
pr_info("\tkey %d (%llu %u %llu) block %llu\n",
i, key.objectid, key.type, key.offset,
btrfs_node_blockptr(c, i));
}
......@@ -356,6 +342,13 @@ void btrfs_print_tree(struct btrfs_root *root, struct extent_buffer *c)
struct extent_buffer *next = read_tree_block(root,
btrfs_node_blockptr(c, i),
btrfs_node_ptr_generation(c, i));
if (IS_ERR(next)) {
continue;
} else if (!extent_buffer_uptodate(next)) {
free_extent_buffer(next);
continue;
}
if (btrfs_is_leaf(next) &&
level != 1)
BUG();
......
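The print-tree.c hunks above are one mechanical conversion: user-visible format strings that had been split across source lines are rejoined, and bare printk(KERN_INFO ...) calls become pr_info(). A minimal before/after sketch of the pattern, lifted from the tree block backref case:

	/* before: explicit log level, string split across lines */
	printk(KERN_INFO "\t\ttree block backref "
	       "root %llu\n", offset);

	/* after: pr_info() helper, one unsplit string */
	pr_info("\t\ttree block backref root %llu\n", offset);

Keeping the string whole matters because people grep the source for messages they saw in the log; kernel coding style exempts user-visible strings from the line-length limit for exactly this reason.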
......@@ -309,7 +309,7 @@ int btrfs_read_qgroup_config(struct btrfs_fs_info *fs_info)
u64 flags = 0;
u64 rescan_progress = 0;
if (!fs_info->quota_enabled)
if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
return 0;
fs_info->qgroup_ulist = ulist_alloc(GFP_NOFS);
......@@ -360,8 +360,7 @@ int btrfs_read_qgroup_config(struct btrfs_fs_info *fs_info)
fs_info->generation) {
flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
btrfs_err(fs_info,
"qgroup generation mismatch, "
"marked as inconsistent");
"qgroup generation mismatch, marked as inconsistent");
}
fs_info->qgroup_flags = btrfs_qgroup_status_flags(l,
ptr);
......@@ -463,13 +462,11 @@ int btrfs_read_qgroup_config(struct btrfs_fs_info *fs_info)
}
out:
fs_info->qgroup_flags |= flags;
if (!(fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_ON)) {
fs_info->quota_enabled = 0;
fs_info->pending_quota_state = 0;
} else if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN &&
ret >= 0) {
if (!(fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_ON))
clear_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
else if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN &&
ret >= 0)
ret = qgroup_rescan_init(fs_info, rescan_progress, 0);
}
btrfs_free_path(path);
if (ret < 0) {
......@@ -847,7 +844,7 @@ static int btrfs_clean_quota_tree(struct btrfs_trans_handle *trans,
}
ret = 0;
out:
root->fs_info->pending_quota_state = 0;
set_bit(BTRFS_FS_QUOTA_DISABLING, &root->fs_info->flags);
btrfs_free_path(path);
return ret;
}
......@@ -868,7 +865,7 @@ int btrfs_quota_enable(struct btrfs_trans_handle *trans,
mutex_lock(&fs_info->qgroup_ioctl_lock);
if (fs_info->quota_root) {
fs_info->pending_quota_state = 1;
set_bit(BTRFS_FS_QUOTA_ENABLING, &fs_info->flags);
goto out;
}
......@@ -964,7 +961,7 @@ int btrfs_quota_enable(struct btrfs_trans_handle *trans,
}
spin_lock(&fs_info->qgroup_lock);
fs_info->quota_root = quota_root;
fs_info->pending_quota_state = 1;
set_bit(BTRFS_FS_QUOTA_ENABLING, &fs_info->flags);
spin_unlock(&fs_info->qgroup_lock);
out_free_path:
btrfs_free_path(path);
......@@ -993,8 +990,8 @@ int btrfs_quota_disable(struct btrfs_trans_handle *trans,
mutex_lock(&fs_info->qgroup_ioctl_lock);
if (!fs_info->quota_root)
goto out;
fs_info->quota_enabled = 0;
fs_info->pending_quota_state = 0;
clear_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
set_bit(BTRFS_FS_QUOTA_DISABLING, &fs_info->flags);
btrfs_qgroup_wait_for_completion(fs_info, false);
spin_lock(&fs_info->qgroup_lock);
quota_root = fs_info->quota_root;
......@@ -1490,7 +1487,8 @@ int btrfs_qgroup_insert_dirty_extent(struct btrfs_trans_handle *trans,
struct btrfs_delayed_ref_root *delayed_refs;
int ret;
if (!fs_info->quota_enabled || bytenr == 0 || num_bytes == 0)
if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags)
|| bytenr == 0 || num_bytes == 0)
return 0;
if (WARN_ON(trans == NULL))
return -EINVAL;
......@@ -1713,7 +1711,7 @@ btrfs_qgroup_account_extent(struct btrfs_trans_handle *trans,
if (old_roots)
nr_old_roots = old_roots->nnodes;
if (!fs_info->quota_enabled)
if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
goto out_free;
BUG_ON(!fs_info->quota_root);
......@@ -1833,10 +1831,14 @@ int btrfs_run_qgroups(struct btrfs_trans_handle *trans,
if (!quota_root)
goto out;
if (!fs_info->quota_enabled && fs_info->pending_quota_state)
if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) &&
test_bit(BTRFS_FS_QUOTA_ENABLING, &fs_info->flags))
start_rescan_worker = 1;
fs_info->quota_enabled = fs_info->pending_quota_state;
if (test_and_clear_bit(BTRFS_FS_QUOTA_ENABLING, &fs_info->flags))
set_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
if (test_and_clear_bit(BTRFS_FS_QUOTA_DISABLING, &fs_info->flags))
clear_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
spin_lock(&fs_info->qgroup_lock);
while (!list_empty(&fs_info->dirty_qgroups)) {
......@@ -1855,7 +1857,7 @@ int btrfs_run_qgroups(struct btrfs_trans_handle *trans,
BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
spin_lock(&fs_info->qgroup_lock);
}
if (fs_info->quota_enabled)
if (test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_ON;
else
fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_ON;
......@@ -1900,7 +1902,7 @@ int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans,
u64 nums;
mutex_lock(&fs_info->qgroup_ioctl_lock);
if (!fs_info->quota_enabled)
if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
goto out;
if (!quota_root) {
......@@ -1991,8 +1993,9 @@ int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans,
ret = update_qgroup_limit_item(trans, quota_root, dstgroup);
if (ret) {
fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
btrfs_info(fs_info, "unable to update quota limit for %llu",
dstgroup->qgroupid);
btrfs_info(fs_info,
"unable to update quota limit for %llu",
dstgroup->qgroupid);
goto unlock;
}
}
......@@ -2226,8 +2229,7 @@ void assert_qgroups_uptodate(struct btrfs_trans_handle *trans)
if (list_empty(&trans->qgroup_ref_list) && !trans->delayed_ref_elem.seq)
return;
btrfs_err(trans->fs_info,
"qgroups not uptodate in trans handle %p: list is%s empty, "
"seq is %#x.%x",
"qgroups not uptodate in trans handle %p: list is%s empty, seq is %#x.%x",
trans, list_empty(&trans->qgroup_ref_list) ? "" : " not",
(u32)(trans->delayed_ref_elem.seq >> 32),
(u32)trans->delayed_ref_elem.seq);
......@@ -2255,10 +2257,11 @@ qgroup_rescan_leaf(struct btrfs_fs_info *fs_info, struct btrfs_path *path,
&fs_info->qgroup_rescan_progress,
path, 1, 0);
pr_debug("current progress key (%llu %u %llu), search_slot ret %d\n",
fs_info->qgroup_rescan_progress.objectid,
fs_info->qgroup_rescan_progress.type,
fs_info->qgroup_rescan_progress.offset, ret);
btrfs_debug(fs_info,
"current progress key (%llu %u %llu), search_slot ret %d",
fs_info->qgroup_rescan_progress.objectid,
fs_info->qgroup_rescan_progress.type,
fs_info->qgroup_rescan_progress.offset, ret);
if (ret) {
/*
......@@ -2347,7 +2350,7 @@ static void btrfs_qgroup_rescan_worker(struct btrfs_work *work)
err = PTR_ERR(trans);
break;
}
if (!fs_info->quota_enabled) {
if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags)) {
err = -EINTR;
} else {
err = qgroup_rescan_leaf(fs_info, path, trans);
......@@ -2388,7 +2391,7 @@ static void btrfs_qgroup_rescan_worker(struct btrfs_work *work)
ret = update_qgroup_status_item(trans, fs_info, fs_info->quota_root);
if (ret < 0) {
err = ret;
btrfs_err(fs_info, "fail to update qgroup status: %d\n", err);
btrfs_err(fs_info, "fail to update qgroup status: %d", err);
}
btrfs_end_transaction(trans, fs_info->quota_root);
......@@ -2578,8 +2581,8 @@ int btrfs_qgroup_reserve_data(struct inode *inode, u64 start, u64 len)
struct ulist_iterator uiter;
int ret;
if (!root->fs_info->quota_enabled || !is_fstree(root->objectid) ||
len == 0)
if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &root->fs_info->flags) ||
!is_fstree(root->objectid) || len == 0)
return 0;
changeset.bytes_changed = 0;
......@@ -2676,8 +2679,8 @@ int btrfs_qgroup_reserve_meta(struct btrfs_root *root, int num_bytes)
{
int ret;
if (!root->fs_info->quota_enabled || !is_fstree(root->objectid) ||
num_bytes == 0)
if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &root->fs_info->flags) ||
!is_fstree(root->objectid) || num_bytes == 0)
return 0;
BUG_ON(num_bytes != round_down(num_bytes, root->nodesize));
......@@ -2692,7 +2695,8 @@ void btrfs_qgroup_free_meta_all(struct btrfs_root *root)
{
int reserved;
if (!root->fs_info->quota_enabled || !is_fstree(root->objectid))
if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &root->fs_info->flags) ||
!is_fstree(root->objectid))
return;
reserved = atomic_xchg(&root->qgroup_meta_rsv, 0);
......@@ -2703,7 +2707,8 @@ void btrfs_qgroup_free_meta_all(struct btrfs_root *root)
void btrfs_qgroup_free_meta(struct btrfs_root *root, int num_bytes)
{
if (!root->fs_info->quota_enabled || !is_fstree(root->objectid))
if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &root->fs_info->flags) ||
!is_fstree(root->objectid))
return;
BUG_ON(num_bytes != round_down(num_bytes, root->nodesize));
......
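The qgroup.c conversion replaces the fs_info->quota_enabled and fs_info->pending_quota_state integers with bits in the fs_info->flags word (BTRFS_FS_QUOTA_ENABLED, plus BTRFS_FS_QUOTA_ENABLING and BTRFS_FS_QUOTA_DISABLING for staged state changes), manipulated through the atomic bitops. The recurring shape of the change:

	/* before: plain int, no atomicity across CPUs */
	if (!fs_info->quota_enabled)
		return 0;

	/* after: one atomic bit in fs_info->flags */
	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
		return 0;

	/*
	 * enable/disable requests are staged as separate bits and
	 * resolved later in btrfs_run_qgroups():
	 */
	if (test_and_clear_bit(BTRFS_FS_QUOTA_ENABLING, &fs_info->flags))
		set_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
	if (test_and_clear_bit(BTRFS_FS_QUOTA_DISABLING, &fs_info->flags))
		clear_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);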
......@@ -2143,7 +2143,10 @@ int raid56_parity_recover(struct btrfs_root *root, struct bio *bio,
rbio->faila = find_logical_bio_stripe(rbio, bio);
if (rbio->faila == -1) {
BUG();
btrfs_warn(root->fs_info,
"%s could not find the bad stripe in raid56 so that we cannot recover any more (bio has logical %llu len %llu, bbio has map_type %llu)",
__func__, (u64)bio->bi_iter.bi_sector << 9,
(u64)bio->bi_iter.bi_size, bbio->map_type);
if (generic_io)
btrfs_put_bbio(bbio);
kfree(rbio);
......
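raid56_parity_recover() used to BUG() outright when it could not map the failing bio back to a stripe; it now logs the details and unwinds its allocations instead. The hunk is truncated before the function returns, so the error value below is an assumption, but the shape of the new path is:

	if (rbio->faila == -1) {
		btrfs_warn(root->fs_info,
	"%s could not find the bad stripe in raid56 so that we cannot recover any more (bio has logical %llu len %llu, bbio has map_type %llu)",
			   __func__, (u64)bio->bi_iter.bi_sector << 9,
			   (u64)bio->bi_iter.bi_size, bbio->map_type);
		if (generic_io)
			btrfs_put_bbio(bbio);
		kfree(rbio);
		return -EIO;	/* assumed; the hunk ends before the return */
	}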
......@@ -820,7 +820,7 @@ static void dump_devs(struct btrfs_fs_info *fs_info, int all)
spin_lock(&fs_info->reada_lock);
list_for_each_entry(device, &fs_devices->devices, dev_list) {
printk(KERN_DEBUG "dev %lld has %d in flight\n", device->devid,
btrfs_debug(fs_info, "dev %lld has %d in flight", device->devid,
atomic_read(&device->reada_in_flight));
index = 0;
while (1) {
......@@ -829,17 +829,17 @@ static void dump_devs(struct btrfs_fs_info *fs_info, int all)
(void **)&zone, index, 1);
if (ret == 0)
break;
printk(KERN_DEBUG " zone %llu-%llu elems %llu locked "
"%d devs", zone->start, zone->end, zone->elems,
zone->locked);
pr_debug(" zone %llu-%llu elems %llu locked %d devs",
zone->start, zone->end, zone->elems,
zone->locked);
for (j = 0; j < zone->ndevs; ++j) {
printk(KERN_CONT " %lld",
pr_cont(" %lld",
zone->devs[j]->devid);
}
if (device->reada_curr_zone == zone)
printk(KERN_CONT " curr off %llu",
pr_cont(" curr off %llu",
device->reada_next - zone->start);
printk(KERN_CONT "\n");
pr_cont("\n");
index = (zone->end >> PAGE_SHIFT) + 1;
}
cnt = 0;
......@@ -851,21 +851,20 @@ static void dump_devs(struct btrfs_fs_info *fs_info, int all)
(void **)&re, index, 1);
if (ret == 0)
break;
printk(KERN_DEBUG
" re: logical %llu size %u empty %d scheduled %d",
pr_debug(" re: logical %llu size %u empty %d scheduled %d",
re->logical, fs_info->tree_root->nodesize,
list_empty(&re->extctl), re->scheduled);
for (i = 0; i < re->nzones; ++i) {
printk(KERN_CONT " zone %llu-%llu devs",
pr_cont(" zone %llu-%llu devs",
re->zones[i]->start,
re->zones[i]->end);
for (j = 0; j < re->zones[i]->ndevs; ++j) {
printk(KERN_CONT " %lld",
pr_cont(" %lld",
re->zones[i]->devs[j]->devid);
}
}
printk(KERN_CONT "\n");
pr_cont("\n");
index = (re->logical >> PAGE_SHIFT) + 1;
if (++cnt > 15)
break;
......@@ -885,20 +884,19 @@ static void dump_devs(struct btrfs_fs_info *fs_info, int all)
index = (re->logical >> PAGE_SHIFT) + 1;
continue;
}
printk(KERN_DEBUG
"re: logical %llu size %u list empty %d scheduled %d",
pr_debug("re: logical %llu size %u list empty %d scheduled %d",
re->logical, fs_info->tree_root->nodesize,
list_empty(&re->extctl), re->scheduled);
for (i = 0; i < re->nzones; ++i) {
printk(KERN_CONT " zone %llu-%llu devs",
pr_cont(" zone %llu-%llu devs",
re->zones[i]->start,
re->zones[i]->end);
for (j = 0; j < re->zones[i]->ndevs; ++j) {
printk(KERN_CONT " %lld",
pr_cont(" %lld",
re->zones[i]->devs[j]->devid);
}
}
printk(KERN_CONT "\n");
pr_cont("\n");
index = (re->logical >> PAGE_SHIFT) + 1;
}
spin_unlock(&fs_info->reada_lock);
......
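dump_devs() builds multi-part lines (a header followed by a variable number of device IDs), so the conversion pairs pr_debug()/pr_cont() instead of one call per fragment: pr_cont() continues the current line without injecting a new log level prefix. Condensed from the zone dump:

	pr_debug(" zone %llu-%llu elems %llu locked %d devs",
		 zone->start, zone->end, zone->elems, zone->locked);
	for (j = 0; j < zone->ndevs; ++j)
		pr_cont(" %lld", zone->devs[j]->devid);
	if (device->reada_curr_zone == zone)
		pr_cont(" curr off %llu", device->reada_next - zone->start);
	pr_cont("\n");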
......@@ -337,8 +337,9 @@ static void backref_tree_panic(struct rb_node *rb_node, int errno, u64 bytenr)
rb_node);
if (bnode->root)
fs_info = bnode->root->fs_info;
btrfs_panic(fs_info, errno, "Inconsistency in backref cache "
"found at offset %llu", bytenr);
btrfs_panic(fs_info, errno,
"Inconsistency in backref cache found at offset %llu",
bytenr);
}
/*
......@@ -923,9 +924,16 @@ struct backref_node *build_backref_tree(struct reloc_control *rc,
path2->slots[level]--;
eb = path2->nodes[level];
WARN_ON(btrfs_node_blockptr(eb, path2->slots[level]) !=
cur->bytenr);
if (btrfs_node_blockptr(eb, path2->slots[level]) !=
cur->bytenr) {
btrfs_err(root->fs_info,
"couldn't find block (%llu) (level %d) in tree (%llu) with key (%llu %u %llu)",
cur->bytenr, level - 1, root->objectid,
node_key->objectid, node_key->type,
node_key->offset);
err = -ENOENT;
goto out;
}
lower = cur;
need_check = true;
for (; level < BTRFS_MAX_LEVEL; level++) {
......@@ -1296,9 +1304,9 @@ static int __must_check __add_reloc_root(struct btrfs_root *root)
node->bytenr, &node->rb_node);
spin_unlock(&rc->reloc_root_tree.lock);
if (rb_node) {
btrfs_panic(root->fs_info, -EEXIST, "Duplicate root found "
"for start=%llu while inserting into relocation "
"tree", node->bytenr);
btrfs_panic(root->fs_info, -EEXIST,
"Duplicate root found for start=%llu while inserting into relocation tree",
node->bytenr);
kfree(node);
return -EEXIST;
}
......@@ -2350,6 +2358,10 @@ void free_reloc_roots(struct list_head *list)
while (!list_empty(list)) {
reloc_root = list_entry(list->next, struct btrfs_root,
root_list);
free_extent_buffer(reloc_root->node);
free_extent_buffer(reloc_root->commit_root);
reloc_root->node = NULL;
reloc_root->commit_root = NULL;
__del_reloc_root(reloc_root);
}
}
......@@ -2686,11 +2698,15 @@ static int do_relocation(struct btrfs_trans_handle *trans,
if (!upper->eb) {
ret = btrfs_search_slot(trans, root, key, path, 0, 1);
if (ret < 0) {
err = ret;
if (ret) {
if (ret < 0)
err = ret;
else
err = -ENOENT;
btrfs_release_path(path);
break;
}
BUG_ON(ret > 0);
if (!upper->eb) {
upper->eb = path->nodes[upper->level];
......@@ -3203,7 +3219,7 @@ static int relocate_file_extent_cluster(struct inode *inode,
nr++;
}
btrfs_set_extent_delalloc(inode, page_start, page_end, NULL);
btrfs_set_extent_delalloc(inode, page_start, page_end, NULL, 0);
set_page_dirty(page);
unlock_extent(&BTRFS_I(inode)->io_tree,
......@@ -3952,7 +3968,7 @@ static int qgroup_fix_relocated_data_extents(struct btrfs_trans_handle *trans,
struct btrfs_key key;
int ret = 0;
if (!fs_info->quota_enabled)
if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
return 0;
/*
......@@ -4365,8 +4381,9 @@ int btrfs_relocate_block_group(struct btrfs_root *extent_root, u64 group_start)
goto out;
}
btrfs_info(extent_root->fs_info, "relocating block group %llu flags %llu",
rc->block_group->key.objectid, rc->block_group->flags);
btrfs_info(extent_root->fs_info,
"relocating block group %llu flags %llu",
rc->block_group->key.objectid, rc->block_group->flags);
btrfs_wait_block_group_reservations(rc->block_group);
btrfs_wait_nocow_writers(rc->block_group);
......
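Two relocation.c hunks trade assertions for recoverable errors: build_backref_tree() turns a WARN_ON over a mismatched block pointer into a check that reports the corruption and fails with -ENOENT, and do_relocation() does the same for a BUG_ON(ret > 0) after btrfs_search_slot(). Condensed from the first hunk:

	/* before: complain but keep walking the corrupted backref */
	WARN_ON(btrfs_node_blockptr(eb, path2->slots[level]) != cur->bytenr);

	/* after: report and bail out */
	if (btrfs_node_blockptr(eb, path2->slots[level]) != cur->bytenr) {
		btrfs_err(root->fs_info,
	"couldn't find block (%llu) (level %d) in tree (%llu) with key (%llu %u %llu)",
			  cur->bytenr, level - 1, root->objectid,
			  node_key->objectid, node_key->type,
			  node_key->offset);
		err = -ENOENT;
		goto out;
	}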
......@@ -46,12 +46,7 @@ static void btrfs_read_root_item(struct extent_buffer *eb, int slot,
!= btrfs_root_generation_v2(item)) {
if (btrfs_root_generation_v2(item) != 0) {
btrfs_warn(eb->fs_info,
"mismatching "
"generation and generation_v2 "
"found in root item. This root "
"was probably mounted with an "
"older kernel. Resetting all "
"new fields.");
"mismatching generation and generation_v2 found in root item. This root was probably mounted with an older kernel. Resetting all new fields.");
}
need_reset = 1;
}
......@@ -156,8 +151,9 @@ int btrfs_update_root(struct btrfs_trans_handle *trans, struct btrfs_root
if (ret != 0) {
btrfs_print_leaf(root, path->nodes[0]);
btrfs_crit(root->fs_info, "unable to update root key %llu %u %llu",
key->objectid, key->type, key->offset);
btrfs_crit(root->fs_info,
"unable to update root key %llu %u %llu",
key->objectid, key->type, key->offset);
BUG_ON(1);
}
......@@ -302,8 +298,7 @@ int btrfs_find_orphan_roots(struct btrfs_root *tree_root)
if (IS_ERR(trans)) {
err = PTR_ERR(trans);
btrfs_handle_fs_error(tree_root->fs_info, err,
"Failed to start trans to delete "
"orphan item");
"Failed to start trans to delete orphan item");
break;
}
err = btrfs_del_orphan_item(trans, tree_root,
......@@ -311,8 +306,7 @@ int btrfs_find_orphan_roots(struct btrfs_root *tree_root)
btrfs_end_transaction(trans, tree_root);
if (err) {
btrfs_handle_fs_error(tree_root->fs_info, err,
"Failed to delete root orphan "
"item");
"Failed to delete root orphan item");
break;
}
continue;
......
......@@ -575,23 +575,25 @@ static int scrub_print_warning_inode(u64 inum, u64 offset, u64 root,
* hold all of the paths here
*/
for (i = 0; i < ipath->fspath->elem_cnt; ++i)
btrfs_warn_in_rcu(fs_info, "%s at logical %llu on dev "
"%s, sector %llu, root %llu, inode %llu, offset %llu, "
"length %llu, links %u (path: %s)", swarn->errstr,
swarn->logical, rcu_str_deref(swarn->dev->name),
(unsigned long long)swarn->sector, root, inum, offset,
min(isize - offset, (u64)PAGE_SIZE), nlink,
(char *)(unsigned long)ipath->fspath->val[i]);
btrfs_warn_in_rcu(fs_info,
"%s at logical %llu on dev %s, sector %llu, root %llu, inode %llu, offset %llu, length %llu, links %u (path: %s)",
swarn->errstr, swarn->logical,
rcu_str_deref(swarn->dev->name),
(unsigned long long)swarn->sector,
root, inum, offset,
min(isize - offset, (u64)PAGE_SIZE), nlink,
(char *)(unsigned long)ipath->fspath->val[i]);
free_ipath(ipath);
return 0;
err:
btrfs_warn_in_rcu(fs_info, "%s at logical %llu on dev "
"%s, sector %llu, root %llu, inode %llu, offset %llu: path "
"resolving failed with ret=%d", swarn->errstr,
swarn->logical, rcu_str_deref(swarn->dev->name),
(unsigned long long)swarn->sector, root, inum, offset, ret);
btrfs_warn_in_rcu(fs_info,
"%s at logical %llu on dev %s, sector %llu, root %llu, inode %llu, offset %llu: path resolving failed with ret=%d",
swarn->errstr, swarn->logical,
rcu_str_deref(swarn->dev->name),
(unsigned long long)swarn->sector,
root, inum, offset, ret);
free_ipath(ipath);
return 0;
......@@ -645,9 +647,8 @@ static void scrub_print_warning(const char *errstr, struct scrub_block *sblock)
item_size, &ref_root,
&ref_level);
btrfs_warn_in_rcu(fs_info,
"%s at logical %llu on dev %s, "
"sector %llu: metadata %s (level %d) in tree "
"%llu", errstr, swarn.logical,
"%s at logical %llu on dev %s, sector %llu: metadata %s (level %d) in tree %llu",
errstr, swarn.logical,
rcu_str_deref(dev->name),
(unsigned long long)swarn.sector,
ref_level ? "node" : "leaf",
......@@ -1574,8 +1575,7 @@ static int scrub_repair_page_from_good_copy(struct scrub_block *sblock_bad,
if (!page_bad->dev->bdev) {
btrfs_warn_rl(sblock_bad->sctx->dev_root->fs_info,
"scrub_repair_page_from_good_copy(bdev == NULL) "
"is unexpected");
"scrub_repair_page_from_good_copy(bdev == NULL) is unexpected");
return -EIO;
}
......@@ -2961,7 +2961,8 @@ static noinline_for_stack int scrub_raid56_parity(struct scrub_ctx *sctx,
(key.objectid < logic_start ||
key.objectid + bytes >
logic_start + map->stripe_len)) {
btrfs_err(fs_info, "scrub: tree block %llu spanning stripes, ignored. logical=%llu",
btrfs_err(fs_info,
"scrub: tree block %llu spanning stripes, ignored. logical=%llu",
key.objectid, logic_start);
spin_lock(&sctx->stat_lock);
sctx->stat.uncorrectable_errors++;
......@@ -3312,8 +3313,7 @@ static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
key.objectid + bytes >
logical + map->stripe_len)) {
btrfs_err(fs_info,
"scrub: tree block %llu spanning "
"stripes, ignored. logical=%llu",
"scrub: tree block %llu spanning stripes, ignored. logical=%llu",
key.objectid, logical);
spin_lock(&sctx->stat_lock);
sctx->stat.uncorrectable_errors++;
......@@ -3640,7 +3640,8 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx,
*/
ro_set = 0;
} else {
btrfs_warn(fs_info, "failed setting block group ro, ret=%d\n",
btrfs_warn(fs_info,
"failed setting block group ro, ret=%d\n",
ret);
btrfs_put_block_group(cache);
break;
......@@ -3861,8 +3862,7 @@ int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
if (fs_info->chunk_root->sectorsize != PAGE_SIZE) {
/* not supported for data w/o checksums */
btrfs_err_rl(fs_info,
"scrub: size assumption sectorsize != PAGE_SIZE "
"(%d != %lu) fails",
"scrub: size assumption sectorsize != PAGE_SIZE (%d != %lu) fails",
fs_info->chunk_root->sectorsize, PAGE_SIZE);
return -EINVAL;
}
......@@ -3875,8 +3875,8 @@ int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
* would exhaust the array bounds of pagev member in
* struct scrub_block
*/
btrfs_err(fs_info, "scrub: size assumption nodesize and sectorsize "
"<= SCRUB_MAX_PAGES_PER_BLOCK (%d <= %d && %d <= %d) fails",
btrfs_err(fs_info,
"scrub: size assumption nodesize and sectorsize <= SCRUB_MAX_PAGES_PER_BLOCK (%d <= %d && %d <= %d) fails",
fs_info->chunk_root->nodesize,
SCRUB_MAX_PAGES_PER_BLOCK,
fs_info->chunk_root->sectorsize,
......@@ -4202,10 +4202,10 @@ static void copy_nocow_pages_worker(struct btrfs_work *work)
ret = iterate_inodes_from_logical(logical, fs_info, path,
record_inode_for_nocow, nocow_ctx);
if (ret != 0 && ret != -ENOENT) {
btrfs_warn(fs_info, "iterate_inodes_from_logical() failed: log %llu, "
"phys %llu, len %llu, mir %u, ret %d",
logical, physical_for_dev_replace, len, mirror_num,
ret);
btrfs_warn(fs_info,
"iterate_inodes_from_logical() failed: log %llu, phys %llu, len %llu, mir %u, ret %d",
logical, physical_for_dev_replace, len, mirror_num,
ret);
not_written = 1;
goto out;
}
......
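The scrub.c rework keeps the btrfs_warn_in_rcu() wrappers while unsplitting the strings: device names in btrfs are RCU-protected, so a message that prints dev->name must read it via rcu_str_deref() inside an RCU read section, which the *_in_rcu variants supply around the printk. The consolidated error-path call:

	btrfs_warn_in_rcu(fs_info,
"%s at logical %llu on dev %s, sector %llu, root %llu, inode %llu, offset %llu: path resolving failed with ret=%d",
			  swarn->errstr, swarn->logical,
			  rcu_str_deref(swarn->dev->name),
			  (unsigned long long)swarn->sector,
			  root, inum, offset, ret);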
This diff is collapsed.
......@@ -151,12 +151,11 @@ void __btrfs_handle_fs_error(struct btrfs_fs_info *fs_info, const char *function
vaf.fmt = fmt;
vaf.va = &args;
printk(KERN_CRIT
"BTRFS: error (device %s) in %s:%d: errno=%d %s (%pV)\n",
pr_crit("BTRFS: error (device %s) in %s:%d: errno=%d %s (%pV)\n",
sb->s_id, function, line, errno, errstr, &vaf);
va_end(args);
} else {
printk(KERN_CRIT "BTRFS: error (device %s) in %s:%d: errno=%d %s\n",
pr_crit("BTRFS: error (device %s) in %s:%d: errno=%d %s\n",
sb->s_id, function, line, errno, errstr);
}
#endif
......@@ -462,9 +461,11 @@ int btrfs_parse_options(struct btrfs_root *root, char *options,
case Opt_datasum:
if (btrfs_test_opt(info, NODATASUM)) {
if (btrfs_test_opt(info, NODATACOW))
btrfs_info(root->fs_info, "setting datasum, datacow enabled");
btrfs_info(root->fs_info,
"setting datasum, datacow enabled");
else
btrfs_info(root->fs_info, "setting datasum");
btrfs_info(root->fs_info,
"setting datasum");
}
btrfs_clear_opt(info->mount_opt, NODATACOW);
btrfs_clear_opt(info->mount_opt, NODATASUM);
......@@ -476,7 +477,8 @@ int btrfs_parse_options(struct btrfs_root *root, char *options,
btrfs_info(root->fs_info,
"setting nodatacow, compression disabled");
} else {
btrfs_info(root->fs_info, "setting nodatacow");
btrfs_info(root->fs_info,
"setting nodatacow");
}
}
btrfs_clear_opt(info->mount_opt, COMPRESS);
......@@ -608,8 +610,9 @@ int btrfs_parse_options(struct btrfs_root *root, char *options,
info->alloc_start = memparse(num, NULL);
mutex_unlock(&info->chunk_mutex);
kfree(num);
btrfs_info(root->fs_info, "allocations start at %llu",
info->alloc_start);
btrfs_info(root->fs_info,
"allocations start at %llu",
info->alloc_start);
} else {
ret = -ENOMEM;
goto out;
......@@ -762,8 +765,9 @@ int btrfs_parse_options(struct btrfs_root *root, char *options,
goto out;
} else if (intarg >= 0) {
info->check_integrity_print_mask = intarg;
btrfs_info(root->fs_info, "check_integrity_print_mask 0x%x",
info->check_integrity_print_mask);
btrfs_info(root->fs_info,
"check_integrity_print_mask 0x%x",
info->check_integrity_print_mask);
} else {
ret = -EINVAL;
goto out;
......@@ -794,19 +798,22 @@ int btrfs_parse_options(struct btrfs_root *root, char *options,
intarg = 0;
ret = match_int(&args[0], &intarg);
if (ret < 0) {
btrfs_err(root->fs_info, "invalid commit interval");
btrfs_err(root->fs_info,
"invalid commit interval");
ret = -EINVAL;
goto out;
}
if (intarg > 0) {
if (intarg > 300) {
btrfs_warn(root->fs_info, "excessive commit interval %d",
intarg);
btrfs_warn(root->fs_info,
"excessive commit interval %d",
intarg);
}
info->commit_interval = intarg;
} else {
btrfs_info(root->fs_info, "using default commit interval %ds",
BTRFS_DEFAULT_COMMIT_INTERVAL);
btrfs_info(root->fs_info,
"using default commit interval %ds",
BTRFS_DEFAULT_COMMIT_INTERVAL);
info->commit_interval = BTRFS_DEFAULT_COMMIT_INTERVAL;
}
break;
......@@ -827,7 +834,8 @@ int btrfs_parse_options(struct btrfs_root *root, char *options,
break;
#endif
case Opt_err:
btrfs_info(root->fs_info, "unrecognized mount option '%s'", p);
btrfs_info(root->fs_info,
"unrecognized mount option '%s'", p);
ret = -EINVAL;
goto out;
default:
......@@ -916,9 +924,7 @@ static int btrfs_parse_early_options(const char *options, fmode_t flags,
}
break;
case Opt_subvolrootid:
printk(KERN_WARNING
"BTRFS: 'subvolrootid' mount option is deprecated and has "
"no effect\n");
pr_warn("BTRFS: 'subvolrootid' mount option is deprecated and has no effect\n");
break;
case Opt_device:
device_name = match_strdup(&args[0]);
......@@ -1142,7 +1148,7 @@ static int btrfs_fill_super(struct super_block *sb,
sb->s_iflags |= SB_I_CGROUPWB;
err = open_ctree(sb, fs_devices, (char *)data);
if (err) {
printk(KERN_ERR "BTRFS: open_ctree failed\n");
btrfs_err(fs_info, "open_ctree failed");
return err;
}
......@@ -1440,12 +1446,13 @@ static struct dentry *mount_subvol(const char *subvol_name, u64 subvol_objectid,
if (!IS_ERR(root)) {
struct super_block *s = root->d_sb;
struct btrfs_fs_info *fs_info = btrfs_sb(s);
struct inode *root_inode = d_inode(root);
u64 root_objectid = BTRFS_I(root_inode)->root->root_key.objectid;
ret = 0;
if (!is_subvolume_inode(root_inode)) {
pr_err("BTRFS: '%s' is not a valid subvolume\n",
btrfs_err(fs_info, "'%s' is not a valid subvolume",
subvol_name);
ret = -EINVAL;
}
......@@ -1455,8 +1462,9 @@ static struct dentry *mount_subvol(const char *subvol_name, u64 subvol_objectid,
* subvolume which was passed by ID is renamed and
* another subvolume is renamed over the old location.
*/
pr_err("BTRFS: subvol '%s' does not match subvolid %llu\n",
subvol_name, subvol_objectid);
btrfs_err(fs_info,
"subvol '%s' does not match subvolid %llu",
subvol_name, subvol_objectid);
ret = -EINVAL;
}
if (ret) {
......@@ -1830,13 +1838,15 @@ static int btrfs_remount(struct super_block *sb, int *flags, char *data)
btrfs_info(fs_info, "creating UUID tree");
ret = btrfs_create_uuid_tree(fs_info);
if (ret) {
btrfs_warn(fs_info, "failed to create the UUID tree %d", ret);
btrfs_warn(fs_info,
"failed to create the UUID tree %d",
ret);
goto restore;
}
}
sb->s_flags &= ~MS_RDONLY;
fs_info->open = 1;
set_bit(BTRFS_FS_OPEN, &fs_info->flags);
}
out:
wake_up_process(fs_info->transaction_kthread);
......@@ -2346,7 +2356,7 @@ static void btrfs_interface_exit(void)
static void btrfs_print_mod_info(void)
{
printk(KERN_INFO "Btrfs loaded, crc32c=%s"
pr_info("Btrfs loaded, crc32c=%s"
#ifdef CONFIG_BTRFS_DEBUG
", debug=on"
#endif
......
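super.c mixes two conversions: messages without a filesystem context become pr_*() and keep the manual "BTRFS: " prefix, while sites that do have an fs_info move to the btrfs_*() helpers, which identify the device and append the newline themselves. The open_ctree failure shows the second case:

	/* before: manual prefix and newline, no device named */
	printk(KERN_ERR "BTRFS: open_ctree failed\n");

	/* after: fs_info-aware helper supplies both */
	btrfs_err(fs_info, "open_ctree failed");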
This diff is collapsed.
This diff is collapsed.
......@@ -480,7 +480,7 @@ int btrfs_test_qgroups(u32 sectorsize, u32 nodesize)
*/
root->fs_info->tree_root = root;
root->fs_info->quota_root = root;
root->fs_info->quota_enabled = 1;
set_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
/*
* Can't use bytenr 0, some things freak out
......
This diff is collapsed.
......@@ -82,6 +82,7 @@ struct btrfs_transaction {
spinlock_t dropped_roots_lock;
struct btrfs_delayed_ref_root delayed_refs;
int aborted;
struct btrfs_fs_info *fs_info;
};
#define __TRANS_FREEZABLE (1U << 0)
......
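struct btrfs_transaction gains a struct btrfs_fs_info backpointer so code that only holds a transaction can reach the filesystem for logging and state without a root being passed down (the qgroup hunk earlier already logs via trans->fs_info). A minimal sketch of the intended use, assuming the field is populated when the transaction object is created:

	/* at transaction creation (assumed location) */
	cur_trans->fs_info = fs_info;

	/* later, holders of the transaction can log against it */
	btrfs_warn(cur_trans->fs_info, "transaction %llu aborted",
		   cur_trans->transid);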
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
......@@ -382,7 +382,7 @@ int btrfs_map_sblock(struct btrfs_fs_info *fs_info, int op,
u64 logical, u64 *length,
struct btrfs_bio **bbio_ret, int mirror_num,
int need_raid_map);
int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree,
int btrfs_rmap_block(struct btrfs_fs_info *fs_info,
u64 chunk_start, u64 physical, u64 devid,
u64 **logical, int *naddrs, int *stripe_len);
int btrfs_read_sys_array(struct btrfs_root *root);
......
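btrfs_rmap_block() now takes the fs_info rather than a mapping tree pointer, presumably looking up the tree internally (fs_info->mapping_tree) and sparing every caller the dereference. An assumed call-site before/after:

	/* before */
	ret = btrfs_rmap_block(&fs_info->mapping_tree, chunk_start,
			       physical, devid, &logical, &naddrs,
			       &stripe_len);

	/* after */
	ret = btrfs_rmap_block(fs_info, chunk_start,
			       physical, devid, &logical, &naddrs,
			       &stripe_len);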
This diff is collapsed.