提交 6de5f18e 编写于 作者: E Elena Reshetova 提交者: David Sterba

btrfs: convert btrfs_delayed_node.refs from atomic_t to refcount_t

refcount_t type and corresponding API should be
used instead of atomic_t when the variable is used as
a reference counter. This allows us to avoid accidental
refcounter overflows that might lead to use-after-free
situations.
Signed-off-by: Elena Reshetova <elena.reshetova@intel.com>
Signed-off-by: Hans Liljestrand <ishkamiel@gmail.com>
Signed-off-by: Kees Cook <keescook@chromium.org>
Signed-off-by: David Windsor <dwindsor@gmail.com>
Signed-off-by: David Sterba <dsterba@suse.com>
上级 6df8cdf5
...@@ -52,7 +52,7 @@ static inline void btrfs_init_delayed_node( ...@@ -52,7 +52,7 @@ static inline void btrfs_init_delayed_node(
{ {
delayed_node->root = root; delayed_node->root = root;
delayed_node->inode_id = inode_id; delayed_node->inode_id = inode_id;
atomic_set(&delayed_node->refs, 0); refcount_set(&delayed_node->refs, 0);
delayed_node->ins_root = RB_ROOT; delayed_node->ins_root = RB_ROOT;
delayed_node->del_root = RB_ROOT; delayed_node->del_root = RB_ROOT;
mutex_init(&delayed_node->mutex); mutex_init(&delayed_node->mutex);
...@@ -81,7 +81,7 @@ static struct btrfs_delayed_node *btrfs_get_delayed_node( ...@@ -81,7 +81,7 @@ static struct btrfs_delayed_node *btrfs_get_delayed_node(
node = READ_ONCE(btrfs_inode->delayed_node); node = READ_ONCE(btrfs_inode->delayed_node);
if (node) { if (node) {
atomic_inc(&node->refs); refcount_inc(&node->refs);
return node; return node;
} }
...@@ -89,14 +89,14 @@ static struct btrfs_delayed_node *btrfs_get_delayed_node( ...@@ -89,14 +89,14 @@ static struct btrfs_delayed_node *btrfs_get_delayed_node(
node = radix_tree_lookup(&root->delayed_nodes_tree, ino); node = radix_tree_lookup(&root->delayed_nodes_tree, ino);
if (node) { if (node) {
if (btrfs_inode->delayed_node) { if (btrfs_inode->delayed_node) {
atomic_inc(&node->refs); /* can be accessed */ refcount_inc(&node->refs); /* can be accessed */
BUG_ON(btrfs_inode->delayed_node != node); BUG_ON(btrfs_inode->delayed_node != node);
spin_unlock(&root->inode_lock); spin_unlock(&root->inode_lock);
return node; return node;
} }
btrfs_inode->delayed_node = node; btrfs_inode->delayed_node = node;
/* can be accessed and cached in the inode */ /* can be accessed and cached in the inode */
atomic_add(2, &node->refs); refcount_add(2, &node->refs);
spin_unlock(&root->inode_lock); spin_unlock(&root->inode_lock);
return node; return node;
} }
...@@ -125,7 +125,7 @@ static struct btrfs_delayed_node *btrfs_get_or_create_delayed_node( ...@@ -125,7 +125,7 @@ static struct btrfs_delayed_node *btrfs_get_or_create_delayed_node(
btrfs_init_delayed_node(node, root, ino); btrfs_init_delayed_node(node, root, ino);
/* cached in the btrfs inode and can be accessed */ /* cached in the btrfs inode and can be accessed */
atomic_add(2, &node->refs); refcount_set(&node->refs, 2);
ret = radix_tree_preload(GFP_NOFS); ret = radix_tree_preload(GFP_NOFS);
if (ret) { if (ret) {
...@@ -166,7 +166,7 @@ static void btrfs_queue_delayed_node(struct btrfs_delayed_root *root, ...@@ -166,7 +166,7 @@ static void btrfs_queue_delayed_node(struct btrfs_delayed_root *root,
} else { } else {
list_add_tail(&node->n_list, &root->node_list); list_add_tail(&node->n_list, &root->node_list);
list_add_tail(&node->p_list, &root->prepare_list); list_add_tail(&node->p_list, &root->prepare_list);
atomic_inc(&node->refs); /* inserted into list */ refcount_inc(&node->refs); /* inserted into list */
root->nodes++; root->nodes++;
set_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags); set_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags);
} }
...@@ -180,7 +180,7 @@ static void btrfs_dequeue_delayed_node(struct btrfs_delayed_root *root, ...@@ -180,7 +180,7 @@ static void btrfs_dequeue_delayed_node(struct btrfs_delayed_root *root,
spin_lock(&root->lock); spin_lock(&root->lock);
if (test_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags)) { if (test_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags)) {
root->nodes--; root->nodes--;
atomic_dec(&node->refs); /* not in the list */ refcount_dec(&node->refs); /* not in the list */
list_del_init(&node->n_list); list_del_init(&node->n_list);
if (!list_empty(&node->p_list)) if (!list_empty(&node->p_list))
list_del_init(&node->p_list); list_del_init(&node->p_list);
...@@ -201,7 +201,7 @@ static struct btrfs_delayed_node *btrfs_first_delayed_node( ...@@ -201,7 +201,7 @@ static struct btrfs_delayed_node *btrfs_first_delayed_node(
p = delayed_root->node_list.next; p = delayed_root->node_list.next;
node = list_entry(p, struct btrfs_delayed_node, n_list); node = list_entry(p, struct btrfs_delayed_node, n_list);
atomic_inc(&node->refs); refcount_inc(&node->refs);
out: out:
spin_unlock(&delayed_root->lock); spin_unlock(&delayed_root->lock);
...@@ -228,7 +228,7 @@ static struct btrfs_delayed_node *btrfs_next_delayed_node( ...@@ -228,7 +228,7 @@ static struct btrfs_delayed_node *btrfs_next_delayed_node(
p = node->n_list.next; p = node->n_list.next;
next = list_entry(p, struct btrfs_delayed_node, n_list); next = list_entry(p, struct btrfs_delayed_node, n_list);
atomic_inc(&next->refs); refcount_inc(&next->refs);
out: out:
spin_unlock(&delayed_root->lock); spin_unlock(&delayed_root->lock);
...@@ -253,11 +253,11 @@ static void __btrfs_release_delayed_node( ...@@ -253,11 +253,11 @@ static void __btrfs_release_delayed_node(
btrfs_dequeue_delayed_node(delayed_root, delayed_node); btrfs_dequeue_delayed_node(delayed_root, delayed_node);
mutex_unlock(&delayed_node->mutex); mutex_unlock(&delayed_node->mutex);
if (atomic_dec_and_test(&delayed_node->refs)) { if (refcount_dec_and_test(&delayed_node->refs)) {
bool free = false; bool free = false;
struct btrfs_root *root = delayed_node->root; struct btrfs_root *root = delayed_node->root;
spin_lock(&root->inode_lock); spin_lock(&root->inode_lock);
if (atomic_read(&delayed_node->refs) == 0) { if (refcount_read(&delayed_node->refs) == 0) {
radix_tree_delete(&root->delayed_nodes_tree, radix_tree_delete(&root->delayed_nodes_tree,
delayed_node->inode_id); delayed_node->inode_id);
free = true; free = true;
...@@ -286,7 +286,7 @@ static struct btrfs_delayed_node *btrfs_first_prepared_delayed_node( ...@@ -286,7 +286,7 @@ static struct btrfs_delayed_node *btrfs_first_prepared_delayed_node(
p = delayed_root->prepare_list.next; p = delayed_root->prepare_list.next;
list_del_init(p); list_del_init(p);
node = list_entry(p, struct btrfs_delayed_node, p_list); node = list_entry(p, struct btrfs_delayed_node, p_list);
atomic_inc(&node->refs); refcount_inc(&node->refs);
out: out:
spin_unlock(&delayed_root->lock); spin_unlock(&delayed_root->lock);
...@@ -1621,7 +1621,7 @@ bool btrfs_readdir_get_delayed_items(struct inode *inode, ...@@ -1621,7 +1621,7 @@ bool btrfs_readdir_get_delayed_items(struct inode *inode,
* insert/delete delayed items in this period. So we also needn't * insert/delete delayed items in this period. So we also needn't
* requeue or dequeue this delayed node. * requeue or dequeue this delayed node.
*/ */
atomic_dec(&delayed_node->refs); refcount_dec(&delayed_node->refs);
return true; return true;
} }
...@@ -1963,7 +1963,7 @@ void btrfs_kill_all_delayed_nodes(struct btrfs_root *root) ...@@ -1963,7 +1963,7 @@ void btrfs_kill_all_delayed_nodes(struct btrfs_root *root)
inode_id = delayed_nodes[n - 1]->inode_id + 1; inode_id = delayed_nodes[n - 1]->inode_id + 1;
for (i = 0; i < n; i++) for (i = 0; i < n; i++)
atomic_inc(&delayed_nodes[i]->refs); refcount_inc(&delayed_nodes[i]->refs);
spin_unlock(&root->inode_lock); spin_unlock(&root->inode_lock);
for (i = 0; i < n; i++) { for (i = 0; i < n; i++) {
......
...@@ -26,7 +26,7 @@ ...@@ -26,7 +26,7 @@
#include <linux/list.h> #include <linux/list.h>
#include <linux/wait.h> #include <linux/wait.h>
#include <linux/atomic.h> #include <linux/atomic.h>
#include <linux/refcount.h>
#include "ctree.h" #include "ctree.h"
/* types of the delayed item */ /* types of the delayed item */
...@@ -67,7 +67,7 @@ struct btrfs_delayed_node { ...@@ -67,7 +67,7 @@ struct btrfs_delayed_node {
struct rb_root del_root; struct rb_root del_root;
struct mutex mutex; struct mutex mutex;
struct btrfs_inode_item inode_item; struct btrfs_inode_item inode_item;
atomic_t refs; refcount_t refs;
u64 index_cnt; u64 index_cnt;
unsigned long flags; unsigned long flags;
int count; int count;
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册