Commit da0fa9e4 authored by Todd Kjos, committed by Greg Kroah-Hartman

binder: protect proc->nodes with inner lock

When locks for binder_ref handling are added, proc->nodes
will need to be modified while holding the outer lock.

Signed-off-by: Todd Kjos <tkjos@google.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Parent 673068ee
@@ -313,6 +313,7 @@ struct binder_error {
  * @work:               worklist element for node work
  *                      (protected by @proc->inner_lock)
  * @rb_node:            element for proc->nodes tree
+ *                      (protected by @proc->inner_lock)
  * @dead_node:          element for binder_dead_nodes list
  *                      (protected by binder_dead_nodes_lock)
  * @proc:               binder_proc that owns this node
@@ -470,6 +471,7 @@ enum binder_deferred_state {
  * @threads:              rbtree of binder_threads in this proc
  * @nodes:                rbtree of binder nodes associated with
  *                        this proc ordered by node->ptr
+ *                        (protected by @inner_lock)
  * @refs_by_desc:         rbtree of refs ordered by ref->desc
  * @refs_by_node:         rbtree of refs ordered by ref->node
  * @pid                   PID of group_leader of process
@@ -856,7 +858,7 @@ static void
 binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer);
 static void binder_free_thread(struct binder_thread *thread);
 static void binder_free_proc(struct binder_proc *proc);
-static void binder_inc_node_tmpref(struct binder_node *node);
+static void binder_inc_node_tmpref_ilocked(struct binder_node *node);

 static int task_get_unused_fd_flags(struct binder_proc *proc, int flags)
 {
@@ -938,12 +940,14 @@ static void binder_set_nice(long nice)
 	binder_user_error("%d RLIMIT_NICE not set\n", current->pid);
 }

-static struct binder_node *binder_get_node(struct binder_proc *proc,
-					   binder_uintptr_t ptr)
+static struct binder_node *binder_get_node_ilocked(struct binder_proc *proc,
+						   binder_uintptr_t ptr)
 {
 	struct rb_node *n = proc->nodes.rb_node;
 	struct binder_node *node;

+	BUG_ON(!spin_is_locked(&proc->inner_lock));
+
 	while (n) {
 		node = rb_entry(n, struct binder_node, rb_node);
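
A note on the BUG_ON(!spin_is_locked(&proc->inner_lock)) added above: it turns the new locking contract into an enforced assertion, so a caller that reaches binder_get_node_ilocked() without the inner lock crashes immediately instead of silently racing on the tree. spin_is_locked() only proves that *someone* holds the lock, not that the current context does; later kernels tighten these spots with assert_spin_locked() and lockdep-based checks. As a rough userspace analogy (all names below are hypothetical, not from the driver), an owner-tracking debug assertion might look like this:

#include <assert.h>
#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t inner_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_t inner_lock_owner;	/* meaningful only while locked */
static bool inner_lock_held;

static void inner_lock_acquire(void)
{
	pthread_mutex_lock(&inner_lock);
	inner_lock_owner = pthread_self();
	inner_lock_held = true;
}

static void inner_lock_release(void)
{
	inner_lock_held = false;
	pthread_mutex_unlock(&inner_lock);
}

/*
 * Analogue of BUG_ON(!spin_is_locked(...)): abort if the locking
 * contract is broken. Debug-only: the owner fields are reliable
 * only when the caller genuinely holds the lock.
 */
static void assert_inner_lock_held(void)
{
	assert(inner_lock_held &&
	       pthread_equal(inner_lock_owner, pthread_self()));
}
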
@@ -957,15 +961,28 @@ static struct binder_node *binder_get_node(struct binder_proc *proc,
 			 * to ensure node stays alive until
 			 * call to binder_put_node()
 			 */
-			binder_inc_node_tmpref(node);
+			binder_inc_node_tmpref_ilocked(node);
 			return node;
 		}
 	}
 	return NULL;
 }

-static struct binder_node *binder_new_node(struct binder_proc *proc,
-					   struct flat_binder_object *fp)
+static struct binder_node *binder_get_node(struct binder_proc *proc,
+					   binder_uintptr_t ptr)
+{
+	struct binder_node *node;
+
+	binder_inner_proc_lock(proc);
+	node = binder_get_node_ilocked(proc, ptr);
+	binder_inner_proc_unlock(proc);
+	return node;
+}
+
+static struct binder_node *binder_init_node_ilocked(
+						struct binder_proc *proc,
+						struct binder_node *new_node,
+						struct flat_binder_object *fp)
 {
 	struct rb_node **p = &proc->nodes.rb_node;
 	struct rb_node *parent = NULL;
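
The hunk above shows the naming convention this patch series relies on: an _ilocked suffix marks a function that requires proc->inner_lock to be held on entry, and the original name becomes a thin wrapper that takes and drops the lock around the _ilocked body, so existing callers keep working unchanged. A minimal sketch of the same split, using a hypothetical counter rather than the driver's rbtree:

#include <pthread.h>

struct proc_ctx {
	pthread_mutex_t inner_lock;
	int node_count;		/* stand-in for the proc->nodes tree */
};

/* _ilocked variant: caller must already hold ctx->inner_lock. */
static int get_node_count_ilocked(struct proc_ctx *ctx)
{
	return ctx->node_count;
}

/* Wrapper with the original name: takes and drops the lock itself. */
static int get_node_count(struct proc_ctx *ctx)
{
	int count;

	pthread_mutex_lock(&ctx->inner_lock);
	count = get_node_count_ilocked(ctx);
	pthread_mutex_unlock(&ctx->inner_lock);
	return count;
}
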
@@ -974,7 +991,9 @@ static struct binder_node *binder_new_node(struct binder_proc *proc,
 	binder_uintptr_t cookie = fp ? fp->cookie : 0;
 	__u32 flags = fp ? fp->flags : 0;

+	BUG_ON(!spin_is_locked(&proc->inner_lock));
 	while (*p) {
+
 		parent = *p;
 		node = rb_entry(parent, struct binder_node, rb_node);
@@ -982,13 +1001,17 @@ static struct binder_node *binder_new_node(struct binder_proc *proc,
 			p = &(*p)->rb_left;
 		else if (ptr > node->ptr)
 			p = &(*p)->rb_right;
-		else
-			return NULL;
+		else {
+			/*
+			 * A matching node is already in
+			 * the rb tree. Abandon the init
+			 * and return it.
+			 */
+			binder_inc_node_tmpref_ilocked(node);
+			return node;
+		}
 	}
-
-	node = kzalloc(sizeof(*node), GFP_KERNEL);
-	if (node == NULL)
-		return NULL;
+	node = new_node;
 	binder_stats_created(BINDER_STAT_NODE);
 	node->tmp_refs++;
 	rb_link_node(&node->rb_node, parent, p);
@@ -1007,6 +1030,27 @@ static struct binder_node *binder_new_node(struct binder_proc *proc,
 		     "%d:%d node %d u%016llx c%016llx created\n",
 		     proc->pid, current->pid, node->debug_id,
 		     (u64)node->ptr, (u64)node->cookie);
+
+	return node;
+}
+
+static struct binder_node *binder_new_node(struct binder_proc *proc,
+					   struct flat_binder_object *fp)
+{
+	struct binder_node *node;
+	struct binder_node *new_node = kzalloc(sizeof(*node), GFP_KERNEL);
+
+	if (!new_node)
+		return NULL;
+	binder_inner_proc_lock(proc);
+	node = binder_init_node_ilocked(proc, new_node, fp);
+	binder_inner_proc_unlock(proc);
+	if (node != new_node)
+		/*
+		 * The node was already added by another thread
+		 */
+		kfree(new_node);
+
 	return node;
 }
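
The binder_new_node()/binder_init_node_ilocked() split above exists because kzalloc(GFP_KERNEL) can sleep and therefore must not run under a spinlock: the allocation is hoisted out, the insertion is attempted under the lock, and if another thread inserted a matching node in the meantime the pre-allocated one is simply freed. A userspace sketch of this allocate-outside/insert-under-lock/free-on-lost-race pattern, with hypothetical names and a linked list standing in for the rbtree:

#include <pthread.h>
#include <stdlib.h>

struct node {
	long key;
	struct node *next;
};

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static struct node *head;

/* Caller must hold list_lock; returns the existing or newly linked node. */
static struct node *insert_node_locked(struct node *new_node, long key)
{
	struct node *n;

	for (n = head; n; n = n->next)
		if (n->key == key)
			return n;	/* lost the race: already present */
	new_node->key = key;
	new_node->next = head;
	head = new_node;
	return new_node;
}

static struct node *get_or_create_node(long key)
{
	/* Allocate before taking the lock: malloc() may block. */
	struct node *new_node = calloc(1, sizeof(*new_node));
	struct node *n;

	if (!new_node)
		return NULL;
	pthread_mutex_lock(&list_lock);
	n = insert_node_locked(new_node, key);
	pthread_mutex_unlock(&list_lock);
	if (n != new_node)
		free(new_node);	/* another thread added it first */
	return n;
}
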
@@ -4420,6 +4464,7 @@ static void binder_deferred_release(struct binder_proc *proc)

 	nodes = 0;
 	incoming_refs = 0;
+	binder_inner_proc_lock(proc);
 	while ((n = rb_first(&proc->nodes))) {
 		struct binder_node *node;
@@ -4430,10 +4475,13 @@ static void binder_deferred_release(struct binder_proc *proc)
 		 * calling binder_node_release() which will either
 		 * kfree() the node or call binder_put_node()
 		 */
-		binder_inc_node_tmpref(node);
+		binder_inc_node_tmpref_ilocked(node);
 		rb_erase(&node->rb_node, &proc->nodes);
+		binder_inner_proc_unlock(proc);
 		incoming_refs = binder_node_release(node, incoming_refs);
+		binder_inner_proc_lock(proc);
 	}
+	binder_inner_proc_unlock(proc);

 	outgoing_refs = 0;
 	while ((n = rb_first(&proc->refs_by_desc))) {
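
binder_deferred_release() now walks proc->nodes under the inner lock, but binder_node_release() may block and so cannot run with a spinlock held. The loop therefore unlinks one node, drops the lock for the blocking call, and retakes it before the next iteration; re-evaluating rb_first() on every pass keeps the walk correct even though the tree may have changed while the lock was dropped. Roughly, in userspace terms (hypothetical names):

#include <pthread.h>
#include <stdlib.h>

struct node {
	struct node *next;
};

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct node *head;

static void release_node(struct node *n)	/* may block; called unlocked */
{
	free(n);
}

static void release_all(void)
{
	pthread_mutex_lock(&lock);
	/* Always re-read the head: the list can change while unlocked. */
	while (head) {
		struct node *n = head;

		head = n->next;			/* unlink under the lock */
		pthread_mutex_unlock(&lock);
		release_node(n);		/* blocking work, lock dropped */
		pthread_mutex_lock(&lock);
	}
	pthread_mutex_unlock(&lock);
}
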
@@ -4618,14 +4666,16 @@ static void print_binder_thread_ilocked(struct seq_file *m,
 	m->count = start_pos;
 }

-static void print_binder_node_nlocked(struct seq_file *m,
-				      struct binder_node *node)
+static void print_binder_node_nilocked(struct seq_file *m,
+				       struct binder_node *node)
 {
 	struct binder_ref *ref;
 	struct binder_work *w;
 	int count;

 	WARN_ON(!spin_is_locked(&node->lock));
+	if (node->proc)
+		WARN_ON(!spin_is_locked(&node->proc->inner_lock));

 	count = 0;
 	hlist_for_each_entry(ref, &node->refs, node_entry)
@@ -4643,11 +4693,9 @@ static void print_binder_node_nlocked(struct seq_file *m,
 	}
 	seq_puts(m, "\n");
 	if (node->proc) {
-		binder_inner_proc_lock(node->proc);
 		list_for_each_entry(w, &node->async_todo, entry)
 			print_binder_work_ilocked(m, "    ",
 					  "    pending async transaction", w);
-		binder_inner_proc_unlock(node->proc);
 	}
 }
@@ -4669,6 +4717,7 @@ static void print_binder_proc(struct seq_file *m,
 	struct rb_node *n;
 	size_t start_pos = m->count;
 	size_t header_pos;
+	struct binder_node *last_node = NULL;

 	seq_printf(m, "proc %d\n", proc->pid);
 	seq_printf(m, "context %s\n", proc->context->name);
for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
print_binder_thread_ilocked(m, rb_entry(n, struct binder_thread, print_binder_thread_ilocked(m, rb_entry(n, struct binder_thread,
rb_node), print_all); rb_node), print_all);
binder_inner_proc_unlock(proc);
for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) { for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
struct binder_node *node = rb_entry(n, struct binder_node, struct binder_node *node = rb_entry(n, struct binder_node,
rb_node); rb_node);
binder_node_lock(node); /*
if (print_all || node->has_async_transaction) * take a temporary reference on the node so it
print_binder_node_nlocked(m, node); * survives and isn't removed from the tree
binder_node_unlock(node); * while we print it.
*/
binder_inc_node_tmpref_ilocked(node);
/* Need to drop inner lock to take node lock */
binder_inner_proc_unlock(proc);
if (last_node)
binder_put_node(last_node);
binder_node_inner_lock(node);
print_binder_node_nilocked(m, node);
binder_node_inner_unlock(node);
last_node = node;
binder_inner_proc_lock(proc);
} }
binder_inner_proc_unlock(proc);
if (last_node)
binder_put_node(last_node);
if (print_all) { if (print_all) {
for (n = rb_first(&proc->refs_by_desc); for (n = rb_first(&proc->refs_by_desc);
n != NULL; n != NULL;
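
print_binder_proc() faces a subtler problem: it must drop the inner lock to take each node's lock, yet rb_next() is only safe if the current node stays in the tree. The patch pins the current node with a temporary reference and defers the matching put to the next iteration (via last_node), after the inner lock has been retaken, so there is always a valid node to continue the walk from. A simplified userspace rendering of that idiom, with hypothetical names and a refcounted singly linked list standing in for the rbtree:

#include <pthread.h>
#include <stdlib.h>

struct node {
	int refs;		/* the list itself holds one reference */
	struct node *next;
};

static pthread_mutex_t inner_lock = PTHREAD_MUTEX_INITIALIZER;
static struct node *head;

/* Caller holds inner_lock. */
static void node_get_locked(struct node *n)
{
	n->refs++;
}

static void node_put(struct node *n)	/* takes the lock itself */
{
	pthread_mutex_lock(&inner_lock);
	/*
	 * Since the list holds its own reference, puts from the
	 * iterator below never free a node that is still linked.
	 */
	if (--n->refs == 0)
		free(n);
	pthread_mutex_unlock(&inner_lock);
}

static void print_all_nodes(void (*show)(struct node *))
{
	struct node *last = NULL, *n;

	pthread_mutex_lock(&inner_lock);
	for (n = head; n; n = n->next) {
		node_get_locked(n);	/* pin n before dropping the lock */
		pthread_mutex_unlock(&inner_lock);
		if (last)
			node_put(last);	/* safe: we no longer iterate from it */
		show(n);		/* may take other locks or sleep */
		last = n;
		pthread_mutex_lock(&inner_lock);
	}
	pthread_mutex_unlock(&inner_lock);
	if (last)
		node_put(last);
}
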
@@ -4822,8 +4886,10 @@ static void print_binder_proc_stats(struct seq_file *m,
 			proc->ready_threads,
 			binder_alloc_get_free_async_space(&proc->alloc));
 	count = 0;
+	binder_inner_proc_lock(proc);
 	for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n))
 		count++;
+	binder_inner_proc_unlock(proc);
 	seq_printf(m, "  nodes: %d\n", count);
 	count = 0;
 	strong = 0;
@@ -4877,7 +4943,7 @@ static int binder_state_show(struct seq_file *m, void *unused)
 		if (last_node)
 			binder_put_node(last_node);
 		binder_node_lock(node);
-		print_binder_node_nlocked(m, node);
+		print_binder_node_nilocked(m, node);
 		binder_node_unlock(node);
 		last_node = node;
 		spin_lock(&binder_dead_nodes_lock);
......