Commit a60b890f authored by Todd Kjos, committed by Greg Kroah-Hartman

binder: remove global binder lock

Remove global mutex and rely on fine-grained locking.
Signed-off-by: Todd Kjos <tkjos@google.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Parent ab51ec6b
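
The hunks below delete the single binder_main_lock and the binder_lock()/binder_unlock() wrappers around it, leaving the fine-grained locks (for example the binder_inner_proc_lock() calls that remain in binder_thread_read() and binder_poll()) to protect the data they cover. As a minimal sketch of that pattern, assuming a hypothetical struct obj and operation names that are not part of the binder code:

#include <linux/mutex.h>
#include <linux/spinlock.h>

/* Sketch only: hypothetical object type, not taken from binder.c. */
struct obj {
	spinlock_t lock;	/* per-object lock, spin_lock_init() at creation */
	int count;
};

static DEFINE_MUTEX(global_lock);	/* old style: one lock for everything */

static void op_coarse(struct obj *o)
{
	mutex_lock(&global_lock);	/* every object funnels through here */
	o->count++;
	mutex_unlock(&global_lock);
}

static void op_fine(struct obj *o)
{
	spin_lock(&o->lock);		/* only users of this object contend */
	o->count++;
	spin_unlock(&o->lock);
}

Dropping the global lock removes one point of contention but introduces lock-ordering rules between the remaining fine-grained locks, which is why the call sites in the hunks below simply lose their binder_lock()/binder_unlock() pairs rather than gaining new locking.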
@@ -80,8 +80,6 @@
#include "binder_alloc.h"
#include "binder_trace.h"
static DEFINE_MUTEX(binder_main_lock);
static HLIST_HEAD(binder_deferred_list);
static DEFINE_MUTEX(binder_deferred_lock);
@@ -924,19 +922,6 @@ static long task_close_fd(struct binder_proc *proc, unsigned int fd)
return retval;
}
static inline void binder_lock(const char *tag)
{
trace_binder_lock(tag);
mutex_lock(&binder_main_lock);
trace_binder_locked(tag);
}
static inline void binder_unlock(const char *tag)
{
trace_binder_unlock(tag);
mutex_unlock(&binder_main_lock);
}
static void binder_set_nice(long nice)
{
long min_nice;
@@ -3557,8 +3542,6 @@ static int binder_thread_read(struct binder_proc *proc,
thread->looper |= BINDER_LOOPER_STATE_WAITING;
binder_unlock(__func__);
trace_binder_wait_for_work(wait_for_proc_work,
!!thread->transaction_stack,
!binder_worklist_empty(proc, &thread->todo));
@@ -3584,8 +3567,6 @@ static int binder_thread_read(struct binder_proc *proc,
ret = wait_event_freezable(thread->wait, binder_has_thread_work(thread));
}
binder_lock(__func__);
binder_inner_proc_lock(proc);
if (wait_for_proc_work)
proc->ready_threads--;
@@ -4101,8 +4082,6 @@ static unsigned int binder_poll(struct file *filp,
struct binder_thread *thread = NULL;
int wait_for_proc_work;
binder_lock(__func__);
thread = binder_get_thread(proc);
binder_inner_proc_lock(thread->proc);
@@ -4110,8 +4089,6 @@ static unsigned int binder_poll(struct file *filp,
binder_worklist_empty_ilocked(&thread->todo);
binder_inner_proc_unlock(thread->proc);
binder_unlock(__func__);
if (wait_for_proc_work) {
if (binder_has_proc_work(proc, thread))
return POLLIN;
@@ -4256,7 +4233,6 @@ static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
if (ret)
goto err_unlocked;
binder_lock(__func__);
thread = binder_get_thread(proc);
if (thread == NULL) {
ret = -ENOMEM;
@@ -4315,7 +4291,6 @@ static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
err:
if (thread)
thread->looper_need_return = false;
binder_unlock(__func__);
wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
if (ret && ret != -ERESTARTSYS)
pr_info("%d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret);
@@ -4421,15 +4396,11 @@ static int binder_open(struct inode *nodp, struct file *filp)
proc->context = &binder_dev->context;
binder_alloc_init(&proc->alloc);
binder_lock(__func__);
binder_stats_created(BINDER_STAT_PROC);
proc->pid = current->group_leader->pid;
INIT_LIST_HEAD(&proc->delivered_death);
filp->private_data = proc;
binder_unlock(__func__);
mutex_lock(&binder_procs_lock);
hlist_add_head(&proc->proc_node, &binder_procs);
mutex_unlock(&binder_procs_lock);
@@ -4655,7 +4626,6 @@ static void binder_deferred_func(struct work_struct *work)
int defer;
do {
binder_lock(__func__);
mutex_lock(&binder_deferred_lock);
if (!hlist_empty(&binder_deferred_list)) {
proc = hlist_entry(binder_deferred_list.first,
@@ -4682,7 +4652,6 @@ static void binder_deferred_func(struct work_struct *work)
if (defer & BINDER_DEFERRED_RELEASE)
binder_deferred_release(proc); /* frees proc */
binder_unlock(__func__);
if (files)
put_files_struct(files);
} while (proc);
@@ -5097,8 +5066,6 @@ static int binder_state_show(struct seq_file *m, void *unused)
struct binder_node *node;
struct binder_node *last_node = NULL;
binder_lock(__func__);
seq_puts(m, "binder state:\n");
spin_lock(&binder_dead_nodes_lock);
@@ -5128,7 +5095,7 @@ static int binder_state_show(struct seq_file *m, void *unused)
hlist_for_each_entry(proc, &binder_procs, proc_node)
print_binder_proc(m, proc, 1);
mutex_unlock(&binder_procs_lock);
binder_unlock(__func__);
return 0;
}
@@ -5136,8 +5103,6 @@ static int binder_stats_show(struct seq_file *m, void *unused)
{
struct binder_proc *proc;
binder_lock(__func__);
seq_puts(m, "binder stats:\n");
print_binder_stats(m, "", &binder_stats);
@@ -5146,7 +5111,7 @@ static int binder_stats_show(struct seq_file *m, void *unused)
hlist_for_each_entry(proc, &binder_procs, proc_node)
print_binder_proc_stats(m, proc);
mutex_unlock(&binder_procs_lock);
binder_unlock(__func__);
return 0;
}
@@ -5154,14 +5119,12 @@ static int binder_transactions_show(struct seq_file *m, void *unused)
{
struct binder_proc *proc;
binder_lock(__func__);
seq_puts(m, "binder transactions:\n");
mutex_lock(&binder_procs_lock);
hlist_for_each_entry(proc, &binder_procs, proc_node)
print_binder_proc(m, proc, 0);
mutex_unlock(&binder_procs_lock);
binder_unlock(__func__);
return 0;
}
@@ -5170,8 +5133,6 @@ static int binder_proc_show(struct seq_file *m, void *unused)
struct binder_proc *itr;
int pid = (unsigned long)m->private;
binder_lock(__func__);
mutex_lock(&binder_procs_lock);
hlist_for_each_entry(itr, &binder_procs, proc_node) {
if (itr->pid == pid) {
@@ -5181,7 +5142,6 @@ static int binder_proc_show(struct seq_file *m, void *unused)
}
mutex_unlock(&binder_procs_lock);
binder_unlock(__func__);
return 0;
}