Commit fd615c39 authored by Simon Fels

Add kernel patches to add namespace support to binder

Parent 3c8dcca2
From 8f8ac2552c4a411cf1b8c6328409f861248e8d0d Mon Sep 17 00:00:00 2001
From: Oren Laadan <orenl@cellrox.com>
Date: Sun, 22 Dec 2013 10:07:39 +0000
Subject: [PATCH 1/2] ipc namespace: a generic per-ipc pointer and peripc_ops
Add a void * pointer to struct ipc_namespace. The access rules are:
1. (un)register ops with (un)register_peripc_operations()
2. call ipc_assign_generic() to put private data on the ipc_namespace
3. call ipc_access_generic() to access the private data
4. do not change the pointer during the lifetime of ipc_namespace
Modeled after generic net-ns pointers (commit dec827d), but simplified
to accommodate a single user for now (reduce code churn):
5. only one caller can register at a time
6. caller must register at boot time (not to be used by modules)
Signed-off-by: Oren Laadan <orenl@cellrox.com>
Signed-off-by: Amir Goldstein <cardoe@cardoe.com>
---
include/linux/ipc_namespace.h | 29 +++++++++++++++++++++
ipc/namespace.c | 9 +++++++
ipc/util.c | 60 +++++++++++++++++++++++++++++++++++++++++++
ipc/util.h | 3 +++
4 files changed, 101 insertions(+)
diff --git a/include/linux/ipc_namespace.h b/include/linux/ipc_namespace.h
index d6ad91f..535061a 100644
--- a/include/linux/ipc_namespace.h
+++ b/include/linux/ipc_namespace.h
@@ -70,8 +70,37 @@ struct ipc_namespace {
struct user_namespace *user_ns;
unsigned int proc_inum;
+
+ /* allow others to piggyback on ipc_namespaces */
+ void *gen; /* for others' private stuff */
};
+/*
+ * To access the per-ipc generic data:
+ * 1. (un)register ops with (un)register_peripc_operations()
+ * 2. call ipc_assign_generic() to put private data on the ipc_namespace
+ * 3. call ipc_access_generic() to access the private data
+ * 4. do not change the pointer during the lifetime of ipc_namespace
+ *
+ * Modeled after generic net-ns pointers (commit dec827d), simplified for
+ * the single-user case for now:
+ * 5. only one caller can register at a time
+ * 6. caller must register at boot time (not to be used by modules)
+ */
+struct peripc_operations {
+ int (*init)(struct ipc_namespace *);
+ void (*exit)(struct ipc_namespace *);
+};
+
+static inline void ipc_assign_generic(struct ipc_namespace *ns, void *data)
+{ ns->gen = data; }
+
+static inline void *ipc_access_generic(struct ipc_namespace *ns)
+{ return ns->gen; }
+
+extern int register_peripc_ops(struct peripc_operations *ops);
+extern void unregister_peripc_ops(struct peripc_operations *ops);
+
extern struct ipc_namespace init_ipc_ns;
extern atomic_t nr_ipc_ns;
diff --git a/ipc/namespace.c b/ipc/namespace.c
index aba9a58..575aeae 100644
--- a/ipc/namespace.c
+++ b/ipc/namespace.c
@@ -33,9 +33,17 @@ static struct ipc_namespace *create_ipc_ns(struct user_namespace *user_ns,
}
atomic_set(&ns->count, 1);
+
+ err = init_peripc_ns(ns);
+ if (err) {
+ kfree(ns);
+ return ERR_PTR(err);
+ }
+
err = mq_init_ns(ns);
if (err) {
proc_free_inum(ns->proc_inum);
+ exit_peripc_ns(ns);
kfree(ns);
return ERR_PTR(err);
}
@@ -111,6 +119,7 @@ static void free_ipc_ns(struct ipc_namespace *ns)
sem_exit_ns(ns);
msg_exit_ns(ns);
shm_exit_ns(ns);
+ exit_peripc_ns(ns);
atomic_dec(&nr_ipc_ns);
/*
diff --git a/ipc/util.c b/ipc/util.c
index 7353425..533f8f9 100644
--- a/ipc/util.c
+++ b/ipc/util.c
@@ -71,6 +71,66 @@ struct ipc_proc_iface {
int (*show)(struct seq_file *, void *);
};
+/* allow others to piggyback on ipc_namespace */
+static DEFINE_MUTEX(peripc_mutex);
+static struct peripc_operations *peripc_ops;
+
+/*
+ * peripc_operations is a simplified pernet_operations:
+ * - allow only one entity to register
+ * - allow to register only at boot time (no modules)
+ * (these assumptions make the code much simpler)
+ */
+
+static int init_peripc_count;
+
+/* caller holds peripc_mutex */
+int init_peripc_ns(struct ipc_namespace *ns)
+{
+ int ret = 0;
+
+ if (peripc_ops && peripc_ops->init)
+ ret = peripc_ops->init(ns);
+ if (ret == 0)
+ init_peripc_count++;
+ return ret;
+}
+
+/* caller holds peripc_mutex */
+void exit_peripc_ns(struct ipc_namespace *ns)
+{
+ if (peripc_ops && peripc_ops->exit)
+ peripc_ops->exit(ns);
+ init_peripc_count--;
+}
+
+int register_peripc_ops(struct peripc_operations *ops)
+{
+ int ret = -EBUSY;
+
+ mutex_lock(&peripc_mutex);
+ /* must be the first to register, while only the init ipc_namespace exists */
+ if (peripc_ops == NULL && init_peripc_count == 0) {
+ peripc_ops = ops;
+ ret = init_peripc_ns(&init_ipc_ns);
+ if (ret < 0)
+ peripc_ops = NULL;
+ }
+ mutex_unlock(&peripc_mutex);
+ return ret;
+}
+
+void unregister_peripc_ops(struct peripc_operations *ops)
+{
+ mutex_lock(&peripc_mutex);
+ /* sanity: must match the registered ops, and no ipc ns exists beyond init */
+ BUG_ON(peripc_ops != ops);
+ BUG_ON(init_peripc_count != 1);
+ if (ops->exit)
+ exit_peripc_ns(&init_ipc_ns);
+ peripc_ops = NULL;
+ mutex_unlock(&peripc_mutex);
+}
static void ipc_memory_notifier(struct work_struct *work)
{
ipcns_notify(IPCNS_MEMCHANGED);
diff --git a/ipc/util.h b/ipc/util.h
index 59d78aa..daee0be 100644
--- a/ipc/util.h
+++ b/ipc/util.h
@@ -47,6 +47,9 @@ static inline void msg_exit_ns(struct ipc_namespace *ns) { }
static inline void shm_exit_ns(struct ipc_namespace *ns) { }
#endif
+int init_peripc_ns(struct ipc_namespace *ns);
+void exit_peripc_ns(struct ipc_namespace *ns);
+
struct ipc_rcu {
struct rcu_head rcu;
atomic_t refcount;
--
2.7.4
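For orientation, a minimal sketch of how a boot-time user would consume the per-ipc pointer API introduced above. The names example_ipc_state, example_init and example_exit are illustrative only and not part of the patch; binder (second patch below) is the actual in-tree user.

/* Illustrative sketch only -- not part of the patch. */
#include <linux/init.h>
#include <linux/ipc_namespace.h>
#include <linux/slab.h>

struct example_ipc_state {
	int counter;
};

static int example_init(struct ipc_namespace *ns)
{
	struct example_ipc_state *state = kzalloc(sizeof(*state), GFP_KERNEL);

	if (!state)
		return -ENOMEM;
	ipc_assign_generic(ns, state);	/* rule 2: attach private data once */
	return 0;
}

static void example_exit(struct ipc_namespace *ns)
{
	kfree(ipc_access_generic(ns));	/* rule 3: read back the same pointer */
}

static struct peripc_operations example_peripc_ops = {
	.init = example_init,
	.exit = example_exit,
};

static int __init example_register(void)
{
	/* rules 5/6: single caller, registered at boot, before any
	 * namespace other than init_ipc_ns can exist */
	return register_peripc_ops(&example_peripc_ops);
}
core_initcall(example_register);

Note that register_peripc_ops() immediately runs ->init() for init_ipc_ns, so the initial namespace receives its private data at registration time as well.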
From fcd9d70190dd7e6536878a6379122f06e3b90919 Mon Sep 17 00:00:00 2001
From: Oren Laadan <orenl@cellrox.com>
Date: Sun, 22 Dec 2013 10:07:40 +0000
Subject: [PATCH 2/2] binder: implement namespace support for Android binder
driver
Add namespaces support for the Android binder driver.
As binder is an IPC mechanism, tie its namespace to IPC_NS.
In binder, the first process to call BINDER_SET_CONTEXT_MGR ioctl
becomes the manager with context 0, and thereafter IPC is realized
through binder handles obtained from this manager.
For namespaces, associate a separate binder state with each namespace
so each namespace has its own context manager. Binder users within a
namespace interact only with the context manager there. This suffices
because binder does not allow IPC except through handles obtained from the context manager.
Currently, statistics remain global, except for the list of processes
that hold an open binder device (reported per namespace).
Signed-off-by: Oren Laadan <orenl@cellrox.com>
Acked-by: Amir Goldstein <cardoe@cardoe.com>
---
drivers/android/Kconfig | 1 +
drivers/android/binder.c | 172 ++++++++++++++++++++++++++++++++++++++---------
2 files changed, 143 insertions(+), 30 deletions(-)
diff --git a/drivers/android/Kconfig b/drivers/android/Kconfig
index bdfc6c6..739063d 100644
--- a/drivers/android/Kconfig
+++ b/drivers/android/Kconfig
@@ -10,6 +10,7 @@ if ANDROID
config ANDROID_BINDER_IPC
bool "Android Binder IPC Driver"
depends on MMU
+ select SYSVIPC
default n
---help---
Binder is used in Android for both communication between processes,
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index fee479d..2a63e9b 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -37,6 +37,7 @@
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
+#include <linux/ipc_namespace.h>
#include <linux/pid_namespace.h>
#include <linux/security.h>
@@ -47,19 +48,98 @@
#include <uapi/linux/android/binder.h>
#include "binder_trace.h"
+/*
+ * Using a private context manager for each binder namespace is sufficient
+ * to isolate between namespaces, because in binder all IPC must be realized
+ * via handles obtained from the context manager.
+ *
+ * TODO: currently, most debugfs data is not tracked per binder namespace.
+ * Except for "procs" which are properly virtualized, everything else is
+ * global, including stats, logs, and dead nodes.
+ */
+struct binder_namespace {
+ struct kref kref;
+
+ struct binder_node *context_mgr_node;
+ kuid_t context_mgr_uid;
+ int last_id;
+
+ struct hlist_head procs;
+};
+
+static struct binder_namespace *create_binder_ns(void)
+{
+ struct binder_namespace *binder_ns;
+
+ binder_ns = kzalloc(sizeof(struct binder_namespace), GFP_KERNEL);
+ if (binder_ns) {
+ kref_init(&binder_ns->kref);
+ binder_ns->context_mgr_uid = INVALID_UID;
+ INIT_HLIST_HEAD(&binder_ns->procs);
+ }
+ return binder_ns;
+}
+
+static void free_binder_ns(struct kref *kref)
+{
+ kfree(container_of(kref, struct binder_namespace, kref));
+}
+
+static void get_binder_ns(struct binder_namespace *binder_ns)
+{
+ kref_get(&binder_ns->kref);
+}
+
+static void put_binder_ns(struct binder_namespace *binder_ns)
+{
+ kref_put(&binder_ns->kref, free_binder_ns);
+}
+
+/*
+ * Binder is an IPC mechanism, so tie its namespace to IPC_NS
+ * using the generic data pointer and per-ipc operations.
+ */
+static struct binder_namespace *current_binder_ns(void)
+{
+ return ipc_access_generic(current->nsproxy->ipc_ns);
+}
+
+int binder_init_ns(struct ipc_namespace *ipcns)
+{
+ struct binder_namespace *binder_ns;
+ int ret = -ENOMEM;
+
+ binder_ns = create_binder_ns();
+ if (binder_ns) {
+ ipc_assign_generic(ipcns, binder_ns);
+ ret = 0;
+ }
+ return ret;
+}
+
+void binder_exit_ns(struct ipc_namespace *ipcns)
+{
+ struct binder_namespace *binder_ns;
+
+ binder_ns = ipc_access_generic(ipcns);
+ if (binder_ns)
+ put_binder_ns(binder_ns);
+}
+
+struct peripc_operations binder_peripc_ops = {
+ .init = binder_init_ns,
+ .exit = binder_exit_ns,
+};
+
static DEFINE_RT_MUTEX(binder_main_lock);
static DEFINE_MUTEX(binder_deferred_lock);
static DEFINE_MUTEX(binder_mmap_lock);
-static HLIST_HEAD(binder_procs);
static HLIST_HEAD(binder_deferred_list);
static HLIST_HEAD(binder_dead_nodes);
static struct dentry *binder_debugfs_dir_entry_root;
static struct dentry *binder_debugfs_dir_entry_proc;
-static struct binder_node *binder_context_mgr_node;
-static kuid_t binder_context_mgr_uid = INVALID_UID;
-static int binder_last_id;
static struct workqueue_struct *binder_deferred_workqueue;
#define BINDER_DEBUG_ENTRY(name) \
@@ -327,6 +407,8 @@ struct binder_proc {
int ready_threads;
long default_priority;
struct dentry *debugfs_entry;
+
+ struct binder_namespace *binder_ns;
};
enum {
@@ -910,7 +992,7 @@ static struct binder_node *binder_new_node(struct binder_proc *proc,
binder_stats_created(BINDER_STAT_NODE);
rb_link_node(&node->rb_node, parent, p);
rb_insert_color(&node->rb_node, &proc->nodes);
- node->debug_id = ++binder_last_id;
+ node->debug_id = ++proc->binder_ns->last_id;
node->proc = proc;
node->ptr = ptr;
node->cookie = cookie;
@@ -931,7 +1013,7 @@ static int binder_inc_node(struct binder_node *node, int strong, int internal,
if (internal) {
if (target_list == NULL &&
node->internal_strong_refs == 0 &&
- !(node == binder_context_mgr_node &&
+ !(node == node->proc->binder_ns->context_mgr_node &&
node->has_strong_ref)) {
pr_err("invalid inc strong node for %d\n",
node->debug_id);
@@ -1045,13 +1127,13 @@ static struct binder_ref *binder_get_ref_for_node(struct binder_proc *proc,
if (new_ref == NULL)
return NULL;
binder_stats_created(BINDER_STAT_REF);
- new_ref->debug_id = ++binder_last_id;
+ new_ref->debug_id = ++proc->binder_ns->last_id;
new_ref->proc = proc;
new_ref->node = node;
rb_link_node(&new_ref->rb_node_node, parent, p);
rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node);
- new_ref->desc = (node == binder_context_mgr_node) ? 0 : 1;
+ new_ref->desc = (node == proc->binder_ns->context_mgr_node) ? 0 : 1;
for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
ref = rb_entry(n, struct binder_ref, rb_node_desc);
if (ref->desc > new_ref->desc)
@@ -1391,7 +1473,7 @@ static void binder_transaction(struct binder_proc *proc,
}
target_node = ref->node;
} else {
- target_node = binder_context_mgr_node;
+ target_node = proc->binder_ns->context_mgr_node;
if (target_node == NULL) {
return_error = BR_DEAD_REPLY;
goto err_no_context_mgr_node;
@@ -1452,7 +1534,7 @@ static void binder_transaction(struct binder_proc *proc,
}
binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE);
- t->debug_id = ++binder_last_id;
+ t->debug_id = ++proc->binder_ns->last_id;
e->debug_id = t->debug_id;
if (reply)
@@ -1787,10 +1869,10 @@ static int binder_thread_write(struct binder_proc *proc,
if (get_user(target, (uint32_t __user *)ptr))
return -EFAULT;
ptr += sizeof(uint32_t);
- if (target == 0 && binder_context_mgr_node &&
+ if (target == 0 && proc->binder_ns->context_mgr_node &&
(cmd == BC_INCREFS || cmd == BC_ACQUIRE)) {
ref = binder_get_ref_for_node(proc,
- binder_context_mgr_node);
+ proc->binder_ns->context_mgr_node);
if (ref->desc != target) {
binder_user_error("%d:%d tried to acquire reference to desc 0, got %d instead\n",
proc->pid, thread->pid,
@@ -2696,9 +2778,10 @@ static int binder_ioctl_set_ctx_mgr(struct file *filp)
{
int ret = 0;
struct binder_proc *proc = filp->private_data;
+ struct binder_namespace *binder_ns = proc->binder_ns;
kuid_t curr_euid = current_euid();
- if (binder_context_mgr_node != NULL) {
+ if (binder_ns->context_mgr_node != NULL) {
pr_err("BINDER_SET_CONTEXT_MGR already set\n");
ret = -EBUSY;
goto out;
@@ -2706,27 +2789,27 @@ static int binder_ioctl_set_ctx_mgr(struct file *filp)
ret = security_binder_set_context_mgr(proc->tsk);
if (ret < 0)
goto out;
- if (uid_valid(binder_context_mgr_uid)) {
- if (!uid_eq(binder_context_mgr_uid, curr_euid)) {
+ if (uid_valid(binder_ns->context_mgr_uid)) {
+ if (!uid_eq(binder_ns->context_mgr_uid, curr_euid)) {
pr_err("BINDER_SET_CONTEXT_MGR bad uid %d != %d\n",
from_kuid(&init_user_ns, curr_euid),
from_kuid(&init_user_ns,
- binder_context_mgr_uid));
+ binder_ns->context_mgr_uid));
ret = -EPERM;
goto out;
}
} else {
- binder_context_mgr_uid = curr_euid;
+ binder_ns->context_mgr_uid = curr_euid;
}
- binder_context_mgr_node = binder_new_node(proc, 0, 0);
- if (binder_context_mgr_node == NULL) {
+ binder_ns->context_mgr_node = binder_new_node(proc, 0, 0);
+ if (binder_ns->context_mgr_node == NULL) {
ret = -ENOMEM;
goto out;
}
- binder_context_mgr_node->local_weak_refs++;
- binder_context_mgr_node->local_strong_refs++;
- binder_context_mgr_node->has_strong_ref = 1;
- binder_context_mgr_node->has_weak_ref = 1;
+ binder_ns->context_mgr_node->local_weak_refs++;
+ binder_ns->context_mgr_node->local_strong_refs++;
+ binder_ns->context_mgr_node->has_strong_ref = 1;
+ binder_ns->context_mgr_node->has_weak_ref = 1;
out:
return ret;
}
@@ -2947,10 +3030,15 @@ err_bad_arg:
static int binder_open(struct inode *nodp, struct file *filp)
{
struct binder_proc *proc;
+ struct binder_namespace *binder_ns;
binder_debug(BINDER_DEBUG_OPEN_CLOSE, "binder_open: %d:%d\n",
current->group_leader->pid, current->pid);
+ binder_ns = current_binder_ns();
+ if (binder_ns == NULL)
+ return -ENOMEM;
+
proc = kzalloc(sizeof(*proc), GFP_KERNEL);
if (proc == NULL)
return -ENOMEM;
@@ -2960,10 +3048,13 @@ static int binder_open(struct inode *nodp, struct file *filp)
init_waitqueue_head(&proc->wait);
proc->default_priority = task_nice(current);
+ proc->binder_ns = binder_ns;
+ get_binder_ns(binder_ns);
+
binder_lock(__func__);
binder_stats_created(BINDER_STAT_PROC);
- hlist_add_head(&proc->proc_node, &binder_procs);
+ hlist_add_head(&proc->proc_node, &binder_ns->procs);
proc->pid = current->group_leader->pid;
INIT_LIST_HEAD(&proc->delivered_death);
filp->private_data = proc;
@@ -3067,6 +3158,7 @@ static int binder_node_release(struct binder_node *node, int refs)
static void binder_deferred_release(struct binder_proc *proc)
{
+ struct binder_namespace *binder_ns = proc->binder_ns;
struct binder_transaction *t;
struct rb_node *n;
int threads, nodes, incoming_refs, outgoing_refs, buffers,
@@ -3077,11 +3169,12 @@ static void binder_deferred_release(struct binder_proc *proc)
hlist_del(&proc->proc_node);
- if (binder_context_mgr_node && binder_context_mgr_node->proc == proc) {
+ if (binder_ns->context_mgr_node &&
+ binder_ns->context_mgr_node->proc == proc) {
binder_debug(BINDER_DEBUG_DEAD_BINDER,
"%s: %d context_mgr_node gone\n",
__func__, proc->pid);
- binder_context_mgr_node = NULL;
+ binder_ns->context_mgr_node = NULL;
}
threads = 0;
@@ -3160,6 +3253,7 @@ static void binder_deferred_release(struct binder_proc *proc)
vfree(proc->buffer);
}
+ put_binder_ns(proc->binder_ns);
put_task_struct(proc->tsk);
binder_debug(BINDER_DEBUG_OPEN_CLOSE,
@@ -3540,10 +3634,14 @@ static void print_binder_proc_stats(struct seq_file *m,
static int binder_state_show(struct seq_file *m, void *unused)
{
+ struct binder_namespace *binder_ns = current_binder_ns();
struct binder_proc *proc;
struct binder_node *node;
int do_lock = !binder_debug_no_lock;
+ if (binder_ns == NULL)
+ return 0;
+
if (do_lock)
binder_lock(__func__);
@@ -3554,7 +3652,7 @@ static int binder_state_show(struct seq_file *m, void *unused)
hlist_for_each_entry(node, &binder_dead_nodes, dead_node)
print_binder_node(m, node);
- hlist_for_each_entry(proc, &binder_procs, proc_node)
+ hlist_for_each_entry(proc, &binder_ns->procs, proc_node)
print_binder_proc(m, proc, 1);
if (do_lock)
binder_unlock(__func__);
@@ -3563,9 +3661,13 @@ static int binder_state_show(struct seq_file *m, void *unused)
static int binder_stats_show(struct seq_file *m, void *unused)
{
+ struct binder_namespace *binder_ns = current_binder_ns();
struct binder_proc *proc;
int do_lock = !binder_debug_no_lock;
+ if (binder_ns == NULL)
+ return 0;
+
if (do_lock)
binder_lock(__func__);
@@ -3573,7 +3675,7 @@ static int binder_stats_show(struct seq_file *m, void *unused)
print_binder_stats(m, "", &binder_stats);
- hlist_for_each_entry(proc, &binder_procs, proc_node)
+ hlist_for_each_entry(proc, &binder_ns->procs, proc_node)
print_binder_proc_stats(m, proc);
if (do_lock)
binder_unlock(__func__);
@@ -3582,14 +3684,18 @@ static int binder_stats_show(struct seq_file *m, void *unused)
static int binder_transactions_show(struct seq_file *m, void *unused)
{
+ struct binder_namespace *binder_ns = current_binder_ns();
struct binder_proc *proc;
int do_lock = !binder_debug_no_lock;
+ if (binder_ns == NULL)
+ return 0;
+
if (do_lock)
binder_lock(__func__);
seq_puts(m, "binder transactions:\n");
- hlist_for_each_entry(proc, &binder_procs, proc_node)
+ hlist_for_each_entry(proc, &binder_ns->procs, proc_node)
print_binder_proc(m, proc, 0);
if (do_lock)
binder_unlock(__func__);
@@ -3661,9 +3767,15 @@ static int __init binder_init(void)
{
int ret;
+ ret = register_peripc_ops(&binder_peripc_ops);
+ if (ret < 0)
+ return ret;
+
binder_deferred_workqueue = create_singlethread_workqueue("binder");
- if (!binder_deferred_workqueue)
+ if (!binder_deferred_workqueue) {
+ unregister_peripc_ops(&binder_peripc_ops);
return -ENOMEM;
+ }
binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL);
if (binder_debugfs_dir_entry_root)
--
2.7.4
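To see the effect of the two patches together, here is a hedged userspace sketch (assuming a kernel built with CONFIG_IPC_NS and both patches, and a process holding CAP_SYS_ADMIN): after unshare(CLONE_NEWIPC) the process lands in a fresh IPC namespace, and therefore a fresh binder namespace, so it can claim the context manager role on /dev/binder even if a servicemanager already holds that role in the parent namespace.

/* Illustrative sketch only; error handling abbreviated. */
#define _GNU_SOURCE
#include <fcntl.h>
#include <sched.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/android/binder.h>

int main(void)
{
	int fd;

	if (unshare(CLONE_NEWIPC) < 0) {	/* new ipc_namespace => new binder_namespace */
		perror("unshare");
		return 1;
	}
	fd = open("/dev/binder", O_RDWR);
	if (fd < 0) {
		perror("open /dev/binder");
		return 1;
	}
	/* Succeeds independently of the parent namespace, because
	 * context_mgr_node now lives in per-namespace state. */
	if (ioctl(fd, BINDER_SET_CONTEXT_MGR, 0) < 0) {
		perror("BINDER_SET_CONTEXT_MGR");
		return 1;
	}
	puts("acting as binder context manager for this ipc namespace");
	pause();
	return 0;
}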