Commit 0c986253 authored by Tejun Heo

Revert "sched, cgroup: replace signal_struct->group_rwsem with a global percpu_rwsem"

This reverts commit d59cfc09.

d59cfc09 ("sched, cgroup: replace signal_struct->group_rwsem with
a global percpu_rwsem") and b5ba75b5 ("cgroup: simplify
threadgroup locking") changed how cgroup synchronizes against task
fork and exits so that it uses global percpu_rwsem instead of
per-process rwsem; unfortunately, the write [un]lock paths of
percpu_rwsem always involve synchronize_rcu_expedited() which turned
out to be too expensive.

Improvements for percpu_rwsem which alleviate this issue are
scheduled to be merged in the coming v4.4-rc1 merge window.  For now,
revert the two commits to restore the per-process rwsem.  They will be
re-applied for the v4.4-rc1 merge window.
Signed-off-by: Tejun Heo <tj@kernel.org>
Link: http://lkml.kernel.org/g/55F8097A.7000206@de.ibm.com
Reported-by: Christian Borntraeger <borntraeger@de.ibm.com>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: stable@vger.kernel.org # v4.2+
Parent f9f9e7b7
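As context for the diff below, here is a minimal userspace sketch of the per-process locking scheme this revert restores. It is an illustrative pthreads analogue, not kernel code: fake_signal_struct and the helper names merely mirror the kernel identifiers, and pthread_rwlock_t stands in for the kernel's rw_semaphore. The point it demonstrates is that the write side (a cgroup migration) contends only with fork/exit of the one target process, whereas the reverted global percpu_rwsem made every writer synchronize against forks and exits system-wide.

/*
 * Illustrative userspace analogue only -- fake_signal_struct and these
 * helpers are hypothetical stand-ins for the kernel's signal_struct,
 * threadgroup_change_begin/end() and threadgroup_[un]lock().
 * Build with: cc -pthread sketch.c
 */
#include <pthread.h>
#include <stdio.h>

struct fake_signal_struct {
	pthread_rwlock_t group_rwsem;	/* stands in for signal_struct->group_rwsem */
};

/* fork/exit hot paths: per-process read lock, no cross-process contention */
static void threadgroup_change_begin(struct fake_signal_struct *sig)
{
	pthread_rwlock_rdlock(&sig->group_rwsem);
}

static void threadgroup_change_end(struct fake_signal_struct *sig)
{
	pthread_rwlock_unlock(&sig->group_rwsem);
}

/* cgroup attach path: write lock excludes fork/exit of this process only */
static void threadgroup_lock(struct fake_signal_struct *sig)
{
	pthread_rwlock_wrlock(&sig->group_rwsem);
}

static void threadgroup_unlock(struct fake_signal_struct *sig)
{
	pthread_rwlock_unlock(&sig->group_rwsem);
}

int main(void)
{
	struct fake_signal_struct sig;

	pthread_rwlock_init(&sig.group_rwsem, NULL);

	threadgroup_change_begin(&sig);		/* e.g. a thread being forked */
	threadgroup_change_end(&sig);

	threadgroup_lock(&sig);			/* migration sees a stable threadgroup */
	threadgroup_unlock(&sig);

	pthread_rwlock_destroy(&sig.group_rwsem);
	puts("per-process rwsem sketch done");
	return 0;
}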
--- a/include/linux/cgroup-defs.h
+++ b/include/linux/cgroup-defs.h
@@ -473,31 +473,8 @@ struct cgroup_subsys {
 	unsigned int depends_on;
 };
 
-extern struct percpu_rw_semaphore cgroup_threadgroup_rwsem;
-
-/**
- * cgroup_threadgroup_change_begin - threadgroup exclusion for cgroups
- * @tsk: target task
- *
- * Called from threadgroup_change_begin() and allows cgroup operations to
- * synchronize against threadgroup changes using a percpu_rw_semaphore.
- */
-static inline void cgroup_threadgroup_change_begin(struct task_struct *tsk)
-{
-	percpu_down_read(&cgroup_threadgroup_rwsem);
-}
-
-/**
- * cgroup_threadgroup_change_end - threadgroup exclusion for cgroups
- * @tsk: target task
- *
- * Called from threadgroup_change_end().  Counterpart of
- * cgroup_threadcgroup_change_begin().
- */
-static inline void cgroup_threadgroup_change_end(struct task_struct *tsk)
-{
-	percpu_up_read(&cgroup_threadgroup_rwsem);
-}
+void cgroup_threadgroup_change_begin(struct task_struct *tsk);
+void cgroup_threadgroup_change_end(struct task_struct *tsk);
 
 #else	/* CONFIG_CGROUPS */
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -25,6 +25,13 @@
 extern struct files_struct init_files;
 extern struct fs_struct init_fs;
 
+#ifdef CONFIG_CGROUPS
+#define INIT_GROUP_RWSEM(sig)						\
+	.group_rwsem = __RWSEM_INITIALIZER(sig.group_rwsem),
+#else
+#define INIT_GROUP_RWSEM(sig)
+#endif
+
 #ifdef CONFIG_CPUSETS
 #define INIT_CPUSET_SEQ(tsk)						\
 	.mems_allowed_seq = SEQCNT_ZERO(tsk.mems_allowed_seq),
@@ -57,6 +64,7 @@ extern struct fs_struct init_fs;
 	INIT_PREV_CPUTIME(sig)						\
 	.cred_guard_mutex =						\
 		 __MUTEX_INITIALIZER(sig.cred_guard_mutex),		\
+	INIT_GROUP_RWSEM(sig)						\
 }
 
 extern struct nsproxy init_nsproxy;
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -762,6 +762,18 @@ struct signal_struct {
 	unsigned audit_tty_log_passwd;
 	struct tty_audit_buf *tty_audit_buf;
 #endif
+#ifdef CONFIG_CGROUPS
+	/*
+	 * group_rwsem prevents new tasks from entering the threadgroup and
+	 * member tasks from exiting, more specifically, setting of
+	 * PF_EXITING.  fork and exit paths are protected with this rwsem
+	 * using threadgroup_change_begin/end().  Users which require
+	 * threadgroup to remain stable should use threadgroup_[un]lock()
+	 * which also takes care of exec path.  Currently, cgroup is the
+	 * only user.
+	 */
+	struct rw_semaphore group_rwsem;
+#endif
 
 	oom_flags_t oom_flags;
 	short oom_score_adj;		/* OOM kill score adjustment */
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -46,7 +46,6 @@
 #include <linux/slab.h>
 #include <linux/spinlock.h>
 #include <linux/rwsem.h>
-#include <linux/percpu-rwsem.h>
 #include <linux/string.h>
 #include <linux/sort.h>
 #include <linux/kmod.h>
@@ -104,8 +103,6 @@ static DEFINE_SPINLOCK(cgroup_idr_lock);
  */
 static DEFINE_SPINLOCK(release_agent_path_lock);
 
-struct percpu_rw_semaphore cgroup_threadgroup_rwsem;
-
 #define cgroup_assert_mutex_or_rcu_locked()				\
 	RCU_LOCKDEP_WARN(!rcu_read_lock_held() &&			\
 			   !lockdep_is_held(&cgroup_mutex),		\
@@ -874,6 +871,48 @@ static struct css_set *find_css_set(struct css_set *old_cset,
 	return cset;
 }
 
+void cgroup_threadgroup_change_begin(struct task_struct *tsk)
+{
+	down_read(&tsk->signal->group_rwsem);
+}
+
+void cgroup_threadgroup_change_end(struct task_struct *tsk)
+{
+	up_read(&tsk->signal->group_rwsem);
+}
+
+/**
+ * threadgroup_lock - lock threadgroup
+ * @tsk: member task of the threadgroup to lock
+ *
+ * Lock the threadgroup @tsk belongs to.  No new task is allowed to enter
+ * and member tasks aren't allowed to exit (as indicated by PF_EXITING) or
+ * change ->group_leader/pid.  This is useful for cases where the
+ * threadgroup needs to stay stable across blockable operations.
+ *
+ * fork and exit explicitly call threadgroup_change_{begin|end}() for
+ * synchronization.  While held, no new task will be added to threadgroup
+ * and no existing live task will have its PF_EXITING set.
+ *
+ * de_thread() does threadgroup_change_{begin|end}() when a non-leader
+ * sub-thread becomes a new leader.
+ */
+static void threadgroup_lock(struct task_struct *tsk)
+{
+	down_write(&tsk->signal->group_rwsem);
+}
+
+/**
+ * threadgroup_unlock - unlock threadgroup
+ * @tsk: member task of the threadgroup to unlock
+ *
+ * Reverse threadgroup_lock().
+ */
+static inline void threadgroup_unlock(struct task_struct *tsk)
+{
+	up_write(&tsk->signal->group_rwsem);
+}
+
 static struct cgroup_root *cgroup_root_from_kf(struct kernfs_root *kf_root)
 {
 	struct cgroup *root_cgrp = kf_root->kn->priv;
@@ -2074,9 +2113,9 @@ static void cgroup_task_migrate(struct cgroup *old_cgrp,
 	lockdep_assert_held(&css_set_rwsem);
 
 	/*
-	 * We are synchronized through cgroup_threadgroup_rwsem against
-	 * PF_EXITING setting such that we can't race against cgroup_exit()
-	 * changing the css_set to init_css_set and dropping the old one.
+	 * We are synchronized through threadgroup_lock() against PF_EXITING
+	 * setting such that we can't race against cgroup_exit() changing the
+	 * css_set to init_css_set and dropping the old one.
 	 */
 	WARN_ON_ONCE(tsk->flags & PF_EXITING);
 	old_cset = task_css_set(tsk);
@@ -2133,11 +2172,10 @@ static void cgroup_migrate_finish(struct list_head *preloaded_csets)
 * @src_cset and add it to @preloaded_csets, which should later be cleaned
 * up by cgroup_migrate_finish().
 *
- * This function may be called without holding cgroup_threadgroup_rwsem
- * even if the target is a process.  Threads may be created and destroyed
- * but as long as cgroup_mutex is not dropped, no new css_set can be put
- * into play and the preloaded css_sets are guaranteed to cover all
- * migrations.
+ * This function may be called without holding threadgroup_lock even if the
+ * target is a process.  Threads may be created and destroyed but as long
+ * as cgroup_mutex is not dropped, no new css_set can be put into play and
+ * the preloaded css_sets are guaranteed to cover all migrations.
 */
 static void cgroup_migrate_add_src(struct css_set *src_cset,
 				   struct cgroup *dst_cgrp,
@@ -2240,7 +2278,7 @@ static int cgroup_migrate_prepare_dst(struct cgroup *dst_cgrp,
 * @threadgroup: whether @leader points to the whole process or a single task
 *
 * Migrate a process or task denoted by @leader to @cgrp.  If migrating a
- * process, the caller must be holding cgroup_threadgroup_rwsem.  The
+ * process, the caller must be holding threadgroup_lock of @leader.  The
 * caller is also responsible for invoking cgroup_migrate_add_src() and
 * cgroup_migrate_prepare_dst() on the targets before invoking this
 * function and following up with cgroup_migrate_finish().
@@ -2368,7 +2406,7 @@ static int cgroup_migrate(struct cgroup *cgrp, struct task_struct *leader,
 * @leader: the task or the leader of the threadgroup to be attached
 * @threadgroup: attach the whole threadgroup?
 *
- * Call holding cgroup_mutex and cgroup_threadgroup_rwsem.
+ * Call holding cgroup_mutex and threadgroup_lock of @leader.
 */
 static int cgroup_attach_task(struct cgroup *dst_cgrp,
 			      struct task_struct *leader, bool threadgroup)
@@ -2490,7 +2528,7 @@ static ssize_t __cgroup_procs_write(struct kernfs_open_file *of, char *buf,
 	get_task_struct(tsk);
 	rcu_read_unlock();
 
-	percpu_down_write(&cgroup_threadgroup_rwsem);
+	threadgroup_lock(tsk);
 	if (threadgroup) {
 		if (!thread_group_leader(tsk)) {
 			/*
@@ -2500,7 +2538,7 @@ static ssize_t __cgroup_procs_write(struct kernfs_open_file *of, char *buf,
 			 * try again; this is
 			 * "double-double-toil-and-trouble-check locking".
 			 */
-			percpu_up_write(&cgroup_threadgroup_rwsem);
+			threadgroup_unlock(tsk);
 			put_task_struct(tsk);
 			goto retry_find_task;
 		}
@@ -2510,7 +2548,7 @@ static ssize_t __cgroup_procs_write(struct kernfs_open_file *of, char *buf,
 	if (!ret)
 		ret = cgroup_attach_task(cgrp, tsk, threadgroup);
 
-	percpu_up_write(&cgroup_threadgroup_rwsem);
+	threadgroup_unlock(tsk);
 
 	put_task_struct(tsk);
 out_unlock_cgroup:
@@ -2713,17 +2751,17 @@ static int cgroup_update_dfl_csses(struct cgroup *cgrp)
 			goto out_finish;
 		last_task = task;
 
-		percpu_down_write(&cgroup_threadgroup_rwsem);
+		threadgroup_lock(task);
 		/* raced against de_thread() from another thread? */
 		if (!thread_group_leader(task)) {
-			percpu_up_write(&cgroup_threadgroup_rwsem);
+			threadgroup_unlock(task);
 			put_task_struct(task);
 			continue;
 		}
 
 		ret = cgroup_migrate(src_cset->dfl_cgrp, task, true);
 
-		percpu_up_write(&cgroup_threadgroup_rwsem);
+		threadgroup_unlock(task);
 		put_task_struct(task);
 
 		if (WARN(ret, "cgroup: failed to update controllers for the default hierarchy (%d), further operations may crash or hang\n", ret))
@@ -5045,7 +5083,6 @@ int __init cgroup_init(void)
 	unsigned long key;
 	int ssid, err;
 
-	BUG_ON(percpu_init_rwsem(&cgroup_threadgroup_rwsem));
 	BUG_ON(cgroup_init_cftypes(NULL, cgroup_dfl_base_files));
 	BUG_ON(cgroup_init_cftypes(NULL, cgroup_legacy_base_files));
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1149,6 +1149,10 @@ static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
 	tty_audit_fork(sig);
 	sched_autogroup_fork(sig);
 
+#ifdef CONFIG_CGROUPS
+	init_rwsem(&sig->group_rwsem);
+#endif
+
 	sig->oom_score_adj = current->signal->oom_score_adj;
 	sig->oom_score_adj_min = current->signal->oom_score_adj_min;