提交 5faaedb6 编写于 作者: C Cai Xinchen 提交者: Yongqiang Liu

Revert "cgroup: Fix threadgroup_rwsem <-> cpus_read_lock() deadlock"

hulk inclusion
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/I6TI3Y
CVE: NA

--------------------------------

This reverts commit 4924308a.
Signed-off-by: Cai Xinchen <caixinchen1@huawei.com>
Reviewed-by: Wang Weiyang <wangweiyang2@huawei.com>
Signed-off-by: Yongqiang Liu <liuyongqiang13@huawei.com>
上级 7d8391de
...@@ -56,7 +56,6 @@ ...@@ -56,7 +56,6 @@
#include <linux/file.h> #include <linux/file.h>
#include <linux/sched/cputime.h> #include <linux/sched/cputime.h>
#include <net/sock.h> #include <net/sock.h>
#include <linux/cpu.h>
#define CREATE_TRACE_POINTS #define CREATE_TRACE_POINTS
#include <trace/events/cgroup.h> #include <trace/events/cgroup.h>
...@@ -2213,45 +2212,6 @@ int task_cgroup_path(struct task_struct *task, char *buf, size_t buflen) ...@@ -2213,45 +2212,6 @@ int task_cgroup_path(struct task_struct *task, char *buf, size_t buflen)
} }
EXPORT_SYMBOL_GPL(task_cgroup_path); EXPORT_SYMBOL_GPL(task_cgroup_path);
/**
 * cgroup_attach_lock - Lock for ->attach()
 *
 * cgroup migration sometimes needs to stabilize threadgroups against forks and
 * exits by write-locking cgroup_threadgroup_rwsem. However, some ->attach()
 * implementations (e.g. cpuset), also need to disable CPU hotplug.
 * Unfortunately, letting ->attach() operations acquire cpus_read_lock() can
 * lead to deadlocks.
 *
 * Bringing up a CPU may involve creating and destroying tasks which requires
 * read-locking threadgroup_rwsem, so threadgroup_rwsem nests inside
 * cpus_read_lock(). If we call an ->attach() which acquires the cpus lock while
 * write-locking threadgroup_rwsem, the locking order is reversed and we end up
 * waiting for an on-going CPU hotplug operation which in turn is waiting for
 * the threadgroup_rwsem to be released to create new tasks. For more details:
 *
 * http://lkml.kernel.org/r/20220711174629.uehfmqegcwn2lqzu@wubuntu
 *
 * Resolve the situation by always acquiring cpus_read_lock() before
 * write-locking cgroup_threadgroup_rwsem. This allows ->attach() to assume that
 * CPU hotplug is disabled on entry.
 */
static void cgroup_attach_lock(void)
{
cpus_read_lock();
percpu_down_write(&cgroup_threadgroup_rwsem);
}
/**
 * cgroup_attach_unlock - Undo cgroup_attach_lock()
 *
 * Releases the locks taken by cgroup_attach_lock() in the reverse order:
 * the threadgroup rwsem first, then the CPU-hotplug read lock.
 */
static void cgroup_attach_unlock(void)
{
percpu_up_write(&cgroup_threadgroup_rwsem);
cpus_read_unlock();
}
/** /**
* cgroup_migrate_add_task - add a migration target task to a migration context * cgroup_migrate_add_task - add a migration target task to a migration context
* @task: target task * @task: target task
...@@ -2731,7 +2691,7 @@ struct task_struct *cgroup_procs_write_start(char *buf, bool threadgroup) ...@@ -2731,7 +2691,7 @@ struct task_struct *cgroup_procs_write_start(char *buf, bool threadgroup)
if (kstrtoint(strstrip(buf), 0, &pid) || pid < 0) if (kstrtoint(strstrip(buf), 0, &pid) || pid < 0)
return ERR_PTR(-EINVAL); return ERR_PTR(-EINVAL);
cgroup_attach_lock(); percpu_down_write(&cgroup_threadgroup_rwsem);
rcu_read_lock(); rcu_read_lock();
if (pid) { if (pid) {
...@@ -2762,7 +2722,7 @@ struct task_struct *cgroup_procs_write_start(char *buf, bool threadgroup) ...@@ -2762,7 +2722,7 @@ struct task_struct *cgroup_procs_write_start(char *buf, bool threadgroup)
goto out_unlock_rcu; goto out_unlock_rcu;
out_unlock_threadgroup: out_unlock_threadgroup:
cgroup_attach_unlock(); percpu_up_write(&cgroup_threadgroup_rwsem);
out_unlock_rcu: out_unlock_rcu:
rcu_read_unlock(); rcu_read_unlock();
return tsk; return tsk;
...@@ -2777,7 +2737,7 @@ void cgroup_procs_write_finish(struct task_struct *task) ...@@ -2777,7 +2737,7 @@ void cgroup_procs_write_finish(struct task_struct *task)
/* release reference from cgroup_procs_write_start() */ /* release reference from cgroup_procs_write_start() */
put_task_struct(task); put_task_struct(task);
cgroup_attach_unlock(); percpu_up_write(&cgroup_threadgroup_rwsem);
for_each_subsys(ss, ssid) for_each_subsys(ss, ssid)
if (ss->post_attach) if (ss->post_attach)
ss->post_attach(); ss->post_attach();
...@@ -2858,7 +2818,7 @@ static int cgroup_update_dfl_csses(struct cgroup *cgrp) ...@@ -2858,7 +2818,7 @@ static int cgroup_update_dfl_csses(struct cgroup *cgrp)
lockdep_assert_held(&cgroup_mutex); lockdep_assert_held(&cgroup_mutex);
cgroup_attach_lock(); percpu_down_write(&cgroup_threadgroup_rwsem);
/* look up all csses currently attached to @cgrp's subtree */ /* look up all csses currently attached to @cgrp's subtree */
spin_lock_irq(&css_set_lock); spin_lock_irq(&css_set_lock);
...@@ -2888,7 +2848,7 @@ static int cgroup_update_dfl_csses(struct cgroup *cgrp) ...@@ -2888,7 +2848,7 @@ static int cgroup_update_dfl_csses(struct cgroup *cgrp)
ret = cgroup_migrate_execute(&mgctx); ret = cgroup_migrate_execute(&mgctx);
out_finish: out_finish:
cgroup_migrate_finish(&mgctx); cgroup_migrate_finish(&mgctx);
cgroup_attach_unlock(); percpu_up_write(&cgroup_threadgroup_rwsem);
return ret; return ret;
} }
......
...@@ -1612,7 +1612,11 @@ static void cpuset_attach(struct cgroup_taskset *tset) ...@@ -1612,7 +1612,11 @@ static void cpuset_attach(struct cgroup_taskset *tset)
cgroup_taskset_first(tset, &css); cgroup_taskset_first(tset, &css);
cs = css_cs(css); cs = css_cs(css);
lockdep_assert_cpus_held(); /* see cgroup_attach_lock() */ /*
* It should hold cpus lock because a cpu offline event can
* cause set_cpus_allowed_ptr() failed.
*/
get_online_cpus();
mutex_lock(&cpuset_mutex); mutex_lock(&cpuset_mutex);
/* prepare for attach */ /* prepare for attach */
...@@ -1675,6 +1679,7 @@ static void cpuset_attach(struct cgroup_taskset *tset) ...@@ -1675,6 +1679,7 @@ static void cpuset_attach(struct cgroup_taskset *tset)
wake_up(&cpuset_attach_wq); wake_up(&cpuset_attach_wq);
mutex_unlock(&cpuset_mutex); mutex_unlock(&cpuset_mutex);
put_online_cpus();
} }
/* The various types of files and directories in a cpuset file system */ /* The various types of files and directories in a cpuset file system */
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册