Commit 41db4d6d authored by Ding Tianhong, committed by Yang Yingliang

ascend/share pool: bugfix, sp exit is not atomic

ascend inclusion
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/I4EUVI
CVE: NA

---------------------------------------------------

When mmput() is called concurrently, the check of "mm_users == 2"
in sp_group_exit() is not atomic with the atomic_dec_and_test() in
mmput(). The check "mm_users == 2" may therefore never evaluate to
true, and the mm is leaked.

For example, in a typical scenario a process has two threads, and
mmget() has been performed in sp_group_add_task(), so mm_users is 3.
When the two threads exit at the same time, both may read mm_users
before either decrements it, so the check "mm_users == 2" never
succeeds, as the sketch below illustrates.

Therefore, both the check and the atomic_dec_and_test() are performed
under the spg rw_lock to serialize the whole exit path.
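
Below is a sketch of the fixed flow under the same userspace model,
with pthread_rwlock_t standing in for spg->rw_lock (again
illustrative, not the kernel implementation): both the check and the
decrement happen with the write lock held, and the return value tells
the caller whether its reference was already consumed, mirroring the
early return added to mmput() in the patch:

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>

static pthread_rwlock_t spg_rw_lock = PTHREAD_RWLOCK_INITIALIZER;
static atomic_int mm_users = 3;

/*
 * Returns true when this function already consumed the caller's
 * reference, so the caller (mmput) must not decrement again.
 */
static bool sp_group_exit_fixed(void)
{
	pthread_rwlock_wrlock(&spg_rw_lock);
	if (atomic_load(&mm_users) == 2) {
		/* last two users: drop only the extra group reference
		 * and let the caller perform the final put */
		atomic_fetch_sub(&mm_users, 1);
		pthread_rwlock_unlock(&spg_rw_lock);
		return false;
	}
	/* more users remain: consume the caller's reference under the
	 * lock so no concurrent exit can miss the "== 2" window */
	atomic_fetch_sub(&mm_users, 1);
	pthread_rwlock_unlock(&spg_rw_lock);
	return true;
}

int main(void)
{
	/* with mm_users == 3, the caller's reference is consumed here */
	return sp_group_exit_fixed() ? 0 : 1;
}

Since every exit path now decrements under the lock, the counter can
only pass through the "== 2" state while the lock is held, which is
what the patch enforces in sp_group_exit().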
Signed-off-by: Ding Tianhong <dingtianhong@huawei.com>
Signed-off-by: Zhou Guanghui <zhouguanghui1@huawei.com>
Reviewed-by: Weilong Chen <chenweilong@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
Parent 67c0af6a
@@ -156,7 +156,7 @@ static inline void sp_init_mm(struct mm_struct *mm)
 }
 
 extern int sp_group_add_task(int pid, int spg_id);
-extern void sp_group_exit(struct mm_struct *mm);
+extern int sp_group_exit(struct mm_struct *mm);
 extern void sp_group_post_exit(struct mm_struct *mm);
 extern int sp_group_id_by_pid(int pid);
 extern int sp_group_walk(int spg_id, void *data, int (*func)(struct mm_struct *mm, void *));
@@ -299,8 +299,9 @@ static inline int sp_group_add_task(int pid, int spg_id)
 	return -EPERM;
 }
 
-static inline void sp_group_exit(struct mm_struct *mm)
+static inline int sp_group_exit(struct mm_struct *mm)
 {
+	return 0;
 }
 
 static inline void sp_group_post_exit(struct mm_struct *mm)
...
@@ -1086,7 +1086,8 @@ void mmput(struct mm_struct *mm)
 {
 	might_sleep();
 
-	sp_group_exit(mm);
+	if (sp_group_exit(mm))
+		return;
 
 	if (atomic_dec_and_test(&mm->mm_users))
 		__mmput(mm);
...
@@ -3110,14 +3110,20 @@ EXPORT_SYMBOL(sharepool_no_page);
 
 #define MM_WOULD_FREE	2
 
-void sp_group_exit(struct mm_struct *mm)
+int sp_group_exit(struct mm_struct *mm)
 {
 	struct sp_group *spg = mm->sp_group;
 	bool is_alive = true;
 
 	if (!spg || !enable_ascend_share_pool)
-		return;
+		return 0;
+	/*
+	 * The judgment of mm->mm_users == MM_WOULD_FREE and atomic_dec_and_test
+	 * must be atomic. Otherwise, mm->mm_users == MM_WOULD_FREE may never be
+	 * true due to the gap in the middle.
+	 */
+	down_write(&spg->rw_lock);
 
 	/*
 	 * Recall we add mm->users by 1 deliberately in sp_group_add_task().
 	 * If the mm_users is 2, it means that the mm is ready to be freed
@@ -3125,21 +3131,30 @@ void sp_group_exit(struct mm_struct *mm)
 	 * do_exit() -> exit_mm() -> mmput() -> THIS function.
 	 */
 	if (atomic_read(&mm->mm_users) == MM_WOULD_FREE) {
-		down_write(&spg->rw_lock);
 		/* a dead group should NOT be reactive again */
 		if (spg_valid(spg) && list_is_singular(&spg->procs))
 			is_alive = spg->is_alive = false;
 		if (mm->sp_group)	/* concurrency handle of sp_group_add_task */
 			list_del(&mm->sp_node);	/* affect spg->procs */
+		/* match with get_task_mm() in sp_group_add_task() */
+		atomic_dec(&mm->mm_users);
 		up_write(&spg->rw_lock);
 		if (!is_alive)
 			blocking_notifier_call_chain(&sp_notifier_chain, 0,
 						     mm->sp_group);
-		/* match with get_task_mm() in sp_group_add_task() */
-		atomic_dec(&mm->mm_users);
+		return 0;
 	}
+	if (atomic_dec_and_test(&mm->mm_users)) {
+		up_write(&spg->rw_lock);
+		WARN(1, "Invalid user counting\n");
+		return 0;
+	}
+	up_write(&spg->rw_lock);
+	return 1;
 }
 
 void sp_group_post_exit(struct mm_struct *mm)
...