Commit 8eb04a7a authored by Andrea Arcangeli, committed by Xie XiuQi

userfaultfd: use RCU to free the task struct when fork fails if MEMCG

euler inclusion
category: bugfix
bugzilla: 10989
CVE: NA

------------------------------------------------

MEMCG depends on the task structure not to be freed under
rcu_read_lock() in get_mem_cgroup_from_mm() after it dereferences
mm->owner.

A better fix would be to avoid registering forked vmas in userfaultfd
contexts reported to the monitor in case fork ends up failing.
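
For context, the reader-side path this protects looks roughly like the sketch
below. It is a simplified paraphrase of get_mem_cgroup_from_mm() in
mm/memcontrol.c, not the exact code of this kernel tree: the only point is that
mm->owner is dereferenced under rcu_read_lock(), so if a failed fork frees the
task struct immediately while mm->owner still points at it, this reader can
touch freed memory unless the free is deferred past an RCU grace period.

/* Simplified paraphrase of the MEMCG reader side -- illustrative only. */
struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm)
{
	struct mem_cgroup *memcg;

	rcu_read_lock();
	do {
		if (unlikely(!mm)) {
			memcg = root_mem_cgroup;
		} else {
			/* mm->owner must still be valid memory here */
			memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
			if (unlikely(!memcg))
				memcg = root_mem_cgroup;
		}
	} while (!css_tryget(&memcg->css));
	rcu_read_unlock();
	return memcg;
}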
Signed-off-by: Andrea Arcangeli <aarcange@redhat.com>
Signed-off-by: zhong jiang <zhongjiang@huawei.com>
Reviewed-by: Miao Xie <miaoxie@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
Parent 4877d0fd
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -956,6 +956,15 @@ static void mm_init_aio(struct mm_struct *mm)
 #endif
 }
 
+static __always_inline void mm_clear_owner(struct mm_struct *mm,
+					   struct task_struct *p)
+{
+#ifdef CONFIG_MEMCG
+	if (mm->owner == p)
+		mm->owner = NULL;
+#endif
+}
+
 static void mm_init_owner(struct mm_struct *mm, struct task_struct *p)
 {
 #ifdef CONFIG_MEMCG
@@ -1335,6 +1344,7 @@ static struct mm_struct *dup_mm(struct task_struct *tsk)
 free_pt:
 	/* don't put binfmt in mmput, we haven't got module yet */
 	mm->binfmt = NULL;
+	mm_init_owner(mm, NULL);
 	mmput(mm);
 
 fail_nomem:
@@ -1666,6 +1676,24 @@ static inline void rcu_copy_process(struct task_struct *p)
 #endif /* #ifdef CONFIG_TASKS_RCU */
 }
 
+#ifdef CONFIG_MEMCG
+static void __delayed_free_task(struct rcu_head *rhp)
+{
+	struct task_struct *tsk = container_of(rhp, struct task_struct, rcu);
+
+	free_task(tsk);
+}
+#endif /* CONFIG_MEMCG */
+
+static __always_inline void delayed_free_task(struct task_struct *tsk)
+{
+#ifdef CONFIG_MEMCG
+	call_rcu(&tsk->rcu, __delayed_free_task);
+#else /* CONFIG_MEMCG */
+	free_task(tsk);
+#endif /* CONFIG_MEMCG */
+}
+
 /*
  * This creates a new process as a copy of the old one,
  * but does not actually start it yet.
@@ -2121,8 +2149,10 @@ static __latent_entropy struct task_struct *copy_process(
 bad_fork_cleanup_namespaces:
 	exit_task_namespaces(p);
 bad_fork_cleanup_mm:
-	if (p->mm)
+	if (p->mm) {
+		mm_clear_owner(p->mm, p);
 		mmput(p->mm);
+	}
 bad_fork_cleanup_signal:
 	if (!(clone_flags & CLONE_THREAD))
 		free_signal_struct(p->signal);
@@ -2153,7 +2183,7 @@ static __latent_entropy struct task_struct *copy_process(
 bad_fork_free:
 	p->state = TASK_DEAD;
 	put_task_stack(p);
-	free_task(p);
+	delayed_free_task(p);
 fork_out:
 	spin_lock_irq(&current->sighand->siglock);
 	hlist_del_init(&delayed.node);
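
In short, the failed-fork path now does two things: it clears mm->owner
(mm_clear_owner() in copy_process(), mm_init_owner(mm, NULL) in dup_mm()) so
new MEMCG readers fall back to root_mem_cgroup, and it routes the final
free_task() through call_rcu() via delayed_free_task() so a reader that already
fetched the owner pointer under rcu_read_lock() never sees the task struct's
memory reused. The deferred-free idiom this relies on is shown by the minimal
sketch below; it uses a hypothetical struct foo and is illustrative only, not
part of the patch.

#include <linux/rcupdate.h>
#include <linux/slab.h>

/* Hypothetical object freed with the same call_rcu() pattern as the patch. */
struct foo {
	struct rcu_head rcu;
	int data;
};

static void foo_free_rcu(struct rcu_head *rhp)
{
	/* Runs only after every pre-existing rcu_read_lock() section has ended. */
	kfree(container_of(rhp, struct foo, rcu));
}

static void foo_release(struct foo *f)
{
	/* Readers that already hold a pointer to *f can keep using it safely. */
	call_rcu(&f->rcu, foo_free_rcu);
}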