Commit 40966e31 authored by Eric W. Biederman

kthread: Ensure struct kthread is present for all kthreads

Today the rules are a bit iffy and arbitrary about which kernel
threads have struct kthread present.  Both idle threads and threads
started with create_kthread want struct kthread present, so that is
effectively all kernel threads.  Make the rule that if PF_KTHREAD
is set and the task is running, then struct kthread is present.

This will allow the kernel thread code to use tsk->exit_code
with different semantics from ordinary processes.

To ensure that struct kthread is present for all kernel threads,
move its allocation into copy_process.

Add a deallocation of struct kthread in exec for processes
that were kernel threads.

Move the allocation of struct kthread for the initial thread
earlier so that it is not repeated for each additional idle
thread.

Move the initialization of struct kthread into set_kthread_struct
so that the structure is always and reliably initialized.

Clear set_child_tid in free_kthread_struct to ensure the kthread
struct is reliably freed during exec.  The function
free_kthread_struct does not need to clear vfork_done during exec,
as exec_mm_release, called from exec_mmap, has already cleared it.
Signed-off-by: "Eric W. Biederman" <ebiederm@xmission.com>
Parent cead1855
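
For context, the helpers the diff leans on stash the struct kthread pointer in the otherwise-unused ->set_child_tid field of the task. An approximate sketch of the existing accessors in kernel/kthread.c (pre-existing code, not part of this patch):

	static inline struct kthread *to_kthread(struct task_struct *k)
	{
		WARN_ON(!(k->flags & PF_KTHREAD));	/* caller must pass a kthread */
		return (__force void *)k->set_child_tid;
	}

	static inline struct kthread *__to_kthread(struct task_struct *p)
	{
		void *kthread = (__force void *)p->set_child_tid;

		/* tolerate tasks that are not (or are no longer) kthreads */
		if (kthread && !(p->flags & PF_KTHREAD))
			kthread = NULL;
		return kthread;
	}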
fs/exec.c
@@ -1307,6 +1307,8 @@ int begin_new_exec(struct linux_binprm * bprm)
 	 */
 	force_uaccess_begin();
 
+	if (me->flags & PF_KTHREAD)
+		free_kthread_struct(me);
 	me->flags &= ~(PF_RANDOMIZE | PF_FORKNOEXEC | PF_KTHREAD |
 					PF_NOFREEZE | PF_NO_SETAFFINITY);
 	flush_thread();
include/linux/kthread.h
@@ -33,7 +33,7 @@ struct task_struct *kthread_create_on_cpu(int (*threadfn)(void *data),
 					  unsigned int cpu,
 					  const char *namefmt);
 
-void set_kthread_struct(struct task_struct *p);
+bool set_kthread_struct(struct task_struct *p);
 
 void kthread_set_per_cpu(struct task_struct *k, int cpu);
 bool kthread_is_per_cpu(struct task_struct *k);
kernel/fork.c
@@ -2118,6 +2118,10 @@ static __latent_entropy struct task_struct *copy_process(
 	p->io_context = NULL;
 	audit_set_context(p, NULL);
 	cgroup_fork(p);
+	if (p->flags & PF_KTHREAD) {
+		if (!set_kthread_struct(p))
+			goto bad_fork_cleanup_threadgroup_lock;
+	}
 #ifdef CONFIG_NUMA
 	p->mempolicy = mpol_dup(p->mempolicy);
 	if (IS_ERR(p->mempolicy)) {
kernel/kthread.c
@@ -93,20 +93,27 @@ static inline struct kthread *__to_kthread(struct task_struct *p)
 	return kthread;
 }
 
-void set_kthread_struct(struct task_struct *p)
+bool set_kthread_struct(struct task_struct *p)
 {
 	struct kthread *kthread;
 
-	if (__to_kthread(p))
-		return;
+	if (WARN_ON_ONCE(to_kthread(p)))
+		return false;
 
 	kthread = kzalloc(sizeof(*kthread), GFP_KERNEL);
+	if (!kthread)
+		return false;
+
+	init_completion(&kthread->exited);
+	init_completion(&kthread->parked);
+	p->vfork_done = &kthread->exited;
+
 	/*
 	 * We abuse ->set_child_tid to avoid the new member and because it
-	 * can't be wrongly copied by copy_process(). We also rely on fact
-	 * that the caller can't exec, so PF_KTHREAD can't be cleared.
+	 * can't be wrongly copied by copy_process().
 	 */
 	p->set_child_tid = (__force void __user *)kthread;
+	return true;
 }
 
 void free_kthread_struct(struct task_struct *k)
@@ -114,13 +121,13 @@ void free_kthread_struct(struct task_struct *k)
 	struct kthread *kthread;
 
 	/*
-	 * Can be NULL if this kthread was created by kernel_thread()
-	 * or if kmalloc() in kthread() failed.
+	 * Can be NULL if kmalloc() in set_kthread_struct() failed.
 	 */
 	kthread = to_kthread(k);
 #ifdef CONFIG_BLK_CGROUP
 	WARN_ON_ONCE(kthread && kthread->blkcg_css);
 #endif
+	k->set_child_tid = (__force void __user *)NULL;
 	kfree(kthread);
 }
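
The new NULL assignment pairs with the begin_new_exec() hunk above. A condensed, annotated sketch of the resulting exec-time sequence (editorial comments, not literal kernel code):

	if (me->flags & PF_KTHREAD)
		free_kthread_struct(me);	/* kfree()s struct kthread and NULLs
						 * ->set_child_tid, so no later
						 * to_kthread() lookup can reach
						 * the freed memory */
	me->flags &= ~PF_KTHREAD;		/* now an ordinary process:
						 * ->set_child_tid reverts to its
						 * CLONE_CHILD_SETTID role and must
						 * not hold a stale kernel pointer */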
kernel/kthread.c (continued)
@@ -315,7 +322,6 @@ static int kthread(void *_create)
 	struct kthread *self;
 	int ret;
 
-	set_kthread_struct(current);
 	self = to_kthread(current);
 
 	/* If user was SIGKILLed, I release the structure. */
@@ -325,17 +331,8 @@ static int kthread(void *_create)
 		kthread_exit(-EINTR);
 	}
 
-	if (!self) {
-		create->result = ERR_PTR(-ENOMEM);
-		complete(done);
-		kthread_exit(-ENOMEM);
-	}
-
 	self->threadfn = threadfn;
 	self->data = data;
-	init_completion(&self->exited);
-	init_completion(&self->parked);
-	current->vfork_done = &self->exited;
 
 	/*
 	 * The new thread inherited kthreadd's priority and CPU mask. Reset
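
A consequence of moving the allocation into copy_process(): an allocation failure now surfaces to kthread_create() callers through the fork path rather than through the removed -ENOMEM branch inside kthread(). A minimal, hypothetical caller (demo_fn/demo_start are illustrative names, not from the patch):

	#include <linux/kthread.h>
	#include <linux/sched.h>
	#include <linux/err.h>

	static int demo_fn(void *data)
	{
		/* idle loop until someone calls kthread_stop() */
		while (!kthread_should_stop())
			schedule_timeout_interruptible(HZ);
		return 0;
	}

	static int demo_start(void)
	{
		struct task_struct *t;

		t = kthread_create(demo_fn, NULL, "demo");
		if (IS_ERR(t))		/* e.g. -ENOMEM, now raised in copy_process() */
			return PTR_ERR(t);
		wake_up_process(t);
		return 0;
	}

The caller-visible API is unchanged; only the point of failure moves earlier.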
kernel/sched/core.c
@@ -8599,14 +8599,6 @@ void __init init_idle(struct task_struct *idle, int cpu)
 
 	__sched_fork(0, idle);
 
-	/*
-	 * The idle task doesn't need the kthread struct to function, but it
-	 * is dressed up as a per-CPU kthread and thus needs to play the part
-	 * if we want to avoid special-casing it in code that deals with per-CPU
-	 * kthreads.
-	 */
-	set_kthread_struct(idle);
-
 	raw_spin_lock_irqsave(&idle->pi_lock, flags);
 	raw_spin_rq_lock(rq);
@@ -9427,6 +9419,14 @@ void __init sched_init(void)
 	mmgrab(&init_mm);
 	enter_lazy_tlb(&init_mm, current);
 
+	/*
+	 * The idle task doesn't need the kthread struct to function, but it
+	 * is dressed up as a per-CPU kthread and thus needs to play the part
+	 * if we want to avoid special-casing it in code that deals with per-CPU
+	 * kthreads.
+	 */
+	WARN_ON(!set_kthread_struct(current));
+
 	/*
	 * Make us the idle thread. Technically, schedule() should not be
 	 * called from this thread, however somewhere below it might be,
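
Why this split works, hedged as an editorial reading of the patch: the boot idle task never passes through copy_process(), so sched_init() must allocate its struct kthread explicitly (the WARN_ON above fires only if that early boot allocation fails); secondary idle tasks are created via fork_idle() -> copy_process() and inherit PF_KTHREAD, so the new hook in copy_process() covers them and init_idle() no longer needs to. For reference, the boot task carries PF_KTHREAD statically, as in this approximate excerpt of pre-existing code:

	/* init/init_task.c (existing code, excerpt; unchanged by this patch) */
	struct task_struct init_task = {
		.flags = PF_KTHREAD,	/* children forked in early boot inherit
					 * this and hit the new copy_process() branch */
		/* ...many other fields elided... */
	};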