Commit fb0a685c authored by Daniel Rebelo de Oliveira, committed by Linus Torvalds

kernel/fork.c: fix a few coding style issues

Signed-off-by: Daniel Rebelo de Oliveira <psykon@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Parent 293eb1e7
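The cleanups below follow the conventions in Documentation/CodingStyle, of the kind flagged by scripts/checkpatch.pl: a space after keywords such as "if", the '*' of a pointer declaration placed next to the name, a space before an opening brace, spaces around ':' and after ',', and assignments moved out of "if" conditions. A rough userspace sketch of those rules (illustrative code with invented names, not part of this commit):

/*
 * Style sketch only: dup_upper() and its variables are invented for
 * this example; none of this is kernel code.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Pointer '*' binds to the name: "char *s", not "char * s". */
static char *dup_upper(const char *src)
{
	char *copy;
	size_t i, len;

	len = strlen(src);
	copy = malloc(len + 1);		/* assign first ...              */
	if (!copy)			/* ... then test on its own line */
		return NULL;

	for (i = 0; i <= len; i++) {	/* space after "for", before '{' */
		char c = src[i];

		if (c >= 'a' && c <= 'z')	/* "if (...)", not "if(...)" */
			c -= 'a' - 'A';
		copy[i] = c;
	}
	return copy;
}

int main(void)
{
	char *s = dup_upper("hello, fork");	/* space after ',' in calls */

	if (!s)
		return EXIT_FAILURE;
	printf("%s\n", s);			/* prints "HELLO, FORK" */
	free(s);
	return EXIT_SUCCESS;
}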
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -80,7 +80,7 @@
  * Protected counters by write_lock_irq(&tasklist_lock)
  */
 unsigned long total_forks;	/* Handle normal Linux uptimes. */
-int nr_threads; 		/* The idle threads do not count.. */
+int nr_threads;			/* The idle threads do not count.. */
 
 int max_threads;		/* tunable limit on nr_threads */
@@ -232,7 +232,7 @@ void __init fork_init(unsigned long mempages)
 	/*
 	 * we need to allow at least 20 threads to boot a system
 	 */
-	if(max_threads < 20)
+	if (max_threads < 20)
 		max_threads = 20;
 
 	init_task.signal->rlim[RLIMIT_NPROC].rlim_cur = max_threads/2;
@@ -268,7 +268,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
 		return NULL;
 	}
 
- 	err = arch_dup_task_struct(tsk, orig);
+	err = arch_dup_task_struct(tsk, orig);
 	if (err)
 		goto out;
@@ -288,8 +288,11 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
 	tsk->stack_canary = get_random_int();
 #endif
 
-	/* One for us, one for whoever does the "release_task()" (usually parent) */
-	atomic_set(&tsk->usage,2);
+	/*
+	 * One for us, one for whoever does the "release_task()" (usually
+	 * parent)
+	 */
+	atomic_set(&tsk->usage, 2);
 #ifdef CONFIG_BLK_DEV_IO_TRACE
 	tsk->btrace_seq = 0;
 #endif
@@ -437,7 +440,7 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
 	goto out;
 }
 
-static inline int mm_alloc_pgd(struct mm_struct * mm)
+static inline int mm_alloc_pgd(struct mm_struct *mm)
 {
 	mm->pgd = pgd_alloc(mm);
 	if (unlikely(!mm->pgd))
@@ -445,7 +448,7 @@ static inline int mm_alloc_pgd(struct mm_struct *mm)
 	return 0;
 }
 
-static inline void mm_free_pgd(struct mm_struct * mm)
+static inline void mm_free_pgd(struct mm_struct *mm)
 {
 	pgd_free(mm, mm->pgd);
 }
@@ -482,7 +485,7 @@ static void mm_init_aio(struct mm_struct *mm)
 #endif
 }
 
-static struct mm_struct * mm_init(struct mm_struct * mm, struct task_struct *p)
+static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p)
 {
 	atomic_set(&mm->mm_users, 1);
 	atomic_set(&mm->mm_count, 1);
@@ -513,9 +516,9 @@ static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p)
 /*
  * Allocate and initialize an mm_struct.
  */
-struct mm_struct * mm_alloc(void)
+struct mm_struct *mm_alloc(void)
 {
-	struct mm_struct * mm;
+	struct mm_struct *mm;
 
 	mm = allocate_mm();
 	if (!mm)
@@ -583,7 +586,7 @@ void added_exe_file_vma(struct mm_struct *mm)
 void removed_exe_file_vma(struct mm_struct *mm)
 {
 	mm->num_exe_file_vmas--;
-	if ((mm->num_exe_file_vmas == 0) && mm->exe_file){
+	if ((mm->num_exe_file_vmas == 0) && mm->exe_file) {
 		fput(mm->exe_file);
 		mm->exe_file = NULL;
 	}
@@ -775,9 +778,9 @@ struct mm_struct *dup_mm(struct task_struct *tsk)
 	return NULL;
 }
 
-static int copy_mm(unsigned long clone_flags, struct task_struct * tsk)
+static int copy_mm(unsigned long clone_flags, struct task_struct *tsk)
 {
-	struct mm_struct * mm, *oldmm;
+	struct mm_struct *mm, *oldmm;
 	int retval;
 
 	tsk->min_flt = tsk->maj_flt = 0;
@@ -844,7 +847,7 @@ static int copy_fs(unsigned long clone_flags, struct task_struct *tsk)
 	return 0;
 }
 
-static int copy_files(unsigned long clone_flags, struct task_struct * tsk)
+static int copy_files(unsigned long clone_flags, struct task_struct *tsk)
 {
 	struct files_struct *oldf, *newf;
 	int error = 0;
@@ -1166,11 +1169,11 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 	cgroup_fork(p);
 #ifdef CONFIG_NUMA
 	p->mempolicy = mpol_dup(p->mempolicy);
- 	if (IS_ERR(p->mempolicy)) {
- 		retval = PTR_ERR(p->mempolicy);
- 		p->mempolicy = NULL;
- 		goto bad_fork_cleanup_cgroup;
- 	}
+	if (IS_ERR(p->mempolicy)) {
+		retval = PTR_ERR(p->mempolicy);
+		p->mempolicy = NULL;
+		goto bad_fork_cleanup_cgroup;
+	}
 	mpol_fix_fork_child_flag(p);
 #endif
 #ifdef CONFIG_CPUSETS
@@ -1216,25 +1219,33 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 	retval = perf_event_init_task(p);
 	if (retval)
 		goto bad_fork_cleanup_policy;
-
-	if ((retval = audit_alloc(p)))
+	retval = audit_alloc(p);
+	if (retval)
 		goto bad_fork_cleanup_policy;
 	/* copy all the process information */
-	if ((retval = copy_semundo(clone_flags, p)))
+	retval = copy_semundo(clone_flags, p);
+	if (retval)
 		goto bad_fork_cleanup_audit;
-	if ((retval = copy_files(clone_flags, p)))
+	retval = copy_files(clone_flags, p);
+	if (retval)
 		goto bad_fork_cleanup_semundo;
-	if ((retval = copy_fs(clone_flags, p)))
+	retval = copy_fs(clone_flags, p);
+	if (retval)
 		goto bad_fork_cleanup_files;
-	if ((retval = copy_sighand(clone_flags, p)))
+	retval = copy_sighand(clone_flags, p);
+	if (retval)
 		goto bad_fork_cleanup_fs;
-	if ((retval = copy_signal(clone_flags, p)))
+	retval = copy_signal(clone_flags, p);
+	if (retval)
 		goto bad_fork_cleanup_sighand;
-	if ((retval = copy_mm(clone_flags, p)))
+	retval = copy_mm(clone_flags, p);
+	if (retval)
 		goto bad_fork_cleanup_signal;
-	if ((retval = copy_namespaces(clone_flags, p)))
+	retval = copy_namespaces(clone_flags, p);
+	if (retval)
 		goto bad_fork_cleanup_mm;
-	if ((retval = copy_io(clone_flags, p)))
+	retval = copy_io(clone_flags, p);
+	if (retval)
 		goto bad_fork_cleanup_namespaces;
 	retval = copy_thread(clone_flags, stack_start, stack_size, p, regs);
 	if (retval)
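The hunk above rewrites each "if ((retval = copy_xxx(...)))" into a plain assignment followed by a separate test, while keeping the goto-based cleanup ladder that unwinds already-acquired resources in reverse order. A minimal standalone sketch of that pattern (invented helper names, not kernel APIs):

/*
 * create_object() mimics the copy_process() error ladder; setup_a,
 * setup_b and teardown_a are stand-ins invented for this sketch.
 */
#include <stdio.h>

static int setup_a(void) { return 0; }		/* 0 on success */
static int setup_b(void) { return -1; }		/* simulate a failure */
static void teardown_a(void) { puts("undoing step a"); }

static int create_object(void)
{
	int retval;

	retval = setup_a();	/* plain assignment ...               */
	if (retval)		/* ... tested on its own line, as in  */
		goto out;	/* the rewritten copy_process()       */

	retval = setup_b();
	if (retval)
		goto cleanup_a;	/* unwind only what already succeeded */

	return 0;

cleanup_a:
	teardown_a();
out:
	return retval;
}

int main(void)
{
	return create_object() ? 1 : 0;
}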
@@ -1256,7 +1267,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 	/*
 	 * Clear TID on mm_release()?
 	 */
-	p->clear_child_tid = (clone_flags & CLONE_CHILD_CLEARTID) ? child_tidptr: NULL;
+	p->clear_child_tid = (clone_flags & CLONE_CHILD_CLEARTID) ? child_tidptr : NULL;
 #ifdef CONFIG_BLOCK
 	p->plug = NULL;
 #endif
@@ -1324,7 +1335,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 	 * it's process group.
 	 * A fatal signal pending means that current will exit, so the new
 	 * thread can't slip out of an OOM kill (or normal SIGKILL).
-  	 */
+	 */
 	recalc_sigpending();
 	if (signal_pending(current)) {
 		spin_unlock(&current->sighand->siglock);
@@ -1685,12 +1696,14 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
 	 */
 	if (unshare_flags & (CLONE_NEWIPC|CLONE_SYSVSEM))
 		do_sysvsem = 1;
-	if ((err = unshare_fs(unshare_flags, &new_fs)))
+	err = unshare_fs(unshare_flags, &new_fs);
+	if (err)
 		goto bad_unshare_out;
-	if ((err = unshare_fd(unshare_flags, &new_fd)))
+	err = unshare_fd(unshare_flags, &new_fd);
+	if (err)
 		goto bad_unshare_cleanup_fs;
-	if ((err = unshare_nsproxy_namespaces(unshare_flags, &new_nsproxy,
-			new_fs)))
+	err = unshare_nsproxy_namespaces(unshare_flags, &new_nsproxy, new_fs);
+	if (err)
 		goto bad_unshare_cleanup_fd;
 
 	if (new_fs || new_fd || do_sysvsem || new_nsproxy) {