/*
 *  linux/kernel/fork.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

/*
 *  'fork.c' contains the help-routines for the 'fork' system call
 * (see also entry.S and others).
 * Fork is rather simple, once you get the hang of it, but the memory
 * management can be a bitch. See 'mm/memory.c': 'copy_page_range()'
 */

#include <linux/slab.h>
#include <linux/sched/autogroup.h>
#include <linux/sched/mm.h>
#include <linux/sched/coredump.h>
#include <linux/sched/user.h>
#include <linux/sched/numa_balancing.h>
#include <linux/sched/stat.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/sched/cputime.h>
#include <linux/rtmutex.h>
#include <linux/init.h>
#include <linux/unistd.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/completion.h>
#include <linux/personality.h>
#include <linux/mempolicy.h>
#include <linux/sem.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/iocontext.h>
#include <linux/key.h>
#include <linux/binfmts.h>
#include <linux/mman.h>
#include <linux/mmu_notifier.h>
#include <linux/hmm.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/vmacache.h>
#include <linux/nsproxy.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/cgroup.h>
#include <linux/security.h>
#include <linux/hugetlb.h>
#include <linux/seccomp.h>
#include <linux/swap.h>
#include <linux/syscalls.h>
#include <linux/jiffies.h>
#include <linux/futex.h>
#include <linux/compat.h>
#include <linux/kthread.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/rcupdate.h>
#include <linux/ptrace.h>
#include <linux/mount.h>
#include <linux/audit.h>
#include <linux/memcontrol.h>
#include <linux/ftrace.h>
#include <linux/proc_fs.h>
#include <linux/profile.h>
#include <linux/rmap.h>
#include <linux/ksm.h>
#include <linux/acct.h>
#include <linux/userfaultfd_k.h>
#include <linux/tsacct_kern.h>
#include <linux/cn_proc.h>
#include <linux/freezer.h>
#include <linux/delayacct.h>
#include <linux/taskstats_kern.h>
#include <linux/random.h>
#include <linux/tty.h>
#include <linux/blkdev.h>
#include <linux/fs_struct.h>
#include <linux/magic.h>
#include <linux/sched/mm.h>
#include <linux/perf_event.h>
#include <linux/posix-timers.h>
#include <linux/user-return-notifier.h>
#include <linux/oom.h>
#include <linux/khugepaged.h>
#include <linux/signalfd.h>
#include <linux/uprobes.h>
#include <linux/aio.h>
#include <linux/compiler.h>
#include <linux/sysctl.h>
#include <linux/kcov.h>
#include <linux/livepatch.h>
#include <linux/thread_info.h>

#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <linux/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

#include <trace/events/sched.h>

#define CREATE_TRACE_POINTS
#include <trace/events/task.h>

/*
 * Minimum number of threads to boot the kernel
 */
#define MIN_THREADS 20

/*
 * Maximum number of threads
 */
#define MAX_THREADS FUTEX_TID_MASK

/*
 * Protected counters by write_lock_irq(&tasklist_lock)
 */
unsigned long total_forks;	/* Handle normal Linux uptimes. */
int nr_threads;			/* The idle threads do not count.. */

int max_threads;		/* tunable limit on nr_threads */

DEFINE_PER_CPU(unsigned long, process_counts) = 0;

__cacheline_aligned DEFINE_RWLOCK(tasklist_lock);  /* outer */

#ifdef CONFIG_PROVE_RCU
int lockdep_tasklist_lock_is_held(void)
{
	return lockdep_is_held(&tasklist_lock);
}
EXPORT_SYMBOL_GPL(lockdep_tasklist_lock_is_held);
#endif /* #ifdef CONFIG_PROVE_RCU */

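/* Sum the per-CPU process counts to get the number of live processes. */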
int nr_processes(void)
{
	int cpu;
	int total = 0;

	for_each_possible_cpu(cpu)
		total += per_cpu(process_counts, cpu);

	return total;
}

void __weak arch_release_task_struct(struct task_struct *tsk)
{
}

#ifndef CONFIG_ARCH_TASK_STRUCT_ALLOCATOR
static struct kmem_cache *task_struct_cachep;

static inline struct task_struct *alloc_task_struct_node(int node)
{
	return kmem_cache_alloc_node(task_struct_cachep, GFP_KERNEL, node);
}

static inline void free_task_struct(struct task_struct *tsk)
{
	kmem_cache_free(task_struct_cachep, tsk);
}
#endif

void __weak arch_release_thread_stack(unsigned long *stack)
{
}

#ifndef CONFIG_ARCH_THREAD_STACK_ALLOCATOR

/*
 * Allocate pages if THREAD_SIZE is >= PAGE_SIZE, otherwise use a
 * kmemcache based allocator.
 */
# if THREAD_SIZE >= PAGE_SIZE || defined(CONFIG_VMAP_STACK)

#ifdef CONFIG_VMAP_STACK
/*
 * vmalloc() is a bit slow, and calling vfree() enough times will force a TLB
 * flush.  Try to minimize the number of calls by caching stacks.
 */
#define NR_CACHED_STACKS 2
static DEFINE_PER_CPU(struct vm_struct *, cached_stacks[NR_CACHED_STACKS]);

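/* CPU hotplug teardown callback (see fork_init()): free this CPU's cached stacks. */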
static int free_vm_stack_cache(unsigned int cpu)
{
	struct vm_struct **cached_vm_stacks = per_cpu_ptr(cached_stacks, cpu);
	int i;

	for (i = 0; i < NR_CACHED_STACKS; i++) {
		struct vm_struct *vm_stack = cached_vm_stacks[i];

		if (!vm_stack)
			continue;

		vfree(vm_stack->addr);
		cached_vm_stacks[i] = NULL;
	}

	return 0;
}
#endif

static unsigned long *alloc_thread_stack_node(struct task_struct *tsk, int node)
{
#ifdef CONFIG_VMAP_STACK
	void *stack;
	int i;

	for (i = 0; i < NR_CACHED_STACKS; i++) {
		struct vm_struct *s;

		s = this_cpu_xchg(cached_stacks[i], NULL);

		if (!s)
			continue;

		/* Clear stale pointers from reused stack. */
		memset(s->addr, 0, THREAD_SIZE);

		tsk->stack_vm_area = s;
		return s->addr;
	}

	stack = __vmalloc_node_range(THREAD_SIZE, THREAD_ALIGN,
				     VMALLOC_START, VMALLOC_END,
				     THREADINFO_GFP,
				     PAGE_KERNEL,
				     0, node, __builtin_return_address(0));

	/*
	 * We can't call find_vm_area() in interrupt context, and
	 * free_thread_stack() can be called in interrupt context,
	 * so cache the vm_struct.
	 */
	if (stack)
		tsk->stack_vm_area = find_vm_area(stack);
	return stack;
#else
	struct page *page = alloc_pages_node(node, THREADINFO_GFP,
					     THREAD_SIZE_ORDER);

	return page ? page_address(page) : NULL;
#endif
}

static inline void free_thread_stack(struct task_struct *tsk)
{
#ifdef CONFIG_VMAP_STACK
	if (task_stack_vm_area(tsk)) {
		int i;

		for (i = 0; i < NR_CACHED_STACKS; i++) {
			if (this_cpu_cmpxchg(cached_stacks[i],
					NULL, tsk->stack_vm_area) != NULL)
				continue;

			return;
		}

		vfree_atomic(tsk->stack);
		return;
	}
#endif

	__free_pages(virt_to_page(tsk->stack), THREAD_SIZE_ORDER);
}
# else
static struct kmem_cache *thread_stack_cache;

static unsigned long *alloc_thread_stack_node(struct task_struct *tsk,
						  int node)
{
	return kmem_cache_alloc_node(thread_stack_cache, THREADINFO_GFP, node);
}

static void free_thread_stack(struct task_struct *tsk)
{
	kmem_cache_free(thread_stack_cache, tsk->stack);
}

void thread_stack_cache_init(void)
{
	thread_stack_cache = kmem_cache_create_usercopy("thread_stack",
					THREAD_SIZE, THREAD_SIZE, 0, 0,
					THREAD_SIZE, NULL);
	BUG_ON(thread_stack_cache == NULL);
}
# endif
#endif

/* SLAB cache for signal_struct structures (tsk->signal) */
static struct kmem_cache *signal_cachep;

/* SLAB cache for sighand_struct structures (tsk->sighand) */
struct kmem_cache *sighand_cachep;

/* SLAB cache for files_struct structures (tsk->files) */
struct kmem_cache *files_cachep;

/* SLAB cache for fs_struct structures (tsk->fs) */
struct kmem_cache *fs_cachep;

/* SLAB cache for vm_area_struct structures */
struct kmem_cache *vm_area_cachep;

/* SLAB cache for mm_struct structures (tsk->mm) */
static struct kmem_cache *mm_cachep;

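/*
 * Charge (@account == 1) or uncharge (@account == -1) the pages backing
 * tsk's kernel stack to the NR_KERNEL_STACK_KB zone counter and to the
 * owning memcg.
 */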
static void account_kernel_stack(struct task_struct *tsk, int account)
{
	void *stack = task_stack_page(tsk);
	struct vm_struct *vm = task_stack_vm_area(tsk);

	BUILD_BUG_ON(IS_ENABLED(CONFIG_VMAP_STACK) && PAGE_SIZE % 1024 != 0);

	if (vm) {
		int i;

		BUG_ON(vm->nr_pages != THREAD_SIZE / PAGE_SIZE);

		for (i = 0; i < THREAD_SIZE / PAGE_SIZE; i++) {
			mod_zone_page_state(page_zone(vm->pages[i]),
					    NR_KERNEL_STACK_KB,
					    PAGE_SIZE / 1024 * account);
		}

		/* All stack pages belong to the same memcg. */
		mod_memcg_page_state(vm->pages[0], MEMCG_KERNEL_STACK_KB,
				     account * (THREAD_SIZE / 1024));
	} else {
		/*
		 * All stack pages are in the same zone and belong to the
		 * same memcg.
		 */
		struct page *first_page = virt_to_page(stack);

		mod_zone_page_state(page_zone(first_page), NR_KERNEL_STACK_KB,
				    THREAD_SIZE / 1024 * account);

		mod_memcg_page_state(first_page, MEMCG_KERNEL_STACK_KB,
				     account * (THREAD_SIZE / 1024));
	}
}

static void release_task_stack(struct task_struct *tsk)
{
	if (WARN_ON(tsk->state != TASK_DEAD))
		return;  /* Better to leak the stack than to free prematurely */

	account_kernel_stack(tsk, -1);
	arch_release_thread_stack(tsk->stack);
	free_thread_stack(tsk);
	tsk->stack = NULL;
#ifdef CONFIG_VMAP_STACK
	tsk->stack_vm_area = NULL;
#endif
}

#ifdef CONFIG_THREAD_INFO_IN_TASK
void put_task_stack(struct task_struct *tsk)
{
	if (atomic_dec_and_test(&tsk->stack_refcount))
		release_task_stack(tsk);
}
#endif

void free_task(struct task_struct *tsk)
{
#ifndef CONFIG_THREAD_INFO_IN_TASK
	/*
	 * The task is finally done with both the stack and thread_info,
	 * so free both.
	 */
	release_task_stack(tsk);
#else
	/*
	 * If the task had a separate stack allocation, it should be gone
	 * by now.
	 */
	WARN_ON_ONCE(atomic_read(&tsk->stack_refcount) != 0);
#endif
	rt_mutex_debug_task_free(tsk);
	ftrace_graph_exit_task(tsk);
	put_seccomp_filter(tsk);
	arch_release_task_struct(tsk);
	if (tsk->flags & PF_KTHREAD)
		free_kthread_struct(tsk);
	free_task_struct(tsk);
}
EXPORT_SYMBOL(free_task);

#ifdef CONFIG_MMU
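/*
 * Duplicate the parent's address space: copy each vma and, unless
 * VM_WIPEONFORK is set, its page table entries as well.
 */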
static __latent_entropy int dup_mmap(struct mm_struct *mm,
					struct mm_struct *oldmm)
{
	struct vm_area_struct *mpnt, *tmp, *prev, **pprev;
	struct rb_node **rb_link, *rb_parent;
	int retval;
	unsigned long charge;
	LIST_HEAD(uf);

	uprobe_start_dup_mmap();
	if (down_write_killable(&oldmm->mmap_sem)) {
		retval = -EINTR;
		goto fail_uprobe_end;
	}
	flush_cache_dup_mm(oldmm);
	uprobe_dup_mmap(oldmm, mm);
	/*
	 * Not linked in yet - no deadlock potential:
	 */
	down_write_nested(&mm->mmap_sem, SINGLE_DEPTH_NESTING);

	/* No ordering required: file already has been exposed. */
	RCU_INIT_POINTER(mm->exe_file, get_mm_exe_file(oldmm));

	mm->total_vm = oldmm->total_vm;
	mm->data_vm = oldmm->data_vm;
	mm->exec_vm = oldmm->exec_vm;
	mm->stack_vm = oldmm->stack_vm;

	rb_link = &mm->mm_rb.rb_node;
	rb_parent = NULL;
	pprev = &mm->mmap;
	retval = ksm_fork(mm, oldmm);
	if (retval)
		goto out;
	retval = khugepaged_fork(mm, oldmm);
	if (retval)
		goto out;

	prev = NULL;
	for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
		struct file *file;

		if (mpnt->vm_flags & VM_DONTCOPY) {
			vm_stat_account(mm, mpnt->vm_flags, -vma_pages(mpnt));
			continue;
		}
		charge = 0;
		if (mpnt->vm_flags & VM_ACCOUNT) {
			unsigned long len = vma_pages(mpnt);

			if (security_vm_enough_memory_mm(oldmm, len)) /* sic */
				goto fail_nomem;
			charge = len;
		}
		tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
		if (!tmp)
			goto fail_nomem;
		*tmp = *mpnt;
		INIT_LIST_HEAD(&tmp->anon_vma_chain);
		retval = vma_dup_policy(mpnt, tmp);
		if (retval)
			goto fail_nomem_policy;
		tmp->vm_mm = mm;
		retval = dup_userfaultfd(tmp, &uf);
		if (retval)
			goto fail_nomem_anon_vma_fork;
		if (tmp->vm_flags & VM_WIPEONFORK) {
			/* VM_WIPEONFORK gets a clean slate in the child. */
			tmp->anon_vma = NULL;
			if (anon_vma_prepare(tmp))
				goto fail_nomem_anon_vma_fork;
		} else if (anon_vma_fork(tmp, mpnt))
			goto fail_nomem_anon_vma_fork;
		tmp->vm_flags &= ~(VM_LOCKED | VM_LOCKONFAULT);
		tmp->vm_next = tmp->vm_prev = NULL;
		file = tmp->vm_file;
		if (file) {
			struct inode *inode = file_inode(file);
			struct address_space *mapping = file->f_mapping;

			get_file(file);
			if (tmp->vm_flags & VM_DENYWRITE)
				atomic_dec(&inode->i_writecount);
			i_mmap_lock_write(mapping);
			if (tmp->vm_flags & VM_SHARED)
				atomic_inc(&mapping->i_mmap_writable);
			flush_dcache_mmap_lock(mapping);
			/* insert tmp into the share list, just after mpnt */
			vma_interval_tree_insert_after(tmp, mpnt,
					&mapping->i_mmap);
			flush_dcache_mmap_unlock(mapping);
			i_mmap_unlock_write(mapping);
		}

		/*
		 * Clear hugetlb-related page reserves for children. This only
		 * affects MAP_PRIVATE mappings. Faults generated by the child
		 * are not guaranteed to succeed, even if read-only
		 */
		if (is_vm_hugetlb_page(tmp))
			reset_vma_resv_huge_pages(tmp);

		/*
		 * Link in the new vma and copy the page table entries.
		 */
		*pprev = tmp;
		pprev = &tmp->vm_next;
		tmp->vm_prev = prev;
		prev = tmp;

		__vma_link_rb(mm, tmp, rb_link, rb_parent);
		rb_link = &tmp->vm_rb.rb_right;
		rb_parent = &tmp->vm_rb;

		mm->map_count++;
		if (!(tmp->vm_flags & VM_WIPEONFORK))
			retval = copy_page_range(mm, oldmm, mpnt);

		if (tmp->vm_ops && tmp->vm_ops->open)
			tmp->vm_ops->open(tmp);

		if (retval)
			goto out;
	}
	/* a new mm has just been created */
	arch_dup_mmap(oldmm, mm);
	retval = 0;
out:
	up_write(&mm->mmap_sem);
	flush_tlb_mm(oldmm);
	up_write(&oldmm->mmap_sem);
	dup_userfaultfd_complete(&uf);
fail_uprobe_end:
	uprobe_end_dup_mmap();
	return retval;
fail_nomem_anon_vma_fork:
	mpol_put(vma_policy(tmp));
fail_nomem_policy:
	kmem_cache_free(vm_area_cachep, tmp);
fail_nomem:
	retval = -ENOMEM;
	vm_unacct_memory(charge);
	goto out;
}

static inline int mm_alloc_pgd(struct mm_struct *mm)
{
	mm->pgd = pgd_alloc(mm);
	if (unlikely(!mm->pgd))
		return -ENOMEM;
	return 0;
}

static inline void mm_free_pgd(struct mm_struct *mm)
{
	pgd_free(mm, mm->pgd);
}
#else
static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
{
	down_write(&oldmm->mmap_sem);
	RCU_INIT_POINTER(mm->exe_file, get_mm_exe_file(oldmm));
	up_write(&oldmm->mmap_sem);
	return 0;
}
#define mm_alloc_pgd(mm)	(0)
#define mm_free_pgd(mm)
#endif /* CONFIG_MMU */

static void check_mm(struct mm_struct *mm)
{
	int i;

	for (i = 0; i < NR_MM_COUNTERS; i++) {
		long x = atomic_long_read(&mm->rss_stat.count[i]);

		if (unlikely(x))
			printk(KERN_ALERT "BUG: Bad rss-counter state "
					  "mm:%p idx:%d val:%ld\n", mm, i, x);
	}

	if (mm_pgtables_bytes(mm))
		pr_alert("BUG: non-zero pgtables_bytes on freeing mm: %ld\n",
				mm_pgtables_bytes(mm));

#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !USE_SPLIT_PMD_PTLOCKS
	VM_BUG_ON_MM(mm->pmd_huge_pte, mm);
#endif
}

#define allocate_mm()	(kmem_cache_alloc(mm_cachep, GFP_KERNEL))
#define free_mm(mm)	(kmem_cache_free(mm_cachep, (mm)))

/*
 * Called when the last reference to the mm
 * is dropped: either by a lazy thread or by
 * mmput. Free the page directory and the mm.
 */
void __mmdrop(struct mm_struct *mm)
{
	BUG_ON(mm == &init_mm);
	WARN_ON_ONCE(mm == current->mm);
	WARN_ON_ONCE(mm == current->active_mm);
	mm_free_pgd(mm);
	destroy_context(mm);
	hmm_mm_destroy(mm);
	mmu_notifier_mm_destroy(mm);
	check_mm(mm);
	put_user_ns(mm->user_ns);
	free_mm(mm);
}
EXPORT_SYMBOL_GPL(__mmdrop);

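/*
 * Defer __mmdrop() to a workqueue for callers that may run in a context
 * (e.g. softirq) where freeing the mm is not safe.
 */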
static void mmdrop_async_fn(struct work_struct *work)
{
	struct mm_struct *mm;

	mm = container_of(work, struct mm_struct, async_put_work);
	__mmdrop(mm);
}

static void mmdrop_async(struct mm_struct *mm)
{
	if (unlikely(atomic_dec_and_test(&mm->mm_count))) {
		INIT_WORK(&mm->async_put_work, mmdrop_async_fn);
		schedule_work(&mm->async_put_work);
	}
}

static inline void free_signal_struct(struct signal_struct *sig)
{
	taskstats_tgid_free(sig);
	sched_autogroup_exit(sig);
	/*
	 * __mmdrop is not safe to call from softirq context on x86 due to
	 * pgd_dtor so postpone it to the async context
	 */
	if (sig->oom_mm)
		mmdrop_async(sig->oom_mm);
	kmem_cache_free(signal_cachep, sig);
}

static inline void put_signal_struct(struct signal_struct *sig)
{
	if (atomic_dec_and_test(&sig->sigcnt))
		free_signal_struct(sig);
}

void __put_task_struct(struct task_struct *tsk)
{
	WARN_ON(!tsk->exit_state);
	WARN_ON(atomic_read(&tsk->usage));
	WARN_ON(tsk == current);

	cgroup_free(tsk);
	task_numa_free(tsk);
	security_task_free(tsk);
	exit_creds(tsk);
	delayacct_tsk_free(tsk);
	put_signal_struct(tsk->signal);

	if (!profile_handoff_task(tsk))
		free_task(tsk);
}
EXPORT_SYMBOL_GPL(__put_task_struct);

void __init __weak arch_task_cache_init(void) { }

/*
 * set_max_threads - set the tunable limit on the number of threads so
 * that thread structures can only consume a small part of the available
 * memory, clamped to the [MIN_THREADS, MAX_THREADS] range.
 */
static void set_max_threads(unsigned int max_threads_suggested)
{
	u64 threads;

	/*
	 * The number of threads shall be limited such that the thread
	 * structures may only consume a small part of the available memory.
	 */
	if (fls64(totalram_pages) + fls64(PAGE_SIZE) > 64)
		threads = MAX_THREADS;
	else
		threads = div64_u64((u64) totalram_pages * (u64) PAGE_SIZE,
				    (u64) THREAD_SIZE * 8UL);

	if (threads > max_threads_suggested)
		threads = max_threads_suggested;

	max_threads = clamp_t(u64, threads, MIN_THREADS, MAX_THREADS);
}

#ifdef CONFIG_ARCH_WANTS_DYNAMIC_TASK_STRUCT
/* Initialized by the architecture: */
int arch_task_struct_size __read_mostly;
#endif

static void task_struct_whitelist(unsigned long *offset, unsigned long *size)
{
	/* Fetch thread_struct whitelist for the architecture. */
	arch_thread_struct_whitelist(offset, size);

	/*
	 * Handle zero-sized whitelist or empty thread_struct, otherwise
	 * adjust offset to position of thread_struct in task_struct.
	 */
	if (unlikely(*size == 0))
		*offset = 0;
	else
		*offset += offsetof(struct task_struct, thread);
}

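/* Boot-time setup: create the task_struct cache and size the thread limits. */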
void __init fork_init(void)
{
	int i;
#ifndef CONFIG_ARCH_TASK_STRUCT_ALLOCATOR
#ifndef ARCH_MIN_TASKALIGN
#define ARCH_MIN_TASKALIGN	0
#endif
	int align = max_t(int, L1_CACHE_BYTES, ARCH_MIN_TASKALIGN);
	unsigned long useroffset, usersize;

	/* create a slab on which task_structs can be allocated */
	task_struct_whitelist(&useroffset, &usersize);
	task_struct_cachep = kmem_cache_create_usercopy("task_struct",
			arch_task_struct_size, align,
			SLAB_PANIC|SLAB_ACCOUNT,
			useroffset, usersize, NULL);
#endif

	/* do the arch specific task caches init */
	arch_task_cache_init();

	set_max_threads(MAX_THREADS);

	init_task.signal->rlim[RLIMIT_NPROC].rlim_cur = max_threads/2;
	init_task.signal->rlim[RLIMIT_NPROC].rlim_max = max_threads/2;
	init_task.signal->rlim[RLIMIT_SIGPENDING] =
		init_task.signal->rlim[RLIMIT_NPROC];

	for (i = 0; i < UCOUNT_COUNTS; i++) {
		init_user_ns.ucount_max[i] = max_threads/2;
	}

#ifdef CONFIG_VMAP_STACK
	cpuhp_setup_state(CPUHP_BP_PREPARE_DYN, "fork:vm_stack_cache",
			  NULL, free_vm_stack_cache);
#endif

	lockdep_init_task(&init_task);
}

int __weak arch_dup_task_struct(struct task_struct *dst,
					       struct task_struct *src)
{
	*dst = *src;
	return 0;
}

void set_task_stack_end_magic(struct task_struct *tsk)
{
	unsigned long *stackend;

	stackend = end_of_stack(tsk);
	*stackend = STACK_END_MAGIC;	/* for overflow detection */
}

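/*
 * Allocate a task_struct and kernel stack for the child and copy @orig
 * into it; this is the first step of copy_process().
 */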
static struct task_struct *dup_task_struct(struct task_struct *orig, int node)
{
	struct task_struct *tsk;
	unsigned long *stack;
	struct vm_struct *stack_vm_area;
	int err;

	if (node == NUMA_NO_NODE)
		node = tsk_fork_get_node(orig);
	tsk = alloc_task_struct_node(node);
	if (!tsk)
		return NULL;

	stack = alloc_thread_stack_node(tsk, node);
	if (!stack)
		goto free_tsk;

	stack_vm_area = task_stack_vm_area(tsk);

	err = arch_dup_task_struct(tsk, orig);

	/*
	 * arch_dup_task_struct() clobbers the stack-related fields.  Make
	 * sure they're properly initialized before using any stack-related
	 * functions again.
	 */
	tsk->stack = stack;
#ifdef CONFIG_VMAP_STACK
	tsk->stack_vm_area = stack_vm_area;
#endif
#ifdef CONFIG_THREAD_INFO_IN_TASK
	atomic_set(&tsk->stack_refcount, 1);
#endif

	if (err)
		goto free_stack;

#ifdef CONFIG_SECCOMP
	/*
	 * We must handle setting up seccomp filters once we're under
	 * the sighand lock in case orig has changed between now and
	 * then. Until then, filter must be NULL to avoid messing up
	 * the usage counts on the error path calling free_task.
	 */
	tsk->seccomp.filter = NULL;
#endif

	setup_thread_stack(tsk, orig);
	clear_user_return_notifier(tsk);
	clear_tsk_need_resched(tsk);
	set_task_stack_end_magic(tsk);

#ifdef CONFIG_CC_STACKPROTECTOR
	tsk->stack_canary = get_random_canary();
#endif

	/*
	 * One for us, one for whoever does the "release_task()" (usually
	 * parent)
	 */
	atomic_set(&tsk->usage, 2);
#ifdef CONFIG_BLK_DEV_IO_TRACE
	tsk->btrace_seq = 0;
#endif
	tsk->splice_pipe = NULL;
	tsk->task_frag.page = NULL;
	tsk->wake_q.next = NULL;

	account_kernel_stack(tsk, 1);

	kcov_task_init(tsk);

#ifdef CONFIG_FAULT_INJECTION
	tsk->fail_nth = 0;
#endif

	return tsk;

free_stack:
	free_thread_stack(tsk);
free_tsk:
	free_task_struct(tsk);
	return NULL;
}

__cacheline_aligned_in_smp DEFINE_SPINLOCK(mmlist_lock);

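/* Parse the "coredump_filter=" boot option into the default dump flags. */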
static unsigned long default_dump_filter = MMF_DUMP_FILTER_DEFAULT;

static int __init coredump_filter_setup(char *s)
{
	default_dump_filter =
		(simple_strtoul(s, NULL, 0) << MMF_DUMP_FILTER_SHIFT) &
		MMF_DUMP_FILTER_MASK;
	return 1;
}

__setup("coredump_filter=", coredump_filter_setup);

#include <linux/init_task.h>

static void mm_init_aio(struct mm_struct *mm)
{
#ifdef CONFIG_AIO
	spin_lock_init(&mm->ioctx_lock);
	mm->ioctx_table = NULL;
#endif
}

static void mm_init_owner(struct mm_struct *mm, struct task_struct *p)
{
#ifdef CONFIG_MEMCG
	mm->owner = p;
#endif
}

static void mm_init_uprobes_state(struct mm_struct *mm)
{
#ifdef CONFIG_UPROBES
	mm->uprobes_state.xol_area = NULL;
#endif
}

static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p,
	struct user_namespace *user_ns)
{
	mm->mmap = NULL;
	mm->mm_rb = RB_ROOT;
	mm->vmacache_seqnum = 0;
	atomic_set(&mm->mm_users, 1);
	atomic_set(&mm->mm_count, 1);
	init_rwsem(&mm->mmap_sem);
	INIT_LIST_HEAD(&mm->mmlist);
	mm->core_state = NULL;
	mm_pgtables_bytes_init(mm);
	mm->map_count = 0;
	mm->locked_vm = 0;
	mm->pinned_vm = 0;
	memset(&mm->rss_stat, 0, sizeof(mm->rss_stat));
	spin_lock_init(&mm->page_table_lock);
	mm_init_cpumask(mm);
	mm_init_aio(mm);
	mm_init_owner(mm, p);
	RCU_INIT_POINTER(mm->exe_file, NULL);
	mmu_notifier_mm_init(mm);
	hmm_mm_init(mm);
	init_tlb_flush_pending(mm);
#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !USE_SPLIT_PMD_PTLOCKS
	mm->pmd_huge_pte = NULL;
#endif
	mm_init_uprobes_state(mm);

	if (current->mm) {
		mm->flags = current->mm->flags & MMF_INIT_MASK;
		mm->def_flags = current->mm->def_flags & VM_INIT_DEF_MASK;
	} else {
		mm->flags = default_dump_filter;
		mm->def_flags = 0;
	}

	if (mm_alloc_pgd(mm))
		goto fail_nopgd;

	if (init_new_context(p, mm))
		goto fail_nocontext;

	mm->user_ns = get_user_ns(user_ns);
	return mm;

fail_nocontext:
	mm_free_pgd(mm);
fail_nopgd:
	free_mm(mm);
	return NULL;
}

/*
 * Allocate and initialize an mm_struct.
 */
struct mm_struct *mm_alloc(void)
{
	struct mm_struct *mm;

	mm = allocate_mm();
	if (!mm)
		return NULL;

	memset(mm, 0, sizeof(*mm));
	return mm_init(mm, current, current_user_ns());
}

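/* Tear down the address space; called once mm_users has dropped to zero. */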
static inline void __mmput(struct mm_struct *mm)
{
	VM_BUG_ON(atomic_read(&mm->mm_users));

	uprobe_clear_state(mm);
	exit_aio(mm);
	ksm_exit(mm);
	khugepaged_exit(mm); /* must run before exit_mmap */
	exit_mmap(mm);
	mm_put_huge_zero_page(mm);
	set_mm_exe_file(mm, NULL);
	if (!list_empty(&mm->mmlist)) {
		spin_lock(&mmlist_lock);
		list_del(&mm->mmlist);
		spin_unlock(&mmlist_lock);
	}
	if (mm->binfmt)
		module_put(mm->binfmt->module);
	mmdrop(mm);
}

/*
 * Decrement the use count and release all resources for an mm.
 */
void mmput(struct mm_struct *mm)
{
	might_sleep();

	if (atomic_dec_and_test(&mm->mm_users))
		__mmput(mm);
}
EXPORT_SYMBOL_GPL(mmput);

#ifdef CONFIG_MMU
static void mmput_async_fn(struct work_struct *work)
{
	struct mm_struct *mm = container_of(work, struct mm_struct,
					    async_put_work);

	__mmput(mm);
}

void mmput_async(struct mm_struct *mm)
{
	if (atomic_dec_and_test(&mm->mm_users)) {
		INIT_WORK(&mm->async_put_work, mmput_async_fn);
		schedule_work(&mm->async_put_work);
	}
}
#endif

/**
 * set_mm_exe_file - change a reference to the mm's executable file
 *
 * This changes mm's executable file (shown as symlink /proc/[pid]/exe).
 *
 * Main users are mmput() and sys_execve(). Callers prevent concurrent
 * invocations: in mmput() nobody alive is left, in execve the task is
 * single-threaded. sys_prctl(PR_SET_MM_MAP/EXE_FILE) also needs to set
 * mm->exe_file, but does so without using set_mm_exe_file() in order
 * to avoid the need for any locks.
 */
void set_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file)
{
	struct file *old_exe_file;

	/*
	 * It is safe to dereference the exe_file without RCU as
	 * this function is only called if nobody else can access
	 * this mm -- see comment above for justification.
	 */
	old_exe_file = rcu_dereference_raw(mm->exe_file);

	if (new_exe_file)
		get_file(new_exe_file);
	rcu_assign_pointer(mm->exe_file, new_exe_file);
	if (old_exe_file)
		fput(old_exe_file);
}

/**
 * get_mm_exe_file - acquire a reference to the mm's executable file
 *
 * Returns %NULL if mm has no associated executable file.
 * User must release file via fput().
 */
struct file *get_mm_exe_file(struct mm_struct *mm)
{
	struct file *exe_file;

	rcu_read_lock();
	exe_file = rcu_dereference(mm->exe_file);
	if (exe_file && !get_file_rcu(exe_file))
		exe_file = NULL;
	rcu_read_unlock();
	return exe_file;
}
EXPORT_SYMBOL(get_mm_exe_file);

/**
 * get_task_exe_file - acquire a reference to the task's executable file
 *
 * Returns %NULL if task's mm (if any) has no associated executable file or
 * this is a kernel thread with borrowed mm (see the comment above get_task_mm).
 * User must release file via fput().
 */
struct file *get_task_exe_file(struct task_struct *task)
{
	struct file *exe_file = NULL;
	struct mm_struct *mm;

	task_lock(task);
	mm = task->mm;
	if (mm) {
		if (!(task->flags & PF_KTHREAD))
			exe_file = get_mm_exe_file(mm);
	}
	task_unlock(task);
	return exe_file;
}
EXPORT_SYMBOL(get_task_exe_file);

/**
 * get_task_mm - acquire a reference to the task's mm
 *
 * Returns %NULL if the task has no mm.  Checks PF_KTHREAD (meaning
 * this kernel worker thread has transiently adopted a user mm with use_mm,
 * to do its AIO) is not set and if so returns a reference to it, after
 * bumping up the use count.  User must release the mm via mmput()
 * after use.  Typically used by /proc and ptrace.
 */
struct mm_struct *get_task_mm(struct task_struct *task)
{
	struct mm_struct *mm;

	task_lock(task);
	mm = task->mm;
	if (mm) {
		if (task->flags & PF_KTHREAD)
			mm = NULL;
		else
			mmget(mm);
	}
	task_unlock(task);
	return mm;
}
EXPORT_SYMBOL_GPL(get_task_mm);

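/*
 * Acquire a task's mm only if the caller is allowed to ptrace it with
 * the given mode; returns the mm, NULL, or an ERR_PTR on failure.
 */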
struct mm_struct *mm_access(struct task_struct *task, unsigned int mode)
{
	struct mm_struct *mm;
	int err;

	err =  mutex_lock_killable(&task->signal->cred_guard_mutex);
	if (err)
		return ERR_PTR(err);

	mm = get_task_mm(task);
	if (mm && mm != current->mm &&
			!ptrace_may_access(task, mode)) {
		mmput(mm);
		mm = ERR_PTR(-EACCES);
	}
	mutex_unlock(&task->signal->cred_guard_mutex);

	return mm;
}

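/* Wake the parent sleeping in wait_for_vfork_done() after a vfork child execs or exits. */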
static void complete_vfork_done(struct task_struct *tsk)
{
	struct completion *vfork;

	task_lock(tsk);
	vfork = tsk->vfork_done;
	if (likely(vfork)) {
		tsk->vfork_done = NULL;
		complete(vfork);
	}
	task_unlock(tsk);
}

static int wait_for_vfork_done(struct task_struct *child,
				struct completion *vfork)
{
	int killed;

	freezer_do_not_count();
	killed = wait_for_completion_killable(vfork);
	freezer_count();

	if (killed) {
		task_lock(child);
		child->vfork_done = NULL;
		task_unlock(child);
	}

	put_task_struct(child);
	return killed;
}

/* Please note the differences between mmput and mm_release.
 * mmput is called whenever we stop holding onto a mm_struct,
 * error success whatever.
 *
 * mm_release is called after a mm_struct has been removed
 * from the current process.
 *
 * This difference is important for error handling, when we
 * only half set up a mm_struct for a new process and need to restore
 * the old one.  Because we mmput the new mm_struct before
 * restoring the old one. . .
 * Eric Biederman 10 January 1998
 */
void mm_release(struct task_struct *tsk, struct mm_struct *mm)
{
	/* Get rid of any futexes when releasing the mm */
#ifdef CONFIG_FUTEX
	if (unlikely(tsk->robust_list)) {
		exit_robust_list(tsk);
		tsk->robust_list = NULL;
	}
#ifdef CONFIG_COMPAT
	if (unlikely(tsk->compat_robust_list)) {
		compat_exit_robust_list(tsk);
		tsk->compat_robust_list = NULL;
	}
#endif
	if (unlikely(!list_empty(&tsk->pi_state_list)))
		exit_pi_state_list(tsk);
#endif

	uprobe_free_utask(tsk);

	/* Get rid of any cached register state */
	deactivate_mm(tsk, mm);

	/*
	 * Signal userspace if we're not exiting with a core dump
	 * because we want to leave the value intact for debugging
	 * purposes.
	 */
	if (tsk->clear_child_tid) {
		if (!(tsk->signal->flags & SIGNAL_GROUP_COREDUMP) &&
		    atomic_read(&mm->mm_users) > 1) {
			/*
			 * We don't check the error code - if userspace has
			 * not set up a proper pointer then tough luck.
			 */
			put_user(0, tsk->clear_child_tid);
			do_futex(tsk->clear_child_tid, FUTEX_WAKE,
					1, NULL, NULL, 0, 0);
		}
		tsk->clear_child_tid = NULL;
	}

	/*
	 * All done, finally we can wake up parent and return this mm to him.
	 * Also kthread_stop() uses this completion for synchronization.
	 */
	if (tsk->vfork_done)
		complete_vfork_done(tsk);
}

/*
 * Allocate a new mm structure and copy contents from the
 * mm structure of the passed in task structure.
 */
static struct mm_struct *dup_mm(struct task_struct *tsk)
{
	struct mm_struct *mm, *oldmm = current->mm;
	int err;

	mm = allocate_mm();
	if (!mm)
		goto fail_nomem;

	memcpy(mm, oldmm, sizeof(*mm));

	if (!mm_init(mm, tsk, mm->user_ns))
		goto fail_nomem;

	err = dup_mmap(mm, oldmm);
	if (err)
		goto free_pt;

	mm->hiwater_rss = get_mm_rss(mm);
	mm->hiwater_vm = mm->total_vm;

	if (mm->binfmt && !try_module_get(mm->binfmt->module))
		goto free_pt;

	return mm;

free_pt:
	/* don't put binfmt in mmput, we haven't got module yet */
	mm->binfmt = NULL;
	mmput(mm);

fail_nomem:
	return NULL;
}

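/*
 * Give the child its mm: share the parent's for CLONE_VM, otherwise
 * duplicate it with dup_mm(); kernel threads keep tsk->mm == NULL.
 */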
static int copy_mm(unsigned long clone_flags, struct task_struct *tsk)
{
	struct mm_struct *mm, *oldmm;
	int retval;

	tsk->min_flt = tsk->maj_flt = 0;
	tsk->nvcsw = tsk->nivcsw = 0;
#ifdef CONFIG_DETECT_HUNG_TASK
	tsk->last_switch_count = tsk->nvcsw + tsk->nivcsw;
#endif

	tsk->mm = NULL;
	tsk->active_mm = NULL;

	/*
	 * Are we cloning a kernel thread?
	 *
	 * We need to steal a active VM for that..
	 */
	oldmm = current->mm;
	if (!oldmm)
		return 0;

	/* initialize the new vmacache entries */
	vmacache_flush(tsk);

	if (clone_flags & CLONE_VM) {
		mmget(oldmm);
		mm = oldmm;
		goto good_mm;
	}

	retval = -ENOMEM;
	mm = dup_mm(tsk);
	if (!mm)
		goto fail_nomem;

good_mm:
	tsk->mm = mm;
	tsk->active_mm = mm;
	return 0;

fail_nomem:
	return retval;
}

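/* Share the parent's fs_struct (root, cwd, umask) or copy it, per CLONE_FS. */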
static int copy_fs(unsigned long clone_flags, struct task_struct *tsk)
{
	struct fs_struct *fs = current->fs;
	if (clone_flags & CLONE_FS) {
		/* tsk->fs is already what we want */
		spin_lock(&fs->lock);
		if (fs->in_exec) {
			spin_unlock(&fs->lock);
			return -EAGAIN;
		}
		fs->users++;
		spin_unlock(&fs->lock);
		return 0;
	}
	tsk->fs = copy_fs_struct(fs);
	if (!tsk->fs)
		return -ENOMEM;
	return 0;
}

static int copy_files(unsigned long clone_flags, struct task_struct *tsk)
{
	struct files_struct *oldf, *newf;
	int error = 0;

	/*
	 * A background process may not have any files ...
	 */
	oldf = current->files;
	if (!oldf)
		goto out;

	if (clone_flags & CLONE_FILES) {
		atomic_inc(&oldf->count);
		goto out;
	}

	newf = dup_fd(oldf, &error);
	if (!newf)
		goto out;

	tsk->files = newf;
	error = 0;
out:
	return error;
}

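/* Share the parent's io_context for CLONE_IO, else inherit only its ioprio. */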
static int copy_io(unsigned long clone_flags, struct task_struct *tsk)
{
#ifdef CONFIG_BLOCK
	struct io_context *ioc = current->io_context;
	struct io_context *new_ioc;

	if (!ioc)
		return 0;
	/*
	 * Share io context with parent, if CLONE_IO is set
	 */
	if (clone_flags & CLONE_IO) {
		ioc_task_link(ioc);
		tsk->io_context = ioc;
	} else if (ioprio_valid(ioc->ioprio)) {
		new_ioc = get_task_io_context(tsk, GFP_KERNEL, NUMA_NO_NODE);
		if (unlikely(!new_ioc))
			return -ENOMEM;

		new_ioc->ioprio = ioc->ioprio;
		put_io_context(new_ioc);
	}
#endif
	return 0;
}

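/* Share the parent's signal handler table for CLONE_SIGHAND, else copy it. */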
static int copy_sighand(unsigned long clone_flags, struct task_struct *tsk)
{
	struct sighand_struct *sig;

	if (clone_flags & CLONE_SIGHAND) {
		atomic_inc(&current->sighand->count);
		return 0;
	}
	sig = kmem_cache_alloc(sighand_cachep, GFP_KERNEL);
	rcu_assign_pointer(tsk->sighand, sig);
	if (!sig)
		return -ENOMEM;

	atomic_set(&sig->count, 1);
	memcpy(sig->action, current->sighand->action, sizeof(sig->action));
	return 0;
}

void __cleanup_sighand(struct sighand_struct *sighand)
{
	if (atomic_dec_and_test(&sighand->count)) {
		signalfd_cleanup(sighand);
		/*
		 * sighand_cachep is SLAB_TYPESAFE_BY_RCU so we can free it
		 * without an RCU grace period, see __lock_task_sighand().
		 */
		kmem_cache_free(sighand_cachep, sighand);
	}
}

#ifdef CONFIG_POSIX_TIMERS
/*
 * Initialize POSIX timer handling for a thread group.
 */
static void posix_cpu_timers_init_group(struct signal_struct *sig)
{
	unsigned long cpu_limit;

	cpu_limit = READ_ONCE(sig->rlim[RLIMIT_CPU].rlim_cur);
	if (cpu_limit != RLIM_INFINITY) {
		sig->cputime_expires.prof_exp = cpu_limit * NSEC_PER_SEC;
		sig->cputimer.running = true;
	}

	/* The timer lists. */
	INIT_LIST_HEAD(&sig->cpu_timers[0]);
	INIT_LIST_HEAD(&sig->cpu_timers[1]);
	INIT_LIST_HEAD(&sig->cpu_timers[2]);
}
#else
static inline void posix_cpu_timers_init_group(struct signal_struct *sig) { }
#endif

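/* Allocate and set up a new signal_struct; threads share the leader's. */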
static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
{
	struct signal_struct *sig;

	if (clone_flags & CLONE_THREAD)
		return 0;

	sig = kmem_cache_zalloc(signal_cachep, GFP_KERNEL);
	tsk->signal = sig;
	if (!sig)
		return -ENOMEM;

	sig->nr_threads = 1;
	atomic_set(&sig->live, 1);
	atomic_set(&sig->sigcnt, 1);

	/* list_add(thread_node, thread_head) without INIT_LIST_HEAD() */
	sig->thread_head = (struct list_head)LIST_HEAD_INIT(tsk->thread_node);
	tsk->thread_node = (struct list_head)LIST_HEAD_INIT(sig->thread_head);

	init_waitqueue_head(&sig->wait_chldexit);
	sig->curr_target = tsk;
	init_sigpending(&sig->shared_pending);
	seqlock_init(&sig->stats_lock);
	prev_cputime_init(&sig->prev_cputime);

#ifdef CONFIG_POSIX_TIMERS
	INIT_LIST_HEAD(&sig->posix_timers);
	hrtimer_init(&sig->real_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	sig->real_timer.function = it_real_fn;
#endif

	task_lock(current->group_leader);
	memcpy(sig->rlim, current->signal->rlim, sizeof sig->rlim);
	task_unlock(current->group_leader);

	posix_cpu_timers_init_group(sig);

	tty_audit_fork(sig);
	sched_autogroup_fork(sig);

	sig->oom_score_adj = current->signal->oom_score_adj;
	sig->oom_score_adj_min = current->signal->oom_score_adj_min;

	mutex_init(&sig->cred_guard_mutex);

	return 0;
}

static void copy_seccomp(struct task_struct *p)
{
#ifdef CONFIG_SECCOMP
	/*
	 * Must be called with sighand->lock held, which is common to
	 * all threads in the group. Holding cred_guard_mutex is not
	 * needed because this new task is not yet running and cannot
	 * be racing exec.
	 */
	assert_spin_locked(&current->sighand->siglock);

	/* Ref-count the new filter user, and assign it. */
	get_seccomp_filter(current);
	p->seccomp = current->seccomp;

	/*
	 * Explicitly enable no_new_privs here in case it got set
	 * between the task_struct being duplicated and holding the
	 * sighand lock. The seccomp state and nnp must be in sync.
	 */
	if (task_no_new_privs(current))
		task_set_no_new_privs(p);

	/*
	 * If the parent gained a seccomp mode after copying thread
	 * flags and between before we held the sighand lock, we have
	 * to manually enable the seccomp thread flag here.
	 */
	if (p->seccomp.mode != SECCOMP_MODE_DISABLED)
		set_tsk_thread_flag(p, TIF_SECCOMP);
#endif
}

SYSCALL_DEFINE1(set_tid_address, int __user *, tidptr)
{
	current->clear_child_tid = tidptr;

	return task_pid_vnr(current);
}

static void rt_mutex_init_task(struct task_struct *p)
{
	raw_spin_lock_init(&p->pi_lock);
#ifdef CONFIG_RT_MUTEXES
	p->pi_waiters = RB_ROOT_CACHED;
	p->pi_top_task = NULL;
	p->pi_blocked_on = NULL;
#endif
}

#ifdef CONFIG_POSIX_TIMERS
/*
 * Initialize POSIX timer handling for a single task.
 */
static void posix_cpu_timers_init(struct task_struct *tsk)
{
	tsk->cputime_expires.prof_exp = 0;
	tsk->cputime_expires.virt_exp = 0;
	tsk->cputime_expires.sched_exp = 0;
	INIT_LIST_HEAD(&tsk->cpu_timers[0]);
	INIT_LIST_HEAD(&tsk->cpu_timers[1]);
	INIT_LIST_HEAD(&tsk->cpu_timers[2]);
}
#else
static inline void posix_cpu_timers_init(struct task_struct *tsk) { }
#endif

static inline void
init_task_pid(struct task_struct *task, enum pid_type type, struct pid *pid)
{
	 task->pids[type].pid = pid;
}

static inline void rcu_copy_process(struct task_struct *p)
{
#ifdef CONFIG_PREEMPT_RCU
	p->rcu_read_lock_nesting = 0;
	p->rcu_read_unlock_special.s = 0;
	p->rcu_blocked_node = NULL;
	INIT_LIST_HEAD(&p->rcu_node_entry);
#endif /* #ifdef CONFIG_PREEMPT_RCU */
#ifdef CONFIG_TASKS_RCU
	p->rcu_tasks_holdout = false;
	INIT_LIST_HEAD(&p->rcu_tasks_holdout_list);
	p->rcu_tasks_idle_cpu = -1;
#endif /* #ifdef CONFIG_TASKS_RCU */
}

/*
 * This creates a new process as a copy of the old one,
 * but does not actually start it yet.
 *
 * It copies the registers, and all the appropriate
 * parts of the process environment (as per the clone
 * flags). The actual kick-off is left to the caller.
 */
static __latent_entropy struct task_struct *copy_process(
					unsigned long clone_flags,
					unsigned long stack_start,
					unsigned long stack_size,
					int __user *child_tidptr,
					struct pid *pid,
					int trace,
					unsigned long tls,
					int node)
{
	int retval;
	struct task_struct *p;

	/*
	 * Don't allow sharing the root directory with processes in a different
	 * namespace
	 */
	if ((clone_flags & (CLONE_NEWNS|CLONE_FS)) == (CLONE_NEWNS|CLONE_FS))
		return ERR_PTR(-EINVAL);

	if ((clone_flags & (CLONE_NEWUSER|CLONE_FS)) == (CLONE_NEWUSER|CLONE_FS))
		return ERR_PTR(-EINVAL);

	/*
	 * Thread groups must share signals as well, and detached threads
	 * can only be started up within the thread group.
	 */
	if ((clone_flags & CLONE_THREAD) && !(clone_flags & CLONE_SIGHAND))
		return ERR_PTR(-EINVAL);

	/*
	 * Shared signal handlers imply shared VM. By way of the above,
	 * thread groups also imply shared VM. Blocking this case allows
	 * for various simplifications in other code.
	 */
	if ((clone_flags & CLONE_SIGHAND) && !(clone_flags & CLONE_VM))
		return ERR_PTR(-EINVAL);

	/*
	 * Siblings of global init remain as zombies on exit since they are
	 * not reaped by their parent (swapper). To solve this and to avoid
	 * multi-rooted process trees, prevent global and container-inits
	 * from creating siblings.
	 */
	if ((clone_flags & CLONE_PARENT) &&
				current->signal->flags & SIGNAL_UNKILLABLE)
		return ERR_PTR(-EINVAL);

	/*
	 * If the new process will be in a different pid or user namespace
	 * do not allow it to share a thread group with the forking task.
	 */
	if (clone_flags & CLONE_THREAD) {
		if ((clone_flags & (CLONE_NEWUSER | CLONE_NEWPID)) ||
		    (task_active_pid_ns(current) !=
				current->nsproxy->pid_ns_for_children))
			return ERR_PTR(-EINVAL);
	}

	retval = -ENOMEM;
	p = dup_task_struct(current, node);
	if (!p)
		goto fork_out;

	/*
	 * This _must_ happen before we call free_task(), i.e. before we jump
	 * to any of the bad_fork_* labels. This is to avoid freeing
	 * p->set_child_tid which is (ab)used as a kthread's data pointer for
	 * kernel threads (PF_KTHREAD).
	 */
	p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
	/*
	 * Clear TID on mm_release()?
	 */
	p->clear_child_tid = (clone_flags & CLONE_CHILD_CLEARTID) ? child_tidptr : NULL;

	ftrace_graph_init_task(p);

	rt_mutex_init_task(p);

#ifdef CONFIG_PROVE_LOCKING
	DEBUG_LOCKS_WARN_ON(!p->hardirqs_enabled);
	DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
#endif
	retval = -EAGAIN;
	if (atomic_read(&p->real_cred->user->processes) >=
			task_rlimit(p, RLIMIT_NPROC)) {
		if (p->real_cred->user != INIT_USER &&
		    !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN))
			goto bad_fork_free;
	}
	current->flags &= ~PF_NPROC_EXCEEDED;

	retval = copy_creds(p, clone_flags);
	if (retval < 0)
		goto bad_fork_free;

	/*
	 * If multiple threads are within copy_process(), then this check
	 * triggers too late. This doesn't hurt, the check is only there
	 * to stop root fork bombs.
	 */
	retval = -EAGAIN;
	if (nr_threads >= max_threads)
		goto bad_fork_cleanup_count;

	delayacct_tsk_init(p);	/* Must remain after dup_task_struct() */
	p->flags &= ~(PF_SUPERPRIV | PF_WQ_WORKER | PF_IDLE);
	p->flags |= PF_FORKNOEXEC;
	INIT_LIST_HEAD(&p->children);
	INIT_LIST_HEAD(&p->sibling);
	rcu_copy_process(p);
	p->vfork_done = NULL;
	spin_lock_init(&p->alloc_lock);

	init_sigpending(&p->pending);

	p->utime = p->stime = p->gtime = 0;
#ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
	p->utimescaled = p->stimescaled = 0;
#endif
	prev_cputime_init(&p->prev_cputime);

#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
	seqcount_init(&p->vtime.seqcount);
	p->vtime.starttime = 0;
	p->vtime.state = VTIME_INACTIVE;
#endif

#if defined(SPLIT_RSS_COUNTING)
	memset(&p->rss_stat, 0, sizeof(p->rss_stat));
#endif

	p->default_timer_slack_ns = current->timer_slack_ns;

	task_io_accounting_init(&p->ioac);
	acct_clear_integrals(p);

	posix_cpu_timers_init(p);

	p->start_time = ktime_get_ns();
	p->real_start_time = ktime_get_boot_ns();
	p->io_context = NULL;
	p->audit_context = NULL;
	cgroup_fork(p);
#ifdef CONFIG_NUMA
	p->mempolicy = mpol_dup(p->mempolicy);
	if (IS_ERR(p->mempolicy)) {
		retval = PTR_ERR(p->mempolicy);
		p->mempolicy = NULL;
		goto bad_fork_cleanup_threadgroup_lock;
	}
#endif
#ifdef CONFIG_CPUSETS
	p->cpuset_mem_spread_rotor = NUMA_NO_NODE;
	p->cpuset_slab_spread_rotor = NUMA_NO_NODE;
	seqcount_init(&p->mems_allowed_seq);
#endif
#ifdef CONFIG_TRACE_IRQFLAGS
	p->irq_events = 0;
	p->hardirqs_enabled = 0;
	p->hardirq_enable_ip = 0;
	p->hardirq_enable_event = 0;
	p->hardirq_disable_ip = _THIS_IP_;
	p->hardirq_disable_event = 0;
	p->softirqs_enabled = 1;
	p->softirq_enable_ip = _THIS_IP_;
	p->softirq_enable_event = 0;
	p->softirq_disable_ip = 0;
	p->softirq_disable_event = 0;
	p->hardirq_context = 0;
	p->softirq_context = 0;
#endif

	p->pagefault_disabled = 0;

#ifdef CONFIG_LOCKDEP
	p->lockdep_depth = 0; /* no locks held yet */
	p->curr_chain_key = 0;
	p->lockdep_recursion = 0;
	lockdep_init_task(p);
#endif

#ifdef CONFIG_DEBUG_MUTEXES
	p->blocked_on = NULL; /* not blocked yet */
#endif
#ifdef CONFIG_BCACHE
	p->sequential_io	= 0;
	p->sequential_io_avg	= 0;
#endif

	/* Perform scheduler related setup. Assign this task to a CPU. */
	retval = sched_fork(clone_flags, p);
	if (retval)
		goto bad_fork_cleanup_policy;

	retval = perf_event_init_task(p);
	if (retval)
		goto bad_fork_cleanup_policy;
	retval = audit_alloc(p);
	if (retval)
		goto bad_fork_cleanup_perf;
	/* copy all the process information */
	shm_init_task(p);
	retval = security_task_alloc(p, clone_flags);
	if (retval)
		goto bad_fork_cleanup_audit;
	retval = copy_semundo(clone_flags, p);
	if (retval)
		goto bad_fork_cleanup_security;
	retval = copy_files(clone_flags, p);
	if (retval)
		goto bad_fork_cleanup_semundo;
	retval = copy_fs(clone_flags, p);
	if (retval)
		goto bad_fork_cleanup_files;
	retval = copy_sighand(clone_flags, p);
	if (retval)
		goto bad_fork_cleanup_fs;
	retval = copy_signal(clone_flags, p);
	if (retval)
		goto bad_fork_cleanup_sighand;
	retval = copy_mm(clone_flags, p);
	if (retval)
		goto bad_fork_cleanup_signal;
	retval = copy_namespaces(clone_flags, p);
	if (retval)
		goto bad_fork_cleanup_mm;
	retval = copy_io(clone_flags, p);
	if (retval)
		goto bad_fork_cleanup_namespaces;
	retval = copy_thread_tls(clone_flags, stack_start, stack_size, p, tls);
	if (retval)
		goto bad_fork_cleanup_io;

	if (pid != &init_struct_pid) {
		pid = alloc_pid(p->nsproxy->pid_ns_for_children);
		if (IS_ERR(pid)) {
			retval = PTR_ERR(pid);
			goto bad_fork_cleanup_thread;
		}
	}

#ifdef CONFIG_BLOCK
	p->plug = NULL;
#endif
#ifdef CONFIG_FUTEX
	p->robust_list = NULL;
#ifdef CONFIG_COMPAT
	p->compat_robust_list = NULL;
#endif
	INIT_LIST_HEAD(&p->pi_state_list);
	p->pi_state_cache = NULL;
#endif
	/*
	 * sigaltstack should be cleared when sharing the same VM
	 */
	if ((clone_flags & (CLONE_VM|CLONE_VFORK)) == CLONE_VM)
		sas_ss_reset(p);

	/*
	 * Syscall tracing and stepping should be turned off in the
	 * child regardless of CLONE_PTRACE.
	 */
	user_disable_single_step(p);
	clear_tsk_thread_flag(p, TIF_SYSCALL_TRACE);
#ifdef TIF_SYSCALL_EMU
	clear_tsk_thread_flag(p, TIF_SYSCALL_EMU);
#endif
	clear_all_latency_tracing(p);

	/* ok, now we should be set up.. */
	p->pid = pid_nr(pid);
	if (clone_flags & CLONE_THREAD) {
		p->exit_signal = -1;
		p->group_leader = current->group_leader;
		p->tgid = current->tgid;
	} else {
		if (clone_flags & CLONE_PARENT)
			p->exit_signal = current->group_leader->exit_signal;
		else
			p->exit_signal = (clone_flags & CSIGNAL);
		p->group_leader = p;
		p->tgid = p->pid;
	}

	p->nr_dirtied = 0;
	p->nr_dirtied_pause = 128 >> (PAGE_SHIFT - 10);
	p->dirty_paused_when = 0;

	p->pdeath_signal = 0;
	INIT_LIST_HEAD(&p->thread_group);
	p->task_works = NULL;

	cgroup_threadgroup_change_begin(current);
	/*
	 * Ensure that the cgroup subsystem policies allow the new process to be
	 * forked. Note that the new process's css_set can be changed
	 * between here and cgroup_post_fork() if an organisation operation is in
	 * progress.
	 */
	retval = cgroup_can_fork(p);
	if (retval)
		goto bad_fork_free_pid;

	/*
	 * Make it visible to the rest of the system, but don't wake it up yet.
	 * Need tasklist lock for parent etc handling!
	 */
	write_lock_irq(&tasklist_lock);

	/* CLONE_PARENT re-uses the old parent */
	if (clone_flags & (CLONE_PARENT|CLONE_THREAD)) {
		p->real_parent = current->real_parent;
		p->parent_exec_id = current->parent_exec_id;
	} else {
		p->real_parent = current;
		p->parent_exec_id = current->self_exec_id;
	}

	klp_copy_process(p);

	spin_lock(&current->sighand->siglock);

	/*
	 * Copy seccomp details explicitly here, in case they were changed
	 * before holding sighand lock.
	 */
	copy_seccomp(p);

	/*
	 * Process group and session signals need to be delivered to just the
	 * parent before the fork or both the parent and the child after the
	 * fork. Restart if a signal comes in before we add the new process to
	 * its process group.
	 * A fatal signal pending means that current will exit, so the new
	 * thread can't slip out of an OOM kill (or normal SIGKILL).
	 */
	recalc_sigpending();
	if (signal_pending(current)) {
		retval = -ERESTARTNOINTR;
		goto bad_fork_cancel_cgroup;
	}
	if (unlikely(!(ns_of_pid(pid)->pid_allocated & PIDNS_ADDING))) {
		retval = -ENOMEM;
		goto bad_fork_cancel_cgroup;
	}
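	/*
	 * (PIDNS_ADDING is cleared once the pid namespace starts dying, so
	 * this check stops new tasks from being added to a namespace whose
	 * reaper is already exiting.)
	 */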

	if (likely(p->pid)) {
		ptrace_init_task(p, (clone_flags & CLONE_PTRACE) || trace);

		init_task_pid(p, PIDTYPE_PID, pid);
		if (thread_group_leader(p)) {
			init_task_pid(p, PIDTYPE_PGID, task_pgrp(current));
			init_task_pid(p, PIDTYPE_SID, task_session(current));

			if (is_child_reaper(pid)) {
				ns_of_pid(pid)->child_reaper = p;
				p->signal->flags |= SIGNAL_UNKILLABLE;
			}

			p->signal->leader_pid = pid;
			p->signal->tty = tty_kref_get(current->signal->tty);
			/*
			 * Inherit has_child_subreaper flag under the same
			 * tasklist_lock with adding child to the process tree
			 * for propagate_has_child_subreaper optimization.
			 */
			p->signal->has_child_subreaper = p->real_parent->signal->has_child_subreaper ||
							 p->real_parent->signal->is_child_subreaper;
			list_add_tail(&p->sibling, &p->real_parent->children);
			list_add_tail_rcu(&p->tasks, &init_task.tasks);
			attach_pid(p, PIDTYPE_PGID);
			attach_pid(p, PIDTYPE_SID);
			__this_cpu_inc(process_counts);
		} else {
			current->signal->nr_threads++;
			atomic_inc(&current->signal->live);
			atomic_inc(&current->signal->sigcnt);
			list_add_tail_rcu(&p->thread_group,
					  &p->group_leader->thread_group);
			list_add_tail_rcu(&p->thread_node,
					  &p->signal->thread_head);
		}
		attach_pid(p, PIDTYPE_PID);
		nr_threads++;
	}

	total_forks++;
	spin_unlock(&current->sighand->siglock);
	syscall_tracepoint_update(p);
	write_unlock_irq(&tasklist_lock);

	proc_fork_connector(p);
	cgroup_post_fork(p);
	cgroup_threadgroup_change_end(current);
	perf_event_fork(p);

	trace_task_newtask(p, clone_flags);
	uprobe_copy_process(p, clone_flags);

	return p;

bad_fork_cancel_cgroup:
	spin_unlock(&current->sighand->siglock);
	write_unlock_irq(&tasklist_lock);
	cgroup_cancel_fork(p);
bad_fork_free_pid:
	cgroup_threadgroup_change_end(current);
	if (pid != &init_struct_pid)
		free_pid(pid);
bad_fork_cleanup_thread:
	exit_thread(p);
bad_fork_cleanup_io:
	if (p->io_context)
		exit_io_context(p);
bad_fork_cleanup_namespaces:
	exit_task_namespaces(p);
bad_fork_cleanup_mm:
	if (p->mm)
		mmput(p->mm);
bad_fork_cleanup_signal:
	if (!(clone_flags & CLONE_THREAD))
		free_signal_struct(p->signal);
bad_fork_cleanup_sighand:
	__cleanup_sighand(p->sighand);
bad_fork_cleanup_fs:
	exit_fs(p); /* blocking */
bad_fork_cleanup_files:
	exit_files(p); /* blocking */
bad_fork_cleanup_semundo:
	exit_sem(p);
bad_fork_cleanup_security:
	security_task_free(p);
bad_fork_cleanup_audit:
	audit_free(p);
bad_fork_cleanup_perf:
	perf_event_free_task(p);
bad_fork_cleanup_policy:
	lockdep_free_task(p);
#ifdef CONFIG_NUMA
	mpol_put(p->mempolicy);
bad_fork_cleanup_threadgroup_lock:
#endif
	delayacct_tsk_free(p);
bad_fork_cleanup_count:
	atomic_dec(&p->cred->user->processes);
	exit_creds(p);
bad_fork_free:
	p->state = TASK_DEAD;
	put_task_stack(p);
	free_task(p);
fork_out:
	return ERR_PTR(retval);
}

static inline void init_idle_pids(struct pid_link *links)
{
	enum pid_type type;

	for (type = PIDTYPE_PID; type < PIDTYPE_MAX; ++type) {
		INIT_HLIST_NODE(&links[type].node); /* not really needed */
		links[type].pid = &init_struct_pid;
	}
}

struct task_struct *fork_idle(int cpu)
{
	struct task_struct *task;
	task = copy_process(CLONE_VM, 0, 0, NULL, &init_struct_pid, 0, 0,
			    cpu_to_node(cpu));
	if (!IS_ERR(task)) {
		init_idle_pids(task->pids);
		init_idle(task, cpu);
	}

	return task;
}
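
/*
 * (fork_idle() runs during CPU bring-up: each idle task is cloned with
 * CLONE_VM against &init_struct_pid, so copy_process() skips alloc_pid()
 * and, with p->pid == 0, the idle task is never added to the task lists.)
 */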

/*
 *  Ok, this is the main fork-routine.
 *
 * It copies the process, and if successful kick-starts
 * it and waits for it to finish using the VM if required.
 */
long _do_fork(unsigned long clone_flags,
	      unsigned long stack_start,
	      unsigned long stack_size,
	      int __user *parent_tidptr,
	      int __user *child_tidptr,
	      unsigned long tls)
{
	struct completion vfork;
	struct pid *pid;
	struct task_struct *p;
	int trace = 0;
	long nr;

	/*
	 * Determine whether and which event to report to ptracer.  When
	 * called from kernel_thread or CLONE_UNTRACED is explicitly
	 * requested, no event is reported; otherwise, report if the event
	 * for the type of forking is enabled.
	 */
	if (!(clone_flags & CLONE_UNTRACED)) {
		if (clone_flags & CLONE_VFORK)
			trace = PTRACE_EVENT_VFORK;
		else if ((clone_flags & CSIGNAL) != SIGCHLD)
			trace = PTRACE_EVENT_CLONE;
		else
			trace = PTRACE_EVENT_FORK;

		if (likely(!ptrace_event_enabled(current, trace)))
			trace = 0;
	}
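	/*
	 * (So, illustratively: fork() reports PTRACE_EVENT_FORK, vfork()
	 * reports PTRACE_EVENT_VFORK, and a thread-style clone() - whose
	 * low flag byte carries no SIGCHLD - reports PTRACE_EVENT_CLONE,
	 * in each case only if the tracer has enabled that event.)
	 */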

	p = copy_process(clone_flags, stack_start, stack_size,
			 child_tidptr, NULL, trace, tls, NUMA_NO_NODE);
	add_latent_entropy();

	if (IS_ERR(p))
		return PTR_ERR(p);

	/*
	 * Do this prior to waking up the new thread - the thread pointer
	 * might get invalid after that point, if the thread exits quickly.
	 */
	trace_sched_process_fork(current, p);

	pid = get_task_pid(p, PIDTYPE_PID);
	nr = pid_vnr(pid);

	if (clone_flags & CLONE_PARENT_SETTID)
		put_user(nr, parent_tidptr);

	if (clone_flags & CLONE_VFORK) {
		p->vfork_done = &vfork;
		init_completion(&vfork);
		get_task_struct(p);
	}

	wake_up_new_task(p);

	/* forking complete and child started to run, tell ptracer */
	if (unlikely(trace))
		ptrace_event_pid(trace, pid);

	if (clone_flags & CLONE_VFORK) {
		if (!wait_for_vfork_done(p, &vfork))
			ptrace_event_pid(PTRACE_EVENT_VFORK_DONE, pid);
	}
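	/*
	 * (For CLONE_VFORK the parent blocks in wait_for_vfork_done()
	 * above until the child releases the address space via exec() or
	 * exit(); the get_task_struct() reference taken earlier keeps the
	 * child's task_struct valid across that wait.)
	 */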

	put_pid(pid);
	return nr;
}

#ifndef CONFIG_HAVE_COPY_THREAD_TLS
/* For compatibility with architectures that call do_fork directly rather than
 * using the syscall entry points below. */
long do_fork(unsigned long clone_flags,
	      unsigned long stack_start,
	      unsigned long stack_size,
	      int __user *parent_tidptr,
	      int __user *child_tidptr)
{
	return _do_fork(clone_flags, stack_start, stack_size,
			parent_tidptr, child_tidptr, 0);
}
#endif

/*
 * Create a kernel thread.
 */
pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
{
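	/*
	 * (fn and arg travel in the stack_start and stack_size slots of
	 * _do_fork(); on most architectures copy_thread() recognizes the
	 * kernel-thread case and sets the child up to enter fn(arg)
	 * instead of returning to user mode.)
	 */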
	return _do_fork(flags|CLONE_VM|CLONE_UNTRACED, (unsigned long)fn,
		(unsigned long)arg, NULL, NULL, 0);
}

#ifdef __ARCH_WANT_SYS_FORK
SYSCALL_DEFINE0(fork)
{
#ifdef CONFIG_MMU
	return _do_fork(SIGCHLD, 0, 0, NULL, NULL, 0);
#else
	/* cannot support in nommu mode */
	return -EINVAL;
#endif
}
#endif

#ifdef __ARCH_WANT_SYS_VFORK
SYSCALL_DEFINE0(vfork)
{
	return _do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, 0,
			0, NULL, NULL, 0);
}
#endif

#ifdef __ARCH_WANT_SYS_CLONE
#ifdef CONFIG_CLONE_BACKWARDS
SYSCALL_DEFINE5(clone, unsigned long, clone_flags, unsigned long, newsp,
		 int __user *, parent_tidptr,
		 unsigned long, tls,
		 int __user *, child_tidptr)
#elif defined(CONFIG_CLONE_BACKWARDS2)
SYSCALL_DEFINE5(clone, unsigned long, newsp, unsigned long, clone_flags,
		 int __user *, parent_tidptr,
		 int __user *, child_tidptr,
		 unsigned long, tls)
#elif defined(CONFIG_CLONE_BACKWARDS3)
SYSCALL_DEFINE6(clone, unsigned long, clone_flags, unsigned long, newsp,
		int, stack_size,
		int __user *, parent_tidptr,
		int __user *, child_tidptr,
		unsigned long, tls)
#else
SYSCALL_DEFINE5(clone, unsigned long, clone_flags, unsigned long, newsp,
		 int __user *, parent_tidptr,
		 int __user *, child_tidptr,
		 unsigned long, tls)
#endif
{
	return _do_fork(clone_flags, newsp, 0, parent_tidptr, child_tidptr, tls);
}
#endif
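
/*
 * (The CLONE_BACKWARDS* variants above exist because architectures
 * historically disagreed on argument order: CLONE_BACKWARDS puts tls
 * before child_tidptr, CLONE_BACKWARDS2 additionally swaps newsp and
 * clone_flags, and CLONE_BACKWARDS3 adds an explicit stack_size; all
 * of them funnel into the same _do_fork() call.)
 */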

void walk_process_tree(struct task_struct *top, proc_visitor visitor, void *data)
{
	struct task_struct *leader, *parent, *child;
	int res;

	read_lock(&tasklist_lock);
	leader = top = top->group_leader;
down:
	for_each_thread(leader, parent) {
		list_for_each_entry(child, &parent->children, sibling) {
			res = visitor(child, data);
			if (res) {
				if (res < 0)
					goto out;
				leader = child;
				goto down;
			}
up:
			;
		}
	}

	if (leader != top) {
		child = leader;
		parent = child->real_parent;
		leader = parent->group_leader;
		goto up;
	}
out:
	read_unlock(&tasklist_lock);
}
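
/*
 * (Depth-first walk over all descendants of @top under tasklist_lock:
 * a positive return from @visitor descends into that child's subtree,
 * a negative return aborts the walk.  The has_child_subreaper
 * propagation mentioned in copy_process() is one caller, via
 * prctl(PR_SET_CHILD_SUBREAPER).)
 */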

#ifndef ARCH_MIN_MMSTRUCT_ALIGN
#define ARCH_MIN_MMSTRUCT_ALIGN 0
#endif

static void sighand_ctor(void *data)
{
	struct sighand_struct *sighand = data;

	spin_lock_init(&sighand->siglock);
	init_waitqueue_head(&sighand->signalfd_wqh);
}
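
/*
 * (sighand_cachep is created with SLAB_TYPESAFE_BY_RCU below: the
 * constructor runs when an object is first set up in a slab page, not on
 * every allocation, so the siglock and waitqueue remain valid even while
 * an object is freed and recycled under a concurrent RCU reader.)
 */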

void __init proc_caches_init(void)
{
	sighand_cachep = kmem_cache_create("sighand_cache",
			sizeof(struct sighand_struct), 0,
			SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_TYPESAFE_BY_RCU|
			SLAB_ACCOUNT, sighand_ctor);
	signal_cachep = kmem_cache_create("signal_cache",
			sizeof(struct signal_struct), 0,
			SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_ACCOUNT,
			NULL);
	files_cachep = kmem_cache_create("files_cache",
			sizeof(struct files_struct), 0,
			SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_ACCOUNT,
			NULL);
	fs_cachep = kmem_cache_create("fs_cache",
			sizeof(struct fs_struct), 0,
			SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_ACCOUNT,
			NULL);
	/*
	 * FIXME! The "sizeof(struct mm_struct)" currently includes the
	 * whole struct cpumask for the OFFSTACK case. We could change
	 * this to *only* allocate as much of it as required by the
	 * maximum number of CPUs we can ever have.  The cpumask_allocation
	 * is at the end of the structure, exactly for that reason.
	 */
	mm_cachep = kmem_cache_create_usercopy("mm_struct",
			sizeof(struct mm_struct), ARCH_MIN_MMSTRUCT_ALIGN,
			SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_ACCOUNT,
			offsetof(struct mm_struct, saved_auxv),
			sizeof_field(struct mm_struct, saved_auxv),
			NULL);
	vm_area_cachep = KMEM_CACHE(vm_area_struct, SLAB_PANIC|SLAB_ACCOUNT);
	mmap_init();
	nsproxy_cache_init();
}
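
/*
 * (kmem_cache_create_usercopy() whitelists just the saved_auxv region of
 * mm_struct for user-space copies, so hardened usercopy checking can
 * reject copy_to_user()/copy_from_user() touching any other field.)
 */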

/*
 * Check constraints on flags passed to the unshare system call.
 */
static int check_unshare_flags(unsigned long unshare_flags)
{
	if (unshare_flags & ~(CLONE_THREAD|CLONE_FS|CLONE_NEWNS|CLONE_SIGHAND|
				CLONE_VM|CLONE_FILES|CLONE_SYSVSEM|
				CLONE_NEWUTS|CLONE_NEWIPC|CLONE_NEWNET|
				CLONE_NEWUSER|CLONE_NEWPID|CLONE_NEWCGROUP))
		return -EINVAL;
	/*
	 * Not implemented, but pretend it works if there is nothing
	 * to unshare.  Note that unsharing the address space or the
	 * signal handlers also needs to unshare the signal queues (aka
	 * CLONE_THREAD).
	 */
	if (unshare_flags & (CLONE_THREAD | CLONE_SIGHAND | CLONE_VM)) {
		if (!thread_group_empty(current))
			return -EINVAL;
	}
	if (unshare_flags & (CLONE_SIGHAND | CLONE_VM)) {
		if (atomic_read(&current->sighand->count) > 1)
			return -EINVAL;
	}
	if (unshare_flags & CLONE_VM) {
		if (!current_is_single_threaded())
			return -EINVAL;
	}

	return 0;
}

/*
 * Unshare the filesystem structure if it is being shared
 */
static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
{
	struct fs_struct *fs = current->fs;

	if (!(unshare_flags & CLONE_FS) || !fs)
		return 0;

	/* don't need lock here; in the worst case we'll do useless copy */
	if (fs->users == 1)
		return 0;

	*new_fsp = copy_fs_struct(fs);
	if (!*new_fsp)
		return -ENOMEM;

	return 0;
}

/*
2333
 * Unshare file descriptor table if it is being shared
2334 2335 2336 2337
 */
static int unshare_fd(unsigned long unshare_flags, struct files_struct **new_fdp)
{
	struct files_struct *fd = current->files;
2338
	int error = 0;
2339 2340

	if ((unshare_flags & CLONE_FILES) &&
2341 2342 2343 2344 2345
	    (fd && atomic_read(&fd->count) > 1)) {
		*new_fdp = dup_fd(fd, &error);
		if (!*new_fdp)
			return error;
	}
2346 2347 2348 2349 2350 2351 2352 2353 2354 2355 2356 2357

	return 0;
}

/*
 * unshare allows a process to 'unshare' part of the process
 * context which was originally shared using clone.  copy_*
 * functions used by do_fork() cannot be used here directly
 * because they modify an inactive task_struct that is being
 * constructed. Here we are modifying the current, active,
 * task_struct.
 */
int ksys_unshare(unsigned long unshare_flags)
{
	struct fs_struct *fs, *new_fs = NULL;
	struct files_struct *fd, *new_fd = NULL;
	struct cred *new_cred = NULL;
	struct nsproxy *new_nsproxy = NULL;
	int do_sysvsem = 0;
	int err;

	/*
	 * If unsharing a user namespace, must also unshare the thread group
	 * and unshare the filesystem root and working directories.
	 */
	if (unshare_flags & CLONE_NEWUSER)
		unshare_flags |= CLONE_THREAD | CLONE_FS;
	/*
	 * If unsharing vm, must also unshare signal handlers.
	 */
	if (unshare_flags & CLONE_VM)
		unshare_flags |= CLONE_SIGHAND;
	/*
	 * If unsharing signal handlers, must also unshare the signal queues.
	 */
	if (unshare_flags & CLONE_SIGHAND)
		unshare_flags |= CLONE_THREAD;
	/*
	 * If unsharing namespace, must also unshare filesystem information.
	 */
	if (unshare_flags & CLONE_NEWNS)
		unshare_flags |= CLONE_FS;
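	/*
	 * (Example: unshare(CLONE_NEWUSER) therefore arrives at
	 * check_unshare_flags() with CLONE_THREAD and CLONE_FS set as
	 * well, so it fails with -EINVAL in a multi-threaded caller.)
	 */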

	err = check_unshare_flags(unshare_flags);
	if (err)
		goto bad_unshare_out;
	/*
	 * CLONE_NEWIPC must also detach from the undolist: after switching
	 * to a new ipc namespace, the semaphore arrays from the old
	 * namespace are unreachable.
	 */
	if (unshare_flags & (CLONE_NEWIPC|CLONE_SYSVSEM))
		do_sysvsem = 1;
	err = unshare_fs(unshare_flags, &new_fs);
	if (err)
		goto bad_unshare_out;
	err = unshare_fd(unshare_flags, &new_fd);
	if (err)
		goto bad_unshare_cleanup_fs;
	err = unshare_userns(unshare_flags, &new_cred);
	if (err)
		goto bad_unshare_cleanup_fd;
	err = unshare_nsproxy_namespaces(unshare_flags, &new_nsproxy,
					 new_cred, new_fs);
	if (err)
		goto bad_unshare_cleanup_cred;

	if (new_fs || new_fd || do_sysvsem || new_cred || new_nsproxy) {
		if (do_sysvsem) {
			/*
			 * CLONE_SYSVSEM is equivalent to sys_exit().
			 */
			exit_sem(current);
		}
		if (unshare_flags & CLONE_NEWIPC) {
			/* Orphan segments in old ns (see sem above). */
			exit_shm(current);
			shm_init_task(current);
		}

		if (new_nsproxy)
			switch_task_namespaces(current, new_nsproxy);

		task_lock(current);

		if (new_fs) {
			fs = current->fs;
			spin_lock(&fs->lock);
			current->fs = new_fs;
			if (--fs->users)
				new_fs = NULL;
			else
				new_fs = fs;
			spin_unlock(&fs->lock);
		}

		if (new_fd) {
			fd = current->files;
			current->files = new_fd;
			new_fd = fd;
		}

		task_unlock(current);

		if (new_cred) {
			/* Install the new user namespace */
			commit_creds(new_cred);
			new_cred = NULL;
		}
	}

	perf_event_namespaces(current);

bad_unshare_cleanup_cred:
	if (new_cred)
		put_cred(new_cred);
bad_unshare_cleanup_fd:
	if (new_fd)
		put_files_struct(new_fd);

bad_unshare_cleanup_fs:
	if (new_fs)
		free_fs_struct(new_fs);

bad_unshare_out:
	return err;
}
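
/*
 * (Note the deliberate fall-through above: on success the
 * bad_unshare_cleanup_* labels still run, releasing whichever displaced
 * or unused copies are left behind in new_cred, new_fd and new_fs.)
 */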

SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
{
	return ksys_unshare(unshare_flags);
}

/*
 *	Helper to unshare the files of the current task.
 *	We don't want to expose copy_files internals to
 *	the exec layer of the kernel.
 */

int unshare_files(struct files_struct **displaced)
{
	struct task_struct *task = current;
	struct files_struct *copy = NULL;
	int error;

	error = unshare_fd(CLONE_FILES, &copy);
	if (error || !copy) {
		*displaced = NULL;
		return error;
	}
	*displaced = task->files;
	task_lock(task);
	task->files = copy;
	task_unlock(task);
	return 0;
}
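
/*
 * (The exec path uses this to swap in a private file table; the old,
 * possibly shared table is handed back in *displaced for the caller to
 * release once it no longer needs it.)
 */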

int sysctl_max_threads(struct ctl_table *table, int write,
		       void __user *buffer, size_t *lenp, loff_t *ppos)
{
	struct ctl_table t;
	int ret;
	int threads = max_threads;
	int min = MIN_THREADS;
	int max = MAX_THREADS;

	t = *table;
	t.data = &threads;
	t.extra1 = &min;
	t.extra2 = &max;

	ret = proc_dointvec_minmax(&t, write, buffer, lenp, ppos);
	if (ret || !write)
		return ret;

	set_max_threads(threads);

	return 0;
}
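
/*
 * (This is the handler behind the kernel.threads-max sysctl: a write to
 * /proc/sys/kernel/threads-max is validated against
 * [MIN_THREADS, MAX_THREADS] by proc_dointvec_minmax() and then applied
 * with set_max_threads().)
 */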