#ifndef _LINUX_SCHED_H
#define _LINUX_SCHED_H

#include <uapi/linux/sched.h>


struct sched_param {
	int sched_priority;
};

#include <asm/param.h>	/* for HZ */

#include <linux/capability.h>
#include <linux/threads.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/timex.h>
#include <linux/jiffies.h>
#include <linux/rbtree.h>
#include <linux/thread_info.h>
#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/nodemask.h>
#include <linux/mm_types.h>
#include <linux/preempt_mask.h>

#include <asm/page.h>
#include <asm/ptrace.h>
#include <asm/cputime.h>

#include <linux/smp.h>
#include <linux/sem.h>
#include <linux/signal.h>
#include <linux/compiler.h>
#include <linux/completion.h>
#include <linux/pid.h>
#include <linux/percpu.h>
#include <linux/topology.h>
#include <linux/proportions.h>
#include <linux/seccomp.h>
#include <linux/rcupdate.h>
#include <linux/rculist.h>
#include <linux/rtmutex.h>

#include <linux/time.h>
#include <linux/param.h>
#include <linux/resource.h>
#include <linux/timer.h>
#include <linux/hrtimer.h>
#include <linux/task_io_accounting.h>
#include <linux/latencytop.h>
#include <linux/cred.h>
#include <linux/llist.h>
#include <linux/uidgid.h>
#include <linux/gfp.h>

#include <asm/processor.h>

struct exec_domain;
struct futex_pi_state;
struct robust_list_head;
struct bio_list;
struct fs_struct;
struct perf_event_context;
struct blk_plug;

/*
 * List of flags we want to share for kernel threads,
 * if only because they are not used by them anyway.
 */
#define CLONE_KERNEL	(CLONE_FS | CLONE_FILES | CLONE_SIGHAND)

/*
 * These are the constants used to fake the fixed-point load-average
 * counting. Some notes:
 *  - 11 bit fractions expand to 22 bits by the multiplies: this gives
 *    a load-average precision of 10 bits integer + 11 bits fractional
 *  - if you want to count load-averages more often, you need more
 *    precision, or rounding will get you. With 2-second counting freq,
 *    the EXP_n values would be 1981, 2034 and 2043 if still using only
 *    11 bit fractions.
 */
extern unsigned long avenrun[];		/* Load averages */
extern void get_avenrun(unsigned long *loads, unsigned long offset, int shift);

#define FSHIFT		11		/* nr of bits of precision */
#define FIXED_1		(1<<FSHIFT)	/* 1.0 as fixed-point */
#define LOAD_FREQ	(5*HZ+1)	/* 5 sec intervals */
#define EXP_1		1884		/* 1/exp(5sec/1min) as fixed-point */
#define EXP_5		2014		/* 1/exp(5sec/5min) */
#define EXP_15		2037		/* 1/exp(5sec/15min) */

#define CALC_LOAD(load,exp,n) \
	load *= exp; \
	load += n*(FIXED_1-exp); \
	load >>= FSHIFT;
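
/*
 * Illustrative sketch (not part of this header): one LOAD_FREQ update of
 * the 1-minute average using the fixed-point scheme above, assuming a
 * hypothetical count_active_tasks() that samples the number of runnable
 * plus uninterruptible tasks:
 *
 *	unsigned long active = count_active_tasks() * FIXED_1;
 *	unsigned long load = avenrun[0];
 *
 *	CALC_LOAD(load, EXP_1, active);
 *	avenrun[0] = load;
 *
 * With FSHIFT == 11, FIXED_1 == 2048, so a reported load of 1.00 is
 * stored as 2048; EXP_1/FIXED_1 is the per-sample decay 1/exp(5sec/1min).
 */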

extern unsigned long total_forks;
extern int nr_threads;
DECLARE_PER_CPU(unsigned long, process_counts);
extern int nr_processes(void);
extern unsigned long nr_running(void);
extern unsigned long nr_iowait(void);
extern unsigned long nr_iowait_cpu(int cpu);
extern unsigned long this_cpu_load(void);


extern void calc_global_load(unsigned long ticks);
extern void update_cpu_load_nohz(void);

extern unsigned long get_parent_ip(unsigned long addr);

extern void dump_cpu_task(int cpu);

struct seq_file;
struct cfs_rq;
struct task_group;
#ifdef CONFIG_SCHED_DEBUG
extern void proc_sched_show_task(struct task_struct *p, struct seq_file *m);
extern void proc_sched_set_task(struct task_struct *p);
extern void
print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq);
#endif

/*
 * Task state bitmask. NOTE! These bits are also
 * encoded in fs/proc/array.c: get_task_state().
 *
 * We have two separate sets of flags: task->state
 * is about runnability, while task->exit_state is
 * about the task exiting. Confusing, but this way
 * modifying one set can't modify the other one by
 * mistake.
 */
#define TASK_RUNNING		0
#define TASK_INTERRUPTIBLE	1
#define TASK_UNINTERRUPTIBLE	2
#define __TASK_STOPPED		4
#define __TASK_TRACED		8
/* in tsk->exit_state */
#define EXIT_ZOMBIE		16
#define EXIT_DEAD		32
/* in tsk->state again */
#define TASK_DEAD		64
#define TASK_WAKEKILL		128
#define TASK_WAKING		256
#define TASK_PARKED		512
#define TASK_STATE_MAX		1024

#define TASK_STATE_TO_CHAR_STR "RSDTtZXxKWP"

extern char ___assert_task_state[1 - 2*!!(
		sizeof(TASK_STATE_TO_CHAR_STR)-1 != ilog2(TASK_STATE_MAX)+1)];

/* Convenience macros for the sake of set_task_state */
#define TASK_KILLABLE		(TASK_WAKEKILL | TASK_UNINTERRUPTIBLE)
#define TASK_STOPPED		(TASK_WAKEKILL | __TASK_STOPPED)
#define TASK_TRACED		(TASK_WAKEKILL | __TASK_TRACED)

/* Convenience macros for the sake of wake_up */
#define TASK_NORMAL		(TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE)
#define TASK_ALL		(TASK_NORMAL | __TASK_STOPPED | __TASK_TRACED)

/* get_task_state() */
#define TASK_REPORT		(TASK_RUNNING | TASK_INTERRUPTIBLE | \
				 TASK_UNINTERRUPTIBLE | __TASK_STOPPED | \
				 __TASK_TRACED)

#define task_is_traced(task)	((task->state & __TASK_TRACED) != 0)
#define task_is_stopped(task)	((task->state & __TASK_STOPPED) != 0)
#define task_is_stopped_or_traced(task)	\
			((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0)
#define task_contributes_to_load(task)	\
				((task->state & TASK_UNINTERRUPTIBLE) != 0 && \
				 (task->flags & PF_FROZEN) == 0)
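
/*
 * Illustrative example (not part of this header): the helpers above are
 * the preferred way to test another task's state, e.g. from a
 * ptrace-style path, where inspect_child() stands in for whatever the
 * caller does with a task known not to be running user code:
 *
 *	if (task_is_stopped_or_traced(child))
 *		inspect_child(child);
 */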

#define __set_task_state(tsk, state_value)		\
	do { (tsk)->state = (state_value); } while (0)
#define set_task_state(tsk, state_value)		\
	set_mb((tsk)->state, (state_value))

/*
 * set_current_state() includes a barrier so that the write of current->state
 * is correctly serialised wrt the caller's subsequent test of whether to
 * actually sleep:
 *
 *	set_current_state(TASK_UNINTERRUPTIBLE);
 *	if (do_i_need_to_sleep())
 *		schedule();
 *
 * If the caller does not need such serialisation then use __set_current_state()
 */
#define __set_current_state(state_value)			\
	do { current->state = (state_value); } while (0)
#define set_current_state(state_value)		\
	set_mb(current->state, (state_value))
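
/*
 * Illustrative sketch (not part of this header) of the canonical sleep
 * loop built on the macros above, paired with a waker that updates the
 * condition before calling wake_up():
 *
 *	for (;;) {
 *		set_current_state(TASK_INTERRUPTIBLE);
 *		if (condition)
 *			break;
 *		schedule();
 *	}
 *	__set_current_state(TASK_RUNNING);
 *
 * set_current_state() is used inside the loop so the state write cannot
 * be reordered past the condition test; the final reset back to
 * TASK_RUNNING needs no barrier, hence __set_current_state().
 */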

/* Task command name length */
#define TASK_COMM_LEN 16

#include <linux/spinlock.h>

/*
 * This serializes "schedule()" and also protects
 * the run-queue from deletions/modifications (but
 * _adding_ to the beginning of the run-queue has
 * a separate lock).
 */
extern rwlock_t tasklist_lock;
extern spinlock_t mmlist_lock;

struct task_struct;

#ifdef CONFIG_PROVE_RCU
extern int lockdep_tasklist_lock_is_held(void);
#endif /* #ifdef CONFIG_PROVE_RCU */

extern void sched_init(void);
extern void sched_init_smp(void);
extern asmlinkage void schedule_tail(struct task_struct *prev);
extern void init_idle(struct task_struct *idle, int cpu);
extern void init_idle_bootup_task(struct task_struct *idle);

extern int runqueue_is_locked(int cpu);

#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
extern void nohz_balance_enter_idle(int cpu);
extern void set_cpu_sd_state_idle(void);
extern int get_nohz_timer_target(void);
#else
static inline void nohz_balance_enter_idle(int cpu) { }
static inline void set_cpu_sd_state_idle(void) { }
#endif

/*
 * Only dump TASK_* tasks. (0 for all tasks)
 */
extern void show_state_filter(unsigned long state_filter);

static inline void show_state(void)
{
	show_state_filter(0);
}

extern void show_regs(struct pt_regs *);

/*
 * TASK is a pointer to the task whose backtrace we want to see (or NULL for current
 * task), SP is the stack pointer of the first frame that should be shown in the back
 * trace (or NULL if the entire call-chain of the task should be shown).
 */
extern void show_stack(struct task_struct *task, unsigned long *sp);

void io_schedule(void);
long io_schedule_timeout(long timeout);

extern void cpu_init (void);
extern void trap_init(void);
extern void update_process_times(int user);
extern void scheduler_tick(void);

extern void sched_show_task(struct task_struct *p);

#ifdef CONFIG_LOCKUP_DETECTOR
extern void touch_softlockup_watchdog(void);
extern void touch_softlockup_watchdog_sync(void);
extern void touch_all_softlockup_watchdogs(void);
extern int proc_dowatchdog_thresh(struct ctl_table *table, int write,
				  void __user *buffer,
				  size_t *lenp, loff_t *ppos);
extern unsigned int  softlockup_panic;
void lockup_detector_init(void);
#else
static inline void touch_softlockup_watchdog(void)
{
}
static inline void touch_softlockup_watchdog_sync(void)
{
}
static inline void touch_all_softlockup_watchdogs(void)
{
}
static inline void lockup_detector_init(void)
{
}
#endif

#ifdef CONFIG_DETECT_HUNG_TASK
void reset_hung_task_detector(void);
#else
static inline void reset_hung_task_detector(void)
{
}
#endif

/* Attach to any functions which should be ignored in wchan output. */
#define __sched		__attribute__((__section__(".sched.text")))

/* Linker adds these: start and end of __sched functions */
extern char __sched_text_start[], __sched_text_end[];

/* Is this address in the __sched functions? */
extern int in_sched_functions(unsigned long addr);

#define	MAX_SCHEDULE_TIMEOUT	LONG_MAX
extern signed long schedule_timeout(signed long timeout);
extern signed long schedule_timeout_interruptible(signed long timeout);
extern signed long schedule_timeout_killable(signed long timeout);
extern signed long schedule_timeout_uninterruptible(signed long timeout);
asmlinkage void schedule(void);
extern void schedule_preempt_disabled(void);
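
/*
 * Illustrative example (not part of this header): sleeping for roughly
 * one second while remaining killable. schedule_timeout_killable() sets
 * the task state itself and returns the remaining jiffies (0 on full
 * expiry), so a non-zero result means a fatal signal cut the sleep short:
 *
 *	long left = schedule_timeout_killable(HZ);
 *	if (left)
 *		return -ERESTARTSYS;
 */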

struct nsproxy;
struct user_namespace;

#ifdef CONFIG_MMU
extern void arch_pick_mmap_layout(struct mm_struct *mm);
extern unsigned long
arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
		       unsigned long, unsigned long);
extern unsigned long
arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
			  unsigned long len, unsigned long pgoff,
			  unsigned long flags);
#else
static inline void arch_pick_mmap_layout(struct mm_struct *mm) {}
#endif


extern void set_dumpable(struct mm_struct *mm, int value);
extern int get_dumpable(struct mm_struct *mm);

#define SUID_DUMP_DISABLE	0	/* No setuid dumping */
#define SUID_DUMP_USER		1	/* Dump as user of process */
#define SUID_DUMP_ROOT		2	/* Dump as root */

/* mm flags */
/* dumpable bits */
#define MMF_DUMPABLE      0  /* core dump is permitted */
#define MMF_DUMP_SECURELY 1  /* core file is readable only by root */

#define MMF_DUMPABLE_BITS 2
#define MMF_DUMPABLE_MASK ((1 << MMF_DUMPABLE_BITS) - 1)

/* coredump filter bits */
#define MMF_DUMP_ANON_PRIVATE	2
#define MMF_DUMP_ANON_SHARED	3
#define MMF_DUMP_MAPPED_PRIVATE	4
#define MMF_DUMP_MAPPED_SHARED	5
#define MMF_DUMP_ELF_HEADERS	6
#define MMF_DUMP_HUGETLB_PRIVATE 7
#define MMF_DUMP_HUGETLB_SHARED  8

#define MMF_DUMP_FILTER_SHIFT	MMF_DUMPABLE_BITS
#define MMF_DUMP_FILTER_BITS	7
#define MMF_DUMP_FILTER_MASK \
	(((1 << MMF_DUMP_FILTER_BITS) - 1) << MMF_DUMP_FILTER_SHIFT)
#define MMF_DUMP_FILTER_DEFAULT \
	((1 << MMF_DUMP_ANON_PRIVATE) |	(1 << MMF_DUMP_ANON_SHARED) |\
	 (1 << MMF_DUMP_HUGETLB_PRIVATE) | MMF_DUMP_MASK_DEFAULT_ELF)

#ifdef CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS
# define MMF_DUMP_MASK_DEFAULT_ELF	(1 << MMF_DUMP_ELF_HEADERS)
#else
# define MMF_DUMP_MASK_DEFAULT_ELF	0
#endif
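
/*
 * Illustrative example (not part of this header): these filter bits are
 * what /proc/<pid>/coredump_filter exposes, shifted down by
 * MMF_DUMP_FILTER_SHIFT. A coredump path would test a bit in mm->flags
 * along these lines before dumping a given mapping:
 *
 *	if (mm->flags & (1UL << MMF_DUMP_ANON_PRIVATE))
 *		dump_this_vma = 1;
 */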
					/* leave room for more dump flags */
#define MMF_VM_MERGEABLE	16	/* KSM may merge identical pages */
#define MMF_VM_HUGEPAGE		17	/* set when VM_HUGEPAGE is set on vma */
#define MMF_EXE_FILE_CHANGED	18	/* see prctl_set_mm_exe_file() */

#define MMF_HAS_UPROBES		19	/* has uprobes */
#define MMF_RECALC_UPROBES	20	/* MMF_HAS_UPROBES can be wrong */

#define MMF_INIT_MASK		(MMF_DUMPABLE_MASK | MMF_DUMP_FILTER_MASK)

struct sighand_struct {
	atomic_t		count;
	struct k_sigaction	action[_NSIG];
	spinlock_t		siglock;
	wait_queue_head_t	signalfd_wqh;
};

struct pacct_struct {
	int			ac_flag;
	long			ac_exitcode;
	unsigned long		ac_mem;
	cputime_t		ac_utime, ac_stime;
	unsigned long		ac_minflt, ac_majflt;
};

struct cpu_itimer {
	cputime_t expires;
	cputime_t incr;
	u32 error;
	u32 incr_error;
};

/**
 * struct cputime - snapshot of system and user cputime
 * @utime: time spent in user mode
 * @stime: time spent in system mode
 *
 * Gathers a generic snapshot of user and system time.
 */
struct cputime {
	cputime_t utime;
	cputime_t stime;
};

/**
 * struct task_cputime - collected CPU time counts
 * @utime:		time spent in user mode, in &cputime_t units
 * @stime:		time spent in kernel mode, in &cputime_t units
 * @sum_exec_runtime:	total time spent on the CPU, in nanoseconds
 *
 * This is an extension of struct cputime that includes the total runtime
 * spent by the task from the scheduler point of view.
 *
 * As a result, this structure groups together three kinds of CPU time
 * that are tracked for threads and thread groups.  Most things considering
 * CPU time want to group these counts together and treat all three
 * of them in parallel.
 */
struct task_cputime {
	cputime_t utime;
	cputime_t stime;
	unsigned long long sum_exec_runtime;
};
/* Alternate field names when used to cache expirations. */
#define prof_exp	stime
#define virt_exp	utime
#define sched_exp	sum_exec_runtime

#define INIT_CPUTIME	\
	(struct task_cputime) {					\
		.utime = 0,					\
		.stime = 0,					\
		.sum_exec_runtime = 0,				\
	}

#ifdef CONFIG_PREEMPT_COUNT
#define PREEMPT_DISABLED	(1 + PREEMPT_ENABLED)
#else
#define PREEMPT_DISABLED	PREEMPT_ENABLED
#endif

/*
 * Disable preemption until the scheduler is running.
 * Reset by start_kernel()->sched_init()->init_idle().
 *
 * We include PREEMPT_ACTIVE to avoid cond_resched() from working
 * before the scheduler is active -- see should_resched().
 */
#define INIT_PREEMPT_COUNT	(PREEMPT_DISABLED + PREEMPT_ACTIVE)

/**
 * struct thread_group_cputimer - thread group interval timer counts
 * @cputime:		thread group interval timers.
 * @running:		non-zero when there are timers running and
 * 			@cputime receives updates.
 * @lock:		lock for fields in this struct.
 *
 * This structure contains the version of task_cputime, above, that is
 * used for thread group CPU timer calculations.
 */
struct thread_group_cputimer {
	struct task_cputime cputime;
	int running;
	raw_spinlock_t lock;
};

#include <linux/rwsem.h>
struct autogroup;

/*
 * NOTE! "signal_struct" does not have its own
 * locking, because a shared signal_struct always
 * implies a shared sighand_struct, so locking
 * sighand_struct is always a proper superset of
 * the locking of signal_struct.
 */
struct signal_struct {
	atomic_t		sigcnt;
	atomic_t		live;
	int			nr_threads;

	wait_queue_head_t	wait_chldexit;	/* for wait4() */

	/* current thread group signal load-balancing target: */
	struct task_struct	*curr_target;

	/* shared signal handling: */
	struct sigpending	shared_pending;

	/* thread group exit support */
	int			group_exit_code;
	/* overloaded:
	 * - notify group_exit_task when ->count is equal to notify_count
	 * - everyone except group_exit_task is stopped during signal delivery
	 *   of fatal signals, group_exit_task processes the signal.
	 */
	int			notify_count;
	struct task_struct	*group_exit_task;

	/* thread group stop support, overloads group_exit_code too */
	int			group_stop_count;
	unsigned int		flags; /* see SIGNAL_* flags below */

	/*
	 * PR_SET_CHILD_SUBREAPER marks a process, like a service
	 * manager, to re-parent orphan (double-forking) child processes
	 * to this process instead of 'init'. The service manager is
	 * able to receive SIGCHLD signals and is able to investigate
	 * the process until it calls wait(). All children of this
	 * process will inherit a flag if they should look for a
	 * child_subreaper process at exit.
	 */
	unsigned int		is_child_subreaper:1;
	unsigned int		has_child_subreaper:1;

L
	/* POSIX.1b Interval Timers */
	int			posix_timer_id;
	struct list_head	posix_timers;
	/* ITIMER_REAL timer for the process */
527
	struct hrtimer real_timer;
528
	struct pid *leader_pid;
529
	ktime_t it_real_incr;
L
Linus Torvalds 已提交
530

531 532 533 534 535 536
	/*
	 * ITIMER_PROF and ITIMER_VIRTUAL timers for the process, we use
	 * CPUCLOCK_PROF and CPUCLOCK_VIRT for indexing array as these
	 * values are defined to 0 and 1 respectively
	 */
	struct cpu_itimer it[2];
L

	/*
	 * Thread group totals for process CPU timers.
	 * See thread_group_cputimer(), et al, for details.
	 */
	struct thread_group_cputimer cputimer;
	/* Earliest-expiration cache. */
	struct task_cputime cputime_expires;

	struct list_head cpu_timers[3];

	struct pid *tty_old_pgrp;

	/* boolean value for session group leader */
	int leader;

	struct tty_struct *tty; /* NULL if no tty */

#ifdef CONFIG_SCHED_AUTOGROUP
	struct autogroup *autogroup;
#endif
	/*
	 * Cumulative resource counters for dead threads in the group,
	 * and for reaped dead child processes forked by this group.
	 * Live threads maintain their own counters and add to these
	 * in __exit_signal, except for the group leader.
	 */
	cputime_t utime, stime, cutime, cstime;
	cputime_t gtime;
	cputime_t cgtime;
#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
	struct cputime prev_cputime;
#endif
	unsigned long nvcsw, nivcsw, cnvcsw, cnivcsw;
	unsigned long min_flt, maj_flt, cmin_flt, cmaj_flt;
	unsigned long inblock, oublock, cinblock, coublock;
	unsigned long maxrss, cmaxrss;
	struct task_io_accounting ioac;

	/*
	 * Cumulative ns of scheduled CPU time for dead threads in the
	 * group, not including a zombie group leader. (This only differs
	 * from jiffies_to_ns(utime + stime) if sched_clock uses something
	 * other than jiffies.)
	 */
	unsigned long long sum_sched_runtime;

	/*
	 * We don't bother to synchronize most readers of this at all,
	 * because there is no reader checking a limit that actually needs
	 * to get both rlim_cur and rlim_max atomically, and either one
	 * alone is a single word that can safely be read normally.
	 * getrlimit/setrlimit use task_lock(current->group_leader) to
	 * protect this instead of the siglock, because they really
	 * have no need to disable irqs.
	 */
	struct rlimit rlim[RLIM_NLIMITS];

#ifdef CONFIG_BSD_PROCESS_ACCT
	struct pacct_struct pacct;	/* per-process accounting information */
#endif
#ifdef CONFIG_TASKSTATS
	struct taskstats *stats;
#endif
#ifdef CONFIG_AUDIT
	unsigned audit_tty;
	unsigned audit_tty_log_passwd;
	struct tty_audit_buf *tty_audit_buf;
#endif
#ifdef CONFIG_CGROUPS
	/*
	 * group_rwsem prevents new tasks from entering the threadgroup and
	 * member tasks from exiting, more specifically, setting of
	 * PF_EXITING.  fork and exit paths are protected with this rwsem
	 * using threadgroup_change_begin/end().  Users which require
	 * threadgroup to remain stable should use threadgroup_[un]lock()
	 * which also takes care of exec path.  Currently, cgroup is the
	 * only user.
	 */
	struct rw_semaphore group_rwsem;
#endif

	oom_flags_t oom_flags;
	short oom_score_adj;		/* OOM kill score adjustment */
	short oom_score_adj_min;	/* OOM kill score adjustment min value.
					 * Only settable by CAP_SYS_RESOURCE. */

	struct mutex cred_guard_mutex;	/* guard against foreign influences on
					 * credential calculations
					 * (notably ptrace) */
};

/*
 * Bits in flags field of signal_struct.
 */
#define SIGNAL_STOP_STOPPED	0x00000001 /* job control stop in effect */
#define SIGNAL_STOP_CONTINUED	0x00000002 /* SIGCONT since WCONTINUED reap */
#define SIGNAL_GROUP_EXIT	0x00000004 /* group exit in progress */
#define SIGNAL_GROUP_COREDUMP	0x00000008 /* coredump in progress */
/*
 * Pending notifications to parent.
 */
#define SIGNAL_CLD_STOPPED	0x00000010
#define SIGNAL_CLD_CONTINUED	0x00000020
#define SIGNAL_CLD_MASK		(SIGNAL_CLD_STOPPED|SIGNAL_CLD_CONTINUED)

#define SIGNAL_UNKILLABLE	0x00000040 /* for init: ignore fatal signals */

/* If true, all threads except ->group_exit_task have pending SIGKILL */
static inline int signal_group_exit(const struct signal_struct *sig)
{
	return	(sig->flags & SIGNAL_GROUP_EXIT) ||
		(sig->group_exit_task != NULL);
}

/*
 * Some day this will be a full-fledged user tracking system..
 */
struct user_struct {
	atomic_t __count;	/* reference count */
	atomic_t processes;	/* How many processes does this user have? */
	atomic_t files;		/* How many open files does this user have? */
	atomic_t sigpending;	/* How many pending signals does this user have? */
#ifdef CONFIG_INOTIFY_USER
	atomic_t inotify_watches; /* How many inotify watches does this user have? */
	atomic_t inotify_devs;	/* How many inotify devs does this user have opened? */
#endif
#ifdef CONFIG_FANOTIFY
	atomic_t fanotify_listeners;
#endif
#ifdef CONFIG_EPOLL
	atomic_long_t epoll_watches; /* The number of file descriptors currently watched */
#endif
#ifdef CONFIG_POSIX_MQUEUE
	/* protected by mq_lock	*/
	unsigned long mq_bytes;	/* How many bytes can be allocated to mqueue? */
#endif
	unsigned long locked_shm; /* How many pages of mlocked shm ? */

#ifdef CONFIG_KEYS
	struct key *uid_keyring;	/* UID specific keyring */
	struct key *session_keyring;	/* UID's default session keyring */
#endif

	/* Hash table maintenance information */
	struct hlist_node uidhash_node;
	kuid_t uid;

#ifdef CONFIG_PERF_EVENTS
	atomic_long_t locked_vm;
#endif
};

extern int uids_sysfs_init(void);

extern struct user_struct *find_user(kuid_t);

extern struct user_struct root_user;
#define INIT_USER (&root_user)


struct backing_dev_info;
struct reclaim_state;

#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
struct sched_info {
	/* cumulative counters */
	unsigned long pcount;	      /* # of times run on this cpu */
	unsigned long long run_delay; /* time spent waiting on a runqueue */

	/* timestamps */
	unsigned long long last_arrival,/* when we last ran on a cpu */
			   last_queued;	/* when we were last queued to run */
};
#endif /* defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT) */

#ifdef CONFIG_TASK_DELAY_ACCT
struct task_delay_info {
	spinlock_t	lock;
	unsigned int	flags;	/* Private per-task flags */

	/* For each stat XXX, add following, aligned appropriately
	 *
	 * struct timespec XXX_start, XXX_end;
	 * u64 XXX_delay;
	 * u32 XXX_count;
	 *
	 * Atomicity of updates to XXX_delay, XXX_count protected by
	 * single lock above (split into XXX_lock if contention is an issue).
	 */

	/*
	 * XXX_count is incremented on every XXX operation, the delay
	 * associated with the operation is added to XXX_delay.
	 * XXX_delay contains the accumulated delay time in nanoseconds.
	 */
	struct timespec blkio_start, blkio_end;	/* Shared by blkio, swapin */
	u64 blkio_delay;	/* wait for sync block io completion */
	u64 swapin_delay;	/* wait for swapin block io completion */
	u32 blkio_count;	/* total count of the number of sync block */
				/* io operations performed */
	u32 swapin_count;	/* total count of the number of swapin block */
				/* io operations performed */

	struct timespec freepages_start, freepages_end;
	u64 freepages_delay;	/* wait for memory reclaim */
	u32 freepages_count;	/* total count of memory reclaim */
};
#endif	/* CONFIG_TASK_DELAY_ACCT */

static inline int sched_info_on(void)
{
#ifdef CONFIG_SCHEDSTATS
	return 1;
#elif defined(CONFIG_TASK_DELAY_ACCT)
	extern int delayacct_on;
	return delayacct_on;
#else
	return 0;
#endif
}

enum cpu_idle_type {
	CPU_IDLE,
	CPU_NOT_IDLE,
	CPU_NEWLY_IDLE,
	CPU_MAX_IDLE_TYPES
};

/*
 * Increase resolution of cpu_power calculations
 */
#define SCHED_POWER_SHIFT	10
#define SCHED_POWER_SCALE	(1L << SCHED_POWER_SHIFT)

/*
 * sched-domains (multiprocessor balancing) declarations:
 */
#ifdef CONFIG_SMP
#define SD_LOAD_BALANCE		0x0001	/* Do load balancing on this domain. */
#define SD_BALANCE_NEWIDLE	0x0002	/* Balance when about to become idle */
#define SD_BALANCE_EXEC		0x0004	/* Balance on exec */
#define SD_BALANCE_FORK		0x0008	/* Balance on fork, clone */
#define SD_BALANCE_WAKE		0x0010  /* Balance on wakeup */
#define SD_WAKE_AFFINE		0x0020	/* Wake task to waking CPU */
#define SD_SHARE_CPUPOWER	0x0080	/* Domain members share cpu power */
#define SD_SHARE_PKG_RESOURCES	0x0200	/* Domain members share cpu pkg resources */
#define SD_SERIALIZE		0x0400	/* Only a single load balancing instance */
#define SD_ASYM_PACKING		0x0800  /* Place busy groups earlier in the domain */
#define SD_PREFER_SIBLING	0x1000	/* Prefer to place tasks in a sibling domain */
#define SD_OVERLAP		0x2000	/* sched_domains of this level overlap */
#define SD_NUMA			0x4000	/* cross-node balancing */

extern int __weak arch_sd_sibiling_asym_packing(void);

struct sched_domain_attr {
	int relax_domain_level;
};

#define SD_ATTR_INIT	(struct sched_domain_attr) {	\
	.relax_domain_level = -1,			\
}

extern int sched_domain_level_max;

struct sched_group;

struct sched_domain {
	/* These fields must be setup */
	struct sched_domain *parent;	/* top domain must be null terminated */
	struct sched_domain *child;	/* bottom domain must be null terminated */
	struct sched_group *groups;	/* the balancing groups of the domain */
	unsigned long min_interval;	/* Minimum balance interval ms */
	unsigned long max_interval;	/* Maximum balance interval ms */
	unsigned int busy_factor;	/* less balancing by factor if busy */
	unsigned int imbalance_pct;	/* No balance until over watermark */
	unsigned int cache_nice_tries;	/* Leave cache hot tasks for # tries */
	unsigned int busy_idx;
	unsigned int idle_idx;
	unsigned int newidle_idx;
	unsigned int wake_idx;
	unsigned int forkexec_idx;
	unsigned int smt_gain;

	int nohz_idle;			/* NOHZ IDLE status */
	int flags;			/* See SD_* */
	int level;

	/* Runtime fields. */
	unsigned long last_balance;	/* init to jiffies. units in jiffies */
	unsigned int balance_interval;	/* initialise to 1. units in ms. */
	unsigned int nr_balance_failed; /* initialise to 0 */

	/* idle_balance() stats */
	u64 max_newidle_lb_cost;
	unsigned long next_decay_max_lb_cost;

#ifdef CONFIG_SCHEDSTATS
	/* load_balance() stats */
	unsigned int lb_count[CPU_MAX_IDLE_TYPES];
	unsigned int lb_failed[CPU_MAX_IDLE_TYPES];
	unsigned int lb_balanced[CPU_MAX_IDLE_TYPES];
	unsigned int lb_imbalance[CPU_MAX_IDLE_TYPES];
	unsigned int lb_gained[CPU_MAX_IDLE_TYPES];
	unsigned int lb_hot_gained[CPU_MAX_IDLE_TYPES];
	unsigned int lb_nobusyg[CPU_MAX_IDLE_TYPES];
	unsigned int lb_nobusyq[CPU_MAX_IDLE_TYPES];

	/* Active load balancing */
	unsigned int alb_count;
	unsigned int alb_failed;
	unsigned int alb_pushed;

	/* SD_BALANCE_EXEC stats */
	unsigned int sbe_count;
	unsigned int sbe_balanced;
	unsigned int sbe_pushed;

	/* SD_BALANCE_FORK stats */
	unsigned int sbf_count;
	unsigned int sbf_balanced;
	unsigned int sbf_pushed;

	/* try_to_wake_up() stats */
	unsigned int ttwu_wake_remote;
	unsigned int ttwu_move_affine;
	unsigned int ttwu_move_balance;
#endif
#ifdef CONFIG_SCHED_DEBUG
	char *name;
#endif
	union {
		void *private;		/* used during construction */
		struct rcu_head rcu;	/* used during destruction */
	};

	unsigned int span_weight;
	/*
	 * Span of all CPUs in this domain.
	 *
	 * NOTE: this field is variable length. (Allocated dynamically
	 * by attaching extra space to the end of the structure,
	 * depending on how many CPUs the kernel has booted up with)
	 */
	unsigned long span[0];
};

static inline struct cpumask *sched_domain_span(struct sched_domain *sd)
{
	return to_cpumask(sd->span);
}

extern void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
				    struct sched_domain_attr *dattr_new);

/* Allocate an array of sched domains, for partition_sched_domains(). */
cpumask_var_t *alloc_sched_domains(unsigned int ndoms);
void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms);

bool cpus_share_cache(int this_cpu, int that_cpu);

#else /* CONFIG_SMP */

struct sched_domain_attr;

static inline void
partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
			struct sched_domain_attr *dattr_new)
{
}

static inline bool cpus_share_cache(int this_cpu, int that_cpu)
{
	return true;
}

#endif	/* !CONFIG_SMP */


struct io_context;			/* See blkdev.h */


#ifdef ARCH_HAS_PREFETCH_SWITCH_STACK
extern void prefetch_stack(struct task_struct *t);
#else
static inline void prefetch_stack(struct task_struct *t) { }
#endif

struct audit_context;		/* See audit.c */
struct mempolicy;
struct pipe_inode_info;
struct uts_namespace;

struct load_weight {
	unsigned long weight;
	u32 inv_weight;
};

struct sched_avg {
	/*
	 * These sums represent an infinite geometric series and so are bound
	 * above by 1024/(1-y).  Thus we only need a u32 to store them for all
	 * choices of y < 1-2^(-32)*1024.
	 */
	u32 runnable_avg_sum, runnable_avg_period;
	u64 last_runnable_update;
	s64 decay_count;
	unsigned long load_avg_contrib;
};

#ifdef CONFIG_SCHEDSTATS
struct sched_statistics {
	u64			wait_start;
	u64			wait_max;
	u64			wait_count;
	u64			wait_sum;
	u64			iowait_count;
	u64			iowait_sum;

	u64			sleep_start;
	u64			sleep_max;
	s64			sum_sleep_runtime;

	u64			block_start;
	u64			block_max;
	u64			exec_max;
	u64			slice_max;

	u64			nr_migrations_cold;
	u64			nr_failed_migrations_affine;
	u64			nr_failed_migrations_running;
	u64			nr_failed_migrations_hot;
	u64			nr_forced_migrations;

	u64			nr_wakeups;
	u64			nr_wakeups_sync;
	u64			nr_wakeups_migrate;
	u64			nr_wakeups_local;
	u64			nr_wakeups_remote;
	u64			nr_wakeups_affine;
	u64			nr_wakeups_affine_attempts;
	u64			nr_wakeups_passive;
	u64			nr_wakeups_idle;
};
#endif

struct sched_entity {
	struct load_weight	load;		/* for load-balancing */
	struct rb_node		run_node;
	struct list_head	group_node;
	unsigned int		on_rq;

	u64			exec_start;
	u64			sum_exec_runtime;
	u64			vruntime;
	u64			prev_sum_exec_runtime;

	u64			nr_migrations;

#ifdef CONFIG_SCHEDSTATS
	struct sched_statistics statistics;
#endif

#ifdef CONFIG_FAIR_GROUP_SCHED
	struct sched_entity	*parent;
	/* rq on which this entity is (to be) queued: */
	struct cfs_rq		*cfs_rq;
	/* rq "owned" by this entity/group: */
	struct cfs_rq		*my_q;
#endif

#ifdef CONFIG_SMP
	/* Per-entity load-tracking */
	struct sched_avg	avg;
#endif
};

struct sched_rt_entity {
	struct list_head run_list;
	unsigned long timeout;
	unsigned long watchdog_stamp;
	unsigned int time_slice;

	struct sched_rt_entity *back;
#ifdef CONFIG_RT_GROUP_SCHED
	struct sched_rt_entity	*parent;
	/* rq on which this entity is (to be) queued: */
	struct rt_rq		*rt_rq;
	/* rq "owned" by this entity/group: */
	struct rt_rq		*my_q;
#endif
};


struct rcu_node;

enum perf_event_task_context {
	perf_invalid_context = -1,
	perf_hw_context = 0,
	perf_sw_context,
	perf_nr_task_contexts,
};

struct task_struct {
	volatile long state;	/* -1 unrunnable, 0 runnable, >0 stopped */
	void *stack;
	atomic_t usage;
	unsigned int flags;	/* per process flags, defined below */
	unsigned int ptrace;

#ifdef CONFIG_PREEMPT_RCU
	struct llist_node wake_entry;
	int on_cpu;
	struct task_struct *last_wakee;
	unsigned long wakee_flips;
	unsigned long wakee_flip_decay_ts;

	int wake_cpu;
#endif
	int on_rq;

	int prio, static_prio, normal_prio;
	unsigned int rt_priority;
	const struct sched_class *sched_class;
	struct sched_entity se;
	struct sched_rt_entity rt;
#ifdef CONFIG_CGROUP_SCHED
	struct task_group *sched_task_group;
#endif

#ifdef CONFIG_PREEMPT_NOTIFIERS
	/* list of struct preempt_notifier: */
	struct hlist_head preempt_notifiers;
#endif

#ifdef CONFIG_BLK_DEV_IO_TRACE
	unsigned int btrace_seq;
#endif

	unsigned int policy;
	int nr_cpus_allowed;
	cpumask_t cpus_allowed;

#ifdef CONFIG_PREEMPT_RCU
	int rcu_read_lock_nesting;
	char rcu_read_unlock_special;
	struct list_head rcu_node_entry;
#endif /* #ifdef CONFIG_PREEMPT_RCU */
#ifdef CONFIG_TREE_PREEMPT_RCU
	struct rcu_node *rcu_blocked_node;
#endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */
#ifdef CONFIG_RCU_BOOST
	struct rt_mutex *rcu_boost_mutex;
#endif /* #ifdef CONFIG_RCU_BOOST */

#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
	struct sched_info sched_info;
#endif

	struct list_head tasks;
#ifdef CONFIG_SMP
	struct plist_node pushable_tasks;
#endif

	struct mm_struct *mm, *active_mm;
#ifdef CONFIG_COMPAT_BRK
	unsigned brk_randomized:1;
#endif
#if defined(SPLIT_RSS_COUNTING)
	struct task_rss_stat	rss_stat;
#endif
/* task state */
	int exit_state;
	int exit_code, exit_signal;
	int pdeath_signal;  /*  The signal sent when the parent dies  */
	unsigned int jobctl;	/* JOBCTL_*, siglock protected */

	/* Used for emulating ABI behavior of previous Linux versions */
	unsigned int personality;

	unsigned did_exec:1;
	unsigned in_execve:1;	/* Tell the LSMs that the process is doing an
				 * execve */
	unsigned in_iowait:1;

	/* task may not gain privileges */
	unsigned no_new_privs:1;

	/* Revert to default priority/policy when forking */
	unsigned sched_reset_on_fork:1;
	unsigned sched_contributes_to_load:1;

	pid_t pid;
	pid_t tgid;

#ifdef CONFIG_CC_STACKPROTECTOR
	/* Canary value for the -fstack-protector gcc feature */
	unsigned long stack_canary;
#endif
	/*
	 * pointers to (original) parent process, youngest child, younger sibling,
	 * older sibling, respectively.  (p->father can be replaced with
	 * p->real_parent->pid)
	 */
	struct task_struct __rcu *real_parent; /* real parent process */
	struct task_struct __rcu *parent; /* recipient of SIGCHLD, wait4() reports */
	/*
	 * children/sibling forms the list of my natural children
	 */
	struct list_head children;	/* list of my children */
	struct list_head sibling;	/* linkage in my parent's children list */
	struct task_struct *group_leader;	/* threadgroup leader */

	/*
	 * ptraced is the list of tasks this task is using ptrace on.
	 * This includes both natural children and PTRACE_ATTACH targets.
	 * p->ptrace_entry is p's link on the p->parent->ptraced list.
	 */
	struct list_head ptraced;
	struct list_head ptrace_entry;

	/* PID/PID hash table linkage. */
	struct pid_link pids[PIDTYPE_MAX];
	struct list_head thread_group;

	struct completion *vfork_done;		/* for vfork() */
	int __user *set_child_tid;		/* CLONE_CHILD_SETTID */
	int __user *clear_child_tid;		/* CLONE_CHILD_CLEARTID */

	cputime_t utime, stime, utimescaled, stimescaled;
	cputime_t gtime;
#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
	struct cputime prev_cputime;
#endif
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
	seqlock_t vtime_seqlock;
	unsigned long long vtime_snap;
	enum {
		VTIME_SLEEPING = 0,
		VTIME_USER,
		VTIME_SYS,
	} vtime_snap_whence;
#endif
	unsigned long nvcsw, nivcsw; /* context switch counts */
	struct timespec start_time; 		/* monotonic time */
	struct timespec real_start_time;	/* boot based time */
/* mm fault and swap info: this can arguably be seen as either mm-specific or thread-specific */
	unsigned long min_flt, maj_flt;

	struct task_cputime cputime_expires;
	struct list_head cpu_timers[3];

/* process credentials */
	const struct cred __rcu *real_cred; /* objective and real subjective task
					 * credentials (COW) */
	const struct cred __rcu *cred;	/* effective (overridable) subjective task
					 * credentials (COW) */
	char comm[TASK_COMM_LEN]; /* executable name excluding path
				     - access with [gs]et_task_comm (which lock
				       it with task_lock())
				     - initialized normally by setup_new_exec */
/* file system info */
	int link_count, total_link_count;
#ifdef CONFIG_SYSVIPC
/* ipc stuff */
	struct sysv_sem sysvsem;
#endif
#ifdef CONFIG_DETECT_HUNG_TASK
/* hung task detection */
	unsigned long last_switch_count;
#endif
/* CPU-specific state of this task */
	struct thread_struct thread;
/* filesystem information */
	struct fs_struct *fs;
/* open file information */
	struct files_struct *files;
/* namespaces */
	struct nsproxy *nsproxy;
/* signal handlers */
	struct signal_struct *signal;
	struct sighand_struct *sighand;

	sigset_t blocked, real_blocked;
	sigset_t saved_sigmask;	/* restored if set_restore_sigmask() was used */
	struct sigpending pending;

	unsigned long sas_ss_sp;
	size_t sas_ss_size;
	int (*notifier)(void *priv);
	void *notifier_data;
	sigset_t *notifier_mask;
	struct callback_head *task_works;

	struct audit_context *audit_context;
#ifdef CONFIG_AUDITSYSCALL
	kuid_t loginuid;
	unsigned int sessionid;
#endif
	struct seccomp seccomp;

/* Thread group tracking */
   	u32 parent_exec_id;
   	u32 self_exec_id;
/* Protection of (de-)allocation: mm, files, fs, tty, keyrings, mems_allowed,
 * mempolicy */
	spinlock_t alloc_lock;

	/* Protection of the PI data structures: */
	raw_spinlock_t pi_lock;

#ifdef CONFIG_RT_MUTEXES
	/* PI waiters blocked on a rt_mutex held by this task */
	struct plist_head pi_waiters;
	/* Deadlock detection and priority inheritance handling */
	struct rt_mutex_waiter *pi_blocked_on;
#endif

#ifdef CONFIG_DEBUG_MUTEXES
	/* mutex deadlock detection */
	struct mutex_waiter *blocked_on;
#endif
#ifdef CONFIG_TRACE_IRQFLAGS
	unsigned int irq_events;
	unsigned long hardirq_enable_ip;
	unsigned long hardirq_disable_ip;
	unsigned int hardirq_enable_event;
	unsigned int hardirq_disable_event;
	int hardirqs_enabled;
	int hardirq_context;
	unsigned long softirq_disable_ip;
	unsigned long softirq_enable_ip;
	unsigned int softirq_disable_event;
	unsigned int softirq_enable_event;
	int softirqs_enabled;
	int softirq_context;
#endif
#ifdef CONFIG_LOCKDEP
# define MAX_LOCK_DEPTH 48UL
	u64 curr_chain_key;
	int lockdep_depth;
	unsigned int lockdep_recursion;
	struct held_lock held_locks[MAX_LOCK_DEPTH];
	gfp_t lockdep_reclaim_gfp;
#endif

/* journalling filesystem info */
	void *journal_info;

/* stacked block device info */
	struct bio_list *bio_list;

#ifdef CONFIG_BLOCK
/* stack plugging */
	struct blk_plug *plug;
#endif

/* VM state */
	struct reclaim_state *reclaim_state;

	struct backing_dev_info *backing_dev_info;

	struct io_context *io_context;

	unsigned long ptrace_message;
	siginfo_t *last_siginfo; /* For ptrace use.  */
	struct task_io_accounting ioac;
#if defined(CONFIG_TASK_XACCT)
	u64 acct_rss_mem1;	/* accumulated rss usage */
	u64 acct_vm_mem1;	/* accumulated virtual memory usage */
	cputime_t acct_timexpd;	/* stime + utime since last update */
#endif
#ifdef CONFIG_CPUSETS
	nodemask_t mems_allowed;	/* Protected by alloc_lock */
	seqcount_t mems_allowed_seq;	/* Sequence no to catch updates */
	int cpuset_mem_spread_rotor;
	int cpuset_slab_spread_rotor;
#endif
#ifdef CONFIG_CGROUPS
	/* Control Group info protected by css_set_lock */
	struct css_set __rcu *cgroups;
	/* cg_list protected by css_set_lock and tsk->alloc_lock */
	struct list_head cg_list;
#endif
#ifdef CONFIG_FUTEX
	struct robust_list_head __user *robust_list;
#ifdef CONFIG_COMPAT
	struct compat_robust_list_head __user *compat_robust_list;
#endif
	struct list_head pi_state_list;
	struct futex_pi_state *pi_state_cache;
#endif
#ifdef CONFIG_PERF_EVENTS
	struct perf_event_context *perf_event_ctxp[perf_nr_task_contexts];
	struct mutex perf_event_mutex;
	struct list_head perf_event_list;
#endif
#ifdef CONFIG_NUMA
	struct mempolicy *mempolicy;	/* Protected by alloc_lock */
	short il_next;
	short pref_node_fork;
#endif
#ifdef CONFIG_NUMA_BALANCING
	int numa_scan_seq;
	unsigned int numa_scan_period;
	unsigned int numa_scan_period_max;
	int numa_preferred_nid;
	int numa_migrate_deferred;
	unsigned long numa_migrate_retry;
	u64 node_stamp;			/* migration stamp  */
	struct callback_head numa_work;

	struct list_head numa_entry;
	struct numa_group *numa_group;

	/*
	 * Exponential decaying average of faults on a per-node basis.
	 * Scheduling placement decisions are made based on these counts.
	 * The values remain static for the duration of a PTE scan
	 */
	unsigned long *numa_faults;
	unsigned long total_numa_faults;

	/*
	 * numa_faults_buffer records faults per node during the current
	 * scan window. When the scan completes, the counts in numa_faults
	 * decay and these values are copied.
	 */
	unsigned long *numa_faults_buffer;

	/*
	 * numa_faults_locality tracks if faults recorded during the last
	 * scan window were remote/local. The task scan period is adapted
	 * based on the locality of the faults with different weights
	 * depending on whether they were shared or private faults
	 */
	unsigned long numa_faults_locality[2];

	unsigned long numa_pages_migrated;
#endif /* CONFIG_NUMA_BALANCING */

	struct rcu_head rcu;

	/*
	 * cache last used pipe for splice
	 */
	struct pipe_inode_info *splice_pipe;

	struct page_frag task_frag;

#ifdef	CONFIG_TASK_DELAY_ACCT
	struct task_delay_info *delays;
#endif
#ifdef CONFIG_FAULT_INJECTION
	int make_it_fail;
#endif
	/*
	 * when (nr_dirtied >= nr_dirtied_pause), it's time to call
	 * balance_dirty_pages() for some dirty throttling pause
	 */
	int nr_dirtied;
	int nr_dirtied_pause;
	unsigned long dirty_paused_when; /* start of a write-and-pause period */

#ifdef CONFIG_LATENCYTOP
	int latency_record_count;
	struct latency_record latency_record[LT_SAVECOUNT];
#endif
	/*
	 * time slack values; these are used to round up poll() and
	 * select() etc timeout values. These are in nanoseconds.
	 */
	unsigned long timer_slack_ns;
	unsigned long default_timer_slack_ns;

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	/* Index of current stored address in ret_stack */
	int curr_ret_stack;
	/* Stack of return addresses for return function tracing */
	struct ftrace_ret_stack	*ret_stack;
	/* time stamp for last schedule */
	unsigned long long ftrace_timestamp;
	/*
	 * Number of functions that haven't been traced
	 * because of depth overrun.
	 */
	atomic_t trace_overrun;
	/* Pause for the tracing */
	atomic_t tracing_graph_pause;
#endif
#ifdef CONFIG_TRACING
	/* state flags for use by tracers */
	unsigned long trace;
	/* bitmask and counter of trace recursion */
	unsigned long trace_recursion;
#endif /* CONFIG_TRACING */
#ifdef CONFIG_MEMCG /* memcg uses this to do batch job */
	struct memcg_batch_info {
		int do_batch;	/* incremented when batch uncharge started */
		struct mem_cgroup *memcg; /* target memcg of uncharge */
		unsigned long nr_pages;	/* uncharged usage */
		unsigned long memsw_nr_pages; /* uncharged mem+swap usage */
	} memcg_batch;
	unsigned int memcg_kmem_skip_account;
	struct memcg_oom_info {
		struct mem_cgroup *memcg;
		gfp_t gfp_mask;
		int order;
		unsigned int may_oom:1;
	} memcg_oom;
#endif
#ifdef CONFIG_UPROBES
	struct uprobe_task *utask;
#endif
#if defined(CONFIG_BCACHE) || defined(CONFIG_BCACHE_MODULE)
	unsigned int	sequential_io;
	unsigned int	sequential_io_avg;
#endif
};

/* Future-safe accessor for struct task_struct's cpus_allowed. */
#define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)

#define TNF_MIGRATED	0x01
#define TNF_NO_GROUP	0x02
#define TNF_SHARED	0x04
#define TNF_FAULT_LOCAL	0x08

#ifdef CONFIG_NUMA_BALANCING
extern void task_numa_fault(int last_node, int node, int pages, int flags);
extern pid_t task_numa_group_id(struct task_struct *p);
extern void set_numabalancing_state(bool enabled);
extern void task_numa_free(struct task_struct *p);

extern unsigned int sysctl_numa_balancing_migrate_deferred;
#else
static inline void task_numa_fault(int last_node, int node, int pages,
				   int flags)
{
}
static inline pid_t task_numa_group_id(struct task_struct *p)
{
	return 0;
}
static inline void set_numabalancing_state(bool enabled)
{
}
static inline void task_numa_free(struct task_struct *p)
{
}
#endif

static inline struct pid *task_pid(struct task_struct *task)
{
	return task->pids[PIDTYPE_PID].pid;
}

static inline struct pid *task_tgid(struct task_struct *task)
{
	return task->group_leader->pids[PIDTYPE_PID].pid;
}

/*
 * Without tasklist or rcu lock it is not safe to dereference
 * the result of task_pgrp/task_session even if task == current,
 * we can race with another thread doing sys_setsid/sys_setpgid.
 */
static inline struct pid *task_pgrp(struct task_struct *task)
{
	return task->group_leader->pids[PIDTYPE_PGID].pid;
}

static inline struct pid *task_session(struct task_struct *task)
{
	return task->group_leader->pids[PIDTYPE_SID].pid;
}

struct pid_namespace;

/*
 * the helpers to get the task's different pids as they are seen
 * from various namespaces
 *
 * task_xid_nr()     : global id, i.e. the id seen from the init namespace;
 * task_xid_vnr()    : virtual id, i.e. the id seen from the pid namespace of
 *                     current.
 * task_xid_nr_ns()  : id seen from the ns specified;
 *
 * set_task_vxid()   : assigns a virtual id to a task;
 *
 * see also pid_nr() etc in include/linux/pid.h
 */
pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type,
			struct pid_namespace *ns);

static inline pid_t task_pid_nr(struct task_struct *tsk)
{
	return tsk->pid;
}

static inline pid_t task_pid_nr_ns(struct task_struct *tsk,
					struct pid_namespace *ns)
{
	return __task_pid_nr_ns(tsk, PIDTYPE_PID, ns);
}

static inline pid_t task_pid_vnr(struct task_struct *tsk)
{
	return __task_pid_nr_ns(tsk, PIDTYPE_PID, NULL);
}


static inline pid_t task_tgid_nr(struct task_struct *tsk)
{
	return tsk->tgid;
}

pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns);

static inline pid_t task_tgid_vnr(struct task_struct *tsk)
{
	return pid_vnr(task_tgid(tsk));
}


static inline pid_t task_pgrp_nr_ns(struct task_struct *tsk,
					struct pid_namespace *ns)
{
	return __task_pid_nr_ns(tsk, PIDTYPE_PGID, ns);
}

static inline pid_t task_pgrp_vnr(struct task_struct *tsk)
{
	return __task_pid_nr_ns(tsk, PIDTYPE_PGID, NULL);
}


static inline pid_t task_session_nr_ns(struct task_struct *tsk,
					struct pid_namespace *ns)
{
	return __task_pid_nr_ns(tsk, PIDTYPE_SID, ns);
}

static inline pid_t task_session_vnr(struct task_struct *tsk)
{
	return __task_pid_nr_ns(tsk, PIDTYPE_SID, NULL);
}

/* obsolete, do not use */
static inline pid_t task_pgrp_nr(struct task_struct *tsk)
{
	return task_pgrp_nr_ns(tsk, &init_pid_ns);
}
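
/*
 * Illustrative example (not part of this header): for a task living in
 * a child pid namespace, the global and virtual ids differ:
 *
 *	pid_t global = task_pid_nr(tsk);
 *	pid_t local  = task_pid_vnr(tsk);
 *
 * If tsk is the init task of its namespace, local is 1 while global is
 * whatever id the initial namespace assigned; task_pid_nr_ns(tsk, ns)
 * answers the same question for any other namespace that can see tsk.
 */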

/**
 * pid_alive - check that a task structure is not stale
 * @p: Task structure to be checked.
 *
 * Test if a process is not yet dead (at most zombie state)
 * If pid_alive fails, then pointers within the task structure
 * can be stale and must not be dereferenced.
1599 1600
 *
 * Return: 1 if the process is alive. 0 otherwise.
L
Linus Torvalds 已提交
1601
 */
A
Alexey Dobriyan 已提交
1602
static inline int pid_alive(struct task_struct *p)
L
Linus Torvalds 已提交
1603
{
1604
	return p->pids[PIDTYPE_PID].pid != NULL;
L
Linus Torvalds 已提交
1605 1606
}

/**
 * is_global_init - check if a task structure is init
 * @tsk: Task structure to be checked.
 *
 * Check if a task structure is the first user space task the kernel created.
 *
 * Return: 1 if the task structure is init. 0 otherwise.
 */
static inline int is_global_init(struct task_struct *tsk)
{
	return tsk->pid == 1;
}

extern struct pid *cad_pid;

extern void free_task(struct task_struct *tsk);
#define get_task_struct(tsk) do { atomic_inc(&(tsk)->usage); } while(0)

extern void __put_task_struct(struct task_struct *t);

static inline void put_task_struct(struct task_struct *t)
{
	if (atomic_dec_and_test(&t->usage))
		__put_task_struct(t);
}

#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
extern void task_cputime(struct task_struct *t,
			 cputime_t *utime, cputime_t *stime);
extern void task_cputime_scaled(struct task_struct *t,
				cputime_t *utimescaled, cputime_t *stimescaled);
extern cputime_t task_gtime(struct task_struct *t);
#else
static inline void task_cputime(struct task_struct *t,
				cputime_t *utime, cputime_t *stime)
{
	if (utime)
		*utime = t->utime;
	if (stime)
		*stime = t->stime;
}

static inline void task_cputime_scaled(struct task_struct *t,
				       cputime_t *utimescaled,
				       cputime_t *stimescaled)
{
	if (utimescaled)
		*utimescaled = t->utimescaled;
	if (stimescaled)
		*stimescaled = t->stimescaled;
}

static inline cputime_t task_gtime(struct task_struct *t)
{
	return t->gtime;
}
#endif
extern void task_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st);
extern void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st);
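
/*
 * Illustrative usage sketch (not part of this header): read a task's CPU
 * times through the accessors rather than touching t->utime/t->stime
 * directly, so the CONFIG_VIRT_CPU_ACCOUNTING_GEN case is handled
 * transparently.
 *
 *	cputime_t utime, stime;
 *
 *	task_cputime(current, &utime, &stime);
 */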

/*
 * Per process flags
 */
#define PF_EXITING	0x00000004	/* getting shut down */
#define PF_EXITPIDONE	0x00000008	/* pi exit done on shut down */
#define PF_VCPU		0x00000010	/* I'm a virtual CPU */
#define PF_WQ_WORKER	0x00000020	/* I'm a workqueue worker */
#define PF_FORKNOEXEC	0x00000040	/* forked but didn't exec */
#define PF_MCE_PROCESS  0x00000080      /* process policy on mce errors */
#define PF_SUPERPRIV	0x00000100	/* used super-user privileges */
#define PF_DUMPCORE	0x00000200	/* dumped core */
#define PF_SIGNALED	0x00000400	/* killed by a signal */
#define PF_MEMALLOC	0x00000800	/* Allocating memory */
#define PF_NPROC_EXCEEDED 0x00001000	/* set_user noticed that RLIMIT_NPROC was exceeded */
#define PF_USED_MATH	0x00002000	/* if unset the fpu must be initialized before use */
#define PF_USED_ASYNC	0x00004000	/* used async_schedule*(), used by module init */
#define PF_NOFREEZE	0x00008000	/* this thread should not be frozen */
#define PF_FROZEN	0x00010000	/* frozen for system suspend */
#define PF_FSTRANS	0x00020000	/* inside a filesystem transaction */
#define PF_KSWAPD	0x00040000	/* I am kswapd */
#define PF_MEMALLOC_NOIO 0x00080000	/* Allocating memory without IO involved */
#define PF_LESS_THROTTLE 0x00100000	/* Throttle me less: I clean memory */
#define PF_KTHREAD	0x00200000	/* I am a kernel thread */
#define PF_RANDOMIZE	0x00400000	/* randomize virtual address space */
#define PF_SWAPWRITE	0x00800000	/* Allowed to write to swap */
#define PF_SPREAD_PAGE	0x01000000	/* Spread page cache over cpuset */
#define PF_SPREAD_SLAB	0x02000000	/* Spread some slab caches over cpuset */
#define PF_NO_SETAFFINITY 0x04000000	/* Userland is not allowed to meddle with cpus_allowed */
#define PF_MCE_EARLY    0x08000000      /* Early kill for mce process policy */
#define PF_MEMPOLICY	0x10000000	/* Non-default NUMA mempolicy */
#define PF_MUTEX_TESTER	0x20000000	/* Thread belongs to the rt mutex tester */
#define PF_FREEZER_SKIP	0x40000000	/* Freezer should not count it as freezable */
#define PF_SUSPEND_TASK 0x80000000      /* this thread called freeze_processes and should not be frozen */

/*
 * Only the _current_ task can read/write to tsk->flags, but other
 * tasks can access tsk->flags in readonly mode for example
 * with tsk_used_math (like during threaded core dumping).
 * There is however an exception to this rule during ptrace
 * or during fork: the ptracer task is allowed to write to the
 * child->flags of its traced child (same goes for fork, the parent
 * can write to the child->flags), because we're guaranteed the
 * child is not running and in turn not changing child->flags
 * at the same time the parent does it.
 */
#define clear_stopped_child_used_math(child) do { (child)->flags &= ~PF_USED_MATH; } while (0)
#define set_stopped_child_used_math(child) do { (child)->flags |= PF_USED_MATH; } while (0)
#define clear_used_math() clear_stopped_child_used_math(current)
#define set_used_math() set_stopped_child_used_math(current)
#define conditional_stopped_child_used_math(condition, child) \
	do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= (condition) ? PF_USED_MATH : 0; } while (0)
#define conditional_used_math(condition) \
	conditional_stopped_child_used_math(condition, current)
#define copy_to_stopped_child_used_math(child) \
	do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= current->flags & PF_USED_MATH; } while (0)
/* NOTE: this will return 0 or PF_USED_MATH, it will never return 1 */
#define tsk_used_math(p) ((p)->flags & PF_USED_MATH)
#define used_math() tsk_used_math(current)
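
/*
 * Illustrative sketch (not part of this header): per the NOTE above,
 * tsk_used_math() evaluates to 0 or PF_USED_MATH, never 1, so test it
 * as a boolean and never compare it against 1.
 *
 *	if (tsk_used_math(p))		// correct
 *		...
 *	if (tsk_used_math(p) == 1)	// wrong: never true
 *		...
 */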

/* __GFP_IO isn't allowed if PF_MEMALLOC_NOIO is set in current->flags */
static inline gfp_t memalloc_noio_flags(gfp_t flags)
{
	if (unlikely(current->flags & PF_MEMALLOC_NOIO))
		flags &= ~__GFP_IO;
	return flags;
}

static inline unsigned int memalloc_noio_save(void)
{
	unsigned int flags = current->flags & PF_MEMALLOC_NOIO;
	current->flags |= PF_MEMALLOC_NOIO;
	return flags;
}

static inline void memalloc_noio_restore(unsigned int flags)
{
	current->flags = (current->flags & ~PF_MEMALLOC_NOIO) | flags;
}
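
/*
 * Illustrative usage sketch (not part of this header; my_dev_resume is
 * hypothetical): bracket a region that must not recurse into I/O, e.g.
 * a block device resume path. While PF_MEMALLOC_NOIO is set, the
 * allocator clears __GFP_IO (see memalloc_noio_flags() above).
 *
 *	static int my_dev_resume(struct device *dev)
 *	{
 *		unsigned int noio_flags = memalloc_noio_save();
 *		void *buf = kmalloc(4096, GFP_KERNEL);
 *		...
 *		memalloc_noio_restore(noio_flags);
 *		return buf ? 0 : -ENOMEM;
 *	}
 */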

/*
 * task->jobctl flags
 */
#define JOBCTL_STOP_SIGMASK	0xffff	/* signr of the last group stop */

#define JOBCTL_STOP_DEQUEUED_BIT 16	/* stop signal dequeued */
#define JOBCTL_STOP_PENDING_BIT	17	/* task should stop for group stop */
#define JOBCTL_STOP_CONSUME_BIT	18	/* consume group stop count */
#define JOBCTL_TRAP_STOP_BIT	19	/* trap for STOP */
#define JOBCTL_TRAP_NOTIFY_BIT	20	/* trap for NOTIFY */
#define JOBCTL_TRAPPING_BIT	21	/* switching to TRACED */
#define JOBCTL_LISTENING_BIT	22	/* ptracer is listening for events */

#define JOBCTL_STOP_DEQUEUED	(1 << JOBCTL_STOP_DEQUEUED_BIT)
#define JOBCTL_STOP_PENDING	(1 << JOBCTL_STOP_PENDING_BIT)
#define JOBCTL_STOP_CONSUME	(1 << JOBCTL_STOP_CONSUME_BIT)
#define JOBCTL_TRAP_STOP	(1 << JOBCTL_TRAP_STOP_BIT)
#define JOBCTL_TRAP_NOTIFY	(1 << JOBCTL_TRAP_NOTIFY_BIT)
#define JOBCTL_TRAPPING		(1 << JOBCTL_TRAPPING_BIT)
#define JOBCTL_LISTENING	(1 << JOBCTL_LISTENING_BIT)

#define JOBCTL_TRAP_MASK	(JOBCTL_TRAP_STOP | JOBCTL_TRAP_NOTIFY)
#define JOBCTL_PENDING_MASK	(JOBCTL_STOP_PENDING | JOBCTL_TRAP_MASK)

extern bool task_set_jobctl_pending(struct task_struct *task,
				    unsigned int mask);
extern void task_clear_jobctl_trapping(struct task_struct *task);
extern void task_clear_jobctl_pending(struct task_struct *task,
				      unsigned int mask);

#ifdef CONFIG_PREEMPT_RCU

#define RCU_READ_UNLOCK_BLOCKED (1 << 0) /* blocked while in RCU read-side. */
#define RCU_READ_UNLOCK_NEED_QS (1 << 1) /* RCU core needs CPU response. */

static inline void rcu_copy_process(struct task_struct *p)
{
	p->rcu_read_lock_nesting = 0;
	p->rcu_read_unlock_special = 0;
#ifdef CONFIG_TREE_PREEMPT_RCU
	p->rcu_blocked_node = NULL;
#endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */
#ifdef CONFIG_RCU_BOOST
	p->rcu_boost_mutex = NULL;
#endif /* #ifdef CONFIG_RCU_BOOST */
	INIT_LIST_HEAD(&p->rcu_node_entry);
}

#else

static inline void rcu_copy_process(struct task_struct *p)
{
}

#endif

static inline void tsk_restore_flags(struct task_struct *task,
				unsigned long orig_flags, unsigned long flags)
{
	task->flags &= ~flags;
	task->flags |= orig_flags & flags;
}

#ifdef CONFIG_SMP
extern void do_set_cpus_allowed(struct task_struct *p,
			       const struct cpumask *new_mask);

extern int set_cpus_allowed_ptr(struct task_struct *p,
				const struct cpumask *new_mask);
#else
static inline void do_set_cpus_allowed(struct task_struct *p,
				      const struct cpumask *new_mask)
{
}
static inline int set_cpus_allowed_ptr(struct task_struct *p,
				       const struct cpumask *new_mask)
{
	if (!cpumask_test_cpu(0, new_mask))
		return -EINVAL;
	return 0;
}
#endif

#ifdef CONFIG_NO_HZ_COMMON
void calc_load_enter_idle(void);
void calc_load_exit_idle(void);
#else
static inline void calc_load_enter_idle(void) { }
static inline void calc_load_exit_idle(void) { }
#endif /* CONFIG_NO_HZ_COMMON */

#ifndef CONFIG_CPUMASK_OFFSTACK
static inline int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
{
	return set_cpus_allowed_ptr(p, &new_mask);
}
#endif

/*
 * Do not use outside of architecture code that knows its limitations.
 *
 * sched_clock() has no promise of monotonicity or bounded drift between
 * CPUs; its use (which you should avoid) requires disabling IRQs.
 *
 * Please use one of the three interfaces below.
 */
extern unsigned long long notrace sched_clock(void);
/*
 * See the comment in kernel/sched/clock.c
 */
extern u64 cpu_clock(int cpu);
extern u64 local_clock(void);
extern u64 sched_clock_cpu(int cpu);

extern void sched_clock_init(void);

#ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
static inline void sched_clock_tick(void)
{
}

static inline void sched_clock_idle_sleep_event(void)
{
}

static inline void sched_clock_idle_wakeup_event(u64 delta_ns)
{
}
#else
/*
 * Architectures can set this to 1 if they have specified
 * CONFIG_HAVE_UNSTABLE_SCHED_CLOCK in their arch Kconfig,
 * but then during bootup it turns out that sched_clock()
 * is reliable after all:
 */
extern int sched_clock_stable;

extern void sched_clock_tick(void);
extern void sched_clock_idle_sleep_event(void);
extern void sched_clock_idle_wakeup_event(u64 delta_ns);
#endif

#ifdef CONFIG_IRQ_TIME_ACCOUNTING
/*
 * An interface for runtime opt-in of IRQ time accounting, based on
 * sched_clock. The reason for the explicit opt-in is to avoid a
 * performance penalty on systems with slow sched_clocks.
 */
extern void enable_sched_clock_irqtime(void);
extern void disable_sched_clock_irqtime(void);
#else
static inline void enable_sched_clock_irqtime(void) {}
static inline void disable_sched_clock_irqtime(void) {}
#endif

extern unsigned long long
task_sched_runtime(struct task_struct *task);

/* sched_exec is called by processes performing an exec */
#ifdef CONFIG_SMP
extern void sched_exec(void);
#else
#define sched_exec()   {}
#endif

#ifdef CONFIG_HOTPLUG_CPU
extern void idle_task_exit(void);
#else
static inline void idle_task_exit(void) {}
#endif

#if defined(CONFIG_NO_HZ_COMMON) && defined(CONFIG_SMP)
extern void wake_up_nohz_cpu(int cpu);
#else
static inline void wake_up_nohz_cpu(int cpu) { }
#endif

#ifdef CONFIG_NO_HZ_FULL
extern bool sched_can_stop_tick(void);
extern u64 scheduler_tick_max_deferment(void);
#else
static inline bool sched_can_stop_tick(void) { return false; }
#endif

#ifdef CONFIG_SCHED_AUTOGROUP
extern void sched_autogroup_create_attach(struct task_struct *p);
extern void sched_autogroup_detach(struct task_struct *p);
extern void sched_autogroup_fork(struct signal_struct *sig);
extern void sched_autogroup_exit(struct signal_struct *sig);
#ifdef CONFIG_PROC_FS
extern void proc_sched_autogroup_show_task(struct task_struct *p, struct seq_file *m);
extern int proc_sched_autogroup_set_nice(struct task_struct *p, int nice);
#endif
#else
static inline void sched_autogroup_create_attach(struct task_struct *p) { }
static inline void sched_autogroup_detach(struct task_struct *p) { }
static inline void sched_autogroup_fork(struct signal_struct *sig) { }
static inline void sched_autogroup_exit(struct signal_struct *sig) { }
#endif
extern bool yield_to(struct task_struct *p, bool preempt);
extern void set_user_nice(struct task_struct *p, long nice);
extern int task_prio(const struct task_struct *p);
extern int task_nice(const struct task_struct *p);
extern int can_nice(const struct task_struct *p, const int nice);
extern int task_curr(const struct task_struct *p);
extern int idle_cpu(int cpu);
extern int sched_setscheduler(struct task_struct *, int,
			      const struct sched_param *);
extern int sched_setscheduler_nocheck(struct task_struct *, int,
				      const struct sched_param *);
extern struct task_struct *idle_task(int cpu);
/**
 * is_idle_task - is the specified task an idle task?
 * @p: the task in question.
 *
 * Return: 1 if @p is an idle task. 0 otherwise.
 */
static inline bool is_idle_task(const struct task_struct *p)
{
	return p->pid == 0;
}
extern struct task_struct *curr_task(int cpu);
extern void set_curr_task(int cpu, struct task_struct *p);

void yield(void);

/*
 * The default (Linux) execution domain.
 */
extern struct exec_domain	default_exec_domain;

union thread_union {
	struct thread_info thread_info;
	unsigned long stack[THREAD_SIZE/sizeof(long)];
};

#ifndef __HAVE_ARCH_KSTACK_END
static inline int kstack_end(void *addr)
{
	/* Reliable end of stack detection:
	 * Some APM bios versions misalign the stack
	 */
	return !(((unsigned long)addr+sizeof(void*)-1) & (THREAD_SIZE-sizeof(void*)));
}
#endif

extern union thread_union init_thread_union;
extern struct task_struct init_task;

extern struct   mm_struct init_mm;

extern struct pid_namespace init_pid_ns;

/*
 * find a task by one of its numerical ids
 *
 * find_task_by_pid_ns():
 *      finds a task by its pid in the specified namespace
 * find_task_by_vpid():
 *      finds a task by its virtual pid
 *
 * see also find_vpid() etc in include/linux/pid.h
 */

extern struct task_struct *find_task_by_vpid(pid_t nr);
extern struct task_struct *find_task_by_pid_ns(pid_t nr,
		struct pid_namespace *ns);

/* per-UID process charging. */
extern struct user_struct * alloc_uid(kuid_t);
static inline struct user_struct *get_uid(struct user_struct *u)
{
	atomic_inc(&u->__count);
	return u;
}
extern void free_uid(struct user_struct *);

#include <asm/current.h>

extern void xtime_update(unsigned long ticks);

extern int wake_up_state(struct task_struct *tsk, unsigned int state);
extern int wake_up_process(struct task_struct *tsk);
extern void wake_up_new_task(struct task_struct *tsk);
#ifdef CONFIG_SMP
 extern void kick_process(struct task_struct *tsk);
#else
 static inline void kick_process(struct task_struct *tsk) { }
#endif
extern void sched_fork(unsigned long clone_flags, struct task_struct *p);
extern void sched_dead(struct task_struct *p);

extern void proc_caches_init(void);
extern void flush_signals(struct task_struct *);
extern void __flush_signals(struct task_struct *);
extern void ignore_signals(struct task_struct *);
extern void flush_signal_handlers(struct task_struct *, int force_default);
extern int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info);

static inline int dequeue_signal_lock(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&tsk->sighand->siglock, flags);
	ret = dequeue_signal(tsk, mask, info);
	spin_unlock_irqrestore(&tsk->sighand->siglock, flags);

	return ret;
}

extern void block_all_signals(int (*notifier)(void *priv), void *priv,
			      sigset_t *mask);
extern void unblock_all_signals(void);
extern void release_task(struct task_struct * p);
extern int send_sig_info(int, struct siginfo *, struct task_struct *);
extern int force_sigsegv(int, struct task_struct *);
extern int force_sig_info(int, struct siginfo *, struct task_struct *);
extern int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp);
extern int kill_pid_info(int sig, struct siginfo *info, struct pid *pid);
extern int kill_pid_info_as_cred(int, struct siginfo *, struct pid *,
				const struct cred *, u32);
extern int kill_pgrp(struct pid *pid, int sig, int priv);
extern int kill_pid(struct pid *pid, int sig, int priv);
extern int kill_proc_info(int, struct siginfo *, pid_t);
extern __must_check bool do_notify_parent(struct task_struct *, int);
extern void __wake_up_parent(struct task_struct *p, struct task_struct *parent);
extern void force_sig(int, struct task_struct *);
extern int send_sig(int, struct task_struct *, int);
extern int zap_other_threads(struct task_struct *p);
extern struct sigqueue *sigqueue_alloc(void);
extern void sigqueue_free(struct sigqueue *);
extern int send_sigqueue(struct sigqueue *,  struct task_struct *, int group);
extern int do_sigaction(int, struct k_sigaction *, struct k_sigaction *);

static inline void restore_saved_sigmask(void)
{
	if (test_and_clear_restore_sigmask())
		__set_current_blocked(&current->saved_sigmask);
}

static inline sigset_t *sigmask_to_save(void)
{
	sigset_t *res = &current->blocked;
	if (unlikely(test_restore_sigmask()))
		res = &current->saved_sigmask;
	return res;
}

static inline int kill_cad_pid(int sig, int priv)
{
	return kill_pid(cad_pid, sig, priv);
}

/* These can be the second arg to send_sig_info/send_group_sig_info.  */
#define SEND_SIG_NOINFO ((struct siginfo *) 0)
#define SEND_SIG_PRIV	((struct siginfo *) 1)
#define SEND_SIG_FORCED	((struct siginfo *) 2)

/*
 * True if we are on the alternate signal stack.
 */
static inline int on_sig_stack(unsigned long sp)
{
#ifdef CONFIG_STACK_GROWSUP
	return sp >= current->sas_ss_sp &&
		sp - current->sas_ss_sp < current->sas_ss_size;
#else
	return sp > current->sas_ss_sp &&
		sp - current->sas_ss_sp <= current->sas_ss_size;
#endif
}

static inline int sas_ss_flags(unsigned long sp)
{
	return (current->sas_ss_size == 0 ? SS_DISABLE
		: on_sig_stack(sp) ? SS_ONSTACK : 0);
}

static inline unsigned long sigsp(unsigned long sp, struct ksignal *ksig)
{
	if (unlikely((ksig->ka.sa.sa_flags & SA_ONSTACK)) && ! sas_ss_flags(sp))
#ifdef CONFIG_STACK_GROWSUP
		return current->sas_ss_sp;
#else
		return current->sas_ss_sp + current->sas_ss_size;
#endif
	return sp;
}

/*
 * Routines for handling mm_structs
 */
extern struct mm_struct * mm_alloc(void);

/* mmdrop drops the mm and the page tables */
extern void __mmdrop(struct mm_struct *);
static inline void mmdrop(struct mm_struct * mm)
{
	if (unlikely(atomic_dec_and_test(&mm->mm_count)))
		__mmdrop(mm);
}

/* mmput gets rid of the mappings and all user-space */
extern void mmput(struct mm_struct *);
/* Grab a reference to a task's mm, if it is not already going away */
extern struct mm_struct *get_task_mm(struct task_struct *task);
/*
 * Grab a reference to a task's mm, if it is not already going away
 * and ptrace_may_access with the mode parameter passed to it
 * succeeds.
 */
extern struct mm_struct *mm_access(struct task_struct *task, unsigned int mode);
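
/*
 * Illustrative usage sketch (not part of this header): the usual pattern
 * for inspecting another task's mm. get_task_mm() takes a reference that
 * must be dropped with mmput().
 *
 *	struct mm_struct *mm = get_task_mm(tsk);
 *
 *	if (mm) {
 *		... use mm ...
 *		mmput(mm);
 *	}
 */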
/* Remove the current task's stale references to the old mm_struct */
extern void mm_release(struct task_struct *, struct mm_struct *);
/* Allocate a new mm structure and copy contents from tsk->mm */
extern struct mm_struct *dup_mm(struct task_struct *tsk);

extern int copy_thread(unsigned long, unsigned long, unsigned long,
			struct task_struct *);
extern void flush_thread(void);
extern void exit_thread(void);

extern void exit_files(struct task_struct *);
extern void __cleanup_sighand(struct sighand_struct *);

extern void exit_itimers(struct signal_struct *);
extern void flush_itimer_signals(void);

extern void do_group_exit(int);

extern int allow_signal(int);
extern int disallow_signal(int);

extern int do_execve(const char *,
		     const char __user * const __user *,
		     const char __user * const __user *);
extern long do_fork(unsigned long, unsigned long, unsigned long, int __user *, int __user *);
struct task_struct *fork_idle(int);
extern pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);

extern void set_task_comm(struct task_struct *tsk, char *from);
extern char *get_task_comm(char *to, struct task_struct *tsk);

#ifdef CONFIG_SMP
void scheduler_ipi(void);
extern unsigned long wait_task_inactive(struct task_struct *, long match_state);
#else
static inline void scheduler_ipi(void) { }
static inline unsigned long wait_task_inactive(struct task_struct *p,
					       long match_state)
{
	return 1;
}
#endif

#define next_task(p) \
	list_entry_rcu((p)->tasks.next, struct task_struct, tasks)

#define for_each_process(p) \
	for (p = &init_task ; (p = next_task(p)) != &init_task ; )

extern bool current_is_single_threaded(void);

/*
 * Careful: do_each_thread/while_each_thread is a double loop so
 *          'break' will not work as expected - use goto instead.
 */
#define do_each_thread(g, t) \
	for (g = t = &init_task ; (g = t = next_task(g)) != &init_task ; ) do

#define while_each_thread(g, t) \
	while ((t = next_thread(t)) != g)
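
/*
 * Illustrative usage sketch (not part of this header; some_condition is
 * hypothetical). Note the goto instead of break, per the warning above.
 *
 *	struct task_struct *g, *t;
 *
 *	read_lock(&tasklist_lock);
 *	do_each_thread(g, t) {
 *		if (some_condition(t))
 *			goto out;	// break would only exit the inner loop
 *	} while_each_thread(g, t);
 * out:
 *	read_unlock(&tasklist_lock);
 */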

static inline int get_nr_threads(struct task_struct *tsk)
{
	return tsk->signal->nr_threads;
}

static inline bool thread_group_leader(struct task_struct *p)
{
	return p->exit_signal >= 0;
}

/* Due to the insanities of de_thread it is possible for a process
 * to have the pid of the thread group leader without actually being
 * the thread group leader.  For iteration through the pids in proc
 * all we care about is that we have a task with the appropriate
 * pid, we don't actually care if we have the right task.
 */
static inline bool has_group_leader_pid(struct task_struct *p)
{
	return task_pid(p) == p->signal->leader_pid;
}

static inline
bool same_thread_group(struct task_struct *p1, struct task_struct *p2)
{
	return p1->signal == p2->signal;
}

static inline struct task_struct *next_thread(const struct task_struct *p)
{
	return list_entry_rcu(p->thread_group.next,
			      struct task_struct, thread_group);
}

static inline int thread_group_empty(struct task_struct *p)
{
	return list_empty(&p->thread_group);
}

#define delay_group_leader(p) \
		(thread_group_leader(p) && !thread_group_empty(p))

/*
 * Protects ->fs, ->files, ->mm, ->group_info, ->comm, keyring
 * subscriptions and synchronises with wait4().  Also used in procfs.  Also
 * pins the final release of task.io_context.  Also protects ->cpuset and
 * ->cgroup.subsys[]. And ->vfork_done.
 *
 * Nests both inside and outside of read_lock(&tasklist_lock).
 * It must not be nested with write_lock_irq(&tasklist_lock),
 * neither inside nor outside.
 */
static inline void task_lock(struct task_struct *p)
{
	spin_lock(&p->alloc_lock);
}

static inline void task_unlock(struct task_struct *p)
{
	spin_unlock(&p->alloc_lock);
}
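
/*
 * Illustrative usage sketch (not part of this header): task_lock()
 * guarding a read of ->comm, one of the fields listed above. (For this
 * particular field, get_task_comm() declared earlier wraps the same
 * pattern.)
 *
 *	char comm[TASK_COMM_LEN];
 *
 *	task_lock(tsk);
 *	strncpy(comm, tsk->comm, sizeof(comm));
 *	task_unlock(tsk);
 */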

extern struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
							unsigned long *flags);

static inline struct sighand_struct *lock_task_sighand(struct task_struct *tsk,
						       unsigned long *flags)
{
	struct sighand_struct *ret;

	ret = __lock_task_sighand(tsk, flags);
	(void)__cond_lock(&tsk->sighand->siglock, ret);
	return ret;
}

static inline void unlock_task_sighand(struct task_struct *tsk,
						unsigned long *flags)
{
	spin_unlock_irqrestore(&tsk->sighand->siglock, *flags);
}
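
/*
 * Illustrative usage sketch (not part of this header): the canonical
 * pattern. __lock_task_sighand() returns NULL if the task's sighand is
 * already gone, so the result must be checked before unlocking.
 *
 *	unsigned long flags;
 *
 *	if (lock_task_sighand(tsk, &flags)) {
 *		... tsk->sighand->siglock is held here ...
 *		unlock_task_sighand(tsk, &flags);
 *	}
 */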

#ifdef CONFIG_CGROUPS
static inline void threadgroup_change_begin(struct task_struct *tsk)
{
	down_read(&tsk->signal->group_rwsem);
}
static inline void threadgroup_change_end(struct task_struct *tsk)
{
	up_read(&tsk->signal->group_rwsem);
}

/**
 * threadgroup_lock - lock threadgroup
 * @tsk: member task of the threadgroup to lock
 *
 * Lock the threadgroup @tsk belongs to.  No new task is allowed to enter
 * and member tasks aren't allowed to exit (as indicated by PF_EXITING) or
 * change ->group_leader/pid.  This is useful for cases where the threadgroup
 * needs to stay stable across blockable operations.
 *
 * fork and exit paths explicitly call threadgroup_change_{begin|end}() for
 * synchronization.  While held, no new task will be added to threadgroup
 * and no existing live task will have its PF_EXITING set.
 *
 * de_thread() does threadgroup_change_{begin|end}() when a non-leader
 * sub-thread becomes a new leader.
 */
static inline void threadgroup_lock(struct task_struct *tsk)
{
	down_write(&tsk->signal->group_rwsem);
}

/**
 * threadgroup_unlock - unlock threadgroup
 * @tsk: member task of the threadgroup to unlock
 *
 * Reverse threadgroup_lock().
 */
static inline void threadgroup_unlock(struct task_struct *tsk)
{
	up_write(&tsk->signal->group_rwsem);
}
#else
static inline void threadgroup_change_begin(struct task_struct *tsk) {}
static inline void threadgroup_change_end(struct task_struct *tsk) {}
static inline void threadgroup_lock(struct task_struct *tsk) {}
static inline void threadgroup_unlock(struct task_struct *tsk) {}
#endif

#ifndef __HAVE_THREAD_FUNCTIONS

#define task_thread_info(task)	((struct thread_info *)(task)->stack)
#define task_stack_page(task)	((task)->stack)

static inline void setup_thread_stack(struct task_struct *p, struct task_struct *org)
{
	*task_thread_info(p) = *task_thread_info(org);
	task_thread_info(p)->task = p;
}

static inline unsigned long *end_of_stack(struct task_struct *p)
{
	return (unsigned long *)(task_thread_info(p) + 1);
}

#endif

static inline int object_is_on_stack(void *obj)
{
	void *stack = task_stack_page(current);

	return (obj >= stack) && (obj < (stack + THREAD_SIZE));
}

extern void thread_info_cache_init(void);

#ifdef CONFIG_DEBUG_STACK_USAGE
static inline unsigned long stack_not_used(struct task_struct *p)
{
	unsigned long *n = end_of_stack(p);

	do { 	/* Skip over canary */
		n++;
	} while (!*n);

	return (unsigned long)n - (unsigned long)end_of_stack(p);
}
#endif

/* set thread flags in other task's structures
 * - see asm/thread_info.h for TIF_xxxx flags available
 */
static inline void set_tsk_thread_flag(struct task_struct *tsk, int flag)
{
	set_ti_thread_flag(task_thread_info(tsk), flag);
}

static inline void clear_tsk_thread_flag(struct task_struct *tsk, int flag)
{
	clear_ti_thread_flag(task_thread_info(tsk), flag);
}

static inline int test_and_set_tsk_thread_flag(struct task_struct *tsk, int flag)
{
	return test_and_set_ti_thread_flag(task_thread_info(tsk), flag);
}

static inline int test_and_clear_tsk_thread_flag(struct task_struct *tsk, int flag)
{
	return test_and_clear_ti_thread_flag(task_thread_info(tsk), flag);
}

static inline int test_tsk_thread_flag(struct task_struct *tsk, int flag)
{
	return test_ti_thread_flag(task_thread_info(tsk), flag);
}

static inline void set_tsk_need_resched(struct task_struct *tsk)
{
	set_tsk_thread_flag(tsk,TIF_NEED_RESCHED);
}

static inline void clear_tsk_need_resched(struct task_struct *tsk)
{
	clear_tsk_thread_flag(tsk,TIF_NEED_RESCHED);
}

static inline int test_tsk_need_resched(struct task_struct *tsk)
{
	return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED));
}

static inline int restart_syscall(void)
{
	set_tsk_thread_flag(current, TIF_SIGPENDING);
	return -ERESTARTNOINTR;
}

static inline int signal_pending(struct task_struct *p)
{
	return unlikely(test_tsk_thread_flag(p,TIF_SIGPENDING));
}

static inline int __fatal_signal_pending(struct task_struct *p)
{
	return unlikely(sigismember(&p->pending.signal, SIGKILL));
}

static inline int fatal_signal_pending(struct task_struct *p)
{
	return signal_pending(p) && __fatal_signal_pending(p);
}

static inline int signal_pending_state(long state, struct task_struct *p)
{
	if (!(state & (TASK_INTERRUPTIBLE | TASK_WAKEKILL)))
		return 0;
	if (!signal_pending(p))
		return 0;

	return (state & TASK_INTERRUPTIBLE) || __fatal_signal_pending(p);
}
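
/*
 * Illustrative usage sketch (not part of this header; `condition' is
 * hypothetical): a typical interruptible wait that bails out when a
 * signal is pending.
 *
 *	while (!condition) {
 *		set_current_state(TASK_INTERRUPTIBLE);
 *		if (signal_pending(current)) {
 *			__set_current_state(TASK_RUNNING);
 *			return -ERESTARTSYS;
 *		}
 *		schedule();
 *	}
 *	__set_current_state(TASK_RUNNING);
 */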

/*
 * cond_resched() and cond_resched_lock(): latency reduction via
 * explicit rescheduling in places that are safe. The return
 * value indicates whether a reschedule was done in fact.
 * cond_resched_lock() will drop the spinlock before scheduling,
 * cond_resched_softirq() will enable bhs before scheduling.
 */
extern int _cond_resched(void);

#define cond_resched() ({			\
	__might_sleep(__FILE__, __LINE__, 0);	\
	_cond_resched();			\
})

extern int __cond_resched_lock(spinlock_t *lock);

#ifdef CONFIG_PREEMPT_COUNT
#define PREEMPT_LOCK_OFFSET	PREEMPT_OFFSET
#else
#define PREEMPT_LOCK_OFFSET	0
#endif

#define cond_resched_lock(lock) ({				\
	__might_sleep(__FILE__, __LINE__, PREEMPT_LOCK_OFFSET);	\
	__cond_resched_lock(lock);				\
})

extern int __cond_resched_softirq(void);

#define cond_resched_softirq() ({					\
	__might_sleep(__FILE__, __LINE__, SOFTIRQ_DISABLE_OFFSET);	\
	__cond_resched_softirq();					\
})

static inline void cond_resched_rcu(void)
{
#if defined(CONFIG_DEBUG_ATOMIC_SLEEP) || !defined(CONFIG_PREEMPT_RCU)
	rcu_read_unlock();
	cond_resched();
	rcu_read_lock();
#endif
}
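
/*
 * Illustrative usage sketch (not part of this header; process_item is
 * hypothetical): breaking up a long kernel-side loop so that other
 * tasks can run. Only valid where sleeping is allowed.
 *
 *	for (i = 0; i < nr_items; i++) {
 *		process_item(i);
 *		cond_resched();
 *	}
 */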

/*
 * Does a critical section need to be broken due to another
 * task waiting? (Technically this does not depend on CONFIG_PREEMPT,
 * but a general need for low latency.)
 */
static inline int spin_needbreak(spinlock_t *lock)
{
#ifdef CONFIG_PREEMPT
	return spin_is_contended(lock);
#else
	return 0;
#endif
}

/*
 * Idle thread specific functions to determine the need_resched
 * polling state. We have two versions, one based on TS_POLLING in
 * thread_info.status and one based on TIF_POLLING_NRFLAG in
 * thread_info.flags
 */
#ifdef TS_POLLING
static inline int tsk_is_polling(struct task_struct *p)
{
	return task_thread_info(p)->status & TS_POLLING;
}
static inline void __current_set_polling(void)
{
	current_thread_info()->status |= TS_POLLING;
}

static inline bool __must_check current_set_polling_and_test(void)
{
	__current_set_polling();

	/*
	 * Polling state must be visible before we test NEED_RESCHED,
	 * paired by resched_task()
	 */
	smp_mb();

	return unlikely(tif_need_resched());
}

static inline void __current_clr_polling(void)
{
	current_thread_info()->status &= ~TS_POLLING;
}

static inline bool __must_check current_clr_polling_and_test(void)
{
	__current_clr_polling();

	/*
	 * Polling state must be visible before we test NEED_RESCHED,
	 * paired by resched_task()
	 */
	smp_mb();

	return unlikely(tif_need_resched());
}
#elif defined(TIF_POLLING_NRFLAG)
static inline int tsk_is_polling(struct task_struct *p)
{
	return test_tsk_thread_flag(p, TIF_POLLING_NRFLAG);
}

static inline void __current_set_polling(void)
{
	set_thread_flag(TIF_POLLING_NRFLAG);
}

static inline bool __must_check current_set_polling_and_test(void)
{
	__current_set_polling();

	/*
	 * Polling state must be visible before we test NEED_RESCHED,
	 * paired by resched_task()
	 *
	 * XXX: assumes set/clear bit are identical barrier wise.
	 */
	smp_mb__after_clear_bit();

	return unlikely(tif_need_resched());
}

static inline void __current_clr_polling(void)
{
	clear_thread_flag(TIF_POLLING_NRFLAG);
}

static inline bool __must_check current_clr_polling_and_test(void)
{
	__current_clr_polling();

	/*
	 * Polling state must be visible before we test NEED_RESCHED,
	 * paired by resched_task()
	 */
	smp_mb__after_clear_bit();

	return unlikely(tif_need_resched());
}

#else
static inline int tsk_is_polling(struct task_struct *p) { return 0; }
static inline void __current_set_polling(void) { }
static inline void __current_clr_polling(void) { }

static inline bool __must_check current_set_polling_and_test(void)
{
	return unlikely(tif_need_resched());
}
static inline bool __must_check current_clr_polling_and_test(void)
{
	return unlikely(tif_need_resched());
}
#endif

static __always_inline bool need_resched(void)
{
	return unlikely(tif_need_resched());
}

/*
 * Thread group CPU time accounting.
 */
void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times);
void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times);

static inline void thread_group_cputime_init(struct signal_struct *sig)
{
	raw_spin_lock_init(&sig->cputimer.lock);
}

/*
 * Reevaluate whether the task has signals pending delivery.
 * Wake the task if so.
 * This is required every time the blocked sigset_t changes.
 * callers must hold sighand->siglock.
 */
extern void recalc_sigpending_and_wake(struct task_struct *t);
extern void recalc_sigpending(void);

extern void signal_wake_up_state(struct task_struct *t, unsigned int state);

static inline void signal_wake_up(struct task_struct *t, bool resume)
{
	signal_wake_up_state(t, resume ? TASK_WAKEKILL : 0);
}
static inline void ptrace_signal_wake_up(struct task_struct *t, bool resume)
{
	signal_wake_up_state(t, resume ? __TASK_TRACED : 0);
}

/*
 * Wrappers for p->thread_info->cpu access. No-op on UP.
 */
#ifdef CONFIG_SMP

static inline unsigned int task_cpu(const struct task_struct *p)
{
	return task_thread_info(p)->cpu;
}

static inline int task_node(const struct task_struct *p)
{
	return cpu_to_node(task_cpu(p));
}

extern void set_task_cpu(struct task_struct *p, unsigned int cpu);

#else

static inline unsigned int task_cpu(const struct task_struct *p)
{
	return 0;
}

static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
{
}

#endif /* CONFIG_SMP */

extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask);
extern long sched_getaffinity(pid_t pid, struct cpumask *mask);

#ifdef CONFIG_CGROUP_SCHED
extern struct task_group root_task_group;
#endif /* CONFIG_CGROUP_SCHED */

extern int task_can_switch_user(struct user_struct *up,
					struct task_struct *tsk);

#ifdef CONFIG_TASK_XACCT
static inline void add_rchar(struct task_struct *tsk, ssize_t amt)
{
	tsk->ioac.rchar += amt;
}

static inline void add_wchar(struct task_struct *tsk, ssize_t amt)
{
	tsk->ioac.wchar += amt;
}

static inline void inc_syscr(struct task_struct *tsk)
{
	tsk->ioac.syscr++;
}

static inline void inc_syscw(struct task_struct *tsk)
{
	tsk->ioac.syscw++;
}
#else
static inline void add_rchar(struct task_struct *tsk, ssize_t amt)
{
}

static inline void add_wchar(struct task_struct *tsk, ssize_t amt)
{
}

static inline void inc_syscr(struct task_struct *tsk)
{
}

static inline void inc_syscw(struct task_struct *tsk)
{
}
#endif

#ifndef TASK_SIZE_OF
#define TASK_SIZE_OF(tsk)	TASK_SIZE
#endif

#ifdef CONFIG_MM_OWNER
extern void mm_update_next_owner(struct mm_struct *mm);
extern void mm_init_owner(struct mm_struct *mm, struct task_struct *p);
#else
static inline void mm_update_next_owner(struct mm_struct *mm)
{
}

static inline void mm_init_owner(struct mm_struct *mm, struct task_struct *p)
{
}
#endif /* CONFIG_MM_OWNER */

static inline unsigned long task_rlimit(const struct task_struct *tsk,
		unsigned int limit)
{
	return ACCESS_ONCE(tsk->signal->rlim[limit].rlim_cur);
}

static inline unsigned long task_rlimit_max(const struct task_struct *tsk,
		unsigned int limit)
{
	return ACCESS_ONCE(tsk->signal->rlim[limit].rlim_max);
}

static inline unsigned long rlimit(unsigned int limit)
{
	return task_rlimit(current, limit);
}

static inline unsigned long rlimit_max(unsigned int limit)
{
	return task_rlimit_max(current, limit);
}
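
/*
 * Illustrative usage sketch (not part of this header; nr_open_files is
 * hypothetical): checking the current task's soft limit on open files
 * via the rlimit() accessor.
 *
 *	if (nr_open_files >= rlimit(RLIMIT_NOFILE))
 *		return -EMFILE;
 */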

#endif