#ifndef _LINUX_SCHED_H
#define _LINUX_SCHED_H

/*
 * cloning flags:
 */
#define CSIGNAL		0x000000ff	/* signal mask to be sent at exit */
#define CLONE_VM	0x00000100	/* set if VM shared between processes */
#define CLONE_FS	0x00000200	/* set if fs info shared between processes */
#define CLONE_FILES	0x00000400	/* set if open files shared between processes */
#define CLONE_SIGHAND	0x00000800	/* set if signal handlers and blocked signals shared */
#define CLONE_PTRACE	0x00002000	/* set if we want to let tracing continue on the child too */
#define CLONE_VFORK	0x00004000	/* set if the parent wants the child to wake it up on mm_release */
#define CLONE_PARENT	0x00008000	/* set if we want to have the same parent as the cloner */
#define CLONE_THREAD	0x00010000	/* Same thread group? */
#define CLONE_NEWNS	0x00020000	/* New namespace group? */
#define CLONE_SYSVSEM	0x00040000	/* share system V SEM_UNDO semantics */
#define CLONE_SETTLS	0x00080000	/* create a new TLS for the child */
#define CLONE_PARENT_SETTID	0x00100000	/* set the TID in the parent */
#define CLONE_CHILD_CLEARTID	0x00200000	/* clear the TID in the child */
#define CLONE_DETACHED		0x00400000	/* Unused, ignored */
#define CLONE_UNTRACED		0x00800000	/* set if the tracing process can't force CLONE_PTRACE on this clone */
#define CLONE_CHILD_SETTID	0x01000000	/* set the TID in the child */
#define CLONE_STOPPED		0x02000000	/* Start in stopped state */
#define CLONE_NEWUTS		0x04000000	/* New utsname group? */
#define CLONE_NEWIPC		0x08000000	/* New ipcs */
#define CLONE_NEWUSER		0x10000000	/* New user namespace */
#define CLONE_NEWPID		0x20000000	/* New pid namespace */
#define CLONE_NEWNET		0x40000000	/* New network namespace */
#define CLONE_IO		0x80000000	/* Clone io context */

/*
 * Scheduling policies
 */
#define SCHED_NORMAL		0
#define SCHED_FIFO		1
#define SCHED_RR		2
#define SCHED_BATCH		3
/* SCHED_ISO: reserved but not implemented yet */
#define SCHED_IDLE		5

#ifdef __KERNEL__

struct sched_param {
	int sched_priority;
};

#include <asm/param.h>	/* for HZ */

#include <linux/capability.h>
#include <linux/threads.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/timex.h>
#include <linux/jiffies.h>
#include <linux/rbtree.h>
#include <linux/thread_info.h>
#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/nodemask.h>
#include <linux/mm_types.h>

#include <asm/system.h>
#include <asm/semaphore.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include <asm/cputime.h>

#include <linux/smp.h>
#include <linux/sem.h>
#include <linux/signal.h>
#include <linux/securebits.h>
#include <linux/fs_struct.h>
#include <linux/compiler.h>
#include <linux/completion.h>
#include <linux/pid.h>
#include <linux/percpu.h>
#include <linux/topology.h>
#include <linux/proportions.h>
#include <linux/seccomp.h>
#include <linux/rcupdate.h>
#include <linux/rtmutex.h>

#include <linux/time.h>
#include <linux/param.h>
#include <linux/resource.h>
#include <linux/timer.h>
#include <linux/hrtimer.h>
#include <linux/task_io_accounting.h>
#include <linux/kobject.h>
#include <linux/latencytop.h>

#include <asm/processor.h>

struct exec_domain;
struct futex_pi_state;
struct robust_list_head;
struct bio;

/*
 * List of flags we want to share for kernel threads,
 * if only because they are not used by them anyway.
 */
#define CLONE_KERNEL	(CLONE_FS | CLONE_FILES | CLONE_SIGHAND)

/*
 * These are the constants used to fake the fixed-point load-average
 * counting. Some notes:
 *  - 11 bit fractions expand to 22 bits by the multiplies: this gives
 *    a load-average precision of 10 bits integer + 11 bits fractional
 *  - if you want to count load-averages more often, you need more
 *    precision, or rounding will get you. With 2-second counting freq,
 *    the EXP_n values would be 1981, 2034 and 2043 if still using only
 *    11 bit fractions.
 */
extern unsigned long avenrun[];		/* Load averages */

#define FSHIFT		11		/* nr of bits of precision */
#define FIXED_1		(1<<FSHIFT)	/* 1.0 as fixed-point */
#define LOAD_FREQ	(5*HZ+1)	/* 5 sec intervals */
#define EXP_1		1884		/* 1/exp(5sec/1min) as fixed-point */
#define EXP_5		2014		/* 1/exp(5sec/5min) */
#define EXP_15		2037		/* 1/exp(5sec/15min) */

#define CALC_LOAD(load,exp,n) \
	load *= exp; \
	load += n*(FIXED_1-exp); \
	load >>= FSHIFT;
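/*
 * Worked example (illustrative only): with FIXED_1 = 2048 and
 * EXP_1 = 2048/e^(5/60) ~= 1884, one CALC_LOAD step that folds
 * n = 3 runnable tasks into a previous 1-minute average of 1.0:
 *
 *	load = FIXED_1;				(2048, i.e. 1.0)
 *	CALC_LOAD(load, EXP_1, 3 * FIXED_1);
 *	load == (2048*1884 + 6144*164) >> 11 == 2376  (~1.16)
 *
 * so the average decays exponentially toward the instantaneous task
 * count at a rate set by the EXP_n constant.
 */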

extern unsigned long total_forks;
extern int nr_threads;
DECLARE_PER_CPU(unsigned long, process_counts);
extern int nr_processes(void);
extern unsigned long nr_running(void);
extern unsigned long nr_uninterruptible(void);
extern unsigned long nr_active(void);
extern unsigned long nr_iowait(void);
extern unsigned long weighted_cpuload(const int cpu);

struct seq_file;
struct cfs_rq;
struct task_group;
#ifdef CONFIG_SCHED_DEBUG
extern void proc_sched_show_task(struct task_struct *p, struct seq_file *m);
extern void proc_sched_set_task(struct task_struct *p);
extern void
print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq);
#else
static inline void
proc_sched_show_task(struct task_struct *p, struct seq_file *m)
{
}
static inline void proc_sched_set_task(struct task_struct *p)
{
}
static inline void
print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
{
}
#endif

/*
 * Task state bitmask. NOTE! These bits are also
 * encoded in fs/proc/array.c: get_task_state().
 *
 * We have two separate sets of flags: task->state
 * is about runnability, while task->exit_state is
 * about the task exiting. Confusing, but this way
 * modifying one set can't modify the other one by
 * mistake.
 */
#define TASK_RUNNING		0
#define TASK_INTERRUPTIBLE	1
#define TASK_UNINTERRUPTIBLE	2
#define TASK_STOPPED		4
#define TASK_TRACED		8
/* in tsk->exit_state */
#define EXIT_ZOMBIE		16
#define EXIT_DEAD		32
/* in tsk->state again */
#define TASK_DEAD		64

#define __set_task_state(tsk, state_value)		\
	do { (tsk)->state = (state_value); } while (0)
#define set_task_state(tsk, state_value)		\
	set_mb((tsk)->state, (state_value))

/*
 * set_current_state() includes a barrier so that the write of current->state
 * is correctly serialised wrt the caller's subsequent test of whether to
 * actually sleep:
 *
 *	set_current_state(TASK_UNINTERRUPTIBLE);
 *	if (do_i_need_to_sleep())
 *		schedule();
 *
 * If the caller does not need such serialisation then use __set_current_state()
 */
#define __set_current_state(state_value)			\
	do { current->state = (state_value); } while (0)
#define set_current_state(state_value)		\
	set_mb(current->state, (state_value))

/* Task command name length */
#define TASK_COMM_LEN 16

#include <linux/spinlock.h>

/*
 * This serializes "schedule()" and also protects
 * the run-queue from deletions/modifications (but
 * _adding_ to the beginning of the run-queue has
 * a separate lock).
 */
extern rwlock_t tasklist_lock;
extern spinlock_t mmlist_lock;

struct task_struct;

extern void sched_init(void);
extern void sched_init_smp(void);
extern void init_idle(struct task_struct *idle, int cpu);
extern void init_idle_bootup_task(struct task_struct *idle);

extern cpumask_t nohz_cpu_mask;
#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ)
extern int select_nohz_load_balancer(int cpu);
#else
static inline int select_nohz_load_balancer(int cpu)
{
	return 0;
}
#endif

extern unsigned long rt_needs_cpu(int cpu);

/*
 * Only dump TASK_* tasks. (0 for all tasks)
 */
extern void show_state_filter(unsigned long state_filter);

static inline void show_state(void)
{
	show_state_filter(0);
}

extern void show_regs(struct pt_regs *);

/*
 * TASK is a pointer to the task whose backtrace we want to see (or NULL for current
 * task), SP is the stack pointer of the first frame that should be shown in the back
 * trace (or NULL if the entire call-chain of the task should be shown).
 */
extern void show_stack(struct task_struct *task, unsigned long *sp);

void io_schedule(void);
long io_schedule_timeout(long timeout);

extern void cpu_init (void);
extern void trap_init(void);
extern void account_process_tick(struct task_struct *task, int user);
extern void update_process_times(int user);
extern void scheduler_tick(void);
extern void hrtick_resched(void);

extern void sched_show_task(struct task_struct *p);

#ifdef CONFIG_DETECT_SOFTLOCKUP
extern void softlockup_tick(void);
extern void spawn_softlockup_task(void);
extern void touch_softlockup_watchdog(void);
extern void touch_all_softlockup_watchdogs(void);
extern unsigned long  softlockup_thresh;
extern unsigned long sysctl_hung_task_check_count;
extern unsigned long sysctl_hung_task_timeout_secs;
extern unsigned long sysctl_hung_task_warnings;
#else
static inline void softlockup_tick(void)
{
}
static inline void spawn_softlockup_task(void)
{
}
static inline void touch_softlockup_watchdog(void)
{
}
static inline void touch_all_softlockup_watchdogs(void)
{
}
#endif


/* Attach to any functions which should be ignored in wchan output. */
#define __sched		__attribute__((__section__(".sched.text")))

/* Linker adds these: start and end of __sched functions */
extern char __sched_text_start[], __sched_text_end[];

/* Is this address in the __sched functions? */
extern int in_sched_functions(unsigned long addr);

#define	MAX_SCHEDULE_TIMEOUT	LONG_MAX
extern signed long FASTCALL(schedule_timeout(signed long timeout));
extern signed long schedule_timeout_interruptible(signed long timeout);
extern signed long schedule_timeout_uninterruptible(signed long timeout);
asmlinkage void schedule(void);

struct nsproxy;
struct user_namespace;

/* Maximum number of active map areas.. This is a random (large) number */
#define DEFAULT_MAX_MAP_COUNT	65536

extern int sysctl_max_map_count;

#include <linux/aio.h>

extern unsigned long
arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
		       unsigned long, unsigned long);
extern unsigned long
arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
			  unsigned long len, unsigned long pgoff,
			  unsigned long flags);
extern void arch_unmap_area(struct mm_struct *, unsigned long);
extern void arch_unmap_area_topdown(struct mm_struct *, unsigned long);

#if NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS
/*
 * The mm counters are not protected by its page_table_lock,
 * so must be incremented atomically.
 */
#define set_mm_counter(mm, member, value) atomic_long_set(&(mm)->_##member, value)
#define get_mm_counter(mm, member) ((unsigned long)atomic_long_read(&(mm)->_##member))
#define add_mm_counter(mm, member, value) atomic_long_add(value, &(mm)->_##member)
#define inc_mm_counter(mm, member) atomic_long_inc(&(mm)->_##member)
#define dec_mm_counter(mm, member) atomic_long_dec(&(mm)->_##member)

#else  /* NR_CPUS < CONFIG_SPLIT_PTLOCK_CPUS */
/*
 * The mm counters are protected by its page_table_lock,
 * so can be incremented directly.
 */
#define set_mm_counter(mm, member, value) (mm)->_##member = (value)
#define get_mm_counter(mm, member) ((mm)->_##member)
#define add_mm_counter(mm, member, value) (mm)->_##member += (value)
#define inc_mm_counter(mm, member) (mm)->_##member++
#define dec_mm_counter(mm, member) (mm)->_##member--

#endif /* NR_CPUS < CONFIG_SPLIT_PTLOCK_CPUS */
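/*
 * Usage sketch (illustrative, not part of this header): a fault path
 * that maps a new anonymous page charges it to anon_rss, and the unmap
 * path reverses it; the same source compiles against either expansion
 * above:
 *
 *	inc_mm_counter(mm, anon_rss);
 *	...
 *	dec_mm_counter(mm, anon_rss);
 */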

#define get_mm_rss(mm)					\
	(get_mm_counter(mm, file_rss) + get_mm_counter(mm, anon_rss))
#define update_hiwater_rss(mm)	do {			\
	unsigned long _rss = get_mm_rss(mm);		\
	if ((mm)->hiwater_rss < _rss)			\
		(mm)->hiwater_rss = _rss;		\
} while (0)
#define update_hiwater_vm(mm)	do {			\
	if ((mm)->hiwater_vm < (mm)->total_vm)		\
		(mm)->hiwater_vm = (mm)->total_vm;	\
} while (0)

extern void set_dumpable(struct mm_struct *mm, int value);
extern int get_dumpable(struct mm_struct *mm);

/* mm flags */
/* dumpable bits */
#define MMF_DUMPABLE      0  /* core dump is permitted */
#define MMF_DUMP_SECURELY 1  /* core file is readable only by root */
#define MMF_DUMPABLE_BITS 2

/* coredump filter bits */
#define MMF_DUMP_ANON_PRIVATE	2
#define MMF_DUMP_ANON_SHARED	3
#define MMF_DUMP_MAPPED_PRIVATE	4
#define MMF_DUMP_MAPPED_SHARED	5
#define MMF_DUMP_ELF_HEADERS	6
#define MMF_DUMP_FILTER_SHIFT	MMF_DUMPABLE_BITS
#define MMF_DUMP_FILTER_BITS	5
#define MMF_DUMP_FILTER_MASK \
	(((1 << MMF_DUMP_FILTER_BITS) - 1) << MMF_DUMP_FILTER_SHIFT)
#define MMF_DUMP_FILTER_DEFAULT \
	((1 << MMF_DUMP_ANON_PRIVATE) |	(1 << MMF_DUMP_ANON_SHARED))
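/*
 * Example (illustrative): the default filter above dumps only anonymous
 * private and anonymous shared mappings.  The filter bits are exposed,
 * shifted down by MMF_DUMP_FILTER_SHIFT, through
 * /proc/<pid>/coredump_filter, so bit 0 there is MMF_DUMP_ANON_PRIVATE:
 *
 *	echo 0x3 > /proc/self/coredump_filter
 *
 * reproduces the default (anon private + anon shared).
 */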

struct sighand_struct {
	atomic_t		count;
	struct k_sigaction	action[_NSIG];
	spinlock_t		siglock;
	wait_queue_head_t	signalfd_wqh;
};

struct pacct_struct {
	int			ac_flag;
	long			ac_exitcode;
	unsigned long		ac_mem;
	cputime_t		ac_utime, ac_stime;
	unsigned long		ac_minflt, ac_majflt;
};

/*
 * NOTE! "signal_struct" does not have its own
 * locking, because a shared signal_struct always
 * implies a shared sighand_struct, so locking
 * sighand_struct is always a proper superset of
 * the locking of signal_struct.
 */
struct signal_struct {
	atomic_t		count;
	atomic_t		live;

	wait_queue_head_t	wait_chldexit;	/* for wait4() */

	/* current thread group signal load-balancing target: */
	struct task_struct	*curr_target;

	/* shared signal handling: */
	struct sigpending	shared_pending;

	/* thread group exit support */
	int			group_exit_code;
	/* overloaded:
	 * - notify group_exit_task when ->count is equal to notify_count
	 * - everyone except group_exit_task is stopped during signal delivery
	 *   of fatal signals, group_exit_task processes the signal.
	 */
	struct task_struct	*group_exit_task;
	int			notify_count;

	/* thread group stop support, overloads group_exit_code too */
	int			group_stop_count;
	unsigned int		flags; /* see SIGNAL_* flags below */

	/* POSIX.1b Interval Timers */
	struct list_head posix_timers;

	/* ITIMER_REAL timer for the process */
	struct hrtimer real_timer;
	struct task_struct *tsk;
	ktime_t it_real_incr;

	/* ITIMER_PROF and ITIMER_VIRTUAL timers for the process */
	cputime_t it_prof_expires, it_virt_expires;
	cputime_t it_prof_incr, it_virt_incr;

	/* job control IDs */

	/*
	 * pgrp and session fields are deprecated.
	 * use the task_session_Xnr and task_pgrp_Xnr routines below
	 */

	union {
		pid_t pgrp __deprecated;
		pid_t __pgrp;
	};

	struct pid *tty_old_pgrp;

	union {
		pid_t session __deprecated;
		pid_t __session;
	};

	/* boolean value for session group leader */
	int leader;

	struct tty_struct *tty; /* NULL if no tty */

	/*
	 * Cumulative resource counters for dead threads in the group,
	 * and for reaped dead child processes forked by this group.
	 * Live threads maintain their own counters and add to these
	 * in __exit_signal, except for the group leader.
	 */
	cputime_t utime, stime, cutime, cstime;
	cputime_t gtime;
	cputime_t cgtime;
	unsigned long nvcsw, nivcsw, cnvcsw, cnivcsw;
	unsigned long min_flt, maj_flt, cmin_flt, cmaj_flt;
	unsigned long inblock, oublock, cinblock, coublock;

	/*
	 * Cumulative ns of scheduled CPU time for dead threads in the
	 * group, not including a zombie group leader.  (This only differs
	 * from jiffies_to_ns(utime + stime) if sched_clock uses something
	 * other than jiffies.)
	 */
	unsigned long long sum_sched_runtime;

	/*
	 * We don't bother to synchronize most readers of this at all,
	 * because there is no reader checking a limit that actually needs
	 * to get both rlim_cur and rlim_max atomically, and either one
	 * alone is a single word that can safely be read normally.
	 * getrlimit/setrlimit use task_lock(current->group_leader) to
	 * protect this instead of the siglock, because they really
	 * have no need to disable irqs.
	 */
	struct rlimit rlim[RLIM_NLIMITS];

	struct list_head cpu_timers[3];

	/* keep the process-shared keyrings here so that they do the right
	 * thing in threads created with CLONE_THREAD */
#ifdef CONFIG_KEYS
	struct key *session_keyring;	/* keyring inherited over fork */
	struct key *process_keyring;	/* keyring private to this process */
#endif
#ifdef CONFIG_BSD_PROCESS_ACCT
	struct pacct_struct pacct;	/* per-process accounting information */
#endif
#ifdef CONFIG_TASKSTATS
	struct taskstats *stats;
#endif
#ifdef CONFIG_AUDIT
	unsigned audit_tty;
	struct tty_audit_buf *tty_audit_buf;
#endif
};

/* Context switch must be unlocked if interrupts are to be enabled */
#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
# define __ARCH_WANT_UNLOCKED_CTXSW
#endif

/*
 * Bits in flags field of signal_struct.
 */
#define SIGNAL_STOP_STOPPED	0x00000001 /* job control stop in effect */
#define SIGNAL_STOP_DEQUEUED	0x00000002 /* stop signal dequeued */
#define SIGNAL_STOP_CONTINUED	0x00000004 /* SIGCONT since WCONTINUED reap */
#define SIGNAL_GROUP_EXIT	0x00000008 /* group exit in progress */

/*
 * Some day this will be a full-fledged user tracking system..
 */
struct user_struct {
	atomic_t __count;	/* reference count */
	atomic_t processes;	/* How many processes does this user have? */
	atomic_t files;		/* How many open files does this user have? */
	atomic_t sigpending;	/* How many pending signals does this user have? */
#ifdef CONFIG_INOTIFY_USER
	atomic_t inotify_watches; /* How many inotify watches does this user have? */
	atomic_t inotify_devs;	/* How many inotify devs does this user have opened? */
#endif
#ifdef CONFIG_POSIX_MQUEUE
	/* protected by mq_lock	*/
	unsigned long mq_bytes;	/* How many bytes can be allocated to mqueue? */
#endif
	unsigned long locked_shm; /* How many pages of mlocked shm ? */

#ifdef CONFIG_KEYS
	struct key *uid_keyring;	/* UID specific keyring */
	struct key *session_keyring;	/* UID's default session keyring */
#endif

	/* Hash table maintenance information */
	struct hlist_node uidhash_node;
	uid_t uid;

#ifdef CONFIG_FAIR_USER_SCHED
	struct task_group *tg;
#ifdef CONFIG_SYSFS
	struct kobject kobj;
	struct work_struct work;
#endif
#endif
};

extern int uids_sysfs_init(void);

extern struct user_struct *find_user(uid_t);

extern struct user_struct root_user;
#define INIT_USER (&root_user)

struct backing_dev_info;
struct reclaim_state;

#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
struct sched_info {
	/* cumulative counters */
	unsigned long pcount;	      /* # of times run on this cpu */
	unsigned long long cpu_time,  /* time spent on the cpu */
			   run_delay; /* time spent waiting on a runqueue */

	/* timestamps */
	unsigned long long last_arrival,/* when we last ran on a cpu */
			   last_queued;	/* when we were last queued to run */
#ifdef CONFIG_SCHEDSTATS
	/* BKL stats */
	unsigned int bkl_count;
#endif
};
#endif /* defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT) */

#ifdef CONFIG_SCHEDSTATS
extern const struct file_operations proc_schedstat_operations;
#endif /* CONFIG_SCHEDSTATS */

#ifdef CONFIG_TASK_DELAY_ACCT
struct task_delay_info {
	spinlock_t	lock;
	unsigned int	flags;	/* Private per-task flags */

	/* For each stat XXX, add following, aligned appropriately
	 *
	 * struct timespec XXX_start, XXX_end;
	 * u64 XXX_delay;
	 * u32 XXX_count;
	 *
	 * Atomicity of updates to XXX_delay, XXX_count protected by
	 * single lock above (split into XXX_lock if contention is an issue).
	 */

	/*
	 * XXX_count is incremented on every XXX operation, the delay
	 * associated with the operation is added to XXX_delay.
	 * XXX_delay contains the accumulated delay time in nanoseconds.
	 */
	struct timespec blkio_start, blkio_end;	/* Shared by blkio, swapin */
	u64 blkio_delay;	/* wait for sync block io completion */
	u64 swapin_delay;	/* wait for swapin block io completion */
	u32 blkio_count;	/* total count of the number of sync block */
				/* io operations performed */
	u32 swapin_count;	/* total count of the number of swapin block */
				/* io operations performed */
};
#endif	/* CONFIG_TASK_DELAY_ACCT */

static inline int sched_info_on(void)
{
#ifdef CONFIG_SCHEDSTATS
	return 1;
#elif defined(CONFIG_TASK_DELAY_ACCT)
	extern int delayacct_on;
	return delayacct_on;
#else
	return 0;
#endif
}

enum cpu_idle_type {
	CPU_IDLE,
	CPU_NOT_IDLE,
	CPU_NEWLY_IDLE,
	CPU_MAX_IDLE_TYPES
};

/*
 * sched-domains (multiprocessor balancing) declarations:
 */

/*
 * Increase resolution of nice-level calculations:
 */
#define SCHED_LOAD_SHIFT	10
#define SCHED_LOAD_SCALE	(1L << SCHED_LOAD_SHIFT)

#define SCHED_LOAD_SCALE_FUZZ	SCHED_LOAD_SCALE

#ifdef CONFIG_SMP
#define SD_LOAD_BALANCE		1	/* Do load balancing on this domain. */
#define SD_BALANCE_NEWIDLE	2	/* Balance when about to become idle */
#define SD_BALANCE_EXEC		4	/* Balance on exec */
#define SD_BALANCE_FORK		8	/* Balance on fork, clone */
#define SD_WAKE_IDLE		16	/* Wake to idle CPU on task wakeup */
#define SD_WAKE_AFFINE		32	/* Wake task to waking CPU */
#define SD_WAKE_BALANCE		64	/* Perform balancing at task wakeup */
#define SD_SHARE_CPUPOWER	128	/* Domain members share cpu power */
#define SD_POWERSAVINGS_BALANCE	256	/* Balance for power savings */
#define SD_SHARE_PKG_RESOURCES	512	/* Domain members share cpu pkg resources */
#define SD_SERIALIZE		1024	/* Only a single load balancing instance */

#define BALANCE_FOR_MC_POWER	\
	(sched_smt_power_savings ? SD_POWERSAVINGS_BALANCE : 0)

#define BALANCE_FOR_PKG_POWER	\
	((sched_mc_power_savings || sched_smt_power_savings) ?	\
	 SD_POWERSAVINGS_BALANCE : 0)

#define test_sd_parent(sd, flag)	((sd->parent &&		\
					 (sd->parent->flags & flag)) ? 1 : 0)


struct sched_group {
	struct sched_group *next;	/* Must be a circular list */
	cpumask_t cpumask;

	/*
	 * CPU power of this group, SCHED_LOAD_SCALE being max power for a
	 * single CPU. This is read only (except for setup, hotplug CPU).
	 * Note: never change cpu_power without recomputing its reciprocal.
	 */
	unsigned int __cpu_power;
	/*
	 * reciprocal value of cpu_power to avoid expensive divides
	 * (see include/linux/reciprocal_div.h)
	 */
	u32 reciprocal_cpu_power;
};

struct sched_domain {
	/* These fields must be setup */
	struct sched_domain *parent;	/* top domain must be null terminated */
	struct sched_domain *child;	/* bottom domain must be null terminated */
	struct sched_group *groups;	/* the balancing groups of the domain */
	cpumask_t span;			/* span of all CPUs in this domain */
	unsigned long min_interval;	/* Minimum balance interval ms */
	unsigned long max_interval;	/* Maximum balance interval ms */
	unsigned int busy_factor;	/* less balancing by factor if busy */
	unsigned int imbalance_pct;	/* No balance until over watermark */
	unsigned int cache_nice_tries;	/* Leave cache hot tasks for # tries */
	unsigned int busy_idx;
	unsigned int idle_idx;
	unsigned int newidle_idx;
	unsigned int wake_idx;
	unsigned int forkexec_idx;
	int flags;			/* See SD_* */

	/* Runtime fields. */
	unsigned long last_balance;	/* init to jiffies. units in jiffies */
	unsigned int balance_interval;	/* initialise to 1. units in ms. */
	unsigned int nr_balance_failed; /* initialise to 0 */

#ifdef CONFIG_SCHEDSTATS
	/* load_balance() stats */
	unsigned int lb_count[CPU_MAX_IDLE_TYPES];
	unsigned int lb_failed[CPU_MAX_IDLE_TYPES];
	unsigned int lb_balanced[CPU_MAX_IDLE_TYPES];
	unsigned int lb_imbalance[CPU_MAX_IDLE_TYPES];
	unsigned int lb_gained[CPU_MAX_IDLE_TYPES];
	unsigned int lb_hot_gained[CPU_MAX_IDLE_TYPES];
	unsigned int lb_nobusyg[CPU_MAX_IDLE_TYPES];
	unsigned int lb_nobusyq[CPU_MAX_IDLE_TYPES];

	/* Active load balancing */
	unsigned int alb_count;
	unsigned int alb_failed;
	unsigned int alb_pushed;

	/* SD_BALANCE_EXEC stats */
	unsigned int sbe_count;
	unsigned int sbe_balanced;
	unsigned int sbe_pushed;

	/* SD_BALANCE_FORK stats */
	unsigned int sbf_count;
	unsigned int sbf_balanced;
	unsigned int sbf_pushed;

	/* try_to_wake_up() stats */
	unsigned int ttwu_wake_remote;
	unsigned int ttwu_move_affine;
	unsigned int ttwu_move_balance;
#endif
};

extern void partition_sched_domains(int ndoms_new, cpumask_t *doms_new);

#endif	/* CONFIG_SMP */

/*
 * A runqueue laden with a single nice 0 task scores a weighted_cpuload of
 * SCHED_LOAD_SCALE. This function returns 1 if any cpu is laden with a
 * task of nice 0 or enough lower priority tasks to bring up the
 * weighted_cpuload
 */
static inline int above_background_load(void)
{
	unsigned long cpu;

	for_each_online_cpu(cpu) {
		if (weighted_cpuload(cpu) >= SCHED_LOAD_SCALE)
			return 1;
	}
	return 0;
}

struct io_context;			/* See blkdev.h */
#define NGROUPS_SMALL		32
#define NGROUPS_PER_BLOCK	((int)(PAGE_SIZE / sizeof(gid_t)))
struct group_info {
	int ngroups;
	atomic_t usage;
	gid_t small_block[NGROUPS_SMALL];
	int nblocks;
	gid_t *blocks[0];
};

/*
 * get_group_info() must be called with the owning task locked (via task_lock())
 * when task != current.  The reason being that the vast majority of callers are
 * looking at current->group_info, which can not be changed except by the
 * current task.  Changing current->group_info requires the task lock, too.
 */
#define get_group_info(group_info) do { \
	atomic_inc(&(group_info)->usage); \
} while (0)

#define put_group_info(group_info) do { \
	if (atomic_dec_and_test(&(group_info)->usage)) \
		groups_free(group_info); \
} while (0)
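/*
 * Usage sketch (illustrative): reading another task's groups takes the
 * task lock around the get, per the rule above:
 *
 *	struct group_info *gi;
 *
 *	task_lock(task);
 *	gi = task->group_info;
 *	get_group_info(gi);
 *	task_unlock(task);
 *	...		(e.g. walk the groups with GROUP_AT(gi, i))
 *	put_group_info(gi);
 */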

extern struct group_info *groups_alloc(int gidsetsize);
extern void groups_free(struct group_info *group_info);
extern int set_current_groups(struct group_info *group_info);
extern int groups_search(struct group_info *group_info, gid_t grp);
/* access the groups "array" with this macro */
#define GROUP_AT(gi, i) \
    ((gi)->blocks[(i)/NGROUPS_PER_BLOCK][(i)%NGROUPS_PER_BLOCK])

#ifdef ARCH_HAS_PREFETCH_SWITCH_STACK
extern void prefetch_stack(struct task_struct *t);
#else
static inline void prefetch_stack(struct task_struct *t) { }
#endif

struct audit_context;		/* See audit.c */
struct mempolicy;
struct pipe_inode_info;
struct uts_namespace;

struct rq;
struct sched_domain;

struct sched_class {
	const struct sched_class *next;

	void (*enqueue_task) (struct rq *rq, struct task_struct *p, int wakeup);
	void (*dequeue_task) (struct rq *rq, struct task_struct *p, int sleep);
	void (*yield_task) (struct rq *rq);
	int  (*select_task_rq)(struct task_struct *p, int sync);

	void (*check_preempt_curr) (struct rq *rq, struct task_struct *p);

	struct task_struct * (*pick_next_task) (struct rq *rq);
	void (*put_prev_task) (struct rq *rq, struct task_struct *p);

#ifdef CONFIG_SMP
	unsigned long (*load_balance) (struct rq *this_rq, int this_cpu,
			struct rq *busiest, unsigned long max_load_move,
			struct sched_domain *sd, enum cpu_idle_type idle,
			int *all_pinned, int *this_best_prio);

	int (*move_one_task) (struct rq *this_rq, int this_cpu,
			      struct rq *busiest, struct sched_domain *sd,
			      enum cpu_idle_type idle);
	void (*pre_schedule) (struct rq *this_rq, struct task_struct *task);
	void (*post_schedule) (struct rq *this_rq);
	void (*task_wake_up) (struct rq *this_rq, struct task_struct *task);
#endif

	void (*set_curr_task) (struct rq *rq);
	void (*task_tick) (struct rq *rq, struct task_struct *p, int queued);
	void (*task_new) (struct rq *rq, struct task_struct *p);
	void (*set_cpus_allowed)(struct task_struct *p, cpumask_t *newmask);

	void (*join_domain)(struct rq *rq);
	void (*leave_domain)(struct rq *rq);

	void (*switched_from) (struct rq *this_rq, struct task_struct *task,
			       int running);
	void (*switched_to) (struct rq *this_rq, struct task_struct *task,
			     int running);
	void (*prio_changed) (struct rq *this_rq, struct task_struct *task,
			     int oldprio, int running);
};

struct load_weight {
	unsigned long weight, inv_weight;
};

/*
 * CFS stats for a schedulable entity (task, task-group etc)
 *
 * Current field usage histogram:
 *
 *     4 se->block_start
 *     4 se->run_node
 *     4 se->sleep_start
 *     6 se->load.weight
 */
struct sched_entity {
	struct load_weight	load;		/* for load-balancing */
	struct rb_node		run_node;
	unsigned int		on_rq;

	u64			exec_start;
	u64			sum_exec_runtime;
	u64			vruntime;
	u64			prev_sum_exec_runtime;

#ifdef CONFIG_SCHEDSTATS
	u64			wait_start;
	u64			wait_max;
	u64			wait_count;
	u64			wait_sum;

	u64			sleep_start;
	u64			sleep_max;
	s64			sum_sleep_runtime;

	u64			block_start;
	u64			block_max;
	u64			exec_max;
	u64			slice_max;

	u64			nr_migrations;
	u64			nr_migrations_cold;
	u64			nr_failed_migrations_affine;
	u64			nr_failed_migrations_running;
	u64			nr_failed_migrations_hot;
	u64			nr_forced_migrations;
	u64			nr_forced2_migrations;

	u64			nr_wakeups;
	u64			nr_wakeups_sync;
	u64			nr_wakeups_migrate;
	u64			nr_wakeups_local;
	u64			nr_wakeups_remote;
	u64			nr_wakeups_affine;
	u64			nr_wakeups_affine_attempts;
	u64			nr_wakeups_passive;
	u64			nr_wakeups_idle;
#endif

#ifdef CONFIG_FAIR_GROUP_SCHED
	struct sched_entity	*parent;
	/* rq on which this entity is (to be) queued: */
	struct cfs_rq		*cfs_rq;
	/* rq "owned" by this entity/group: */
	struct cfs_rq		*my_q;
#endif
};

struct sched_rt_entity {
	struct list_head run_list;
	unsigned int time_slice;
	unsigned long timeout;
	int nr_cpus_allowed;

#ifdef CONFIG_FAIR_GROUP_SCHED
	struct sched_rt_entity	*parent;
	/* rq on which this entity is (to be) queued: */
	struct rt_rq		*rt_rq;
	/* rq "owned" by this entity/group: */
	struct rt_rq		*my_q;
#endif
};

struct task_struct {
	volatile long state;	/* -1 unrunnable, 0 runnable, >0 stopped */
	void *stack;
	atomic_t usage;
	unsigned int flags;	/* per process flags, defined below */
	unsigned int ptrace;

	int lock_depth;		/* BKL lock depth */

#ifdef CONFIG_SMP
#ifdef __ARCH_WANT_UNLOCKED_CTXSW
	int oncpu;
#endif
#endif

	int prio, static_prio, normal_prio;
	const struct sched_class *sched_class;
	struct sched_entity se;
	struct sched_rt_entity rt;

#ifdef CONFIG_PREEMPT_NOTIFIERS
	/* list of struct preempt_notifier: */
	struct hlist_head preempt_notifiers;
#endif

	/*
	 * fpu_counter contains the number of consecutive context switches
	 * during which the FPU is used. If this is over a threshold, the lazy
	 * FPU saving becomes unlazy to save the trap. This is an unsigned char
	 * so that after 256 times the counter wraps and the behavior turns
	 * lazy again; this is to deal with bursty apps that only use the FPU
	 * for a short time.
	 */
	unsigned char fpu_counter;
	s8 oomkilladj; /* OOM kill score adjustment (bit shift). */
#ifdef CONFIG_BLK_DEV_IO_TRACE
	unsigned int btrace_seq;
#endif

	unsigned int policy;
	cpumask_t cpus_allowed;

#ifdef CONFIG_PREEMPT_RCU
	int rcu_read_lock_nesting;
	int rcu_flipctr_idx;
#endif /* #ifdef CONFIG_PREEMPT_RCU */

#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
	struct sched_info sched_info;
#endif

	struct list_head tasks;
	/*
	 * ptrace_list/ptrace_children forms the list of my children
	 * that were stolen by a ptracer.
	 */
	struct list_head ptrace_children;
	struct list_head ptrace_list;

	struct mm_struct *mm, *active_mm;

/* task state */
	struct linux_binfmt *binfmt;
	int exit_state;
	int exit_code, exit_signal;
	int pdeath_signal;  /*  The signal sent when the parent dies  */
	/* ??? */
	unsigned int personality;
	unsigned did_exec:1;
	pid_t pid;
	pid_t tgid;

#ifdef CONFIG_CC_STACKPROTECTOR
	/* Canary value for the -fstack-protector gcc feature */
	unsigned long stack_canary;
#endif
	/* 
	 * pointers to (original) parent process, youngest child, younger sibling,
	 * older sibling, respectively.  (p->father can be replaced with 
	 * p->parent->pid)
	 */
	struct task_struct *real_parent; /* real parent process (when being debugged) */
	struct task_struct *parent;	/* parent process */
	/*
	 * children/sibling forms the list of my children plus the
	 * tasks I'm ptracing.
	 */
	struct list_head children;	/* list of my children */
	struct list_head sibling;	/* linkage in my parent's children list */
	struct task_struct *group_leader;	/* threadgroup leader */

	/* PID/PID hash table linkage. */
	struct pid_link pids[PIDTYPE_MAX];
	struct list_head thread_group;

	struct completion *vfork_done;		/* for vfork() */
	int __user *set_child_tid;		/* CLONE_CHILD_SETTID */
	int __user *clear_child_tid;		/* CLONE_CHILD_CLEARTID */

	unsigned int rt_priority;
	cputime_t utime, stime, utimescaled, stimescaled;
	cputime_t gtime;
	cputime_t prev_utime, prev_stime;
	unsigned long nvcsw, nivcsw; /* context switch counts */
	struct timespec start_time; 		/* monotonic time */
	struct timespec real_start_time;	/* boot based time */
/* mm fault and swap info: this can arguably be seen as either mm-specific or thread-specific */
	unsigned long min_flt, maj_flt;

  	cputime_t it_prof_expires, it_virt_expires;
	unsigned long long it_sched_expires;
	struct list_head cpu_timers[3];

/* process credentials */
	uid_t uid,euid,suid,fsuid;
	gid_t gid,egid,sgid,fsgid;
	struct group_info *group_info;
	kernel_cap_t   cap_effective, cap_inheritable, cap_permitted;
	unsigned keep_capabilities:1;
	struct user_struct *user;
#ifdef CONFIG_KEYS
	struct key *request_key_auth;	/* assumed request_key authority */
	struct key *thread_keyring;	/* keyring private to this thread */
	unsigned char jit_keyring;	/* default keyring to attach requested keys to */
#endif
	char comm[TASK_COMM_LEN]; /* executable name excluding path
				     - access with [gs]et_task_comm (which lock
				       it with task_lock())
				     - initialized normally by flush_old_exec */
/* file system info */
	int link_count, total_link_count;
#ifdef CONFIG_SYSVIPC
/* ipc stuff */
	struct sysv_sem sysvsem;
#endif
#ifdef CONFIG_DETECT_SOFTLOCKUP
/* hung task detection */
	unsigned long last_switch_timestamp;
	unsigned long last_switch_count;
#endif
/* CPU-specific state of this task */
	struct thread_struct thread;
/* filesystem information */
	struct fs_struct *fs;
/* open file information */
	struct files_struct *files;
/* namespaces */
	struct nsproxy *nsproxy;
/* signal handlers */
	struct signal_struct *signal;
	struct sighand_struct *sighand;

	sigset_t blocked, real_blocked;
	sigset_t saved_sigmask;		/* To be restored with TIF_RESTORE_SIGMASK */
	struct sigpending pending;

	unsigned long sas_ss_sp;
	size_t sas_ss_size;
	int (*notifier)(void *priv);
	void *notifier_data;
	sigset_t *notifier_mask;
#ifdef CONFIG_SECURITY
	void *security;
#endif
	struct audit_context *audit_context;
	seccomp_t seccomp;

/* Thread group tracking */
   	u32 parent_exec_id;
   	u32 self_exec_id;
/* Protection of (de-)allocation: mm, files, fs, tty, keyrings */
	spinlock_t alloc_lock;

	/* Protection of the PI data structures: */
	spinlock_t pi_lock;

#ifdef CONFIG_RT_MUTEXES
	/* PI waiters blocked on a rt_mutex held by this task */
	struct plist_head pi_waiters;
	/* Deadlock detection and priority inheritance handling */
	struct rt_mutex_waiter *pi_blocked_on;
#endif

#ifdef CONFIG_DEBUG_MUTEXES
	/* mutex deadlock detection */
	struct mutex_waiter *blocked_on;
#endif
#ifdef CONFIG_TRACE_IRQFLAGS
	unsigned int irq_events;
	int hardirqs_enabled;
	unsigned long hardirq_enable_ip;
	unsigned int hardirq_enable_event;
	unsigned long hardirq_disable_ip;
	unsigned int hardirq_disable_event;
	int softirqs_enabled;
	unsigned long softirq_disable_ip;
	unsigned int softirq_disable_event;
	unsigned long softirq_enable_ip;
	unsigned int softirq_enable_event;
	int hardirq_context;
	int softirq_context;
#endif
#ifdef CONFIG_LOCKDEP
# define MAX_LOCK_DEPTH 30UL
	u64 curr_chain_key;
	int lockdep_depth;
	struct held_lock held_locks[MAX_LOCK_DEPTH];
	unsigned int lockdep_recursion;
#endif

/* journalling filesystem info */
	void *journal_info;

/* stacked block device info */
	struct bio *bio_list, **bio_tail;

/* VM state */
	struct reclaim_state *reclaim_state;

	struct backing_dev_info *backing_dev_info;

	struct io_context *io_context;

	unsigned long ptrace_message;
	siginfo_t *last_siginfo; /* For ptrace use.  */
#ifdef CONFIG_TASK_XACCT
/* i/o counters (bytes read/written, #syscalls) */
	u64 rchar, wchar, syscr, syscw;
#endif
	struct task_io_accounting ioac;
#if defined(CONFIG_TASK_XACCT)
	u64 acct_rss_mem1;	/* accumulated rss usage */
	u64 acct_vm_mem1;	/* accumulated virtual memory usage */
	cputime_t acct_stimexpd;/* stime since last update */
#endif
#ifdef CONFIG_NUMA
  	struct mempolicy *mempolicy;
	short il_next;
#endif
#ifdef CONFIG_CPUSETS
	nodemask_t mems_allowed;
	int cpuset_mems_generation;
	int cpuset_mem_spread_rotor;
#endif
#ifdef CONFIG_CGROUPS
	/* Control Group info protected by css_set_lock */
	struct css_set *cgroups;
	/* cg_list protected by css_set_lock and tsk->alloc_lock */
	struct list_head cg_list;
#endif
#ifdef CONFIG_FUTEX
	struct robust_list_head __user *robust_list;
#ifdef CONFIG_COMPAT
	struct compat_robust_list_head __user *compat_robust_list;
#endif
	struct list_head pi_state_list;
	struct futex_pi_state *pi_state_cache;
#endif
	atomic_t fs_excl;	/* holding fs exclusive resources */
	struct rcu_head rcu;

	/*
	 * cache last used pipe for splice
	 */
	struct pipe_inode_info *splice_pipe;
#ifdef	CONFIG_TASK_DELAY_ACCT
	struct task_delay_info *delays;
#endif
#ifdef CONFIG_FAULT_INJECTION
	int make_it_fail;
#endif
	struct prop_local_single dirties;
#ifdef CONFIG_LATENCYTOP
	int latency_record_count;
	struct latency_record latency_record[LT_SAVECOUNT];
#endif
};

/*
 * Priority of a process goes from 0..MAX_PRIO-1, valid RT
 * priority is 0..MAX_RT_PRIO-1, and SCHED_NORMAL/SCHED_BATCH
 * tasks are in the range MAX_RT_PRIO..MAX_PRIO-1. Priority
 * values are inverted: lower p->prio value means higher priority.
 *
 * The MAX_USER_RT_PRIO value allows the actual maximum
 * RT priority to be separate from the value exported to
 * user-space.  This allows kernel threads to set their
 * priority to a value higher than any user task. Note:
 * MAX_RT_PRIO must not be smaller than MAX_USER_RT_PRIO.
 */

#define MAX_USER_RT_PRIO	100
#define MAX_RT_PRIO		MAX_USER_RT_PRIO

#define MAX_PRIO		(MAX_RT_PRIO + 40)
#define DEFAULT_PRIO		(MAX_RT_PRIO + 20)

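/*
 * Worked example (illustrative): with the values above, MAX_RT_PRIO is
 * 100 and MAX_PRIO is 140.  An RT task with sched_priority 50 runs at
 * p->prio = 99 - 50 = 49, while nice -20, 0 and +19 map to 100, 120
 * (DEFAULT_PRIO) and 139 respectively.
 */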
static inline int rt_prio(int prio)
{
	if (unlikely(prio < MAX_RT_PRIO))
		return 1;
	return 0;
}

static inline int rt_task(struct task_struct *p)
{
	return rt_prio(p->prio);
}

static inline void set_task_session(struct task_struct *tsk, pid_t session)
{
	tsk->signal->__session = session;
}

static inline void set_task_pgrp(struct task_struct *tsk, pid_t pgrp)
{
	tsk->signal->__pgrp = pgrp;
}

static inline struct pid *task_pid(struct task_struct *task)
{
	return task->pids[PIDTYPE_PID].pid;
}

static inline struct pid *task_tgid(struct task_struct *task)
{
	return task->group_leader->pids[PIDTYPE_PID].pid;
}

static inline struct pid *task_pgrp(struct task_struct *task)
{
	return task->group_leader->pids[PIDTYPE_PGID].pid;
}

static inline struct pid *task_session(struct task_struct *task)
{
	return task->group_leader->pids[PIDTYPE_SID].pid;
}

struct pid_namespace;

/*
 * the helpers to get the task's different pids as they are seen
 * from various namespaces
 *
 * task_xid_nr()     : global id, i.e. the id seen from the init namespace;
 * task_xid_vnr()    : virtual id, i.e. the id seen from the namespace the task
 *                     belongs to. This only makes sense when called in the
 *                     context of the task that belongs to the same namespace;
 * task_xid_nr_ns()  : id seen from the ns specified;
 *
 * set_task_vxid()   : assigns a virtual id to a task;
 *
 * see also pid_nr() etc in include/linux/pid.h
 */

static inline pid_t task_pid_nr(struct task_struct *tsk)
{
	return tsk->pid;
}

pid_t task_pid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns);

static inline pid_t task_pid_vnr(struct task_struct *tsk)
{
	return pid_vnr(task_pid(tsk));
}


static inline pid_t task_tgid_nr(struct task_struct *tsk)
{
	return tsk->tgid;
}

pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns);

static inline pid_t task_tgid_vnr(struct task_struct *tsk)
{
	return pid_vnr(task_tgid(tsk));
}


static inline pid_t task_pgrp_nr(struct task_struct *tsk)
{
	return tsk->signal->__pgrp;
}

pid_t task_pgrp_nr_ns(struct task_struct *tsk, struct pid_namespace *ns);

static inline pid_t task_pgrp_vnr(struct task_struct *tsk)
{
	return pid_vnr(task_pgrp(tsk));
}


static inline pid_t task_session_nr(struct task_struct *tsk)
{
	return tsk->signal->__session;
}

pid_t task_session_nr_ns(struct task_struct *tsk, struct pid_namespace *ns);

static inline pid_t task_session_vnr(struct task_struct *tsk)
{
	return pid_vnr(task_session(tsk));
}

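/*
 * Example (illustrative): for a task that is the init of a pid
 * namespace and happens to have global pid 4321,
 *
 *	task_pid_nr(tsk)  returns 4321	(id in the init namespace)
 *	task_pid_vnr(tsk) returns 1	(id inside its own namespace)
 *	task_pid_nr_ns(tsk, ns)		(id as seen from 'ns')
 */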

/**
 * pid_alive - check that a task structure is not stale
 * @p: Task structure to be checked.
 *
 * Test if a process is not yet dead (at most zombie state)
 * If pid_alive fails, then pointers within the task structure
 * can be stale and must not be dereferenced.
 */
static inline int pid_alive(struct task_struct *p)
{
	return p->pids[PIDTYPE_PID].pid != NULL;
}

/**
 * is_global_init - check if a task structure is init
 * @tsk: Task structure to be checked.
 *
 * Check if a task structure is the first user space task the kernel created.
 */
static inline int is_global_init(struct task_struct *tsk)
{
	return tsk->pid == 1;
}

/*
 * is_container_init:
 * check whether the task is init in its own pid namespace.
 */
extern int is_container_init(struct task_struct *tsk);

extern struct pid *cad_pid;

extern void free_task(struct task_struct *tsk);
#define get_task_struct(tsk) do { atomic_inc(&(tsk)->usage); } while(0)

extern void __put_task_struct(struct task_struct *t);

static inline void put_task_struct(struct task_struct *t)
{
	if (atomic_dec_and_test(&t->usage))
		__put_task_struct(t);
}

/*
 * Per process flags
 */
#define PF_ALIGNWARN	0x00000001	/* Print alignment warning msgs */
					/* Not implemented yet, only for 486*/
#define PF_STARTING	0x00000002	/* being created */
#define PF_EXITING	0x00000004	/* getting shut down */
#define PF_EXITPIDONE	0x00000008	/* pi exit done on shut down */
#define PF_VCPU		0x00000010	/* I'm a virtual CPU */
#define PF_FORKNOEXEC	0x00000040	/* forked but didn't exec */
#define PF_SUPERPRIV	0x00000100	/* used super-user privileges */
#define PF_DUMPCORE	0x00000200	/* dumped core */
#define PF_SIGNALED	0x00000400	/* killed by a signal */
#define PF_MEMALLOC	0x00000800	/* Allocating memory */
#define PF_FLUSHER	0x00001000	/* responsible for disk writeback */
#define PF_USED_MATH	0x00002000	/* if unset the fpu must be initialized before use */
#define PF_NOFREEZE	0x00008000	/* this thread should not be frozen */
#define PF_FROZEN	0x00010000	/* frozen for system suspend */
#define PF_FSTRANS	0x00020000	/* inside a filesystem transaction */
#define PF_KSWAPD	0x00040000	/* I am kswapd */
#define PF_SWAPOFF	0x00080000	/* I am in swapoff */
#define PF_LESS_THROTTLE 0x00100000	/* Throttle me less: I clean memory */
#define PF_BORROWED_MM	0x00200000	/* I am a kthread doing use_mm */
#define PF_RANDOMIZE	0x00400000	/* randomize virtual address space */
#define PF_SWAPWRITE	0x00800000	/* Allowed to write to swap */
#define PF_SPREAD_PAGE	0x01000000	/* Spread page cache over cpuset */
#define PF_SPREAD_SLAB	0x02000000	/* Spread some slab caches over cpuset */
#define PF_MEMPOLICY	0x10000000	/* Non-default NUMA mempolicy */
#define PF_MUTEX_TESTER	0x20000000	/* Thread belongs to the rt mutex tester */
#define PF_FREEZER_SKIP	0x40000000	/* Freezer should not count it as freezeable */

/*
 * Only the _current_ task can read/write to tsk->flags, but other
 * tasks can access tsk->flags in readonly mode for example
 * with tsk_used_math (like during threaded core dumping).
 * There is however an exception to this rule during ptrace
 * or during fork: the ptracer task is allowed to write to the
 * child->flags of its traced child (same goes for fork, the parent
 * can write to the child->flags), because we're guaranteed the
 * child is not running and in turn not changing child->flags
 * at the same time the parent does it.
 */
#define clear_stopped_child_used_math(child) do { (child)->flags &= ~PF_USED_MATH; } while (0)
#define set_stopped_child_used_math(child) do { (child)->flags |= PF_USED_MATH; } while (0)
#define clear_used_math() clear_stopped_child_used_math(current)
#define set_used_math() set_stopped_child_used_math(current)
#define conditional_stopped_child_used_math(condition, child) \
	do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= (condition) ? PF_USED_MATH : 0; } while (0)
#define conditional_used_math(condition) \
	conditional_stopped_child_used_math(condition, current)
#define copy_to_stopped_child_used_math(child) \
	do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= current->flags & PF_USED_MATH; } while (0)
/* NOTE: this will return 0 or PF_USED_MATH, it will never return 1 */
#define tsk_used_math(p) ((p)->flags & PF_USED_MATH)
#define used_math() tsk_used_math(current)

#ifdef CONFIG_SMP
extern int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask);
#else
static inline int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
{
	if (!cpu_isset(0, new_mask))
		return -EINVAL;
	return 0;
}
#endif

extern unsigned long long sched_clock(void);

/*
 * For kernel-internal use: high-speed (but slightly incorrect) per-cpu
 * clock constructed from sched_clock():
 */
extern unsigned long long cpu_clock(int cpu);

extern unsigned long long
task_sched_runtime(struct task_struct *task);

/* sched_exec is called by processes performing an exec */
#ifdef CONFIG_SMP
extern void sched_exec(void);
#else
#define sched_exec()   {}
#endif

extern void sched_clock_idle_sleep_event(void);
extern void sched_clock_idle_wakeup_event(u64 delta_ns);

#ifdef CONFIG_HOTPLUG_CPU
extern void idle_task_exit(void);
#else
static inline void idle_task_exit(void) {}
#endif

extern void sched_idle_next(void);

#ifdef CONFIG_SCHED_DEBUG
extern unsigned int sysctl_sched_latency;
extern unsigned int sysctl_sched_min_granularity;
extern unsigned int sysctl_sched_wakeup_granularity;
extern unsigned int sysctl_sched_batch_wakeup_granularity;
extern unsigned int sysctl_sched_child_runs_first;
extern unsigned int sysctl_sched_features;
extern unsigned int sysctl_sched_migration_cost;
extern unsigned int sysctl_sched_nr_migrate;
extern unsigned int sysctl_sched_rt_period;
extern unsigned int sysctl_sched_rt_ratio;
#if defined(CONFIG_FAIR_GROUP_SCHED) && defined(CONFIG_SMP)
extern unsigned int sysctl_sched_min_bal_int_shares;
extern unsigned int sysctl_sched_max_bal_int_shares;
#endif

int sched_nr_latency_handler(struct ctl_table *table, int write,
		struct file *file, void __user *buffer, size_t *length,
		loff_t *ppos);
#endif

extern unsigned int sysctl_sched_compat_yield;

#ifdef CONFIG_RT_MUTEXES
extern int rt_mutex_getprio(struct task_struct *p);
extern void rt_mutex_setprio(struct task_struct *p, int prio);
extern void rt_mutex_adjust_pi(struct task_struct *p);
#else
static inline int rt_mutex_getprio(struct task_struct *p)
{
	return p->normal_prio;
}
# define rt_mutex_adjust_pi(p)		do { } while (0)
#endif

extern void set_user_nice(struct task_struct *p, long nice);
extern int task_prio(const struct task_struct *p);
extern int task_nice(const struct task_struct *p);
extern int can_nice(const struct task_struct *p, const int nice);
extern int task_curr(const struct task_struct *p);
extern int idle_cpu(int cpu);
extern int sched_setscheduler(struct task_struct *, int, struct sched_param *);
extern struct task_struct *idle_task(int cpu);
extern struct task_struct *curr_task(int cpu);
extern void set_curr_task(int cpu, struct task_struct *p);

void yield(void);

/*
 * The default (Linux) execution domain.
 */
extern struct exec_domain	default_exec_domain;

union thread_union {
	struct thread_info thread_info;
	unsigned long stack[THREAD_SIZE/sizeof(long)];
};

#ifndef __HAVE_ARCH_KSTACK_END
static inline int kstack_end(void *addr)
{
	/* Reliable end-of-stack detection:
	 * some APM BIOS versions misalign the stack.
	 */
	return !(((unsigned long)addr+sizeof(void*)-1) & (THREAD_SIZE-sizeof(void*)));
}
#endif
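
/*
 * Illustrative sketch, in the style of the arch stack dumpers: walk
 * upward from an address inside the live stack (here the address of a
 * local variable) until kstack_end() reports the stack's end.
 * examine() is a placeholder:
 *
 *	unsigned long dummy;
 *	unsigned long *sp = &dummy;
 *
 *	while (!kstack_end(sp))
 *		examine(*sp++);
 */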

extern union thread_union init_thread_union;
extern struct task_struct init_task;

extern struct   mm_struct init_mm;

extern struct pid_namespace init_pid_ns;

/*
 * find a task by one of its numerical ids
 *
 * find_task_by_pid_type_ns():
 *      the most generic call - finds a task by id, with the id type
 *      and namespace both specified
 * find_task_by_pid_ns():
 *      finds a task by its pid in the specified namespace
 * find_task_by_vpid():
 *      finds a task by its virtual pid
 * find_task_by_pid():
 *      finds a task by its global pid
 *
 * see also find_pid() etc in include/linux/pid.h
 */

extern struct task_struct *find_task_by_pid_type_ns(int type, int pid,
		struct pid_namespace *ns);

extern struct task_struct *find_task_by_pid(pid_t nr);
extern struct task_struct *find_task_by_vpid(pid_t nr);
extern struct task_struct *find_task_by_pid_ns(pid_t nr,
		struct pid_namespace *ns);
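
/*
 * Lookup sketch (illustrative): the returned task is not pinned, so
 * callers typically look it up under rcu_read_lock() and take a
 * reference before using it outside the read-side section:
 *
 *	struct task_struct *p;
 *
 *	rcu_read_lock();
 *	p = find_task_by_vpid(nr);
 *	if (p)
 *		get_task_struct(p);
 *	rcu_read_unlock();
 */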

extern void __set_special_pids(pid_t session, pid_t pgrp);

/* per-UID process charging. */
extern struct user_struct * alloc_uid(struct user_namespace *, uid_t);
static inline struct user_struct *get_uid(struct user_struct *u)
{
	atomic_inc(&u->__count);
	return u;
}
extern void free_uid(struct user_struct *);
extern void switch_uid(struct user_struct *);
extern void release_uids(struct user_namespace *ns);
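
/*
 * Refcounting sketch (illustrative): every get_uid() must be balanced
 * by a free_uid() once the reference is no longer needed:
 *
 *	struct user_struct *user = get_uid(p->user);
 *	...
 *	free_uid(user);
 */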

#include <asm/current.h>

extern void do_timer(unsigned long ticks);

extern int FASTCALL(wake_up_state(struct task_struct * tsk, unsigned int state));
extern int FASTCALL(wake_up_process(struct task_struct * tsk));
extern void FASTCALL(wake_up_new_task(struct task_struct * tsk,
						unsigned long clone_flags));
#ifdef CONFIG_SMP
 extern void kick_process(struct task_struct *tsk);
#else
 static inline void kick_process(struct task_struct *tsk) { }
#endif
extern void sched_fork(struct task_struct *p, int clone_flags);
extern void sched_dead(struct task_struct *p);

extern int in_group_p(gid_t);
extern int in_egroup_p(gid_t);

extern void proc_caches_init(void);
extern void flush_signals(struct task_struct *);
extern void ignore_signals(struct task_struct *);
extern void flush_signal_handlers(struct task_struct *, int force_default);
extern int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info);

static inline int dequeue_signal_lock(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&tsk->sighand->siglock, flags);
	ret = dequeue_signal(tsk, mask, info);
	spin_unlock_irqrestore(&tsk->sighand->siglock, flags);

	return ret;
}

extern void block_all_signals(int (*notifier)(void *priv), void *priv,
			      sigset_t *mask);
extern void unblock_all_signals(void);
extern void release_task(struct task_struct * p);
extern int send_sig_info(int, struct siginfo *, struct task_struct *);
extern int send_group_sig_info(int, struct siginfo *, struct task_struct *);
extern int force_sigsegv(int, struct task_struct *);
extern int force_sig_info(int, struct siginfo *, struct task_struct *);
extern int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp);
extern int kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp);
extern int kill_pid_info(int sig, struct siginfo *info, struct pid *pid);
extern int kill_pid_info_as_uid(int, struct siginfo *, struct pid *, uid_t, uid_t, u32);
extern int kill_pgrp(struct pid *pid, int sig, int priv);
extern int kill_pid(struct pid *pid, int sig, int priv);
extern int kill_proc_info(int, struct siginfo *, pid_t);
extern void do_notify_parent(struct task_struct *, int);
extern void force_sig(int, struct task_struct *);
extern void force_sig_specific(int, struct task_struct *);
extern int send_sig(int, struct task_struct *, int);
extern void zap_other_threads(struct task_struct *p);
extern int kill_proc(pid_t, int, int);
extern struct sigqueue *sigqueue_alloc(void);
extern void sigqueue_free(struct sigqueue *);
extern int send_sigqueue(int, struct sigqueue *,  struct task_struct *);
extern int send_group_sigqueue(int, struct sigqueue *,  struct task_struct *);
extern int do_sigaction(int, struct k_sigaction *, struct k_sigaction *);
extern int do_sigaltstack(const stack_t __user *, stack_t __user *, unsigned long);

static inline int kill_cad_pid(int sig, int priv)
{
	return kill_pid(cad_pid, sig, priv);
}

/* These can be the second arg to send_sig_info/send_group_sig_info.  */
#define SEND_SIG_NOINFO ((struct siginfo *) 0)
#define SEND_SIG_PRIV	((struct siginfo *) 1)
#define SEND_SIG_FORCED	((struct siginfo *) 2)

static inline int is_si_special(const struct siginfo *info)
{
	return info <= SEND_SIG_FORCED;
}
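
/*
 * Example (sketch): kernel-internal senders pass one of the special
 * markers above instead of a real siginfo, e.g.
 *
 *	send_sig_info(SIGKILL, SEND_SIG_FORCED, p);
 *
 * and is_si_special() lets the delivery path recognise them.
 */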

/* True if we are on the alternate signal stack.  */

static inline int on_sig_stack(unsigned long sp)
{
	return (sp - current->sas_ss_sp < current->sas_ss_size);
}

static inline int sas_ss_flags(unsigned long sp)
{
	return (current->sas_ss_size == 0 ? SS_DISABLE
		: on_sig_stack(sp) ? SS_ONSTACK : 0);
}

/*
 * Routines for handling mm_structs
 */
extern struct mm_struct * mm_alloc(void);

/* mmdrop drops the mm and the page tables */
extern void FASTCALL(__mmdrop(struct mm_struct *));
static inline void mmdrop(struct mm_struct * mm)
{
	if (unlikely(atomic_dec_and_test(&mm->mm_count)))
		__mmdrop(mm);
}

/* mmput gets rid of the mappings and all user-space */
extern void mmput(struct mm_struct *);
/* Grab a reference to a task's mm, if it is not already going away */
extern struct mm_struct *get_task_mm(struct task_struct *task);
/* Remove the current task's stale references to the old mm_struct */
extern void mm_release(struct task_struct *, struct mm_struct *);
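
/*
 * Typical pairing (sketch): get_task_mm() returns NULL for kernel
 * threads and tasks whose mm is going away; a non-NULL result must be
 * released with mmput():
 *
 *	struct mm_struct *mm = get_task_mm(task);
 *
 *	if (mm) {
 *		...
 *		mmput(mm);
 *	}
 */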

extern int  copy_thread(int, unsigned long, unsigned long, unsigned long, struct task_struct *, struct pt_regs *);
extern void flush_thread(void);
extern void exit_thread(void);

extern void exit_files(struct task_struct *);
extern void __cleanup_signal(struct signal_struct *);
extern void __cleanup_sighand(struct sighand_struct *);
extern void exit_itimers(struct signal_struct *);

extern NORET_TYPE void do_group_exit(int);

extern void daemonize(const char *, ...);
extern int allow_signal(int);
extern int disallow_signal(int);

extern int do_execve(char *, char __user * __user *, char __user * __user *, struct pt_regs *);
extern long do_fork(unsigned long, unsigned long, struct pt_regs *, unsigned long, int __user *, int __user *);
struct task_struct *fork_idle(int);

extern void set_task_comm(struct task_struct *tsk, char *from);
extern void get_task_comm(char *to, struct task_struct *tsk);

#ifdef CONFIG_SMP
extern void wait_task_inactive(struct task_struct * p);
#else
#define wait_task_inactive(p)	do { } while (0)
#endif

#define remove_parent(p)	list_del_init(&(p)->sibling)
#define add_parent(p)		list_add_tail(&(p)->sibling,&(p)->parent->children)

#define next_task(p)	list_entry(rcu_dereference((p)->tasks.next), struct task_struct, tasks)

#define for_each_process(p) \
	for (p = &init_task ; (p = next_task(p)) != &init_task ; )

/*
 * Careful: do_each_thread/while_each_thread is a double loop so
 *          'break' will not work as expected - use goto instead.
 */
#define do_each_thread(g, t) \
	for (g = t = &init_task ; (g = t = next_task(g)) != &init_task ; ) do

#define while_each_thread(g, t) \
	while ((t = next_thread(t)) != g)
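
/*
 * Iteration sketch (illustrative); note the goto - a plain 'break'
 * would only leave the inner loop:
 *
 *	do_each_thread(g, t) {
 *		if (t == target)
 *			goto out;
 *	} while_each_thread(g, t);
 *	...
 * out:
 *	...
 */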

/* de_thread depends on thread_group_leader not being a pid-based check */
#define thread_group_leader(p)	(p == p->group_leader)

/* Due to the insanities of de_thread it is possible for a process
 * to have the pid of the thread group leader without actually being
 * the thread group leader.  For iteration through the pids in proc
 * all we care about is that we have a task with the appropriate
 * pid; we don't actually care if we have the right task.
 */
static inline int has_group_leader_pid(struct task_struct *p)
{
	return p->pid == p->tgid;
}

static inline
int same_thread_group(struct task_struct *p1, struct task_struct *p2)
{
	return p1->tgid == p2->tgid;
}

static inline struct task_struct *next_thread(const struct task_struct *p)
{
	return list_entry(rcu_dereference(p->thread_group.next),
			  struct task_struct, thread_group);
}

static inline int thread_group_empty(struct task_struct *p)
{
	return list_empty(&p->thread_group);
}

#define delay_group_leader(p) \
		(thread_group_leader(p) && !thread_group_empty(p))

/*
 * Protects ->fs, ->files, ->mm, ->group_info, ->comm, keyring
 * subscriptions and synchronises with wait4().  Also used in procfs.  Also
 * pins the final release of task.io_context.  Also protects ->cpuset and
 * ->cgroup.subsys[].
 *
 * Nests both inside and outside of read_lock(&tasklist_lock).
 * It must not be nested with write_lock_irq(&tasklist_lock),
 * neither inside nor outside.
 */
static inline void task_lock(struct task_struct *p)
{
	spin_lock(&p->alloc_lock);
}

static inline void task_unlock(struct task_struct *p)
{
	spin_unlock(&p->alloc_lock);
}
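
/*
 * Canonical pattern (sketch) for the fields listed above, here reading
 * another task's ->comm stably into a caller-supplied buffer of at
 * least sizeof(p->comm) bytes:
 *
 *	task_lock(p);
 *	strncpy(buf, p->comm, sizeof(p->comm));
 *	task_unlock(p);
 */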

extern struct sighand_struct *lock_task_sighand(struct task_struct *tsk,
							unsigned long *flags);

static inline void unlock_task_sighand(struct task_struct *tsk,
						unsigned long *flags)
{
	spin_unlock_irqrestore(&tsk->sighand->siglock, *flags);
}
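
/*
 * lock_task_sighand() returns NULL if the task's sighand is already
 * being released; a typical caller (sketch):
 *
 *	struct sighand_struct *sighand;
 *	unsigned long flags;
 *
 *	sighand = lock_task_sighand(tsk, &flags);
 *	if (sighand) {
 *		...
 *		unlock_task_sighand(tsk, &flags);
 *	}
 */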

#ifndef __HAVE_THREAD_FUNCTIONS

#define task_thread_info(task)	((struct thread_info *)(task)->stack)
#define task_stack_page(task)	((task)->stack)

static inline void setup_thread_stack(struct task_struct *p, struct task_struct *org)
{
	*task_thread_info(p) = *task_thread_info(org);
	task_thread_info(p)->task = p;
}

static inline unsigned long *end_of_stack(struct task_struct *p)
{
	return (unsigned long *)(task_thread_info(p) + 1);
}

#endif

/* set thread flags in other task's structures
 * - see asm/thread_info.h for TIF_xxxx flags available
 */
static inline void set_tsk_thread_flag(struct task_struct *tsk, int flag)
{
	set_ti_thread_flag(task_thread_info(tsk), flag);
}

static inline void clear_tsk_thread_flag(struct task_struct *tsk, int flag)
{
	clear_ti_thread_flag(task_thread_info(tsk), flag);
}

static inline int test_and_set_tsk_thread_flag(struct task_struct *tsk, int flag)
{
	return test_and_set_ti_thread_flag(task_thread_info(tsk), flag);
}

static inline int test_and_clear_tsk_thread_flag(struct task_struct *tsk, int flag)
{
	return test_and_clear_ti_thread_flag(task_thread_info(tsk), flag);
}

static inline int test_tsk_thread_flag(struct task_struct *tsk, int flag)
{
	return test_ti_thread_flag(task_thread_info(tsk), flag);
}

static inline void set_tsk_need_resched(struct task_struct *tsk)
{
	set_tsk_thread_flag(tsk,TIF_NEED_RESCHED);
}

static inline void clear_tsk_need_resched(struct task_struct *tsk)
{
	clear_tsk_thread_flag(tsk,TIF_NEED_RESCHED);
}

static inline int signal_pending(struct task_struct *p)
{
	return unlikely(test_tsk_thread_flag(p,TIF_SIGPENDING));
}

static inline int need_resched(void)
{
	return unlikely(test_thread_flag(TIF_NEED_RESCHED));
}

/*
 * cond_resched() and cond_resched_lock(): latency reduction via
 * explicit rescheduling in places that are safe.  The return
 * value indicates whether a reschedule was actually done.
 * cond_resched_lock() will drop the spinlock before scheduling,
 * cond_resched_softirq() will enable bhs before scheduling.
 */
#ifdef CONFIG_PREEMPT
static inline int cond_resched(void)
{
	return 0;
}
#else
extern int _cond_resched(void);
static inline int cond_resched(void)
{
	return _cond_resched();
}
#endif
extern int cond_resched_lock(spinlock_t * lock);
extern int cond_resched_softirq(void);

/*
 * Does a critical section need to be broken due to another
 * task waiting?:
 */
#if defined(CONFIG_PREEMPT) && defined(CONFIG_SMP)
# define need_lockbreak(lock) ((lock)->break_lock)
#else
# define need_lockbreak(lock) 0
#endif

/*
 * Does a critical section need to be broken due to another
 * task waiting or preemption being signalled:
 */
static inline int lock_need_resched(spinlock_t *lock)
{
	if (need_lockbreak(lock) || need_resched())
		return 1;
	return 0;
}
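
/*
 * Example (sketch): a long scan under a spinlock can bound latency by
 * yielding at safe points.  more_work()/do_one_step() are placeholders:
 *
 *	spin_lock(&lock);
 *	while (more_work()) {
 *		do_one_step();
 *		cond_resched_lock(&lock);
 *	}
 *	spin_unlock(&lock);
 */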

/*
 * Reevaluate whether the task has signals pending delivery.
 * Wake the task if so.
 * This is required every time the blocked sigset_t changes.
 * Callers must hold sighand->siglock.
 */
extern void recalc_sigpending_and_wake(struct task_struct *t);
extern void recalc_sigpending(void);

extern void signal_wake_up(struct task_struct *t, int resume_stopped);

/*
 * Wrappers for p->thread_info->cpu access. No-op on UP.
 */
#ifdef CONFIG_SMP

static inline unsigned int task_cpu(const struct task_struct *p)
{
	return task_thread_info(p)->cpu;
}

extern void set_task_cpu(struct task_struct *p, unsigned int cpu);

#else

static inline unsigned int task_cpu(const struct task_struct *p)
{
	return 0;
}

static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
{
}

#endif /* CONFIG_SMP */

#ifdef HAVE_ARCH_PICK_MMAP_LAYOUT
extern void arch_pick_mmap_layout(struct mm_struct *mm);
#else
static inline void arch_pick_mmap_layout(struct mm_struct *mm)
{
	mm->mmap_base = TASK_UNMAPPED_BASE;
	mm->get_unmapped_area = arch_get_unmapped_area;
	mm->unmap_area = arch_unmap_area;
}
#endif

extern long sched_setaffinity(pid_t pid, cpumask_t new_mask);
extern long sched_getaffinity(pid_t pid, cpumask_t *mask);

extern int sched_mc_power_savings, sched_smt_power_savings;

extern void normalize_rt_tasks(void);

#ifdef CONFIG_FAIR_GROUP_SCHED

extern struct task_group init_task_group;

extern struct task_group *sched_create_group(void);
extern void sched_destroy_group(struct task_group *tg);
extern void sched_move_task(struct task_struct *tsk);
extern int sched_group_set_shares(struct task_group *tg, unsigned long shares);
extern unsigned long sched_group_shares(struct task_group *tg);

#endif
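
/*
 * Lifecycle sketch (illustrative, error handling abbreviated):
 *
 *	struct task_group *tg = sched_create_group();
 *
 *	if (!IS_ERR(tg)) {
 *		sched_group_set_shares(tg, 2048);
 *		... tasks join via sched_move_task() ...
 *		sched_destroy_group(tg);
 *	}
 */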

#ifdef CONFIG_TASK_XACCT
static inline void add_rchar(struct task_struct *tsk, ssize_t amt)
{
	tsk->rchar += amt;
}

static inline void add_wchar(struct task_struct *tsk, ssize_t amt)
{
	tsk->wchar += amt;
}

static inline void inc_syscr(struct task_struct *tsk)
{
	tsk->syscr++;
}

static inline void inc_syscw(struct task_struct *tsk)
{
	tsk->syscw++;
}
#else
static inline void add_rchar(struct task_struct *tsk, ssize_t amt)
{
}

static inline void add_wchar(struct task_struct *tsk, ssize_t amt)
{
}

static inline void inc_syscr(struct task_struct *tsk)
{
}

static inline void inc_syscw(struct task_struct *tsk)
{
}
#endif
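
/*
 * Accounting sketch (illustrative), mirroring the read path: charge
 * the transferred bytes and count the syscall.  Both calls become
 * no-ops when CONFIG_TASK_XACCT is off:
 *
 *	add_rchar(current, bytes_read);
 *	inc_syscr(current);
 */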

#ifdef CONFIG_SMP
void migration_init(void);
#else
static inline void migration_init(void)
{
}
#endif

#endif /* __KERNEL__ */

#endif