#ifndef _LINUX_SCHED_H
#define _LINUX_SCHED_H

/*
 * cloning flags:
 */
#define CSIGNAL		0x000000ff	/* signal mask to be sent at exit */
#define CLONE_VM	0x00000100	/* set if VM shared between processes */
#define CLONE_FS	0x00000200	/* set if fs info shared between processes */
#define CLONE_FILES	0x00000400	/* set if open files shared between processes */
#define CLONE_SIGHAND	0x00000800	/* set if signal handlers and blocked signals shared */
#define CLONE_PTRACE	0x00002000	/* set if we want to let tracing continue on the child too */
#define CLONE_VFORK	0x00004000	/* set if the parent wants the child to wake it up on mm_release */
#define CLONE_PARENT	0x00008000	/* set if we want to have the same parent as the cloner */
#define CLONE_THREAD	0x00010000	/* Same thread group? */
#define CLONE_NEWNS	0x00020000	/* New namespace group? */
#define CLONE_SYSVSEM	0x00040000	/* share system V SEM_UNDO semantics */
#define CLONE_SETTLS	0x00080000	/* create a new TLS for the child */
#define CLONE_PARENT_SETTID	0x00100000	/* set the TID in the parent */
#define CLONE_CHILD_CLEARTID	0x00200000	/* clear the TID in the child */
#define CLONE_DETACHED		0x00400000	/* Unused, ignored */
#define CLONE_UNTRACED		0x00800000	/* set if the tracing process can't force CLONE_PTRACE on this clone */
#define CLONE_CHILD_SETTID	0x01000000	/* set the TID in the child */
#define CLONE_STOPPED		0x02000000	/* Start in stopped state */
#define CLONE_NEWUTS		0x04000000	/* New utsname group? */
#define CLONE_NEWIPC		0x08000000	/* New ipcs */
#define CLONE_NEWUSER		0x10000000	/* New user namespace */
#define CLONE_NEWPID		0x20000000	/* New pid namespace */
#define CLONE_NEWNET		0x40000000	/* New network namespace */
#define CLONE_IO		0x80000000	/* Clone io context */

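/*
 * Illustrative sketch (not part of this header): a thread-style clone
 * combines the sharing flags above; roughly the set NPTL passes to
 * clone() for pthread_create() looks like:
 *
 *	clone_flags = CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND |
 *		      CLONE_THREAD | CLONE_SYSVSEM | CLONE_SETTLS |
 *		      CLONE_PARENT_SETTID | CLONE_CHILD_CLEARTID;
 */
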
/*
 * Scheduling policies
 */
#define SCHED_NORMAL		0
#define SCHED_FIFO		1
#define SCHED_RR		2
#define SCHED_BATCH		3
/* SCHED_ISO: reserved but not implemented yet */
#define SCHED_IDLE		5

#ifdef __KERNEL__

struct sched_param {
	int sched_priority;
};

#include <asm/param.h>	/* for HZ */

#include <linux/capability.h>
#include <linux/threads.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/timex.h>
#include <linux/jiffies.h>
#include <linux/rbtree.h>
#include <linux/thread_info.h>
#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/nodemask.h>
#include <linux/mm_types.h>

#include <asm/system.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include <asm/cputime.h>

#include <linux/smp.h>
#include <linux/sem.h>
#include <linux/signal.h>
#include <linux/path.h>
#include <linux/compiler.h>
#include <linux/completion.h>
#include <linux/pid.h>
#include <linux/percpu.h>
#include <linux/topology.h>
#include <linux/proportions.h>
#include <linux/seccomp.h>
#include <linux/rcupdate.h>
#include <linux/rculist.h>
#include <linux/rtmutex.h>

#include <linux/time.h>
#include <linux/param.h>
#include <linux/resource.h>
#include <linux/timer.h>
#include <linux/hrtimer.h>
#include <linux/task_io_accounting.h>
#include <linux/kobject.h>
#include <linux/latencytop.h>
#include <linux/cred.h>

#include <asm/processor.h>

struct mem_cgroup;
struct exec_domain;
struct futex_pi_state;
struct robust_list_head;
struct bio;
struct fs_struct;
struct bts_context;
struct perf_counter_context;

/*
 * List of flags we want to share for kernel threads,
 * if only because they are not used by them anyway.
 */
#define CLONE_KERNEL	(CLONE_FS | CLONE_FILES | CLONE_SIGHAND)

/*
 * These are the constants used to fake the fixed-point load-average
 * counting. Some notes:
 *  - 11 bit fractions expand to 22 bits by the multiplies: this gives
 *    a load-average precision of 10 bits integer + 11 bits fractional
 *  - if you want to count load-averages more often, you need more
 *    precision, or rounding will get you. With 2-second counting freq,
 *    the EXP_n values would be 1981, 2034 and 2043 if still using only
 *    11 bit fractions.
 */
extern unsigned long avenrun[];		/* Load averages */
extern void get_avenrun(unsigned long *loads, unsigned long offset, int shift);

#define FSHIFT		11		/* nr of bits of precision */
#define FIXED_1		(1<<FSHIFT)	/* 1.0 as fixed-point */
#define LOAD_FREQ	(5*HZ+1)	/* 5 sec intervals */
#define EXP_1		1884		/* 1/exp(5sec/1min) as fixed-point */
#define EXP_5		2014		/* 1/exp(5sec/5min) */
#define EXP_15		2037		/* 1/exp(5sec/15min) */

#define CALC_LOAD(load,exp,n) \
	load *= exp; \
	load += n*(FIXED_1-exp); \
	load >>= FSHIFT;

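/*
 * Illustrative example (not part of this header): one CALC_LOAD step in
 * isolation.  With FIXED_1 == 2048, an instantaneous load of 1.0 is
 * n == 2048; starting from load == 0, a single 5-second tick gives
 *
 *	load = (0 * EXP_1 + 2048 * (2048 - 1884)) >> 11 == 164,
 *
 * i.e. the 1-minute average moves to 164/2048 ~= 0.08.
 */
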
extern unsigned long total_forks;
extern int nr_threads;
DECLARE_PER_CPU(unsigned long, process_counts);
extern int nr_processes(void);
extern unsigned long nr_running(void);
extern unsigned long nr_uninterruptible(void);
extern unsigned long nr_iowait(void);
extern void calc_global_load(void);
extern u64 cpu_nr_migrations(int cpu);

extern unsigned long get_parent_ip(unsigned long addr);

struct seq_file;
struct cfs_rq;
struct task_group;
#ifdef CONFIG_SCHED_DEBUG
extern void proc_sched_show_task(struct task_struct *p, struct seq_file *m);
extern void proc_sched_set_task(struct task_struct *p);
extern void
print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq);
#else
static inline void
proc_sched_show_task(struct task_struct *p, struct seq_file *m)
{
}
static inline void proc_sched_set_task(struct task_struct *p)
{
}
static inline void
print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
{
}
#endif

extern unsigned long long time_sync_thresh;

/*
 * Task state bitmask. NOTE! These bits are also
 * encoded in fs/proc/array.c: get_task_state().
 *
 * We have two separate sets of flags: task->state
 * is about runnability, while task->exit_state are
 * about the task exiting. Confusing, but this way
 * modifying one set can't modify the other one by
 * mistake.
 */
#define TASK_RUNNING		0
#define TASK_INTERRUPTIBLE	1
#define TASK_UNINTERRUPTIBLE	2
#define __TASK_STOPPED		4
#define __TASK_TRACED		8
/* in tsk->exit_state */
#define EXIT_ZOMBIE		16
#define EXIT_DEAD		32
/* in tsk->state again */
#define TASK_DEAD		64
#define TASK_WAKEKILL		128

/* Convenience macros for the sake of set_task_state */
#define TASK_KILLABLE		(TASK_WAKEKILL | TASK_UNINTERRUPTIBLE)
#define TASK_STOPPED		(TASK_WAKEKILL | __TASK_STOPPED)
#define TASK_TRACED		(TASK_WAKEKILL | __TASK_TRACED)

/* Convenience macros for the sake of wake_up */
#define TASK_NORMAL		(TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE)
#define TASK_ALL		(TASK_NORMAL | __TASK_STOPPED | __TASK_TRACED)

/* get_task_state() */
#define TASK_REPORT		(TASK_RUNNING | TASK_INTERRUPTIBLE | \
				 TASK_UNINTERRUPTIBLE | __TASK_STOPPED | \
				 __TASK_TRACED)

#define task_is_traced(task)	((task->state & __TASK_TRACED) != 0)
#define task_is_stopped(task)	((task->state & __TASK_STOPPED) != 0)
#define task_is_stopped_or_traced(task)	\
			((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0)
#define task_contributes_to_load(task)	\
				((task->state & TASK_UNINTERRUPTIBLE) != 0 && \
				 (task->flags & PF_FROZEN) == 0)

#define __set_task_state(tsk, state_value)		\
	do { (tsk)->state = (state_value); } while (0)
#define set_task_state(tsk, state_value)		\
	set_mb((tsk)->state, (state_value))

/*
 * set_current_state() includes a barrier so that the write of current->state
 * is correctly serialised wrt the caller's subsequent test of whether to
 * actually sleep:
 *
 *	set_current_state(TASK_UNINTERRUPTIBLE);
 *	if (do_i_need_to_sleep())
 *		schedule();
 *
 * If the caller does not need such serialisation then use __set_current_state()
 */
#define __set_current_state(state_value)			\
	do { current->state = (state_value); } while (0)
#define set_current_state(state_value)		\
	set_mb(current->state, (state_value))

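/*
 * Illustrative sketch (hypothetical caller): the canonical sleep loop
 * built on the macros above, where `condition` stands for whatever
 * wakeup condition the caller checks:
 *
 *	for (;;) {
 *		set_current_state(TASK_INTERRUPTIBLE);
 *		if (condition)
 *			break;
 *		schedule();
 *	}
 *	__set_current_state(TASK_RUNNING);
 */
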
/* Task command name length */
#define TASK_COMM_LEN 16

#include <linux/spinlock.h>

/*
 * This serializes "schedule()" and also protects
 * the run-queue from deletions/modifications (but
 * _adding_ to the beginning of the run-queue has
 * a separate lock).
 */
extern rwlock_t tasklist_lock;
extern spinlock_t mmlist_lock;

struct task_struct;

extern void sched_init(void);
extern void sched_init_smp(void);
extern asmlinkage void schedule_tail(struct task_struct *prev);
extern void init_idle(struct task_struct *idle, int cpu);
extern void init_idle_bootup_task(struct task_struct *idle);

extern int runqueue_is_locked(void);
extern void task_rq_unlock_wait(struct task_struct *p);

extern cpumask_var_t nohz_cpu_mask;
#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ)
extern int select_nohz_load_balancer(int cpu);
extern int get_nohz_load_balancer(void);
#else
static inline int select_nohz_load_balancer(int cpu)
{
	return 0;
}
#endif

/*
 * Only dump TASK_* tasks. (0 for all tasks)
 */
extern void show_state_filter(unsigned long state_filter);

static inline void show_state(void)
{
	show_state_filter(0);
}

extern void show_regs(struct pt_regs *);

/*
 * TASK is a pointer to the task whose backtrace we want to see (or NULL for current
 * task), SP is the stack pointer of the first frame that should be shown in the back
 * trace (or NULL if the entire call-chain of the task should be shown).
 */
extern void show_stack(struct task_struct *task, unsigned long *sp);

void io_schedule(void);
long io_schedule_timeout(long timeout);

extern void cpu_init (void);
extern void trap_init(void);
extern void update_process_times(int user);
extern void scheduler_tick(void);

extern void sched_show_task(struct task_struct *p);

#ifdef CONFIG_DETECT_SOFTLOCKUP
extern void softlockup_tick(void);
extern void touch_softlockup_watchdog(void);
extern void touch_all_softlockup_watchdogs(void);
extern int proc_dosoftlockup_thresh(struct ctl_table *table, int write,
				    struct file *filp, void __user *buffer,
				    size_t *lenp, loff_t *ppos);
extern unsigned int  softlockup_panic;
extern int softlockup_thresh;
#else
static inline void softlockup_tick(void)
{
}
static inline void touch_softlockup_watchdog(void)
{
}
static inline void touch_all_softlockup_watchdogs(void)
{
}
#endif

#ifdef CONFIG_DETECT_HUNG_TASK
extern unsigned int  sysctl_hung_task_panic;
extern unsigned long sysctl_hung_task_check_count;
extern unsigned long sysctl_hung_task_timeout_secs;
extern unsigned long sysctl_hung_task_warnings;
extern int proc_dohung_task_timeout_secs(struct ctl_table *table, int write,
					 struct file *filp, void __user *buffer,
					 size_t *lenp, loff_t *ppos);
#endif

/* Attach to any functions which should be ignored in wchan output. */
#define __sched		__attribute__((__section__(".sched.text")))

/* Linker adds these: start and end of __sched functions */
extern char __sched_text_start[], __sched_text_end[];

/* Is this address in the __sched functions? */
extern int in_sched_functions(unsigned long addr);

#define	MAX_SCHEDULE_TIMEOUT	LONG_MAX
extern signed long schedule_timeout(signed long timeout);
extern signed long schedule_timeout_interruptible(signed long timeout);
extern signed long schedule_timeout_killable(signed long timeout);
extern signed long schedule_timeout_uninterruptible(signed long timeout);
asmlinkage void __schedule(void);
asmlinkage void schedule(void);
extern int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner);

struct nsproxy;
struct user_namespace;

/* Maximum number of active map areas. This is a random (large) number. */
#define DEFAULT_MAX_MAP_COUNT	65536

extern int sysctl_max_map_count;

#include <linux/aio.h>

extern unsigned long
arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
		       unsigned long, unsigned long);
extern unsigned long
arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
			  unsigned long len, unsigned long pgoff,
			  unsigned long flags);
extern void arch_unmap_area(struct mm_struct *, unsigned long);
extern void arch_unmap_area_topdown(struct mm_struct *, unsigned long);

#if USE_SPLIT_PTLOCKS
/*
 * The mm counters are not protected by its page_table_lock,
 * so must be incremented atomically.
 */
#define set_mm_counter(mm, member, value) atomic_long_set(&(mm)->_##member, value)
#define get_mm_counter(mm, member) ((unsigned long)atomic_long_read(&(mm)->_##member))
#define add_mm_counter(mm, member, value) atomic_long_add(value, &(mm)->_##member)
#define inc_mm_counter(mm, member) atomic_long_inc(&(mm)->_##member)
#define dec_mm_counter(mm, member) atomic_long_dec(&(mm)->_##member)

#else  /* !USE_SPLIT_PTLOCKS */
/*
 * The mm counters are protected by its page_table_lock,
 * so can be incremented directly.
 */
#define set_mm_counter(mm, member, value) (mm)->_##member = (value)
#define get_mm_counter(mm, member) ((mm)->_##member)
#define add_mm_counter(mm, member, value) (mm)->_##member += (value)
#define inc_mm_counter(mm, member) (mm)->_##member++
#define dec_mm_counter(mm, member) (mm)->_##member--

#endif /* !USE_SPLIT_PTLOCKS */

#define get_mm_rss(mm)					\
	(get_mm_counter(mm, file_rss) + get_mm_counter(mm, anon_rss))
#define update_hiwater_rss(mm)	do {			\
	unsigned long _rss = get_mm_rss(mm);		\
	if ((mm)->hiwater_rss < _rss)			\
		(mm)->hiwater_rss = _rss;		\
} while (0)
#define update_hiwater_vm(mm)	do {			\
	if ((mm)->hiwater_vm < (mm)->total_vm)		\
		(mm)->hiwater_vm = (mm)->total_vm;	\
} while (0)

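/*
 * Illustrative sketch (hypothetical call site): typical anonymous-page
 * fault accounting built on the counter and hiwater macros above:
 *
 *	inc_mm_counter(mm, anon_rss);
 *	update_hiwater_rss(mm);
 */
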
static inline unsigned long get_mm_hiwater_rss(struct mm_struct *mm)
{
	return max(mm->hiwater_rss, get_mm_rss(mm));
}

static inline unsigned long get_mm_hiwater_vm(struct mm_struct *mm)
{
	return max(mm->hiwater_vm, mm->total_vm);
}

extern void set_dumpable(struct mm_struct *mm, int value);
extern int get_dumpable(struct mm_struct *mm);

/* mm flags */
/* dumpable bits */
#define MMF_DUMPABLE      0  /* core dump is permitted */
#define MMF_DUMP_SECURELY 1  /* core file is readable only by root */
#define MMF_DUMPABLE_BITS 2

/* coredump filter bits */
#define MMF_DUMP_ANON_PRIVATE	2
#define MMF_DUMP_ANON_SHARED	3
#define MMF_DUMP_MAPPED_PRIVATE	4
#define MMF_DUMP_MAPPED_SHARED	5
#define MMF_DUMP_ELF_HEADERS	6
#define MMF_DUMP_HUGETLB_PRIVATE 7
#define MMF_DUMP_HUGETLB_SHARED  8
#define MMF_DUMP_FILTER_SHIFT	MMF_DUMPABLE_BITS
#define MMF_DUMP_FILTER_BITS	7
#define MMF_DUMP_FILTER_MASK \
	(((1 << MMF_DUMP_FILTER_BITS) - 1) << MMF_DUMP_FILTER_SHIFT)
#define MMF_DUMP_FILTER_DEFAULT \
	((1 << MMF_DUMP_ANON_PRIVATE) |	(1 << MMF_DUMP_ANON_SHARED) |\
	 (1 << MMF_DUMP_HUGETLB_PRIVATE) | MMF_DUMP_MASK_DEFAULT_ELF)

#ifdef CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS
# define MMF_DUMP_MASK_DEFAULT_ELF	(1 << MMF_DUMP_ELF_HEADERS)
#else
# define MMF_DUMP_MASK_DEFAULT_ELF	0
#endif

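/*
 * Illustrative note: userspace sees these bits in
 * /proc/<pid>/coredump_filter shifted down by MMF_DUMP_FILTER_SHIFT,
 * so the default above reads as 0x23 (anon private, anon shared,
 * hugetlb private), or 0x33 with CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS.
 */
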
struct sighand_struct {
	atomic_t		count;
	struct k_sigaction	action[_NSIG];
	spinlock_t		siglock;
	wait_queue_head_t	signalfd_wqh;
};

struct pacct_struct {
	int			ac_flag;
	long			ac_exitcode;
	unsigned long		ac_mem;
	cputime_t		ac_utime, ac_stime;
	unsigned long		ac_minflt, ac_majflt;
};

/**
 * struct task_cputime - collected CPU time counts
 * @utime:		time spent in user mode, in &cputime_t units
 * @stime:		time spent in kernel mode, in &cputime_t units
 * @sum_exec_runtime:	total time spent on the CPU, in nanoseconds
 *
 * This structure groups together three kinds of CPU time that are
 * tracked for threads and thread groups.  Most things considering
 * CPU time want to group these counts together and treat all three
 * of them in parallel.
 */
struct task_cputime {
	cputime_t utime;
	cputime_t stime;
	unsigned long long sum_exec_runtime;
};
/* Alternate field names when used to cache expirations. */
#define prof_exp	stime
#define virt_exp	utime
#define sched_exp	sum_exec_runtime

#define INIT_CPUTIME	\
	(struct task_cputime) {					\
		.utime = cputime_zero,				\
		.stime = cputime_zero,				\
		.sum_exec_runtime = 0,				\
	}

/**
 * struct thread_group_cputimer - thread group interval timer counts
 * @cputime:		thread group interval timers.
 * @running:		non-zero when there are timers running and
 * 			@cputime receives updates.
 * @lock:		lock for fields in this struct.
 *
 * This structure contains the version of task_cputime, above, that is
 * used for thread group CPU timer calculations.
 */
struct thread_group_cputimer {
	struct task_cputime cputime;
	int running;
	spinlock_t lock;
};

/*
 * NOTE! "signal_struct" does not have its own
 * locking, because a shared signal_struct always
 * implies a shared sighand_struct, so locking
 * sighand_struct is always a proper superset of
 * the locking of signal_struct.
 */
struct signal_struct {
	atomic_t		count;
	atomic_t		live;

	wait_queue_head_t	wait_chldexit;	/* for wait4() */

	/* current thread group signal load-balancing target: */
	struct task_struct	*curr_target;

	/* shared signal handling: */
	struct sigpending	shared_pending;

	/* thread group exit support */
	int			group_exit_code;
	/* overloaded:
	 * - notify group_exit_task when ->count is equal to notify_count
	 * - everyone except group_exit_task is stopped during signal delivery
	 *   of fatal signals, group_exit_task processes the signal.
	 */
	int			notify_count;
	struct task_struct	*group_exit_task;

	/* thread group stop support, overloads group_exit_code too */
	int			group_stop_count;
	unsigned int		flags; /* see SIGNAL_* flags below */

	/* POSIX.1b Interval Timers */
	struct list_head posix_timers;

	/* ITIMER_REAL timer for the process */
	struct hrtimer real_timer;
	struct pid *leader_pid;
	ktime_t it_real_incr;

	/* ITIMER_PROF and ITIMER_VIRTUAL timers for the process */
	cputime_t it_prof_expires, it_virt_expires;
	cputime_t it_prof_incr, it_virt_incr;

	/*
	 * Thread group totals for process CPU timers.
	 * See thread_group_cputimer(), et al, for details.
	 */
	struct thread_group_cputimer cputimer;

	/* Earliest-expiration cache. */
	struct task_cputime cputime_expires;

	struct list_head cpu_timers[3];

	struct pid *tty_old_pgrp;

	/* boolean value for session group leader */
	int leader;

	struct tty_struct *tty; /* NULL if no tty */

	/*
	 * Cumulative resource counters for dead threads in the group,
	 * and for reaped dead child processes forked by this group.
	 * Live threads maintain their own counters and add to these
	 * in __exit_signal, except for the group leader.
	 */
	cputime_t utime, stime, cutime, cstime;
	cputime_t gtime;
	cputime_t cgtime;
	unsigned long nvcsw, nivcsw, cnvcsw, cnivcsw;
	unsigned long min_flt, maj_flt, cmin_flt, cmaj_flt;
	unsigned long inblock, oublock, cinblock, coublock;
	struct task_io_accounting ioac;

	/*
	 * Cumulative ns of scheduled CPU time for dead threads in the
	 * group, not including a zombie group leader. (This only differs
	 * from jiffies_to_ns(utime + stime) if sched_clock uses something
	 * other than jiffies.)
	 */
	unsigned long long sum_sched_runtime;

	/*
	 * We don't bother to synchronize most readers of this at all,
	 * because there is no reader checking a limit that actually needs
	 * to get both rlim_cur and rlim_max atomically, and either one
	 * alone is a single word that can safely be read normally.
	 * getrlimit/setrlimit use task_lock(current->group_leader) to
	 * protect this instead of the siglock, because they really
	 * have no need to disable irqs.
	 */
	struct rlimit rlim[RLIM_NLIMITS];

#ifdef CONFIG_BSD_PROCESS_ACCT
	struct pacct_struct pacct;	/* per-process accounting information */
#endif
#ifdef CONFIG_TASKSTATS
	struct taskstats *stats;
#endif
#ifdef CONFIG_AUDIT
	unsigned audit_tty;
	struct tty_audit_buf *tty_audit_buf;
#endif
};

/* Context switch must be unlocked if interrupts are to be enabled */
#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
# define __ARCH_WANT_UNLOCKED_CTXSW
#endif

/*
 * Bits in flags field of signal_struct.
 */
#define SIGNAL_STOP_STOPPED	0x00000001 /* job control stop in effect */
#define SIGNAL_STOP_DEQUEUED	0x00000002 /* stop signal dequeued */
#define SIGNAL_STOP_CONTINUED	0x00000004 /* SIGCONT since WCONTINUED reap */
#define SIGNAL_GROUP_EXIT	0x00000008 /* group exit in progress */
/*
 * Pending notifications to parent.
 */
#define SIGNAL_CLD_STOPPED	0x00000010
#define SIGNAL_CLD_CONTINUED	0x00000020
#define SIGNAL_CLD_MASK		(SIGNAL_CLD_STOPPED|SIGNAL_CLD_CONTINUED)

#define SIGNAL_UNKILLABLE	0x00000040 /* for init: ignore fatal signals */

/* If true, all threads except ->group_exit_task have pending SIGKILL */
static inline int signal_group_exit(const struct signal_struct *sig)
{
	return	(sig->flags & SIGNAL_GROUP_EXIT) ||
		(sig->group_exit_task != NULL);
}

/*
 * Some day this will be a full-fledged user tracking system..
 */
struct user_struct {
	atomic_t __count;	/* reference count */
	atomic_t processes;	/* How many processes does this user have? */
	atomic_t files;		/* How many open files does this user have? */
	atomic_t sigpending;	/* How many pending signals does this user have? */
#ifdef CONFIG_INOTIFY_USER
	atomic_t inotify_watches; /* How many inotify watches does this user have? */
	atomic_t inotify_devs;	/* How many inotify devs does this user have opened? */
#endif
#ifdef CONFIG_EPOLL
	atomic_t epoll_watches;	/* The number of file descriptors currently watched */
#endif
#ifdef CONFIG_POSIX_MQUEUE
	/* protected by mq_lock	*/
	unsigned long mq_bytes;	/* How many bytes can be allocated to mqueue? */
#endif
	unsigned long locked_shm; /* How many pages of mlocked shm ? */

#ifdef CONFIG_KEYS
	struct key *uid_keyring;	/* UID specific keyring */
	struct key *session_keyring;	/* UID's default session keyring */
#endif

	/* Hash table maintenance information */
	struct hlist_node uidhash_node;
	uid_t uid;
	struct user_namespace *user_ns;

#ifdef CONFIG_USER_SCHED
	struct task_group *tg;
#ifdef CONFIG_SYSFS
	struct kobject kobj;
	struct work_struct work;
#endif
#endif

#ifdef CONFIG_PERF_COUNTERS
	atomic_long_t locked_vm;
#endif
};

extern int uids_sysfs_init(void);

extern struct user_struct *find_user(uid_t);

extern struct user_struct root_user;
#define INIT_USER (&root_user)

struct backing_dev_info;
struct reclaim_state;

#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
struct sched_info {
	/* cumulative counters */
	unsigned long pcount;	      /* # of times run on this cpu */
	unsigned long long run_delay; /* time spent waiting on a runqueue */

	/* timestamps */
	unsigned long long last_arrival,/* when we last ran on a cpu */
			   last_queued;	/* when we were last queued to run */
#ifdef CONFIG_SCHEDSTATS
	/* BKL stats */
	unsigned int bkl_count;
#endif
};
#endif /* defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT) */

#ifdef CONFIG_TASK_DELAY_ACCT
struct task_delay_info {
	spinlock_t	lock;
	unsigned int	flags;	/* Private per-task flags */

	/* For each stat XXX, add following, aligned appropriately
	 *
	 * struct timespec XXX_start, XXX_end;
	 * u64 XXX_delay;
	 * u32 XXX_count;
	 *
	 * Atomicity of updates to XXX_delay, XXX_count protected by
	 * single lock above (split into XXX_lock if contention is an issue).
	 */

	/*
	 * XXX_count is incremented on every XXX operation, the delay
	 * associated with the operation is added to XXX_delay.
	 * XXX_delay contains the accumulated delay time in nanoseconds.
	 */
	struct timespec blkio_start, blkio_end;	/* Shared by blkio, swapin */
	u64 blkio_delay;	/* wait for sync block io completion */
	u64 swapin_delay;	/* wait for swapin block io completion */
	u32 blkio_count;	/* total count of the number of sync block */
				/* io operations performed */
	u32 swapin_count;	/* total count of the number of swapin block */
				/* io operations performed */

	struct timespec freepages_start, freepages_end;
	u64 freepages_delay;	/* wait for memory reclaim */
	u32 freepages_count;	/* total count of memory reclaim */
};
#endif	/* CONFIG_TASK_DELAY_ACCT */

static inline int sched_info_on(void)
{
#ifdef CONFIG_SCHEDSTATS
	return 1;
#elif defined(CONFIG_TASK_DELAY_ACCT)
	extern int delayacct_on;
	return delayacct_on;
#else
	return 0;
#endif
}

enum cpu_idle_type {
	CPU_IDLE,
	CPU_NOT_IDLE,
	CPU_NEWLY_IDLE,
	CPU_MAX_IDLE_TYPES
};

/*
 * sched-domains (multiprocessor balancing) declarations:
 */

/*
 * Increase resolution of nice-level calculations:
 */
#define SCHED_LOAD_SHIFT	10
#define SCHED_LOAD_SCALE	(1L << SCHED_LOAD_SHIFT)

#define SCHED_LOAD_SCALE_FUZZ	SCHED_LOAD_SCALE

#ifdef CONFIG_SMP
#define SD_LOAD_BALANCE		1	/* Do load balancing on this domain. */
#define SD_BALANCE_NEWIDLE	2	/* Balance when about to become idle */
#define SD_BALANCE_EXEC		4	/* Balance on exec */
#define SD_BALANCE_FORK		8	/* Balance on fork, clone */
#define SD_WAKE_IDLE		16	/* Wake to idle CPU on task wakeup */
#define SD_WAKE_AFFINE		32	/* Wake task to waking CPU */
#define SD_WAKE_BALANCE		64	/* Perform balancing at task wakeup */
#define SD_SHARE_CPUPOWER	128	/* Domain members share cpu power */
#define SD_POWERSAVINGS_BALANCE	256	/* Balance for power savings */
#define SD_SHARE_PKG_RESOURCES	512	/* Domain members share cpu pkg resources */
#define SD_SERIALIZE		1024	/* Only a single load balancing instance */
#define SD_WAKE_IDLE_FAR	2048	/* Gain latency sacrificing cache hit */

enum powersavings_balance_level {
	POWERSAVINGS_BALANCE_NONE = 0,  /* No power saving load balance */
	POWERSAVINGS_BALANCE_BASIC,	/* Fill one thread/core/package
					 * first for long running threads
					 */
	POWERSAVINGS_BALANCE_WAKEUP,	/* Also bias task wakeups to semi-idle
					 * cpu package for power savings
					 */
	MAX_POWERSAVINGS_BALANCE_LEVELS
};

extern int sched_mc_power_savings, sched_smt_power_savings;

static inline int sd_balance_for_mc_power(void)
{
	if (sched_smt_power_savings)
		return SD_POWERSAVINGS_BALANCE;

	return 0;
}

static inline int sd_balance_for_package_power(void)
{
	if (sched_mc_power_savings | sched_smt_power_savings)
		return SD_POWERSAVINGS_BALANCE;

	return 0;
}

/*
 * Optimise SD flags for power savings:
 * SD_BALANCE_NEWIDLE helps aggressive task consolidation and power savings.
 * Keep default SD flags if sched_{smt,mc}_power_saving=0
 */

static inline int sd_power_saving_flags(void)
{
	if (sched_mc_power_savings | sched_smt_power_savings)
		return SD_BALANCE_NEWIDLE;

	return 0;
}

struct sched_group {
	struct sched_group *next;	/* Must be a circular list */

	/*
	 * CPU power of this group, SCHED_LOAD_SCALE being max power for a
	 * single CPU. This is read only (except for setup, hotplug CPU).
	 * Note: Never change cpu_power without recomputing its reciprocal.
	 */
	unsigned int __cpu_power;
	/*
	 * reciprocal value of cpu_power to avoid expensive divides
	 * (see include/linux/reciprocal_div.h)
	 */
	u32 reciprocal_cpu_power;

	/*
	 * The CPUs this group covers.
	 *
	 * NOTE: this field is variable length. (Allocated dynamically
	 * by attaching extra space to the end of the structure,
	 * depending on how many CPUs the kernel has booted up with)
	 *
	 * It can also be embedded into static data structures at build
	 * time. (See 'struct static_sched_group' in kernel/sched.c)
	 */
	unsigned long cpumask[0];
};

static inline struct cpumask *sched_group_cpus(struct sched_group *sg)
{
	return to_cpumask(sg->cpumask);
}

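/*
 * Illustrative sketch (hypothetical `group` pointer): walking the CPUs
 * of a group via the accessor above, using the <linux/cpumask.h>
 * iterator:
 *
 *	int cpu;
 *	for_each_cpu(cpu, sched_group_cpus(group))
 *		pr_debug("group contains cpu %d\n", cpu);
 */
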
enum sched_domain_level {
	SD_LV_NONE = 0,
	SD_LV_SIBLING,
	SD_LV_MC,
	SD_LV_CPU,
	SD_LV_NODE,
	SD_LV_ALLNODES,
	SD_LV_MAX
};

struct sched_domain_attr {
	int relax_domain_level;
};

#define SD_ATTR_INIT	(struct sched_domain_attr) {	\
	.relax_domain_level = -1,			\
}

struct sched_domain {
	/* These fields must be setup */
	struct sched_domain *parent;	/* top domain must be null terminated */
	struct sched_domain *child;	/* bottom domain must be null terminated */
	struct sched_group *groups;	/* the balancing groups of the domain */
	unsigned long min_interval;	/* Minimum balance interval ms */
	unsigned long max_interval;	/* Maximum balance interval ms */
	unsigned int busy_factor;	/* less balancing by factor if busy */
	unsigned int imbalance_pct;	/* No balance until over watermark */
	unsigned int cache_nice_tries;	/* Leave cache hot tasks for # tries */
	unsigned int busy_idx;
	unsigned int idle_idx;
	unsigned int newidle_idx;
	unsigned int wake_idx;
	unsigned int forkexec_idx;
	int flags;			/* See SD_* */
	enum sched_domain_level level;

	/* Runtime fields. */
	unsigned long last_balance;	/* init to jiffies. units in jiffies */
	unsigned int balance_interval;	/* initialise to 1. units in ms. */
	unsigned int nr_balance_failed; /* initialise to 0 */

	u64 last_update;

#ifdef CONFIG_SCHEDSTATS
	/* load_balance() stats */
	unsigned int lb_count[CPU_MAX_IDLE_TYPES];
	unsigned int lb_failed[CPU_MAX_IDLE_TYPES];
	unsigned int lb_balanced[CPU_MAX_IDLE_TYPES];
	unsigned int lb_imbalance[CPU_MAX_IDLE_TYPES];
	unsigned int lb_gained[CPU_MAX_IDLE_TYPES];
	unsigned int lb_hot_gained[CPU_MAX_IDLE_TYPES];
	unsigned int lb_nobusyg[CPU_MAX_IDLE_TYPES];
	unsigned int lb_nobusyq[CPU_MAX_IDLE_TYPES];

	/* Active load balancing */
	unsigned int alb_count;
	unsigned int alb_failed;
	unsigned int alb_pushed;

	/* SD_BALANCE_EXEC stats */
	unsigned int sbe_count;
	unsigned int sbe_balanced;
	unsigned int sbe_pushed;

	/* SD_BALANCE_FORK stats */
	unsigned int sbf_count;
	unsigned int sbf_balanced;
	unsigned int sbf_pushed;

	/* try_to_wake_up() stats */
	unsigned int ttwu_wake_remote;
	unsigned int ttwu_move_affine;
	unsigned int ttwu_move_balance;
#endif
#ifdef CONFIG_SCHED_DEBUG
	char *name;
#endif

	/*
	 * Span of all CPUs in this domain.
	 *
	 * NOTE: this field is variable length. (Allocated dynamically
	 * by attaching extra space to the end of the structure,
	 * depending on how many CPUs the kernel has booted up with)
	 *
	 * It can also be embedded into static data structures at build
	 * time. (See 'struct static_sched_domain' in kernel/sched.c)
	 */
	unsigned long span[0];
};

static inline struct cpumask *sched_domain_span(struct sched_domain *sd)
{
	return to_cpumask(sd->span);
}

extern void partition_sched_domains(int ndoms_new, struct cpumask *doms_new,
				    struct sched_domain_attr *dattr_new);

/* Test a flag in parent sched domain */
static inline int test_sd_parent(struct sched_domain *sd, int flag)
{
	if (sd->parent && (sd->parent->flags & flag))
		return 1;

	return 0;
}

#else /* CONFIG_SMP */

struct sched_domain_attr;

static inline void
partition_sched_domains(int ndoms_new, struct cpumask *doms_new,
			struct sched_domain_attr *dattr_new)
{
}
#endif	/* !CONFIG_SMP */

struct io_context;			/* See blkdev.h */

#ifdef ARCH_HAS_PREFETCH_SWITCH_STACK
extern void prefetch_stack(struct task_struct *t);
#else
static inline void prefetch_stack(struct task_struct *t) { }
#endif

struct audit_context;		/* See audit.c */
struct mempolicy;
struct pipe_inode_info;
struct uts_namespace;

struct rq;
struct sched_domain;

struct sched_class {
	const struct sched_class *next;

	void (*enqueue_task) (struct rq *rq, struct task_struct *p, int wakeup);
	void (*dequeue_task) (struct rq *rq, struct task_struct *p, int sleep);
	void (*yield_task) (struct rq *rq);

	void (*check_preempt_curr) (struct rq *rq, struct task_struct *p, int sync);

	struct task_struct * (*pick_next_task) (struct rq *rq);
	void (*put_prev_task) (struct rq *rq, struct task_struct *p);

#ifdef CONFIG_SMP
	int  (*select_task_rq)(struct task_struct *p, int sync);

	unsigned long (*load_balance) (struct rq *this_rq, int this_cpu,
			struct rq *busiest, unsigned long max_load_move,
			struct sched_domain *sd, enum cpu_idle_type idle,
			int *all_pinned, int *this_best_prio);

	int (*move_one_task) (struct rq *this_rq, int this_cpu,
			      struct rq *busiest, struct sched_domain *sd,
			      enum cpu_idle_type idle);
	void (*pre_schedule) (struct rq *this_rq, struct task_struct *task);
	int (*needs_post_schedule) (struct rq *this_rq);
	void (*post_schedule) (struct rq *this_rq);
	void (*task_wake_up) (struct rq *this_rq, struct task_struct *task);

	void (*set_cpus_allowed)(struct task_struct *p,
				 const struct cpumask *newmask);

	void (*rq_online)(struct rq *rq);
	void (*rq_offline)(struct rq *rq);
#endif

	void (*set_curr_task) (struct rq *rq);
	void (*task_tick) (struct rq *rq, struct task_struct *p, int queued);
	void (*task_new) (struct rq *rq, struct task_struct *p);

	void (*switched_from) (struct rq *this_rq, struct task_struct *task,
			       int running);
	void (*switched_to) (struct rq *this_rq, struct task_struct *task,
			     int running);
	void (*prio_changed) (struct rq *this_rq, struct task_struct *task,
			     int oldprio, int running);

#ifdef CONFIG_FAIR_GROUP_SCHED
	void (*moved_group) (struct task_struct *p);
#endif
};

struct load_weight {
	unsigned long weight, inv_weight;
};

/*
 * CFS stats for a schedulable entity (task, task-group etc)
 *
 * Current field usage histogram:
 *
 *     4 se->block_start
 *     4 se->run_node
 *     4 se->sleep_start
 *     6 se->load.weight
 */
struct sched_entity {
	struct load_weight	load;		/* for load-balancing */
	struct rb_node		run_node;
	struct list_head	group_node;
	unsigned int		on_rq;

	u64			exec_start;
	u64			sum_exec_runtime;
	u64			vruntime;
	u64			prev_sum_exec_runtime;

	u64			last_wakeup;
	u64			avg_overlap;

	u64			nr_migrations;

	u64			start_runtime;
	u64			avg_wakeup;

#ifdef CONFIG_SCHEDSTATS
	u64			wait_start;
	u64			wait_max;
	u64			wait_count;
	u64			wait_sum;

	u64			sleep_start;
	u64			sleep_max;
	s64			sum_sleep_runtime;

	u64			block_start;
	u64			block_max;
	u64			exec_max;
	u64			slice_max;

	u64			nr_migrations_cold;
	u64			nr_failed_migrations_affine;
	u64			nr_failed_migrations_running;
	u64			nr_failed_migrations_hot;
	u64			nr_forced_migrations;
	u64			nr_forced2_migrations;

	u64			nr_wakeups;
	u64			nr_wakeups_sync;
	u64			nr_wakeups_migrate;
	u64			nr_wakeups_local;
	u64			nr_wakeups_remote;
	u64			nr_wakeups_affine;
	u64			nr_wakeups_affine_attempts;
	u64			nr_wakeups_passive;
	u64			nr_wakeups_idle;
#endif

#ifdef CONFIG_FAIR_GROUP_SCHED
	struct sched_entity	*parent;
	/* rq on which this entity is (to be) queued: */
	struct cfs_rq		*cfs_rq;
	/* rq "owned" by this entity/group: */
	struct cfs_rq		*my_q;
#endif
};

struct sched_rt_entity {
	struct list_head run_list;
	unsigned long timeout;
	unsigned int time_slice;
	int nr_cpus_allowed;

	struct sched_rt_entity *back;
#ifdef CONFIG_RT_GROUP_SCHED
	struct sched_rt_entity	*parent;
	/* rq on which this entity is (to be) queued: */
	struct rt_rq		*rt_rq;
	/* rq "owned" by this entity/group: */
	struct rt_rq		*my_q;
#endif
};

struct task_struct {
	volatile long state;	/* -1 unrunnable, 0 runnable, >0 stopped */
	void *stack;
	atomic_t usage;
	unsigned int flags;	/* per process flags, defined below */
	unsigned int ptrace;

	int lock_depth;		/* BKL lock depth */

#ifdef CONFIG_SMP
#ifdef __ARCH_WANT_UNLOCKED_CTXSW
	int oncpu;
#endif
#endif

	int prio, static_prio, normal_prio;
	unsigned int rt_priority;
	const struct sched_class *sched_class;
	struct sched_entity se;
	struct sched_rt_entity rt;

#ifdef CONFIG_PREEMPT_NOTIFIERS
	/* list of struct preempt_notifier: */
	struct hlist_head preempt_notifiers;
#endif

	/*
	 * fpu_counter contains the number of consecutive context switches
	 * during which the FPU is used. If this is over a threshold, the lazy
	 * FPU saving becomes unlazy to save the trap. This is an unsigned char
	 * so that after 256 times the counter wraps and the behavior turns
	 * lazy again; this is to deal with bursty apps that only use the FPU
	 * for a short time.
	 */
	unsigned char fpu_counter;
	s8 oomkilladj; /* OOM kill score adjustment (bit shift). */
#ifdef CONFIG_BLK_DEV_IO_TRACE
	unsigned int btrace_seq;
#endif

	unsigned int policy;
	cpumask_t cpus_allowed;

#ifdef CONFIG_PREEMPT_RCU
	int rcu_read_lock_nesting;
	int rcu_flipctr_idx;
#endif /* #ifdef CONFIG_PREEMPT_RCU */

#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
	struct sched_info sched_info;
#endif

	struct list_head tasks;
	struct plist_node pushable_tasks;

	struct mm_struct *mm, *active_mm;

/* task state */
	struct linux_binfmt *binfmt;
	int exit_state;
	int exit_code, exit_signal;
	int pdeath_signal;  /*  The signal sent when the parent dies  */
	/* ??? */
	unsigned int personality;
	unsigned did_exec:1;
	unsigned in_execve:1;	/* Tell the LSMs that the process is doing an
				 * execve */
	pid_t pid;
	pid_t tgid;

	/* Canary value for the -fstack-protector gcc feature */
	unsigned long stack_canary;

	/* 
	 * pointers to (original) parent process, youngest child, younger sibling,
	 * older sibling, respectively.  (p->father can be replaced with 
	 * p->real_parent->pid)
	 */
	struct task_struct *real_parent; /* real parent process */
	struct task_struct *parent; /* recipient of SIGCHLD, wait4() reports */
	/*
	 * children/sibling forms the list of my natural children
	 */
	struct list_head children;	/* list of my children */
	struct list_head sibling;	/* linkage in my parent's children list */
	struct task_struct *group_leader;	/* threadgroup leader */

	/*
	 * ptraced is the list of tasks this task is using ptrace on.
	 * This includes both natural children and PTRACE_ATTACH targets.
	 * p->ptrace_entry is p's link on the p->parent->ptraced list.
	 */
	struct list_head ptraced;
	struct list_head ptrace_entry;

	/*
	 * This is the tracer handle for the ptrace BTS extension.
	 * This field actually belongs to the ptracer task.
	 */
	struct bts_context *bts;

	/* PID/PID hash table linkage. */
	struct pid_link pids[PIDTYPE_MAX];
	struct list_head thread_group;

	struct completion *vfork_done;		/* for vfork() */
	int __user *set_child_tid;		/* CLONE_CHILD_SETTID */
	int __user *clear_child_tid;		/* CLONE_CHILD_CLEARTID */

	cputime_t utime, stime, utimescaled, stimescaled;
	cputime_t gtime;
	cputime_t prev_utime, prev_stime;
	unsigned long nvcsw, nivcsw; /* context switch counts */
	struct timespec start_time; 		/* monotonic time */
	struct timespec real_start_time;	/* boot based time */
/* mm fault and swap info: this can arguably be seen as either mm-specific or thread-specific */
	unsigned long min_flt, maj_flt;

	struct task_cputime cputime_expires;
	struct list_head cpu_timers[3];

/* process credentials */
	const struct cred *real_cred;	/* objective and real subjective task
					 * credentials (COW) */
	const struct cred *cred;	/* effective (overridable) subjective task
					 * credentials (COW) */
	struct mutex cred_guard_mutex;	/* guard against foreign influences on
					 * credential calculations
					 * (notably. ptrace) */

	char comm[TASK_COMM_LEN]; /* executable name excluding path
				     - access with [gs]et_task_comm (which lock
				       it with task_lock())
				     - initialized normally by flush_old_exec */
/* file system info */
	int link_count, total_link_count;
#ifdef CONFIG_SYSVIPC
/* ipc stuff */
	struct sysv_sem sysvsem;
#endif
#ifdef CONFIG_DETECT_HUNG_TASK
/* hung task detection */
	unsigned long last_switch_count;
#endif
/* CPU-specific state of this task */
	struct thread_struct thread;
/* filesystem information */
	struct fs_struct *fs;
/* open file information */
	struct files_struct *files;
/* namespaces */
	struct nsproxy *nsproxy;
/* signal handlers */
	struct signal_struct *signal;
	struct sighand_struct *sighand;

	sigset_t blocked, real_blocked;
	sigset_t saved_sigmask;	/* restored if set_restore_sigmask() was used */
	struct sigpending pending;

	unsigned long sas_ss_sp;
	size_t sas_ss_size;
	int (*notifier)(void *priv);
	void *notifier_data;
	sigset_t *notifier_mask;
	struct audit_context *audit_context;
#ifdef CONFIG_AUDITSYSCALL
	uid_t loginuid;
	unsigned int sessionid;
#endif
	seccomp_t seccomp;

/* Thread group tracking */
   	u32 parent_exec_id;
   	u32 self_exec_id;
/* Protection of (de-)allocation: mm, files, fs, tty, keyrings */
	spinlock_t alloc_lock;

#ifdef CONFIG_GENERIC_HARDIRQS
	/* IRQ handler threads */
	struct irqaction *irqaction;
#endif

	/* Protection of the PI data structures: */
	spinlock_t pi_lock;

#ifdef CONFIG_RT_MUTEXES
	/* PI waiters blocked on a rt_mutex held by this task */
	struct plist_head pi_waiters;
	/* Deadlock detection and priority inheritance handling */
	struct rt_mutex_waiter *pi_blocked_on;
#endif

#ifdef CONFIG_DEBUG_MUTEXES
	/* mutex deadlock detection */
	struct mutex_waiter *blocked_on;
#endif
#ifdef CONFIG_TRACE_IRQFLAGS
	unsigned int irq_events;
	int hardirqs_enabled;
	unsigned long hardirq_enable_ip;
	unsigned int hardirq_enable_event;
	unsigned long hardirq_disable_ip;
	unsigned int hardirq_disable_event;
	int softirqs_enabled;
	unsigned long softirq_disable_ip;
	unsigned int softirq_disable_event;
	unsigned long softirq_enable_ip;
	unsigned int softirq_enable_event;
	int hardirq_context;
	int softirq_context;
#endif
#ifdef CONFIG_LOCKDEP
# define MAX_LOCK_DEPTH 48UL
	u64 curr_chain_key;
	int lockdep_depth;
	unsigned int lockdep_recursion;
	struct held_lock held_locks[MAX_LOCK_DEPTH];
	gfp_t lockdep_reclaim_gfp;
#endif

/* journalling filesystem info */
	void *journal_info;

/* stacked block device info */
	struct bio *bio_list, **bio_tail;

/* VM state */
	struct reclaim_state *reclaim_state;

	struct backing_dev_info *backing_dev_info;

	struct io_context *io_context;

	unsigned long ptrace_message;
	siginfo_t *last_siginfo; /* For ptrace use.  */
	struct task_io_accounting ioac;
#if defined(CONFIG_TASK_XACCT)
	u64 acct_rss_mem1;	/* accumulated rss usage */
	u64 acct_vm_mem1;	/* accumulated virtual memory usage */
	cputime_t acct_timexpd;	/* stime + utime since last update */
#endif
#ifdef CONFIG_CPUSETS
	nodemask_t mems_allowed;
	int cpuset_mems_generation;
	int cpuset_mem_spread_rotor;
#endif
#ifdef CONFIG_CGROUPS
	/* Control Group info protected by css_set_lock */
	struct css_set *cgroups;
	/* cg_list protected by css_set_lock and tsk->alloc_lock */
	struct list_head cg_list;
#endif
#ifdef CONFIG_FUTEX
	struct robust_list_head __user *robust_list;
#ifdef CONFIG_COMPAT
	struct compat_robust_list_head __user *compat_robust_list;
#endif
	struct list_head pi_state_list;
	struct futex_pi_state *pi_state_cache;
#endif
#ifdef CONFIG_PERF_COUNTERS
	struct perf_counter_context *perf_counter_ctxp;
	struct mutex perf_counter_mutex;
	struct list_head perf_counter_list;
#endif
#ifdef CONFIG_NUMA
	struct mempolicy *mempolicy;
	short il_next;
#endif
	atomic_t fs_excl;	/* holding fs exclusive resources */
	struct rcu_head rcu;

	/*
	 * cache last used pipe for splice
	 */
	struct pipe_inode_info *splice_pipe;
#ifdef	CONFIG_TASK_DELAY_ACCT
	struct task_delay_info *delays;
#endif
#ifdef CONFIG_FAULT_INJECTION
	int make_it_fail;
#endif
	struct prop_local_single dirties;
#ifdef CONFIG_LATENCYTOP
	int latency_record_count;
	struct latency_record latency_record[LT_SAVECOUNT];
#endif
	/*
	 * time slack values; these are used to round up poll() and
	 * select() etc timeout values. These are in nanoseconds.
	 */
	unsigned long timer_slack_ns;
	unsigned long default_timer_slack_ns;

	struct list_head	*scm_work_list;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	/* Index of currently stored address in ret_stack */
	int curr_ret_stack;
	/* Stack of return addresses for return function tracing */
	struct ftrace_ret_stack	*ret_stack;
	/* time stamp for last schedule */
	unsigned long long ftrace_timestamp;
	/*
	 * Number of functions that haven't been traced
	 * because of depth overrun.
	 */
	atomic_t trace_overrun;
	/* Pause for the tracing */
	atomic_t tracing_graph_pause;
#endif
#ifdef CONFIG_TRACING
	/* state flags for use by tracers */
	unsigned long trace;
	/* bitmask of trace recursion */
	unsigned long trace_recursion;
#endif /* CONFIG_TRACING */
};

/* Future-safe accessor for struct task_struct's cpus_allowed. */
#define tsk_cpumask(tsk) (&(tsk)->cpus_allowed)

/*
 * Priority of a process goes from 0..MAX_PRIO-1, valid RT
 * priority is 0..MAX_RT_PRIO-1, and SCHED_NORMAL/SCHED_BATCH
 * tasks are in the range MAX_RT_PRIO..MAX_PRIO-1. Priority
 * values are inverted: lower p->prio value means higher priority.
 *
 * The MAX_USER_RT_PRIO value allows the actual maximum
 * RT priority to be separate from the value exported to
 * user-space.  This allows kernel threads to set their
 * priority to a value higher than any user task. Note:
 * MAX_RT_PRIO must not be smaller than MAX_USER_RT_PRIO.
 */

#define MAX_USER_RT_PRIO	100
#define MAX_RT_PRIO		MAX_USER_RT_PRIO

#define MAX_PRIO		(MAX_RT_PRIO + 40)
#define DEFAULT_PRIO		(MAX_RT_PRIO + 20)

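/*
 * Illustrative examples of the mapping (as computed in kernel/sched.c):
 *
 *	SCHED_NORMAL, nice   0    ->  p->prio == DEFAULT_PRIO (120)
 *	SCHED_NORMAL, nice -20    ->  p->prio == 100
 *	SCHED_FIFO, rt_priority 1 ->  p->prio == MAX_RT_PRIO-1 - 1 == 98
 */
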
static inline int rt_prio(int prio)
{
	if (unlikely(prio < MAX_RT_PRIO))
		return 1;
	return 0;
}

static inline int rt_task(struct task_struct *p)
{
	return rt_prio(p->prio);
}

static inline struct pid *task_pid(struct task_struct *task)
{
	return task->pids[PIDTYPE_PID].pid;
}

static inline struct pid *task_tgid(struct task_struct *task)
{
	return task->group_leader->pids[PIDTYPE_PID].pid;
}

/*
 * Without tasklist or rcu lock it is not safe to dereference
 * the result of task_pgrp/task_session even if task == current;
 * we can race with another thread doing sys_setsid/sys_setpgid.
 */
static inline struct pid *task_pgrp(struct task_struct *task)
{
	return task->group_leader->pids[PIDTYPE_PGID].pid;
}

static inline struct pid *task_session(struct task_struct *task)
{
	return task->group_leader->pids[PIDTYPE_SID].pid;
}

struct pid_namespace;

/*
 * the helpers to get the task's different pids as they are seen
 * from various namespaces
 *
 * task_xid_nr()     : global id, i.e. the id seen from the init namespace;
 * task_xid_vnr()    : virtual id, i.e. the id seen from the pid namespace of
 *                     current.
 * task_xid_nr_ns()  : id seen from the ns specified;
 *
 * set_task_vxid()   : assigns a virtual id to a task;
 *
 * see also pid_nr() etc in include/linux/pid.h
 */
1539 1540
pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type,
			struct pid_namespace *ns);
1541

A
Alexey Dobriyan 已提交
1542
static inline pid_t task_pid_nr(struct task_struct *tsk)
1543 1544 1545 1546
{
	return tsk->pid;
}

1547 1548 1549 1550 1551
static inline pid_t task_pid_nr_ns(struct task_struct *tsk,
					struct pid_namespace *ns)
{
	return __task_pid_nr_ns(tsk, PIDTYPE_PID, ns);
}
1552 1553 1554

static inline pid_t task_pid_vnr(struct task_struct *tsk)
{
1555
	return __task_pid_nr_ns(tsk, PIDTYPE_PID, NULL);
1556 1557 1558
}


A
Alexey Dobriyan 已提交
1559
static inline pid_t task_tgid_nr(struct task_struct *tsk)
1560 1561 1562 1563
{
	return tsk->tgid;
}

1564
pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns);

static inline pid_t task_tgid_vnr(struct task_struct *tsk)
{
	return pid_vnr(task_tgid(tsk));
}


static inline pid_t task_pgrp_nr_ns(struct task_struct *tsk,
					struct pid_namespace *ns)
{
	return __task_pid_nr_ns(tsk, PIDTYPE_PGID, ns);
}

static inline pid_t task_pgrp_vnr(struct task_struct *tsk)
{
	return __task_pid_nr_ns(tsk, PIDTYPE_PGID, NULL);
}


static inline pid_t task_session_nr_ns(struct task_struct *tsk,
					struct pid_namespace *ns)
{
	return __task_pid_nr_ns(tsk, PIDTYPE_SID, ns);
}

static inline pid_t task_session_vnr(struct task_struct *tsk)
{
	return __task_pid_nr_ns(tsk, PIDTYPE_SID, NULL);
}

/* obsolete, do not use */
static inline pid_t task_pgrp_nr(struct task_struct *tsk)
{
	return task_pgrp_nr_ns(tsk, &init_pid_ns);
}

/**
 * pid_alive - check that a task structure is not stale
 * @p: Task structure to be checked.
 *
 * Test if a process is not yet dead (at most zombie state).
 * If pid_alive fails, then pointers within the task structure
 * can be stale and must not be dereferenced.
 */
static inline int pid_alive(struct task_struct *p)
{
	return p->pids[PIDTYPE_PID].pid != NULL;
}

/**
 * is_global_init - check if a task structure is init
 * @tsk: Task structure to be checked.
 *
 * Check if a task structure is the first user space task the kernel created.
 */
static inline int is_global_init(struct task_struct *tsk)
{
	return tsk->pid == 1;
}

/*
 * is_container_init:
 * check whether the task is init in its own pid namespace.
 */
extern int is_container_init(struct task_struct *tsk);

extern struct pid *cad_pid;

extern void free_task(struct task_struct *tsk);
#define get_task_struct(tsk) do { atomic_inc(&(tsk)->usage); } while(0)

extern void __put_task_struct(struct task_struct *t);

static inline void put_task_struct(struct task_struct *t)
{
	if (atomic_dec_and_test(&t->usage))
		__put_task_struct(t);
}
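
/*
 * Usage sketch (illustrative, hypothetical helper): the canonical
 * reference pairing for task lifetime. The caller must already know
 * "tsk" cannot be freed (e.g. under rcu_read_lock() or tasklist_lock)
 * at the moment the reference is taken.
 */
static inline void example_task_ref(struct task_struct *tsk)
{
	get_task_struct(tsk);		/* atomic_inc(&tsk->usage) */
	/* ... *tsk may now be used safely, even after unlocking ... */
	put_task_struct(tsk);		/* may free the task on the final drop */
}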

extern cputime_t task_utime(struct task_struct *p);
extern cputime_t task_stime(struct task_struct *p);
extern cputime_t task_gtime(struct task_struct *p);

/*
 * Per process flags
 */
#define PF_ALIGNWARN	0x00000001	/* Print alignment warning msgs */
					/* Not implemented yet, only for 486*/
#define PF_STARTING	0x00000002	/* being created */
#define PF_EXITING	0x00000004	/* getting shut down */
#define PF_EXITPIDONE	0x00000008	/* pi exit done on shut down */
#define PF_VCPU		0x00000010	/* I'm a virtual CPU */
#define PF_FORKNOEXEC	0x00000040	/* forked but didn't exec */
#define PF_SUPERPRIV	0x00000100	/* used super-user privileges */
#define PF_DUMPCORE	0x00000200	/* dumped core */
#define PF_SIGNALED	0x00000400	/* killed by a signal */
#define PF_MEMALLOC	0x00000800	/* Allocating memory */
#define PF_FLUSHER	0x00001000	/* responsible for disk writeback */
#define PF_USED_MATH	0x00002000	/* if unset the fpu must be initialized before use */
#define PF_NOFREEZE	0x00008000	/* this thread should not be frozen */
#define PF_FROZEN	0x00010000	/* frozen for system suspend */
#define PF_FSTRANS	0x00020000	/* inside a filesystem transaction */
#define PF_KSWAPD	0x00040000	/* I am kswapd */
#define PF_SWAPOFF	0x00080000	/* I am in swapoff */
#define PF_LESS_THROTTLE 0x00100000	/* Throttle me less: I clean memory */
#define PF_KTHREAD	0x00200000	/* I am a kernel thread */
#define PF_RANDOMIZE	0x00400000	/* randomize virtual address space */
#define PF_SWAPWRITE	0x00800000	/* Allowed to write to swap */
#define PF_SPREAD_PAGE	0x01000000	/* Spread page cache over cpuset */
#define PF_SPREAD_SLAB	0x02000000	/* Spread some slab caches over cpuset */
#define PF_THREAD_BOUND	0x04000000	/* Thread bound to specific cpu */
#define PF_MEMPOLICY	0x10000000	/* Non-default NUMA mempolicy */
#define PF_MUTEX_TESTER	0x20000000	/* Thread belongs to the rt mutex tester */
#define PF_FREEZER_SKIP	0x40000000	/* Freezer should not count it as freezeable */
#define PF_FREEZER_NOSIG 0x80000000	/* Freezer won't send signals to it */

/*
 * Only the _current_ task can read/write to tsk->flags, but other
 * tasks can access tsk->flags in readonly mode for example
 * with tsk_used_math (like during threaded core dumping).
 * There is however an exception to this rule during ptrace
 * or during fork: the ptracer task is allowed to write to the
 * child->flags of its traced child (same goes for fork, the parent
 * can write to the child->flags), because we're guaranteed the
 * child is not running and in turn not changing child->flags
 * at the same time the parent does it.
 */
#define clear_stopped_child_used_math(child) do { (child)->flags &= ~PF_USED_MATH; } while (0)
#define set_stopped_child_used_math(child) do { (child)->flags |= PF_USED_MATH; } while (0)
#define clear_used_math() clear_stopped_child_used_math(current)
#define set_used_math() set_stopped_child_used_math(current)
#define conditional_stopped_child_used_math(condition, child) \
	do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= (condition) ? PF_USED_MATH : 0; } while (0)
#define conditional_used_math(condition) \
	conditional_stopped_child_used_math(condition, current)
#define copy_to_stopped_child_used_math(child) \
	do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= current->flags & PF_USED_MATH; } while (0)
/* NOTE: this will return 0 or PF_USED_MATH, it will never return 1 */
#define tsk_used_math(p) ((p)->flags & PF_USED_MATH)
#define used_math() tsk_used_math(current)

#ifdef CONFIG_SMP
extern int set_cpus_allowed_ptr(struct task_struct *p,
				const struct cpumask *new_mask);
#else
static inline int set_cpus_allowed_ptr(struct task_struct *p,
				       const struct cpumask *new_mask)
{
	if (!cpumask_test_cpu(0, new_mask))
		return -EINVAL;
	return 0;
}
#endif
static inline int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
{
	return set_cpus_allowed_ptr(p, &new_mask);
}
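
/*
 * Usage sketch (illustrative, hypothetical helper): pinning a task to
 * CPU 0 via the cpumask-based interface above; cpumask_of() comes from
 * linux/cpumask.h. Real callers should handle the error return.
 */
static inline int example_pin_to_cpu0(struct task_struct *p)
{
	return set_cpus_allowed_ptr(p, cpumask_of(0));
}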

/*
 * Architectures can set this to 1 if they have specified
 * CONFIG_HAVE_UNSTABLE_SCHED_CLOCK in their arch Kconfig,
 * but then during bootup it turns out that sched_clock()
 * is reliable after all:
 */
#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
extern int sched_clock_stable;
#endif

extern unsigned long long sched_clock(void);

extern void sched_clock_init(void);
extern u64 sched_clock_cpu(int cpu);

#ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
static inline void sched_clock_tick(void)
{
}

static inline void sched_clock_idle_sleep_event(void)
{
}

static inline void sched_clock_idle_wakeup_event(u64 delta_ns)
{
}
#else
extern void sched_clock_tick(void);
extern void sched_clock_idle_sleep_event(void);
extern void sched_clock_idle_wakeup_event(u64 delta_ns);
#endif

/*
 * For kernel-internal use: high-speed (but slightly incorrect) per-cpu
 * clock constructed from sched_clock():
 */
extern unsigned long long cpu_clock(int cpu);
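
/*
 * Illustrative sketch (hypothetical helper): timing a code section with
 * cpu_clock(). Both readings must come from the same cpu for the
 * nanosecond delta to be meaningful.
 */
static inline unsigned long long example_time_section_ns(int cpu)
{
	unsigned long long t0 = cpu_clock(cpu);

	/* ... section to be measured ... */
	return cpu_clock(cpu) - t0;
}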

extern unsigned long long
task_sched_runtime(struct task_struct *task);
extern unsigned long long thread_group_sched_runtime(struct task_struct *task);

/* sched_exec is called by processes performing an exec */
#ifdef CONFIG_SMP
extern void sched_exec(void);
#else
#define sched_exec()   {}
#endif

extern void sched_clock_idle_sleep_event(void);
extern void sched_clock_idle_wakeup_event(u64 delta_ns);

#ifdef CONFIG_HOTPLUG_CPU
extern void idle_task_exit(void);
#else
static inline void idle_task_exit(void) {}
#endif

extern void sched_idle_next(void);

#if defined(CONFIG_NO_HZ) && defined(CONFIG_SMP)
extern void wake_up_idle_cpu(int cpu);
#else
static inline void wake_up_idle_cpu(int cpu) { }
#endif

extern unsigned int sysctl_sched_latency;
extern unsigned int sysctl_sched_min_granularity;
extern unsigned int sysctl_sched_wakeup_granularity;
extern unsigned int sysctl_sched_shares_ratelimit;
extern unsigned int sysctl_sched_shares_thresh;
#ifdef CONFIG_SCHED_DEBUG
extern unsigned int sysctl_sched_child_runs_first;
extern unsigned int sysctl_sched_features;
extern unsigned int sysctl_sched_migration_cost;
extern unsigned int sysctl_sched_nr_migrate;
extern unsigned int sysctl_timer_migration;

int sched_nr_latency_handler(struct ctl_table *table, int write,
		struct file *file, void __user *buffer, size_t *length,
		loff_t *ppos);
#endif
#ifdef CONFIG_SCHED_DEBUG
static inline unsigned int get_sysctl_timer_migration(void)
{
	return sysctl_timer_migration;
}
#else
static inline unsigned int get_sysctl_timer_migration(void)
{
	return 1;
}
#endif
extern unsigned int sysctl_sched_rt_period;
extern int sysctl_sched_rt_runtime;

int sched_rt_handler(struct ctl_table *table, int write,
		struct file *filp, void __user *buffer, size_t *lenp,
		loff_t *ppos);

extern unsigned int sysctl_sched_compat_yield;

#ifdef CONFIG_RT_MUTEXES
extern int rt_mutex_getprio(struct task_struct *p);
extern void rt_mutex_setprio(struct task_struct *p, int prio);
extern void rt_mutex_adjust_pi(struct task_struct *p);
#else
static inline int rt_mutex_getprio(struct task_struct *p)
{
	return p->normal_prio;
}
# define rt_mutex_adjust_pi(p)		do { } while (0)
#endif

extern void set_user_nice(struct task_struct *p, long nice);
extern int task_prio(const struct task_struct *p);
extern int task_nice(const struct task_struct *p);
extern int can_nice(const struct task_struct *p, const int nice);
extern int task_curr(const struct task_struct *p);
extern int idle_cpu(int cpu);
extern int sched_setscheduler(struct task_struct *, int, struct sched_param *);
extern int sched_setscheduler_nocheck(struct task_struct *, int,
				      struct sched_param *);
extern struct task_struct *idle_task(int cpu);
extern struct task_struct *curr_task(int cpu);
extern void set_curr_task(int cpu, struct task_struct *p);

void yield(void);

/*
 * The default (Linux) execution domain.
 */
extern struct exec_domain	default_exec_domain;

union thread_union {
	struct thread_info thread_info;
	unsigned long stack[THREAD_SIZE/sizeof(long)];
};

#ifndef __HAVE_ARCH_KSTACK_END
static inline int kstack_end(void *addr)
{
	/* Reliable end of stack detection:
	 * Some APM BIOS versions misalign the stack
	 */
	return !(((unsigned long)addr+sizeof(void*)-1) & (THREAD_SIZE-sizeof(void*)));
}
#endif

extern union thread_union init_thread_union;
extern struct task_struct init_task;

extern struct   mm_struct init_mm;

extern struct pid_namespace init_pid_ns;

/*
 * find a task by one of its numerical ids
 *
 * find_task_by_pid_type_ns():
 *      the most generic call - it finds a task given its numerical id,
 *      the pid type and the namespace specified
 * find_task_by_pid_ns():
 *      finds a task by its pid in the specified namespace
 * find_task_by_vpid():
 *      finds a task by its virtual pid
 *
 * see also find_vpid() etc in include/linux/pid.h
 */

extern struct task_struct *find_task_by_pid_type_ns(int type, int pid,
		struct pid_namespace *ns);

extern struct task_struct *find_task_by_vpid(pid_t nr);
extern struct task_struct *find_task_by_pid_ns(pid_t nr,
		struct pid_namespace *ns);

extern void __set_special_pids(struct pid *pid);

/* per-UID process charging. */
extern struct user_struct * alloc_uid(struct user_namespace *, uid_t);
static inline struct user_struct *get_uid(struct user_struct *u)
{
	atomic_inc(&u->__count);
	return u;
}
extern void free_uid(struct user_struct *);
extern void release_uids(struct user_namespace *ns);

#include <asm/current.h>

extern void do_timer(unsigned long ticks);

extern int wake_up_state(struct task_struct *tsk, unsigned int state);
extern int wake_up_process(struct task_struct *tsk);
extern void wake_up_new_task(struct task_struct *tsk,
				unsigned long clone_flags);
#ifdef CONFIG_SMP
 extern void kick_process(struct task_struct *tsk);
#else
 static inline void kick_process(struct task_struct *tsk) { }
#endif
extern void sched_fork(struct task_struct *p, int clone_flags);
extern void sched_dead(struct task_struct *p);

extern void proc_caches_init(void);
extern void flush_signals(struct task_struct *);
extern void __flush_signals(struct task_struct *);
extern void ignore_signals(struct task_struct *);
extern void flush_signal_handlers(struct task_struct *, int force_default);
extern int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info);

static inline int dequeue_signal_lock(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&tsk->sighand->siglock, flags);
	ret = dequeue_signal(tsk, mask, info);
	spin_unlock_irqrestore(&tsk->sighand->siglock, flags);

	return ret;
}

extern void block_all_signals(int (*notifier)(void *priv), void *priv,
			      sigset_t *mask);
extern void unblock_all_signals(void);
extern void release_task(struct task_struct * p);
extern int send_sig_info(int, struct siginfo *, struct task_struct *);
extern int force_sigsegv(int, struct task_struct *);
extern int force_sig_info(int, struct siginfo *, struct task_struct *);
extern int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp);
extern int kill_pid_info(int sig, struct siginfo *info, struct pid *pid);
extern int kill_pid_info_as_uid(int, struct siginfo *, struct pid *, uid_t, uid_t, u32);
extern int kill_pgrp(struct pid *pid, int sig, int priv);
extern int kill_pid(struct pid *pid, int sig, int priv);
extern int kill_proc_info(int, struct siginfo *, pid_t);
extern int do_notify_parent(struct task_struct *, int);
extern void force_sig(int, struct task_struct *);
extern void force_sig_specific(int, struct task_struct *);
extern int send_sig(int, struct task_struct *, int);
extern void zap_other_threads(struct task_struct *p);
extern struct sigqueue *sigqueue_alloc(void);
extern void sigqueue_free(struct sigqueue *);
extern int send_sigqueue(struct sigqueue *,  struct task_struct *, int group);
extern int do_sigaction(int, struct k_sigaction *, struct k_sigaction *);
extern int do_sigaltstack(const stack_t __user *, stack_t __user *, unsigned long);

static inline int kill_cad_pid(int sig, int priv)
{
	return kill_pid(cad_pid, sig, priv);
}

/* These can be the second arg to send_sig_info/send_group_sig_info.  */
#define SEND_SIG_NOINFO ((struct siginfo *) 0)
#define SEND_SIG_PRIV	((struct siginfo *) 1)
#define SEND_SIG_FORCED	((struct siginfo *) 2)

static inline int is_si_special(const struct siginfo *info)
{
	return info <= SEND_SIG_FORCED;
}

/* True if we are on the alternate signal stack.  */

static inline int on_sig_stack(unsigned long sp)
{
	return (sp - current->sas_ss_sp < current->sas_ss_size);
}

static inline int sas_ss_flags(unsigned long sp)
{
	return (current->sas_ss_size == 0 ? SS_DISABLE
		: on_sig_stack(sp) ? SS_ONSTACK : 0);
}

/*
 * Routines for handling mm_structs
 */
extern struct mm_struct * mm_alloc(void);

/* mmdrop drops the mm and the page tables */
extern void __mmdrop(struct mm_struct *);
static inline void mmdrop(struct mm_struct * mm)
{
	if (unlikely(atomic_dec_and_test(&mm->mm_count)))
		__mmdrop(mm);
}

/* mmput gets rid of the mappings and all user-space */
extern void mmput(struct mm_struct *);
/* Grab a reference to a task's mm, if it is not already going away */
extern struct mm_struct *get_task_mm(struct task_struct *task);
/* Remove the current tasks stale references to the old mm_struct */
extern void mm_release(struct task_struct *, struct mm_struct *);
/* Allocate a new mm structure and copy contents from tsk->mm */
extern struct mm_struct *dup_mm(struct task_struct *tsk);
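
/*
 * Usage sketch (illustrative, hypothetical helper): taking a temporary
 * reference on another task's mm. get_task_mm() returns NULL for kernel
 * threads or when the mm is already being torn down.
 */
static inline unsigned long example_task_total_vm(struct task_struct *tsk)
{
	struct mm_struct *mm = get_task_mm(tsk);
	unsigned long total = 0;

	if (mm) {
		total = mm->total_vm;	/* valid while we hold the reference */
		mmput(mm);
	}
	return total;
}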

extern int copy_thread(unsigned long, unsigned long, unsigned long,
			struct task_struct *, struct pt_regs *);
extern void flush_thread(void);
extern void exit_thread(void);

extern void exit_files(struct task_struct *);
extern void __cleanup_signal(struct signal_struct *);
extern void __cleanup_sighand(struct sighand_struct *);

extern void exit_itimers(struct signal_struct *);
extern void flush_itimer_signals(void);

extern NORET_TYPE void do_group_exit(int);

extern void daemonize(const char *, ...);
extern int allow_signal(int);
extern int disallow_signal(int);

extern int do_execve(char *, char __user * __user *, char __user * __user *, struct pt_regs *);
extern long do_fork(unsigned long, unsigned long, struct pt_regs *, unsigned long, int __user *, int __user *);
struct task_struct *fork_idle(int);

extern void set_task_comm(struct task_struct *tsk, char *from);
extern char *get_task_comm(char *to, struct task_struct *tsk);

#ifdef CONFIG_SMP
extern void wait_task_context_switch(struct task_struct *p);
extern unsigned long wait_task_inactive(struct task_struct *, long match_state);
#else
static inline void wait_task_context_switch(struct task_struct *p) {}
static inline unsigned long wait_task_inactive(struct task_struct *p,
					       long match_state)
{
	return 1;
}
#endif

#define next_task(p) \
	list_entry_rcu((p)->tasks.next, struct task_struct, tasks)

#define for_each_process(p) \
	for (p = &init_task ; (p = next_task(p)) != &init_task ; )
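
/*
 * Usage sketch (illustrative, hypothetical helper): counting all
 * processes with the for_each_process() loop above. The caller must
 * hold read_lock(&tasklist_lock) (or rcu_read_lock()).
 */
static inline int example_count_processes(void)
{
	struct task_struct *p;
	int n = 0;

	for_each_process(p)
		n++;

	return n;
}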

extern bool is_single_threaded(struct task_struct *);

/*
 * Careful: do_each_thread/while_each_thread is a double loop so
 *          'break' will not work as expected - use goto instead.
 */
#define do_each_thread(g, t) \
	for (g = t = &init_task ; (g = t = next_task(g)) != &init_task ; ) do

#define while_each_thread(g, t) \
	while ((t = next_thread(t)) != g)

/* de_thread depends on thread_group_leader not being a pid based check */
#define thread_group_leader(p)	(p == p->group_leader)

/* Due to the insanities of de_thread it is possible for a process
 * to have the pid of the thread group leader without actually being
 * the thread group leader.  For iteration through the pids in proc
 * all we care about is that we have a task with the appropriate
 * pid, we don't actually care if we have the right task.
 */
static inline int has_group_leader_pid(struct task_struct *p)
{
	return p->pid == p->tgid;
}

static inline
int same_thread_group(struct task_struct *p1, struct task_struct *p2)
{
	return p1->tgid == p2->tgid;
}

static inline struct task_struct *next_thread(const struct task_struct *p)
{
	return list_entry_rcu(p->thread_group.next,
			      struct task_struct, thread_group);
}

static inline int thread_group_empty(struct task_struct *p)
{
	return list_empty(&p->thread_group);
}

#define delay_group_leader(p) \
		(thread_group_leader(p) && !thread_group_empty(p))

static inline int task_detached(struct task_struct *p)
{
	return p->exit_signal == -1;
}

/*
 * Protects ->fs, ->files, ->mm, ->group_info, ->comm, keyring
 * subscriptions and synchronises with wait4().  Also used in procfs.  Also
 * pins the final release of task.io_context.  Also protects ->cpuset and
 * ->cgroup.subsys[].
 *
 * Nests both inside and outside of read_lock(&tasklist_lock).
 * It must not be nested with write_lock_irq(&tasklist_lock),
 * neither inside nor outside.
 */
static inline void task_lock(struct task_struct *p)
{
	spin_lock(&p->alloc_lock);
}

static inline void task_unlock(struct task_struct *p)
{
	spin_unlock(&p->alloc_lock);
}
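
/*
 * Usage sketch (illustrative, hypothetical helper): task_lock()
 * stabilising ->comm, one of the fields the comment above lists.
 */
static inline char example_comm_first_char(struct task_struct *p)
{
	char c;

	task_lock(p);
	c = p->comm[0];		/* ->comm cannot change while the lock is held */
	task_unlock(p);

	return c;
}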

extern struct sighand_struct *lock_task_sighand(struct task_struct *tsk,
							unsigned long *flags);

static inline void unlock_task_sighand(struct task_struct *tsk,
						unsigned long *flags)
{
	spin_unlock_irqrestore(&tsk->sighand->siglock, *flags);
}
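
/*
 * Usage sketch (illustrative, hypothetical helper): the lock/unlock
 * pairing for the sighand lock. lock_task_sighand() returns NULL when
 * the task is already being released, so the result must be checked.
 */
static inline int example_with_sighand(struct task_struct *tsk)
{
	unsigned long flags;

	if (!lock_task_sighand(tsk, &flags))
		return -ESRCH;	/* no sighand left: task is exiting */
	/* ... inspect or modify signal state under siglock here ... */
	unlock_task_sighand(tsk, &flags);

	return 0;
}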

#ifndef __HAVE_THREAD_FUNCTIONS

#define task_thread_info(task)	((struct thread_info *)(task)->stack)
#define task_stack_page(task)	((task)->stack)

static inline void setup_thread_stack(struct task_struct *p, struct task_struct *org)
{
	*task_thread_info(p) = *task_thread_info(org);
	task_thread_info(p)->task = p;
}

static inline unsigned long *end_of_stack(struct task_struct *p)
{
	return (unsigned long *)(task_thread_info(p) + 1);
}

#endif

static inline int object_is_on_stack(void *obj)
{
	void *stack = task_stack_page(current);

	return (obj >= stack) && (obj < (stack + THREAD_SIZE));
}

extern void thread_info_cache_init(void);

#ifdef CONFIG_DEBUG_STACK_USAGE
static inline unsigned long stack_not_used(struct task_struct *p)
{
	unsigned long *n = end_of_stack(p);

	do { 	/* Skip over canary */
		n++;
	} while (!*n);

	return (unsigned long)n - (unsigned long)end_of_stack(p);
}
#endif

/* set thread flags in other task's structures
 * - see asm/thread_info.h for TIF_xxxx flags available
 */
static inline void set_tsk_thread_flag(struct task_struct *tsk, int flag)
{
	set_ti_thread_flag(task_thread_info(tsk), flag);
}

static inline void clear_tsk_thread_flag(struct task_struct *tsk, int flag)
{
	clear_ti_thread_flag(task_thread_info(tsk), flag);
}

static inline int test_and_set_tsk_thread_flag(struct task_struct *tsk, int flag)
{
	return test_and_set_ti_thread_flag(task_thread_info(tsk), flag);
}

static inline int test_and_clear_tsk_thread_flag(struct task_struct *tsk, int flag)
{
	return test_and_clear_ti_thread_flag(task_thread_info(tsk), flag);
}

static inline int test_tsk_thread_flag(struct task_struct *tsk, int flag)
{
	return test_ti_thread_flag(task_thread_info(tsk), flag);
}

static inline void set_tsk_need_resched(struct task_struct *tsk)
{
	set_tsk_thread_flag(tsk,TIF_NEED_RESCHED);
}

static inline void clear_tsk_need_resched(struct task_struct *tsk)
{
	clear_tsk_thread_flag(tsk,TIF_NEED_RESCHED);
}

static inline int test_tsk_need_resched(struct task_struct *tsk)
{
	return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED));
}

static inline int restart_syscall(void)
{
	set_tsk_thread_flag(current, TIF_SIGPENDING);
	return -ERESTARTNOINTR;
}

static inline int signal_pending(struct task_struct *p)
{
	return unlikely(test_tsk_thread_flag(p,TIF_SIGPENDING));
}

extern int __fatal_signal_pending(struct task_struct *p);

static inline int fatal_signal_pending(struct task_struct *p)
{
	return signal_pending(p) && __fatal_signal_pending(p);
}

static inline int signal_pending_state(long state, struct task_struct *p)
{
	if (!(state & (TASK_INTERRUPTIBLE | TASK_WAKEKILL)))
		return 0;
	if (!signal_pending(p))
		return 0;

	return (state & TASK_INTERRUPTIBLE) || __fatal_signal_pending(p);
}
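
/*
 * Usage sketch (illustrative, hypothetical helper): the classic
 * interruptible-wait pattern the predicates above serve. "done" stands
 * in for real completion state; schedule_timeout_interruptible() is
 * declared earlier in this header.
 */
static inline int example_wait_for(volatile int *done)
{
	while (!*done) {
		if (signal_pending(current))
			return -ERESTARTSYS;
		schedule_timeout_interruptible(1);
	}
	return 0;
}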

static inline int need_resched(void)
{
	return unlikely(test_thread_flag(TIF_NEED_RESCHED));
}

/*
 * cond_resched() and cond_resched_lock(): latency reduction via
 * explicit rescheduling in places that are safe. The return
 * value indicates whether a reschedule was done in fact.
 * cond_resched_lock() will drop the spinlock before scheduling,
 * cond_resched_softirq() will enable bhs before scheduling.
 */
extern int _cond_resched(void);
#ifdef CONFIG_PREEMPT_BKL
static inline int cond_resched(void)
{
	return 0;
}
#else
static inline int cond_resched(void)
{
	return _cond_resched();
}
#endif
extern int cond_resched_lock(spinlock_t * lock);
extern int cond_resched_softirq(void);
static inline int cond_resched_bkl(void)
{
	return _cond_resched();
}

/*
 * Does a critical section need to be broken due to another
 * task waiting? (Technically this does not depend on CONFIG_PREEMPT,
 * but it expresses a general need for low latency.)
 */
static inline int spin_needbreak(spinlock_t *lock)
{
#ifdef CONFIG_PREEMPT
	return spin_is_contended(lock);
#else
	return 0;
#endif
}
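
/*
 * Usage sketch (illustrative, hypothetical helper): breaking up a long
 * lock hold. When another task spins on the lock, or a reschedule is
 * due, cond_resched_lock() drops the lock, schedules and reacquires it.
 */
static inline void example_long_scan(spinlock_t *lock, int iters)
{
	int i;

	spin_lock(lock);
	for (i = 0; i < iters; i++) {
		/* ... one bounded unit of work under the lock ... */
		if (spin_needbreak(lock) || need_resched())
			cond_resched_lock(lock);
	}
	spin_unlock(lock);
}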

/*
 * Thread group CPU time accounting.
 */
void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times);
void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times);

static inline void thread_group_cputime_init(struct signal_struct *sig)
{
	sig->cputimer.cputime = INIT_CPUTIME;
	spin_lock_init(&sig->cputimer.lock);
	sig->cputimer.running = 0;
}

static inline void thread_group_cputime_free(struct signal_struct *sig)
{
}

/*
 * Reevaluate whether the task has signals pending delivery.
 * Wake the task if so.
 * This is required every time the blocked sigset_t changes.
 * callers must hold sighand->siglock.
 */
extern void recalc_sigpending_and_wake(struct task_struct *t);
extern void recalc_sigpending(void);

extern void signal_wake_up(struct task_struct *t, int resume_stopped);

/*
 * Wrappers for p->thread_info->cpu access. No-op on UP.
 */
#ifdef CONFIG_SMP

static inline unsigned int task_cpu(const struct task_struct *p)
{
	return task_thread_info(p)->cpu;
}

extern void set_task_cpu(struct task_struct *p, unsigned int cpu);

#else

static inline unsigned int task_cpu(const struct task_struct *p)
{
	return 0;
}

static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
{
}

#endif /* CONFIG_SMP */

extern void arch_pick_mmap_layout(struct mm_struct *mm);

#ifdef CONFIG_TRACING
extern void
__trace_special(void *__tr, void *__data,
		unsigned long arg1, unsigned long arg2, unsigned long arg3);
#else
static inline void
__trace_special(void *__tr, void *__data,
		unsigned long arg1, unsigned long arg2, unsigned long arg3)
{
}
#endif

extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask);
extern long sched_getaffinity(pid_t pid, struct cpumask *mask);

extern void normalize_rt_tasks(void);

#ifdef CONFIG_GROUP_SCHED

extern struct task_group init_task_group;
#ifdef CONFIG_USER_SCHED
extern struct task_group root_task_group;
extern void set_tg_uid(struct user_struct *user);
#endif

extern struct task_group *sched_create_group(struct task_group *parent);
extern void sched_destroy_group(struct task_group *tg);
extern void sched_move_task(struct task_struct *tsk);
#ifdef CONFIG_FAIR_GROUP_SCHED
extern int sched_group_set_shares(struct task_group *tg, unsigned long shares);
extern unsigned long sched_group_shares(struct task_group *tg);
#endif
#ifdef CONFIG_RT_GROUP_SCHED
extern int sched_group_set_rt_runtime(struct task_group *tg,
				      long rt_runtime_us);
extern long sched_group_rt_runtime(struct task_group *tg);
extern int sched_group_set_rt_period(struct task_group *tg,
				      long rt_period_us);
extern long sched_group_rt_period(struct task_group *tg);
extern int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk);
#endif
#endif

extern int task_can_switch_user(struct user_struct *up,
					struct task_struct *tsk);

#ifdef CONFIG_TASK_XACCT
static inline void add_rchar(struct task_struct *tsk, ssize_t amt)
{
	tsk->ioac.rchar += amt;
}

static inline void add_wchar(struct task_struct *tsk, ssize_t amt)
{
	tsk->ioac.wchar += amt;
}

static inline void inc_syscr(struct task_struct *tsk)
{
	tsk->ioac.syscr++;
}

static inline void inc_syscw(struct task_struct *tsk)
{
	tsk->ioac.syscw++;
}
#else
static inline void add_rchar(struct task_struct *tsk, ssize_t amt)
{
}

static inline void add_wchar(struct task_struct *tsk, ssize_t amt)
{
}

static inline void inc_syscr(struct task_struct *tsk)
{
}

static inline void inc_syscw(struct task_struct *tsk)
{
}
#endif

#ifndef TASK_SIZE_OF
#define TASK_SIZE_OF(tsk)	TASK_SIZE
#endif

/*
 * Call the function if the target task is executing on a CPU right now:
 */
extern void task_oncpu_function_call(struct task_struct *p,
				     void (*func) (void *info), void *info);


#ifdef CONFIG_MM_OWNER
extern void mm_update_next_owner(struct mm_struct *mm);
extern void mm_init_owner(struct mm_struct *mm, struct task_struct *p);
#else
static inline void mm_update_next_owner(struct mm_struct *mm)
{
}

static inline void mm_init_owner(struct mm_struct *mm, struct task_struct *p)
{
}
#endif /* CONFIG_MM_OWNER */

#define TASK_STATE_TO_CHAR_STR "RSDTtZX"

#endif /* __KERNEL__ */

#endif