#ifndef _LINUX_SCHED_H
#define _LINUX_SCHED_H

/*
 * cloning flags:
 */
#define CSIGNAL		0x000000ff	/* signal mask to be sent at exit */
#define CLONE_VM	0x00000100	/* set if VM shared between processes */
#define CLONE_FS	0x00000200	/* set if fs info shared between processes */
#define CLONE_FILES	0x00000400	/* set if open files shared between processes */
#define CLONE_SIGHAND	0x00000800	/* set if signal handlers and blocked signals shared */
#define CLONE_PTRACE	0x00002000	/* set if we want to let tracing continue on the child too */
#define CLONE_VFORK	0x00004000	/* set if the parent wants the child to wake it up on mm_release */
#define CLONE_PARENT	0x00008000	/* set if we want to have the same parent as the cloner */
#define CLONE_THREAD	0x00010000	/* Same thread group? */
#define CLONE_NEWNS	0x00020000	/* New namespace group? */
#define CLONE_SYSVSEM	0x00040000	/* share system V SEM_UNDO semantics */
#define CLONE_SETTLS	0x00080000	/* create a new TLS for the child */
#define CLONE_PARENT_SETTID	0x00100000	/* set the TID in the parent */
#define CLONE_CHILD_CLEARTID	0x00200000	/* clear the TID in the child */
#define CLONE_DETACHED		0x00400000	/* Unused, ignored */
#define CLONE_UNTRACED		0x00800000	/* set if the tracing process can't force CLONE_PTRACE on this clone */
#define CLONE_CHILD_SETTID	0x01000000	/* set the TID in the child */
#define CLONE_STOPPED		0x02000000	/* Start in stopped state */
#define CLONE_NEWUTS		0x04000000	/* New utsname group? */
#define CLONE_NEWIPC		0x08000000	/* New ipcs */
#define CLONE_NEWUSER		0x10000000	/* New user namespace */
#define CLONE_NEWPID		0x20000000	/* New pid namespace */
#define CLONE_NEWNET		0x40000000	/* New network namespace */
#define CLONE_IO		0x80000000	/* Clone io context */
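
/*
 * Illustrative example (not part of this header): a pthread-style thread is
 * typically created by user space with a flag combination along the lines of
 *
 *	CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND | CLONE_THREAD |
 *	CLONE_SYSVSEM | CLONE_SETTLS | CLONE_PARENT_SETTID |
 *	CLONE_CHILD_CLEARTID
 *
 * while a traditional fork() corresponds to passing just SIGCHLD in the
 * CSIGNAL bits.
 */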

/*
 * Scheduling policies
 */
#define SCHED_NORMAL		0
#define SCHED_FIFO		1
#define SCHED_RR		2
#define SCHED_BATCH		3
/* SCHED_ISO: reserved but not implemented yet */
#define SCHED_IDLE		5
/* Can be ORed in to make sure the process is reverted back to SCHED_NORMAL on fork */
#define SCHED_RESET_ON_FORK     0x40000000
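
/*
 * Illustrative example only: from user space the policy is chosen with
 * sched_setscheduler(), and SCHED_RESET_ON_FORK may be ORed into it, e.g.
 *
 *	struct sched_param sp = { .sched_priority = 1 };
 *	sched_setscheduler(0, SCHED_FIFO | SCHED_RESET_ON_FORK, &sp);
 *
 * so that children forked by the real-time task start back at SCHED_NORMAL.
 */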

#ifdef __KERNEL__

struct sched_param {
	int sched_priority;
};

#include <asm/param.h>	/* for HZ */

#include <linux/capability.h>
#include <linux/threads.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/timex.h>
#include <linux/jiffies.h>
#include <linux/rbtree.h>
#include <linux/thread_info.h>
#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/nodemask.h>
#include <linux/mm_types.h>

#include <asm/system.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include <asm/cputime.h>

#include <linux/smp.h>
#include <linux/sem.h>
#include <linux/signal.h>
#include <linux/path.h>
#include <linux/compiler.h>
#include <linux/completion.h>
#include <linux/pid.h>
#include <linux/percpu.h>
#include <linux/topology.h>
#include <linux/proportions.h>
#include <linux/seccomp.h>
#include <linux/rcupdate.h>
#include <linux/rculist.h>
#include <linux/rtmutex.h>

#include <linux/time.h>
#include <linux/param.h>
#include <linux/resource.h>
#include <linux/timer.h>
#include <linux/hrtimer.h>
#include <linux/task_io_accounting.h>
#include <linux/kobject.h>
#include <linux/latencytop.h>
#include <linux/cred.h>

#include <asm/processor.h>

struct exec_domain;
struct futex_pi_state;
struct robust_list_head;
struct bio;
struct fs_struct;
struct bts_context;
struct perf_event_context;

/*
 * List of flags we want to share for kernel threads,
 * if only because they are not used by them anyway.
 */
#define CLONE_KERNEL	(CLONE_FS | CLONE_FILES | CLONE_SIGHAND)

/*
 * These are the constants used to fake the fixed-point load-average
 * counting. Some notes:
 *  - 11 bit fractions expand to 22 bits by the multiplies: this gives
 *    a load-average precision of 10 bits integer + 11 bits fractional
 *  - if you want to count load-averages more often, you need more
 *    precision, or rounding will get you. With 2-second counting freq,
 *    the EXP_n values would be 1981, 2034 and 2043 if still using only
 *    11 bit fractions.
 */
extern unsigned long avenrun[];		/* Load averages */
extern void get_avenrun(unsigned long *loads, unsigned long offset, int shift);

#define FSHIFT		11		/* nr of bits of precision */
#define FIXED_1		(1<<FSHIFT)	/* 1.0 as fixed-point */
#define LOAD_FREQ	(5*HZ+1)	/* 5 sec intervals */
#define EXP_1		1884		/* 1/exp(5sec/1min) as fixed-point */
#define EXP_5		2014		/* 1/exp(5sec/5min) */
#define EXP_15		2037		/* 1/exp(5sec/15min) */

#define CALC_LOAD(load,exp,n) \
	load *= exp; \
	load += n*(FIXED_1-exp); \
	load >>= FSHIFT;
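
/*
 * Illustrative sketch only (the real code lives in kernel/sched.c): once per
 * LOAD_FREQ interval the global load averages are folded roughly as
 *
 *	active = nr_active_tasks * FIXED_1;
 *	CALC_LOAD(avenrun[0], EXP_1, active);
 *	CALC_LOAD(avenrun[1], EXP_5, active);
 *	CALC_LOAD(avenrun[2], EXP_15, active);
 *
 * i.e. each average decays by EXP_n and accumulates the current number of
 * runnable + uninterruptible tasks in 11-bit fixed point.  nr_active_tasks
 * above is only a stand-in name for that sampled count.
 */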

extern unsigned long total_forks;
extern int nr_threads;
DECLARE_PER_CPU(unsigned long, process_counts);
extern int nr_processes(void);
extern unsigned long nr_running(void);
extern unsigned long nr_uninterruptible(void);
extern unsigned long nr_iowait(void);
extern unsigned long nr_iowait_cpu(void);
extern unsigned long this_cpu_load(void);


extern void calc_global_load(void);
extern u64 cpu_nr_migrations(int cpu);

extern unsigned long get_parent_ip(unsigned long addr);

struct seq_file;
struct cfs_rq;
struct task_group;
#ifdef CONFIG_SCHED_DEBUG
extern void proc_sched_show_task(struct task_struct *p, struct seq_file *m);
extern void proc_sched_set_task(struct task_struct *p);
extern void
print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq);
#else
static inline void
proc_sched_show_task(struct task_struct *p, struct seq_file *m)
{
}
static inline void proc_sched_set_task(struct task_struct *p)
{
}
static inline void
print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
{
}
#endif

extern unsigned long long time_sync_thresh;

/*
 * Task state bitmask. NOTE! These bits are also
 * encoded in fs/proc/array.c: get_task_state().
 *
 * We have two separate sets of flags: task->state
 * is about runnability, while task->exit_state are
 * about the task exiting. Confusing, but this way
 * modifying one set can't modify the other one by
 * mistake.
 */
#define TASK_RUNNING		0
#define TASK_INTERRUPTIBLE	1
#define TASK_UNINTERRUPTIBLE	2
#define __TASK_STOPPED		4
#define __TASK_TRACED		8
/* in tsk->exit_state */
#define EXIT_ZOMBIE		16
#define EXIT_DEAD		32
/* in tsk->state again */
#define TASK_DEAD		64
#define TASK_WAKEKILL		128
#define TASK_WAKING		256

/* Convenience macros for the sake of set_task_state */
#define TASK_KILLABLE		(TASK_WAKEKILL | TASK_UNINTERRUPTIBLE)
#define TASK_STOPPED		(TASK_WAKEKILL | __TASK_STOPPED)
#define TASK_TRACED		(TASK_WAKEKILL | __TASK_TRACED)

/* Convenience macros for the sake of wake_up */
#define TASK_NORMAL		(TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE)
#define TASK_ALL		(TASK_NORMAL | __TASK_STOPPED | __TASK_TRACED)

/* get_task_state() */
#define TASK_REPORT		(TASK_RUNNING | TASK_INTERRUPTIBLE | \
				 TASK_UNINTERRUPTIBLE | __TASK_STOPPED | \
				 __TASK_TRACED)

#define task_is_traced(task)	((task->state & __TASK_TRACED) != 0)
#define task_is_stopped(task)	((task->state & __TASK_STOPPED) != 0)
#define task_is_stopped_or_traced(task)	\
			((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0)
#define task_contributes_to_load(task)	\
				((task->state & TASK_UNINTERRUPTIBLE) != 0 && \
				 (task->flags & PF_FREEZING) == 0)

#define __set_task_state(tsk, state_value)		\
	do { (tsk)->state = (state_value); } while (0)
#define set_task_state(tsk, state_value)		\
	set_mb((tsk)->state, (state_value))

/*
 * set_current_state() includes a barrier so that the write of current->state
 * is correctly serialised wrt the caller's subsequent test of whether to
 * actually sleep:
 *
 *	set_current_state(TASK_UNINTERRUPTIBLE);
 *	if (do_i_need_to_sleep())
 *		schedule();
 *
 * If the caller does not need such serialisation then use __set_current_state()
 */
#define __set_current_state(state_value)			\
	do { current->state = (state_value); } while (0)
#define set_current_state(state_value)		\
	set_mb(current->state, (state_value))

/* Task command name length */
#define TASK_COMM_LEN 16

#include <linux/spinlock.h>

/*
 * This serializes "schedule()" and also protects
 * the run-queue from deletions/modifications (but
 * _adding_ to the beginning of the run-queue has
 * a separate lock).
 */
extern rwlock_t tasklist_lock;
extern spinlock_t mmlist_lock;

struct task_struct;

extern void sched_init(void);
extern void sched_init_smp(void);
extern asmlinkage void schedule_tail(struct task_struct *prev);
extern void init_idle(struct task_struct *idle, int cpu);
extern void init_idle_bootup_task(struct task_struct *idle);

extern int runqueue_is_locked(int cpu);
extern void task_rq_unlock_wait(struct task_struct *p);

extern cpumask_var_t nohz_cpu_mask;
#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ)
extern int select_nohz_load_balancer(int cpu);
extern int get_nohz_load_balancer(void);
#else
static inline int select_nohz_load_balancer(int cpu)
{
	return 0;
}
#endif

/*
 * Only dump TASK_* tasks. (0 for all tasks)
 */
extern void show_state_filter(unsigned long state_filter);

static inline void show_state(void)
{
	show_state_filter(0);
}

extern void show_regs(struct pt_regs *);

/*
 * TASK is a pointer to the task whose backtrace we want to see (or NULL for current
 * task), SP is the stack pointer of the first frame that should be shown in the back
 * trace (or NULL if the entire call-chain of the task should be shown).
 */
extern void show_stack(struct task_struct *task, unsigned long *sp);

void io_schedule(void);
long io_schedule_timeout(long timeout);

extern void cpu_init (void);
extern void trap_init(void);
extern void update_process_times(int user);
extern void scheduler_tick(void);

extern void sched_show_task(struct task_struct *p);

#ifdef CONFIG_DETECT_SOFTLOCKUP
extern void softlockup_tick(void);
extern void touch_softlockup_watchdog(void);
extern void touch_all_softlockup_watchdogs(void);
extern int proc_dosoftlockup_thresh(struct ctl_table *table, int write,
				    struct file *filp, void __user *buffer,
				    size_t *lenp, loff_t *ppos);
extern unsigned int  softlockup_panic;
extern int softlockup_thresh;
#else
static inline void softlockup_tick(void)
{
}
static inline void touch_softlockup_watchdog(void)
{
}
static inline void touch_all_softlockup_watchdogs(void)
{
}
#endif

#ifdef CONFIG_DETECT_HUNG_TASK
extern unsigned int  sysctl_hung_task_panic;
extern unsigned long sysctl_hung_task_check_count;
extern unsigned long sysctl_hung_task_timeout_secs;
extern unsigned long sysctl_hung_task_warnings;
extern int proc_dohung_task_timeout_secs(struct ctl_table *table, int write,
					 struct file *filp, void __user *buffer,
					 size_t *lenp, loff_t *ppos);
#endif

/* Attach to any functions which should be ignored in wchan output. */
#define __sched		__attribute__((__section__(".sched.text")))

/* Linker adds these: start and end of __sched functions */
extern char __sched_text_start[], __sched_text_end[];

/* Is this address in the __sched functions? */
extern int in_sched_functions(unsigned long addr);

#define	MAX_SCHEDULE_TIMEOUT	LONG_MAX
extern signed long schedule_timeout(signed long timeout);
extern signed long schedule_timeout_interruptible(signed long timeout);
extern signed long schedule_timeout_killable(signed long timeout);
extern signed long schedule_timeout_uninterruptible(signed long timeout);
asmlinkage void __schedule(void);
asmlinkage void schedule(void);
extern int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner);

struct nsproxy;
struct user_namespace;

/*
 * Default maximum number of active map areas; this limits the number of vmas
 * per mm struct. Users can override this number via sysctl, but there is a
 * problem.
 *
 * When a program's coredump is generated in ELF format, one section is created
 * per vma. In ELF the number of sections is stored in an unsigned short, so it
 * must be smaller than 65535 at coredump time. Because the kernel adds some
 * informative sections to the program image when generating the coredump, we
 * need some margin. The number of extra sections is currently 1-3, depending
 * on the arch, so we use 5 as a safe margin here.
 */
#define MAPCOUNT_ELF_CORE_MARGIN	(5)
#define DEFAULT_MAX_MAP_COUNT	(USHORT_MAX - MAPCOUNT_ELF_CORE_MARGIN)

extern int sysctl_max_map_count;

#include <linux/aio.h>

extern unsigned long
arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
		       unsigned long, unsigned long);
extern unsigned long
arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
			  unsigned long len, unsigned long pgoff,
			  unsigned long flags);
extern void arch_unmap_area(struct mm_struct *, unsigned long);
extern void arch_unmap_area_topdown(struct mm_struct *, unsigned long);

#if USE_SPLIT_PTLOCKS
/*
 * The mm counters are not protected by its page_table_lock,
 * so must be incremented atomically.
 */
#define set_mm_counter(mm, member, value) atomic_long_set(&(mm)->_##member, value)
#define get_mm_counter(mm, member) ((unsigned long)atomic_long_read(&(mm)->_##member))
#define add_mm_counter(mm, member, value) atomic_long_add(value, &(mm)->_##member)
#define inc_mm_counter(mm, member) atomic_long_inc(&(mm)->_##member)
#define dec_mm_counter(mm, member) atomic_long_dec(&(mm)->_##member)

#else  /* !USE_SPLIT_PTLOCKS */
/*
 * The mm counters are protected by its page_table_lock,
 * so can be incremented directly.
 */
#define set_mm_counter(mm, member, value) (mm)->_##member = (value)
#define get_mm_counter(mm, member) ((mm)->_##member)
#define add_mm_counter(mm, member, value) (mm)->_##member += (value)
#define inc_mm_counter(mm, member) (mm)->_##member++
#define dec_mm_counter(mm, member) (mm)->_##member--

#endif /* !USE_SPLIT_PTLOCKS */

#define get_mm_rss(mm)					\
	(get_mm_counter(mm, file_rss) + get_mm_counter(mm, anon_rss))
#define update_hiwater_rss(mm)	do {			\
	unsigned long _rss = get_mm_rss(mm);		\
	if ((mm)->hiwater_rss < _rss)			\
		(mm)->hiwater_rss = _rss;		\
} while (0)
#define update_hiwater_vm(mm)	do {			\
	if ((mm)->hiwater_vm < (mm)->total_vm)		\
		(mm)->hiwater_vm = (mm)->total_vm;	\
} while (0)
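
/*
 * Illustrative example only: a page-fault path that installs a new anonymous
 * page bumps the counter with
 *
 *	inc_mm_counter(mm, anon_rss);
 *
 * while code about to shrink the address space typically calls
 *
 *	update_hiwater_rss(mm);
 *
 * first, so that get_mm_hiwater_rss() still reports the peak usage.
 */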

static inline unsigned long get_mm_hiwater_rss(struct mm_struct *mm)
{
	return max(mm->hiwater_rss, get_mm_rss(mm));
}

static inline unsigned long get_mm_hiwater_vm(struct mm_struct *mm)
{
	return max(mm->hiwater_vm, mm->total_vm);
}

extern void set_dumpable(struct mm_struct *mm, int value);
extern int get_dumpable(struct mm_struct *mm);

/* mm flags */
/* dumpable bits */
#define MMF_DUMPABLE      0  /* core dump is permitted */
#define MMF_DUMP_SECURELY 1  /* core file is readable only by root */

#define MMF_DUMPABLE_BITS 2
#define MMF_DUMPABLE_MASK ((1 << MMF_DUMPABLE_BITS) - 1)

/* coredump filter bits */
#define MMF_DUMP_ANON_PRIVATE	2
#define MMF_DUMP_ANON_SHARED	3
#define MMF_DUMP_MAPPED_PRIVATE	4
#define MMF_DUMP_MAPPED_SHARED	5
#define MMF_DUMP_ELF_HEADERS	6
#define MMF_DUMP_HUGETLB_PRIVATE 7
#define MMF_DUMP_HUGETLB_SHARED  8

#define MMF_DUMP_FILTER_SHIFT	MMF_DUMPABLE_BITS
#define MMF_DUMP_FILTER_BITS	7
#define MMF_DUMP_FILTER_MASK \
	(((1 << MMF_DUMP_FILTER_BITS) - 1) << MMF_DUMP_FILTER_SHIFT)
#define MMF_DUMP_FILTER_DEFAULT \
	((1 << MMF_DUMP_ANON_PRIVATE) |	(1 << MMF_DUMP_ANON_SHARED) |\
	 (1 << MMF_DUMP_HUGETLB_PRIVATE) | MMF_DUMP_MASK_DEFAULT_ELF)

#ifdef CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS
# define MMF_DUMP_MASK_DEFAULT_ELF	(1 << MMF_DUMP_ELF_HEADERS)
#else
# define MMF_DUMP_MASK_DEFAULT_ELF	0
#endif
					/* leave room for more dump flags */
#define MMF_VM_MERGEABLE	16	/* KSM may merge identical pages */

#define MMF_INIT_MASK		(MMF_DUMPABLE_MASK | MMF_DUMP_FILTER_MASK)
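
/*
 * Illustrative note: the MMF_DUMP_* filter bits are what /proc/<pid>/coredump_filter
 * exposes, shifted down by MMF_DUMP_FILTER_SHIFT; e.g. writing 0x3 there requests
 * that anonymous private and anonymous shared mappings (MMF_DUMP_ANON_PRIVATE and
 * MMF_DUMP_ANON_SHARED) be included in the core dump.
 */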

struct sighand_struct {
	atomic_t		count;
	struct k_sigaction	action[_NSIG];
	spinlock_t		siglock;
	wait_queue_head_t	signalfd_wqh;
};

struct pacct_struct {
	int			ac_flag;
	long			ac_exitcode;
	unsigned long		ac_mem;
	cputime_t		ac_utime, ac_stime;
	unsigned long		ac_minflt, ac_majflt;
};

/**
 * struct task_cputime - collected CPU time counts
 * @utime:		time spent in user mode, in &cputime_t units
 * @stime:		time spent in kernel mode, in &cputime_t units
 * @sum_exec_runtime:	total time spent on the CPU, in nanoseconds
 *
 * This structure groups together three kinds of CPU time that are
 * tracked for threads and thread groups.  Most things considering
 * CPU time want to group these counts together and treat all three
 * of them in parallel.
 */
struct task_cputime {
	cputime_t utime;
	cputime_t stime;
	unsigned long long sum_exec_runtime;
};
/* Alternate field names when used to cache expirations. */
#define prof_exp	stime
#define virt_exp	utime
#define sched_exp	sum_exec_runtime

#define INIT_CPUTIME	\
	(struct task_cputime) {					\
		.utime = cputime_zero,				\
		.stime = cputime_zero,				\
		.sum_exec_runtime = 0,				\
	}

/*
 * Disable preemption until the scheduler is running.
 * Reset by start_kernel()->sched_init()->init_idle().
 *
 * We include PREEMPT_ACTIVE to avoid cond_resched() from working
 * before the scheduler is active -- see should_resched().
 */
#define INIT_PREEMPT_COUNT	(1 + PREEMPT_ACTIVE)

/**
 * struct thread_group_cputimer - thread group interval timer counts
 * @cputime:		thread group interval timers.
 * @running:		non-zero when there are timers running and
 * 			@cputime receives updates.
 * @lock:		lock for fields in this struct.
 *
 * This structure contains the version of task_cputime, above, that is
 * used for thread group CPU timer calculations.
 */
struct thread_group_cputimer {
	struct task_cputime cputime;
	int running;
	spinlock_t lock;
};

/*
 * NOTE! "signal_struct" does not have its own
 * locking, because a shared signal_struct always
 * implies a shared sighand_struct, so locking
 * sighand_struct is always a proper superset of
 * the locking of signal_struct.
 */
struct signal_struct {
	atomic_t		count;
	atomic_t		live;

	wait_queue_head_t	wait_chldexit;	/* for wait4() */

	/* current thread group signal load-balancing target: */
	struct task_struct	*curr_target;

	/* shared signal handling: */
	struct sigpending	shared_pending;

	/* thread group exit support */
	int			group_exit_code;
	/* overloaded:
	 * - notify group_exit_task when ->count is equal to notify_count
	 * - everyone except group_exit_task is stopped during signal delivery
	 *   of fatal signals, group_exit_task processes the signal.
	 */
	int			notify_count;
	struct task_struct	*group_exit_task;

	/* thread group stop support, overloads group_exit_code too */
	int			group_stop_count;
	unsigned int		flags; /* see SIGNAL_* flags below */

	/* POSIX.1b Interval Timers */
	struct list_head posix_timers;

	/* ITIMER_REAL timer for the process */
	struct hrtimer real_timer;
	struct pid *leader_pid;
	ktime_t it_real_incr;

	/* ITIMER_PROF and ITIMER_VIRTUAL timers for the process */
	cputime_t it_prof_expires, it_virt_expires;
	cputime_t it_prof_incr, it_virt_incr;

	/*
	 * Thread group totals for process CPU timers.
	 * See thread_group_cputimer(), et al, for details.
	 */
	struct thread_group_cputimer cputimer;

	/* Earliest-expiration cache. */
	struct task_cputime cputime_expires;

	struct list_head cpu_timers[3];

	struct pid *tty_old_pgrp;

	/* boolean value for session group leader */
	int leader;

	struct tty_struct *tty; /* NULL if no tty */

	/*
	 * Cumulative resource counters for dead threads in the group,
	 * and for reaped dead child processes forked by this group.
	 * Live threads maintain their own counters and add to these
	 * in __exit_signal, except for the group leader.
	 */
	cputime_t utime, stime, cutime, cstime;
	cputime_t gtime;
	cputime_t cgtime;
	unsigned long nvcsw, nivcsw, cnvcsw, cnivcsw;
	unsigned long min_flt, maj_flt, cmin_flt, cmaj_flt;
	unsigned long inblock, oublock, cinblock, coublock;
	struct task_io_accounting ioac;

	/*
	 * Cumulative ns of scheduled CPU time for dead threads in the
	 * group, not including a zombie group leader. (This only differs
	 * from jiffies_to_ns(utime + stime) if sched_clock uses something
	 * other than jiffies.)
	 */
	unsigned long long sum_sched_runtime;

	/*
	 * We don't bother to synchronize most readers of this at all,
	 * because there is no reader checking a limit that actually needs
	 * to get both rlim_cur and rlim_max atomically, and either one
	 * alone is a single word that can safely be read normally.
	 * getrlimit/setrlimit use task_lock(current->group_leader) to
	 * protect this instead of the siglock, because they really
	 * have no need to disable irqs.
	 */
	struct rlimit rlim[RLIM_NLIMITS];

#ifdef CONFIG_BSD_PROCESS_ACCT
	struct pacct_struct pacct;	/* per-process accounting information */
#endif
#ifdef CONFIG_TASKSTATS
	struct taskstats *stats;
#endif
#ifdef CONFIG_AUDIT
	unsigned audit_tty;
	struct tty_audit_buf *tty_audit_buf;
#endif

	int oom_adj;	/* OOM kill score adjustment (bit shift) */
};

/* Context switch must be unlocked if interrupts are to be enabled */
#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
# define __ARCH_WANT_UNLOCKED_CTXSW
#endif

/*
 * Bits in flags field of signal_struct.
 */
#define SIGNAL_STOP_STOPPED	0x00000001 /* job control stop in effect */
#define SIGNAL_STOP_DEQUEUED	0x00000002 /* stop signal dequeued */
#define SIGNAL_STOP_CONTINUED	0x00000004 /* SIGCONT since WCONTINUED reap */
#define SIGNAL_GROUP_EXIT	0x00000008 /* group exit in progress */
/*
 * Pending notifications to parent.
 */
#define SIGNAL_CLD_STOPPED	0x00000010
#define SIGNAL_CLD_CONTINUED	0x00000020
#define SIGNAL_CLD_MASK		(SIGNAL_CLD_STOPPED|SIGNAL_CLD_CONTINUED)

#define SIGNAL_UNKILLABLE	0x00000040 /* for init: ignore fatal signals */

/* If true, all threads except ->group_exit_task have pending SIGKILL */
static inline int signal_group_exit(const struct signal_struct *sig)
{
	return	(sig->flags & SIGNAL_GROUP_EXIT) ||
		(sig->group_exit_task != NULL);
}

/*
 * Some day this will be a full-fledged user tracking system..
 */
struct user_struct {
	atomic_t __count;	/* reference count */
	atomic_t processes;	/* How many processes does this user have? */
	atomic_t files;		/* How many open files does this user have? */
	atomic_t sigpending;	/* How many pending signals does this user have? */
#ifdef CONFIG_INOTIFY_USER
	atomic_t inotify_watches; /* How many inotify watches does this user have? */
	atomic_t inotify_devs;	/* How many inotify devs does this user have opened? */
#endif
#ifdef CONFIG_EPOLL
	atomic_t epoll_watches;	/* The number of file descriptors currently watched */
#endif
#ifdef CONFIG_POSIX_MQUEUE
	/* protected by mq_lock	*/
	unsigned long mq_bytes;	/* How many bytes can be allocated to mqueue? */
#endif
	unsigned long locked_shm; /* How many pages of mlocked shm ? */

#ifdef CONFIG_KEYS
	struct key *uid_keyring;	/* UID specific keyring */
	struct key *session_keyring;	/* UID's default session keyring */
#endif

	/* Hash table maintenance information */
	struct hlist_node uidhash_node;
	uid_t uid;
	struct user_namespace *user_ns;

#ifdef CONFIG_USER_SCHED
	struct task_group *tg;
#ifdef CONFIG_SYSFS
	struct kobject kobj;
	struct delayed_work work;
#endif
#endif

#ifdef CONFIG_PERF_EVENTS
	atomic_long_t locked_vm;
#endif
};

extern int uids_sysfs_init(void);

extern struct user_struct *find_user(uid_t);

extern struct user_struct root_user;
#define INIT_USER (&root_user)


struct backing_dev_info;
struct reclaim_state;

#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
struct sched_info {
	/* cumulative counters */
	unsigned long pcount;	      /* # of times run on this cpu */
	unsigned long long run_delay; /* time spent waiting on a runqueue */

	/* timestamps */
	unsigned long long last_arrival,/* when we last ran on a cpu */
			   last_queued;	/* when we were last queued to run */
#ifdef CONFIG_SCHEDSTATS
	/* BKL stats */
	unsigned int bkl_count;
#endif
};
#endif /* defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT) */

#ifdef CONFIG_TASK_DELAY_ACCT
struct task_delay_info {
	spinlock_t	lock;
	unsigned int	flags;	/* Private per-task flags */

	/* For each stat XXX, add following, aligned appropriately
	 *
	 * struct timespec XXX_start, XXX_end;
	 * u64 XXX_delay;
	 * u32 XXX_count;
	 *
	 * Atomicity of updates to XXX_delay, XXX_count protected by
	 * single lock above (split into XXX_lock if contention is an issue).
	 */

	/*
	 * XXX_count is incremented on every XXX operation, the delay
	 * associated with the operation is added to XXX_delay.
	 * XXX_delay contains the accumulated delay time in nanoseconds.
	 */
	struct timespec blkio_start, blkio_end;	/* Shared by blkio, swapin */
	u64 blkio_delay;	/* wait for sync block io completion */
	u64 swapin_delay;	/* wait for swapin block io completion */
	u32 blkio_count;	/* total count of the number of sync block */
				/* io operations performed */
	u32 swapin_count;	/* total count of the number of swapin block */
				/* io operations performed */

	struct timespec freepages_start, freepages_end;
	u64 freepages_delay;	/* wait for memory reclaim */
	u32 freepages_count;	/* total count of memory reclaim */
};
#endif	/* CONFIG_TASK_DELAY_ACCT */

static inline int sched_info_on(void)
{
#ifdef CONFIG_SCHEDSTATS
	return 1;
#elif defined(CONFIG_TASK_DELAY_ACCT)
	extern int delayacct_on;
	return delayacct_on;
#else
	return 0;
#endif
}

enum cpu_idle_type {
	CPU_IDLE,
	CPU_NOT_IDLE,
	CPU_NEWLY_IDLE,
	CPU_MAX_IDLE_TYPES
};

/*
 * sched-domains (multiprocessor balancing) declarations:
 */

/*
 * Increase resolution of nice-level calculations:
 */
#define SCHED_LOAD_SHIFT	10
#define SCHED_LOAD_SCALE	(1L << SCHED_LOAD_SHIFT)

#define SCHED_LOAD_SCALE_FUZZ	SCHED_LOAD_SCALE

#ifdef CONFIG_SMP
#define SD_LOAD_BALANCE		0x0001	/* Do load balancing on this domain. */
#define SD_BALANCE_NEWIDLE	0x0002	/* Balance when about to become idle */
#define SD_BALANCE_EXEC		0x0004	/* Balance on exec */
#define SD_BALANCE_FORK		0x0008	/* Balance on fork, clone */
#define SD_BALANCE_WAKE		0x0010  /* Balance on wakeup */
#define SD_WAKE_AFFINE		0x0020	/* Wake task to waking CPU */
#define SD_PREFER_LOCAL		0x0040  /* Prefer to keep tasks local to this domain */
#define SD_SHARE_CPUPOWER	0x0080	/* Domain members share cpu power */
#define SD_POWERSAVINGS_BALANCE	0x0100	/* Balance for power savings */
#define SD_SHARE_PKG_RESOURCES	0x0200	/* Domain members share cpu pkg resources */
#define SD_SERIALIZE		0x0400	/* Only a single load balancing instance */

#define SD_PREFER_SIBLING	0x1000	/* Prefer to place tasks in a sibling domain */

enum powersavings_balance_level {
	POWERSAVINGS_BALANCE_NONE = 0,  /* No power saving load balance */
	POWERSAVINGS_BALANCE_BASIC,	/* Fill one thread/core/package
					 * first for long running threads
					 */
	POWERSAVINGS_BALANCE_WAKEUP,	/* Also bias task wakeups to semi-idle
					 * cpu package for power savings
					 */
	MAX_POWERSAVINGS_BALANCE_LEVELS
};

extern int sched_mc_power_savings, sched_smt_power_savings;

static inline int sd_balance_for_mc_power(void)
{
	if (sched_smt_power_savings)
		return SD_POWERSAVINGS_BALANCE;

	return SD_PREFER_SIBLING;
}

static inline int sd_balance_for_package_power(void)
{
	if (sched_mc_power_savings | sched_smt_power_savings)
		return SD_POWERSAVINGS_BALANCE;

	return SD_PREFER_SIBLING;
}

/*
 * Optimise SD flags for power savings:
 * SD_BALANCE_NEWIDLE helps aggressive task consolidation and power savings.
 * Keep default SD flags if sched_{smt,mc}_power_saving=0
 */

static inline int sd_power_saving_flags(void)
{
	if (sched_mc_power_savings | sched_smt_power_savings)
		return SD_BALANCE_NEWIDLE;

	return 0;
}

struct sched_group {
	struct sched_group *next;	/* Must be a circular list */

	/*
	 * CPU power of this group, SCHED_LOAD_SCALE being max power for a
	 * single CPU.
	 */
	unsigned int cpu_power;

	/*
	 * The CPUs this group covers.
	 *
	 * NOTE: this field is variable length. (Allocated dynamically
	 * by attaching extra space to the end of the structure,
	 * depending on how many CPUs the kernel has booted up with)
	 *
	 * It can also be embedded into static data structures at build
	 * time. (See 'struct static_sched_group' in kernel/sched.c)
	 */
	unsigned long cpumask[0];
};

static inline struct cpumask *sched_group_cpus(struct sched_group *sg)
{
	return to_cpumask(sg->cpumask);
}

enum sched_domain_level {
	SD_LV_NONE = 0,
	SD_LV_SIBLING,
	SD_LV_MC,
	SD_LV_CPU,
	SD_LV_NODE,
	SD_LV_ALLNODES,
	SD_LV_MAX
};

struct sched_domain_attr {
	int relax_domain_level;
};

#define SD_ATTR_INIT	(struct sched_domain_attr) {	\
	.relax_domain_level = -1,			\
}
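
/*
 * Illustrative example only: a caller that wants the default relax level can
 * initialize an attribute with
 *
 *	struct sched_domain_attr dattr = SD_ATTR_INIT;
 *
 * before handing it to partition_sched_domains().
 */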

struct sched_domain {
	/* These fields must be setup */
	struct sched_domain *parent;	/* top domain must be null terminated */
	struct sched_domain *child;	/* bottom domain must be null terminated */
	struct sched_group *groups;	/* the balancing groups of the domain */
	unsigned long min_interval;	/* Minimum balance interval ms */
	unsigned long max_interval;	/* Maximum balance interval ms */
	unsigned int busy_factor;	/* less balancing by factor if busy */
	unsigned int imbalance_pct;	/* No balance until over watermark */
	unsigned int cache_nice_tries;	/* Leave cache hot tasks for # tries */
	unsigned int busy_idx;
	unsigned int idle_idx;
	unsigned int newidle_idx;
	unsigned int wake_idx;
	unsigned int forkexec_idx;
	unsigned int smt_gain;
	int flags;			/* See SD_* */
	enum sched_domain_level level;

	/* Runtime fields. */
	unsigned long last_balance;	/* init to jiffies. units in jiffies */
	unsigned int balance_interval;	/* initialise to 1. units in ms. */
	unsigned int nr_balance_failed; /* initialise to 0 */

	u64 last_update;

#ifdef CONFIG_SCHEDSTATS
	/* load_balance() stats */
	unsigned int lb_count[CPU_MAX_IDLE_TYPES];
	unsigned int lb_failed[CPU_MAX_IDLE_TYPES];
	unsigned int lb_balanced[CPU_MAX_IDLE_TYPES];
	unsigned int lb_imbalance[CPU_MAX_IDLE_TYPES];
	unsigned int lb_gained[CPU_MAX_IDLE_TYPES];
	unsigned int lb_hot_gained[CPU_MAX_IDLE_TYPES];
	unsigned int lb_nobusyg[CPU_MAX_IDLE_TYPES];
	unsigned int lb_nobusyq[CPU_MAX_IDLE_TYPES];

	/* Active load balancing */
	unsigned int alb_count;
	unsigned int alb_failed;
	unsigned int alb_pushed;

	/* SD_BALANCE_EXEC stats */
	unsigned int sbe_count;
	unsigned int sbe_balanced;
	unsigned int sbe_pushed;

	/* SD_BALANCE_FORK stats */
	unsigned int sbf_count;
	unsigned int sbf_balanced;
	unsigned int sbf_pushed;

	/* try_to_wake_up() stats */
	unsigned int ttwu_wake_remote;
	unsigned int ttwu_move_affine;
	unsigned int ttwu_move_balance;
#endif
#ifdef CONFIG_SCHED_DEBUG
	char *name;
#endif

	/*
	 * Span of all CPUs in this domain.
	 *
	 * NOTE: this field is variable length. (Allocated dynamically
	 * by attaching extra space to the end of the structure,
	 * depending on how many CPUs the kernel has booted up with)
	 *
	 * It can also be embedded into static data structures at build
	 * time. (See 'struct static_sched_domain' in kernel/sched.c)
	 */
	unsigned long span[0];
};

static inline struct cpumask *sched_domain_span(struct sched_domain *sd)
{
	return to_cpumask(sd->span);
}

extern void partition_sched_domains(int ndoms_new, struct cpumask *doms_new,
				    struct sched_domain_attr *dattr_new);

/* Test a flag in parent sched domain */
static inline int test_sd_parent(struct sched_domain *sd, int flag)
{
	if (sd->parent && (sd->parent->flags & flag))
		return 1;

	return 0;
}

unsigned long default_scale_freq_power(struct sched_domain *sd, int cpu);
unsigned long default_scale_smt_power(struct sched_domain *sd, int cpu);

#else /* CONFIG_SMP */

struct sched_domain_attr;

static inline void
partition_sched_domains(int ndoms_new, struct cpumask *doms_new,
			struct sched_domain_attr *dattr_new)
{
}
#endif	/* !CONFIG_SMP */


struct io_context;			/* See blkdev.h */


#ifdef ARCH_HAS_PREFETCH_SWITCH_STACK
extern void prefetch_stack(struct task_struct *t);
#else
static inline void prefetch_stack(struct task_struct *t) { }
#endif

struct audit_context;		/* See audit.c */
struct mempolicy;
struct pipe_inode_info;
struct uts_namespace;

struct rq;
struct sched_domain;

/*
 * wake flags
 */
#define WF_SYNC		0x01		/* waker goes to sleep after wakeup */
#define WF_FORK		0x02		/* child wakeup after fork */

struct sched_class {
	const struct sched_class *next;

	void (*enqueue_task) (struct rq *rq, struct task_struct *p, int wakeup);
	void (*dequeue_task) (struct rq *rq, struct task_struct *p, int sleep);
	void (*yield_task) (struct rq *rq);

	void (*check_preempt_curr) (struct rq *rq, struct task_struct *p, int flags);

	struct task_struct * (*pick_next_task) (struct rq *rq);
	void (*put_prev_task) (struct rq *rq, struct task_struct *p);

#ifdef CONFIG_SMP
	int  (*select_task_rq)(struct task_struct *p, int sd_flag, int flags);

	unsigned long (*load_balance) (struct rq *this_rq, int this_cpu,
			struct rq *busiest, unsigned long max_load_move,
			struct sched_domain *sd, enum cpu_idle_type idle,
			int *all_pinned, int *this_best_prio);

	int (*move_one_task) (struct rq *this_rq, int this_cpu,
			      struct rq *busiest, struct sched_domain *sd,
			      enum cpu_idle_type idle);
	void (*pre_schedule) (struct rq *this_rq, struct task_struct *task);
	void (*post_schedule) (struct rq *this_rq);
	void (*task_wake_up) (struct rq *this_rq, struct task_struct *task);

	void (*set_cpus_allowed)(struct task_struct *p,
				 const struct cpumask *newmask);

	void (*rq_online)(struct rq *rq);
	void (*rq_offline)(struct rq *rq);
#endif

	void (*set_curr_task) (struct rq *rq);
	void (*task_tick) (struct rq *rq, struct task_struct *p, int queued);
	void (*task_new) (struct rq *rq, struct task_struct *p);

	void (*switched_from) (struct rq *this_rq, struct task_struct *task,
			       int running);
	void (*switched_to) (struct rq *this_rq, struct task_struct *task,
			     int running);
	void (*prio_changed) (struct rq *this_rq, struct task_struct *task,
			     int oldprio, int running);

	unsigned int (*get_rr_interval) (struct task_struct *task);

#ifdef CONFIG_FAIR_GROUP_SCHED
	void (*moved_group) (struct task_struct *p);
#endif
};

struct load_weight {
	unsigned long weight, inv_weight;
};

/*
 * CFS stats for a schedulable entity (task, task-group etc)
 *
 * Current field usage histogram:
 *
 *     4 se->block_start
 *     4 se->run_node
 *     4 se->sleep_start
 *     6 se->load.weight
 */
struct sched_entity {
	struct load_weight	load;		/* for load-balancing */
	struct rb_node		run_node;
	struct list_head	group_node;
	unsigned int		on_rq;

	u64			exec_start;
	u64			sum_exec_runtime;
	u64			vruntime;
	u64			prev_sum_exec_runtime;

	u64			last_wakeup;
	u64			avg_overlap;

	u64			nr_migrations;

	u64			start_runtime;
	u64			avg_wakeup;

	u64			avg_running;

#ifdef CONFIG_SCHEDSTATS
	u64			wait_start;
	u64			wait_max;
	u64			wait_count;
	u64			wait_sum;
	u64			iowait_count;
	u64			iowait_sum;

	u64			sleep_start;
	u64			sleep_max;
	s64			sum_sleep_runtime;

	u64			block_start;
	u64			block_max;
	u64			exec_max;
	u64			slice_max;

	u64			nr_migrations_cold;
	u64			nr_failed_migrations_affine;
	u64			nr_failed_migrations_running;
	u64			nr_failed_migrations_hot;
	u64			nr_forced_migrations;
	u64			nr_forced2_migrations;

	u64			nr_wakeups;
	u64			nr_wakeups_sync;
	u64			nr_wakeups_migrate;
	u64			nr_wakeups_local;
	u64			nr_wakeups_remote;
	u64			nr_wakeups_affine;
	u64			nr_wakeups_affine_attempts;
	u64			nr_wakeups_passive;
	u64			nr_wakeups_idle;
#endif

#ifdef CONFIG_FAIR_GROUP_SCHED
	struct sched_entity	*parent;
	/* rq on which this entity is (to be) queued: */
	struct cfs_rq		*cfs_rq;
	/* rq "owned" by this entity/group: */
	struct cfs_rq		*my_q;
#endif
};

struct sched_rt_entity {
	struct list_head run_list;
	unsigned long timeout;
	unsigned int time_slice;
	int nr_cpus_allowed;

	struct sched_rt_entity *back;
#ifdef CONFIG_RT_GROUP_SCHED
	struct sched_rt_entity	*parent;
	/* rq on which this entity is (to be) queued: */
	struct rt_rq		*rt_rq;
	/* rq "owned" by this entity/group: */
	struct rt_rq		*my_q;
#endif
};

struct rcu_node;

struct task_struct {
	volatile long state;	/* -1 unrunnable, 0 runnable, >0 stopped */
	void *stack;
	atomic_t usage;
	unsigned int flags;	/* per process flags, defined below */
	unsigned int ptrace;

	int lock_depth;		/* BKL lock depth */

#ifdef CONFIG_SMP
#ifdef __ARCH_WANT_UNLOCKED_CTXSW
	int oncpu;
#endif
#endif

	int prio, static_prio, normal_prio;
	unsigned int rt_priority;
	const struct sched_class *sched_class;
	struct sched_entity se;
	struct sched_rt_entity rt;

#ifdef CONFIG_PREEMPT_NOTIFIERS
	/* list of struct preempt_notifier: */
	struct hlist_head preempt_notifiers;
#endif

	/*
	 * fpu_counter contains the number of consecutive context switches
	 * in which the FPU was used. If this is over a threshold, the lazy fpu
	 * saving becomes unlazy to save the trap. This is an unsigned char
	 * so that after 256 times the counter wraps and the behavior turns
	 * lazy again; this to deal with bursty apps that only use FPU for
	 * a short time
	 */
	unsigned char fpu_counter;
#ifdef CONFIG_BLK_DEV_IO_TRACE
	unsigned int btrace_seq;
#endif

	unsigned int policy;
	cpumask_t cpus_allowed;

#ifdef CONFIG_TREE_PREEMPT_RCU
	int rcu_read_lock_nesting;
	char rcu_read_unlock_special;
	struct rcu_node *rcu_blocked_node;
	struct list_head rcu_node_entry;
#endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */

#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
	struct sched_info sched_info;
#endif

	struct list_head tasks;
	struct plist_node pushable_tasks;

	struct mm_struct *mm, *active_mm;

/* task state */
	struct linux_binfmt *binfmt;
	int exit_state;
	int exit_code, exit_signal;
	int pdeath_signal;  /*  The signal sent when the parent dies  */
	/* ??? */
	unsigned int personality;
	unsigned did_exec:1;
	unsigned in_execve:1;	/* Tell the LSMs that the process is doing an
				 * execve */
	unsigned in_iowait:1;


	/* Revert to default priority/policy when forking */
	unsigned sched_reset_on_fork:1;

	pid_t pid;
	pid_t tgid;

#ifdef CONFIG_CC_STACKPROTECTOR
	/* Canary value for the -fstack-protector gcc feature */
	unsigned long stack_canary;
#endif

	/* 
	 * pointers to (original) parent process, youngest child, younger sibling,
	 * older sibling, respectively.  (p->father can be replaced with 
	 * p->real_parent->pid)
	 */
	struct task_struct *real_parent; /* real parent process */
	struct task_struct *parent; /* recipient of SIGCHLD, wait4() reports */
	/*
	 * children/sibling forms the list of my natural children
	 */
	struct list_head children;	/* list of my children */
	struct list_head sibling;	/* linkage in my parent's children list */
	struct task_struct *group_leader;	/* threadgroup leader */

	/*
	 * ptraced is the list of tasks this task is using ptrace on.
	 * This includes both natural children and PTRACE_ATTACH targets.
	 * p->ptrace_entry is p's link on the p->parent->ptraced list.
	 */
	struct list_head ptraced;
	struct list_head ptrace_entry;

	/*
	 * This is the tracer handle for the ptrace BTS extension.
	 * This field actually belongs to the ptracer task.
	 */
	struct bts_context *bts;

	/* PID/PID hash table linkage. */
	struct pid_link pids[PIDTYPE_MAX];
	struct list_head thread_group;

	struct completion *vfork_done;		/* for vfork() */
	int __user *set_child_tid;		/* CLONE_CHILD_SETTID */
	int __user *clear_child_tid;		/* CLONE_CHILD_CLEARTID */

	cputime_t utime, stime, utimescaled, stimescaled;
	cputime_t gtime;
	cputime_t prev_utime, prev_stime;
	unsigned long nvcsw, nivcsw; /* context switch counts */
	struct timespec start_time; 		/* monotonic time */
	struct timespec real_start_time;	/* boot based time */
/* mm fault and swap info: this can arguably be seen as either mm-specific or thread-specific */
	unsigned long min_flt, maj_flt;

	struct task_cputime cputime_expires;
	struct list_head cpu_timers[3];

/* process credentials */
	const struct cred *real_cred;	/* objective and real subjective task
					 * credentials (COW) */
	const struct cred *cred;	/* effective (overridable) subjective task
					 * credentials (COW) */
	struct mutex cred_guard_mutex;	/* guard against foreign influences on
					 * credential calculations
					 * (notably. ptrace) */
	struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */

	char comm[TASK_COMM_LEN]; /* executable name excluding path
				     - access with [gs]et_task_comm (which lock
				       it with task_lock())
				     - initialized normally by flush_old_exec */
/* file system info */
	int link_count, total_link_count;
#ifdef CONFIG_SYSVIPC
/* ipc stuff */
	struct sysv_sem sysvsem;
#endif
#ifdef CONFIG_DETECT_HUNG_TASK
/* hung task detection */
	unsigned long last_switch_count;
#endif
/* CPU-specific state of this task */
	struct thread_struct thread;
/* filesystem information */
	struct fs_struct *fs;
/* open file information */
	struct files_struct *files;
/* namespaces */
	struct nsproxy *nsproxy;
/* signal handlers */
	struct signal_struct *signal;
	struct sighand_struct *sighand;

	sigset_t blocked, real_blocked;
	sigset_t saved_sigmask;	/* restored if set_restore_sigmask() was used */
	struct sigpending pending;

	unsigned long sas_ss_sp;
	size_t sas_ss_size;
	int (*notifier)(void *priv);
	void *notifier_data;
	sigset_t *notifier_mask;
	struct audit_context *audit_context;
#ifdef CONFIG_AUDITSYSCALL
	uid_t loginuid;
	unsigned int sessionid;
#endif
	seccomp_t seccomp;

/* Thread group tracking */
   	u32 parent_exec_id;
   	u32 self_exec_id;
/* Protection of (de-)allocation: mm, files, fs, tty, keyrings, mems_allowed,
 * mempolicy */
	spinlock_t alloc_lock;

#ifdef CONFIG_GENERIC_HARDIRQS
	/* IRQ handler threads */
	struct irqaction *irqaction;
#endif

	/* Protection of the PI data structures: */
	spinlock_t pi_lock;

#ifdef CONFIG_RT_MUTEXES
	/* PI waiters blocked on a rt_mutex held by this task */
	struct plist_head pi_waiters;
	/* Deadlock detection and priority inheritance handling */
	struct rt_mutex_waiter *pi_blocked_on;
#endif

#ifdef CONFIG_DEBUG_MUTEXES
	/* mutex deadlock detection */
	struct mutex_waiter *blocked_on;
#endif
#ifdef CONFIG_TRACE_IRQFLAGS
	unsigned int irq_events;
	int hardirqs_enabled;
	unsigned long hardirq_enable_ip;
	unsigned int hardirq_enable_event;
	unsigned long hardirq_disable_ip;
	unsigned int hardirq_disable_event;
	int softirqs_enabled;
	unsigned long softirq_disable_ip;
	unsigned int softirq_disable_event;
	unsigned long softirq_enable_ip;
	unsigned int softirq_enable_event;
	int hardirq_context;
	int softirq_context;
#endif
#ifdef CONFIG_LOCKDEP
# define MAX_LOCK_DEPTH 48UL
	u64 curr_chain_key;
	int lockdep_depth;
	unsigned int lockdep_recursion;
	struct held_lock held_locks[MAX_LOCK_DEPTH];
	gfp_t lockdep_reclaim_gfp;
#endif

/* journalling filesystem info */
	void *journal_info;

/* stacked block device info */
	struct bio *bio_list, **bio_tail;

/* VM state */
	struct reclaim_state *reclaim_state;

	struct backing_dev_info *backing_dev_info;

	struct io_context *io_context;

	unsigned long ptrace_message;
	siginfo_t *last_siginfo; /* For ptrace use.  */
	struct task_io_accounting ioac;
#if defined(CONFIG_TASK_XACCT)
	u64 acct_rss_mem1;	/* accumulated rss usage */
	u64 acct_vm_mem1;	/* accumulated virtual memory usage */
	cputime_t acct_timexpd;	/* stime + utime since last update */
#endif
#ifdef CONFIG_CPUSETS
	nodemask_t mems_allowed;	/* Protected by alloc_lock */
	int cpuset_mem_spread_rotor;
#endif
#ifdef CONFIG_CGROUPS
	/* Control Group info protected by css_set_lock */
	struct css_set *cgroups;
	/* cg_list protected by css_set_lock and tsk->alloc_lock */
	struct list_head cg_list;
#endif
#ifdef CONFIG_FUTEX
	struct robust_list_head __user *robust_list;
#ifdef CONFIG_COMPAT
	struct compat_robust_list_head __user *compat_robust_list;
#endif
	struct list_head pi_state_list;
	struct futex_pi_state *pi_state_cache;
#endif
#ifdef CONFIG_PERF_EVENTS
	struct perf_event_context *perf_event_ctxp;
	struct mutex perf_event_mutex;
	struct list_head perf_event_list;
#endif
#ifdef CONFIG_NUMA
	struct mempolicy *mempolicy;	/* Protected by alloc_lock */
	short il_next;
#endif
	atomic_t fs_excl;	/* holding fs exclusive resources */
	struct rcu_head rcu;

	/*
	 * cache last used pipe for splice
	 */
	struct pipe_inode_info *splice_pipe;
#ifdef	CONFIG_TASK_DELAY_ACCT
	struct task_delay_info *delays;
#endif
#ifdef CONFIG_FAULT_INJECTION
	int make_it_fail;
#endif
	struct prop_local_single dirties;
#ifdef CONFIG_LATENCYTOP
	int latency_record_count;
	struct latency_record latency_record[LT_SAVECOUNT];
#endif
	/*
	 * time slack values; these are used to round up poll() and
	 * select() etc timeout values. These are in nanoseconds.
	 */
	unsigned long timer_slack_ns;
	unsigned long default_timer_slack_ns;

	struct list_head	*scm_work_list;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	/* Index of currently stored return address in ret_stack */
	int curr_ret_stack;
	/* Stack of return addresses for return function tracing */
	struct ftrace_ret_stack	*ret_stack;
	/* time stamp for last schedule */
	unsigned long long ftrace_timestamp;
	/*
	 * Number of functions that haven't been traced
	 * because of depth overrun.
	 */
	atomic_t trace_overrun;
	/* Pause for the tracing */
	atomic_t tracing_graph_pause;
#endif
#ifdef CONFIG_TRACING
	/* state flags for use by tracers */
	unsigned long trace;
	/* bitmask of trace recursion */
	unsigned long trace_recursion;
#endif /* CONFIG_TRACING */
};

/* Future-safe accessor for struct task_struct's cpus_allowed. */
#define tsk_cpumask(tsk) (&(tsk)->cpus_allowed)

/*
 * Priority of a process goes from 0..MAX_PRIO-1, valid RT
 * priority is 0..MAX_RT_PRIO-1, and SCHED_NORMAL/SCHED_BATCH
 * tasks are in the range MAX_RT_PRIO..MAX_PRIO-1. Priority
 * values are inverted: lower p->prio value means higher priority.
 *
 * The MAX_USER_RT_PRIO value allows the actual maximum
 * RT priority to be separate from the value exported to
 * user-space.  This allows kernel threads to set their
 * priority to a value higher than any user task. Note:
 * MAX_RT_PRIO must not be smaller than MAX_USER_RT_PRIO.
 */

#define MAX_USER_RT_PRIO	100
#define MAX_RT_PRIO		MAX_USER_RT_PRIO

#define MAX_PRIO		(MAX_RT_PRIO + 40)
#define DEFAULT_PRIO		(MAX_RT_PRIO + 20)

static inline int rt_prio(int prio)
{
	if (unlikely(prio < MAX_RT_PRIO))
		return 1;
	return 0;
}
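/*
 * Illustrative sketch (the numbers follow from the defaults above, they
 * are not an ABI statement): MAX_RT_PRIO is 100 and MAX_PRIO is 140, so
 * an RT task at rt_priority 50 ends up at p->prio == 49, while a nice-0
 * SCHED_NORMAL task sits at DEFAULT_PRIO == 120:
 *
 *	prio = MAX_RT_PRIO - 1 - rt_priority;	RT tasks map to 99..0
 *	prio = MAX_RT_PRIO + 20 + nice;		nice -20..19 maps to 100..139
 *
 * Hence rt_prio(49) == 1 and rt_prio(120) == 0.
 */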

static inline int rt_task(struct task_struct *p)
{
	return rt_prio(p->prio);
}

static inline struct pid *task_pid(struct task_struct *task)
{
	return task->pids[PIDTYPE_PID].pid;
}

static inline struct pid *task_tgid(struct task_struct *task)
{
	return task->group_leader->pids[PIDTYPE_PID].pid;
}

/*
 * Without tasklist or rcu lock it is not safe to dereference
 * the result of task_pgrp/task_session even if task == current;
 * we can race with another thread doing sys_setsid/sys_setpgid.
 */
static inline struct pid *task_pgrp(struct task_struct *task)
{
	return task->group_leader->pids[PIDTYPE_PGID].pid;
}

static inline struct pid *task_session(struct task_struct *task)
{
	return task->group_leader->pids[PIDTYPE_SID].pid;
}

struct pid_namespace;

/*
 * the helpers to get the task's different pids as they are seen
 * from various namespaces
 *
 * task_xid_nr()     : global id, i.e. the id seen from the init namespace;
 * task_xid_vnr()    : virtual id, i.e. the id seen from the pid namespace of
 *                     current.
 * task_xid_nr_ns()  : id seen from the ns specified;
 *
 * set_task_vxid()   : assigns a virtual id to a task;
 *
 * see also pid_nr() etc in include/linux/pid.h
 */
pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type,
			struct pid_namespace *ns);

static inline pid_t task_pid_nr(struct task_struct *tsk)
{
	return tsk->pid;
}

static inline pid_t task_pid_nr_ns(struct task_struct *tsk,
					struct pid_namespace *ns)
{
	return __task_pid_nr_ns(tsk, PIDTYPE_PID, ns);
}

static inline pid_t task_pid_vnr(struct task_struct *tsk)
{
	return __task_pid_nr_ns(tsk, PIDTYPE_PID, NULL);
}


static inline pid_t task_tgid_nr(struct task_struct *tsk)
{
	return tsk->tgid;
}

pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns);

static inline pid_t task_tgid_vnr(struct task_struct *tsk)
{
	return pid_vnr(task_tgid(tsk));
}


static inline pid_t task_pgrp_nr_ns(struct task_struct *tsk,
					struct pid_namespace *ns)
{
	return __task_pid_nr_ns(tsk, PIDTYPE_PGID, ns);
}

static inline pid_t task_pgrp_vnr(struct task_struct *tsk)
{
	return __task_pid_nr_ns(tsk, PIDTYPE_PGID, NULL);
}


static inline pid_t task_session_nr_ns(struct task_struct *tsk,
					struct pid_namespace *ns)
{
	return __task_pid_nr_ns(tsk, PIDTYPE_SID, ns);
}

static inline pid_t task_session_vnr(struct task_struct *tsk)
{
	return __task_pid_nr_ns(tsk, PIDTYPE_SID, NULL);
}
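/*
 * Illustrative sketch: the same task reported under two views.  Assumes
 * "p" is a valid task pointer held under rcu_read_lock(); the values
 * only differ when pid namespaces are in use.
 *
 *	pid_t global = task_pid_nr(p);		as seen from init_pid_ns
 *	pid_t virt   = task_pid_vnr(p);		as seen by current's ns
 *	pid_t tgid   = task_tgid_vnr(p);	thread-group id, virtual
 */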

/* obsolete, do not use */
static inline pid_t task_pgrp_nr(struct task_struct *tsk)
{
	return task_pgrp_nr_ns(tsk, &init_pid_ns);
}

/**
 * pid_alive - check that a task structure is not stale
 * @p: Task structure to be checked.
 *
 * Test if a process is not yet dead (at most zombie state)
 * If pid_alive fails, then pointers within the task structure
 * can be stale and must not be dereferenced.
 */
static inline int pid_alive(struct task_struct *p)
{
	return p->pids[PIDTYPE_PID].pid != NULL;
}
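/*
 * Illustrative sketch: pid_alive() is normally checked under
 * rcu_read_lock() before chasing pointers out of the task; "ppid" is a
 * hypothetical local used only for this example.
 *
 *	rcu_read_lock();
 *	if (pid_alive(p))
 *		ppid = task_tgid_nr(rcu_dereference(p->real_parent));
 *	rcu_read_unlock();
 */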

/**
 * is_global_init - check if a task structure is init
 * @tsk: Task structure to be checked.
 *
 * Check if a task structure is the first user space task the kernel created.
 */
static inline int is_global_init(struct task_struct *tsk)
{
	return tsk->pid == 1;
}

/*
 * is_container_init:
 * check whether the task is init in its own pid namespace.
 */
extern int is_container_init(struct task_struct *tsk);

extern struct pid *cad_pid;

extern void free_task(struct task_struct *tsk);
#define get_task_struct(tsk) do { atomic_inc(&(tsk)->usage); } while(0)

extern void __put_task_struct(struct task_struct *t);

static inline void put_task_struct(struct task_struct *t)
{
	if (atomic_dec_and_test(&t->usage))
		__put_task_struct(t);
}
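/*
 * Illustrative sketch of the pairing: take a reference before the task
 * pointer escapes the current context, drop it when done.
 *
 *	get_task_struct(tsk);
 *	queue_some_work(tsk);	hypothetical consumer of the reference
 *	put_task_struct(tsk);	done by whoever consumed the reference
 */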

extern cputime_t task_utime(struct task_struct *p);
extern cputime_t task_stime(struct task_struct *p);
extern cputime_t task_gtime(struct task_struct *p);

/*
 * Per process flags
 */
#define PF_ALIGNWARN	0x00000001	/* Print alignment warning msgs */
					/* Not implemented yet, only for 486*/
#define PF_STARTING	0x00000002	/* being created */
#define PF_EXITING	0x00000004	/* getting shut down */
#define PF_EXITPIDONE	0x00000008	/* pi exit done on shut down */
#define PF_VCPU		0x00000010	/* I'm a virtual CPU */
#define PF_FORKNOEXEC	0x00000040	/* forked but didn't exec */
#define PF_SUPERPRIV	0x00000100	/* used super-user privileges */
#define PF_DUMPCORE	0x00000200	/* dumped core */
#define PF_SIGNALED	0x00000400	/* killed by a signal */
#define PF_MEMALLOC	0x00000800	/* Allocating memory */
#define PF_FLUSHER	0x00001000	/* responsible for disk writeback */
#define PF_USED_MATH	0x00002000	/* if unset the fpu must be initialized before use */
#define PF_FREEZING	0x00004000	/* freeze in progress. do not account to load */
#define PF_NOFREEZE	0x00008000	/* this thread should not be frozen */
#define PF_FROZEN	0x00010000	/* frozen for system suspend */
#define PF_FSTRANS	0x00020000	/* inside a filesystem transaction */
#define PF_KSWAPD	0x00040000	/* I am kswapd */
#define PF_OOM_ORIGIN	0x00080000	/* Allocating much memory to others */
#define PF_LESS_THROTTLE 0x00100000	/* Throttle me less: I clean memory */
#define PF_KTHREAD	0x00200000	/* I am a kernel thread */
#define PF_RANDOMIZE	0x00400000	/* randomize virtual address space */
#define PF_SWAPWRITE	0x00800000	/* Allowed to write to swap */
#define PF_SPREAD_PAGE	0x01000000	/* Spread page cache over cpuset */
#define PF_SPREAD_SLAB	0x02000000	/* Spread some slab caches over cpuset */
#define PF_THREAD_BOUND	0x04000000	/* Thread bound to specific cpu */
#define PF_MEMPOLICY	0x10000000	/* Non-default NUMA mempolicy */
#define PF_MUTEX_TESTER	0x20000000	/* Thread belongs to the rt mutex tester */
#define PF_FREEZER_SKIP	0x40000000	/* Freezer should not count it as freezeable */
#define PF_FREEZER_NOSIG 0x80000000	/* Freezer won't send signals to it */

/*
 * Only the _current_ task can read/write to tsk->flags, but other
 * tasks can access tsk->flags in readonly mode for example
 * with tsk_used_math (like during threaded core dumping).
 * There is however an exception to this rule during ptrace
 * or during fork: the ptracer task is allowed to write to the
 * child->flags of its traced child (same goes for fork, the parent
 * can write to the child->flags), because we're guaranteed the
 * child is not running and in turn not changing child->flags
 * at the same time the parent does it.
 */
#define clear_stopped_child_used_math(child) do { (child)->flags &= ~PF_USED_MATH; } while (0)
#define set_stopped_child_used_math(child) do { (child)->flags |= PF_USED_MATH; } while (0)
#define clear_used_math() clear_stopped_child_used_math(current)
#define set_used_math() set_stopped_child_used_math(current)
#define conditional_stopped_child_used_math(condition, child) \
	do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= (condition) ? PF_USED_MATH : 0; } while (0)
#define conditional_used_math(condition) \
	conditional_stopped_child_used_math(condition, current)
#define copy_to_stopped_child_used_math(child) \
	do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= current->flags & PF_USED_MATH; } while (0)
/* NOTE: this will return 0 or PF_USED_MATH, it will never return 1 */
#define tsk_used_math(p) ((p)->flags & PF_USED_MATH)
#define used_math() tsk_used_math(current)
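/*
 * Illustrative sketch of a permitted cross-task write: during fork the
 * parent may seed the stopped child's FPU flag from its own before the
 * child ever runs, e.g.
 *
 *	copy_to_stopped_child_used_math(child);
 *
 * whereas any other task may only read the flag, e.g.
 *
 *	if (tsk_used_math(p))
 *		dump_fpu_state(p);	hypothetical read-only user
 */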

#ifdef CONFIG_TREE_PREEMPT_RCU

#define RCU_READ_UNLOCK_BLOCKED (1 << 0) /* blocked while in RCU read-side. */
#define RCU_READ_UNLOCK_NEED_QS (1 << 1) /* RCU core needs CPU response. */

static inline void rcu_copy_process(struct task_struct *p)
{
	p->rcu_read_lock_nesting = 0;
	p->rcu_read_unlock_special = 0;
	p->rcu_blocked_node = NULL;
	INIT_LIST_HEAD(&p->rcu_node_entry);
}

#else

static inline void rcu_copy_process(struct task_struct *p)
{
}

#endif

#ifdef CONFIG_SMP
extern int set_cpus_allowed_ptr(struct task_struct *p,
				const struct cpumask *new_mask);
#else
static inline int set_cpus_allowed_ptr(struct task_struct *p,
				       const struct cpumask *new_mask)
{
	if (!cpumask_test_cpu(0, new_mask))
		return -EINVAL;
	return 0;
}
#endif
static inline int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
{
	return set_cpus_allowed_ptr(p, &new_mask);
}
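/*
 * Illustrative sketch (not a recommended policy): temporarily pin the
 * current task to CPU 0, then restore its previous mask.  "saved" and
 * do_cpu0_only_work() exist only for this example.
 *
 *	cpumask_t saved = current->cpus_allowed;
 *
 *	set_cpus_allowed_ptr(current, cpumask_of(0));
 *	do_cpu0_only_work();
 *	set_cpus_allowed_ptr(current, &saved);
 */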

/*
 * Architectures can set this to 1 if they have specified
 * CONFIG_HAVE_UNSTABLE_SCHED_CLOCK in their arch Kconfig,
 * but then during bootup it turns out that sched_clock()
 * is reliable after all:
 */
#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
extern int sched_clock_stable;
#endif

extern unsigned long long sched_clock(void);

extern void sched_clock_init(void);
extern u64 sched_clock_cpu(int cpu);

#ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
static inline void sched_clock_tick(void)
{
}

static inline void sched_clock_idle_sleep_event(void)
{
}

static inline void sched_clock_idle_wakeup_event(u64 delta_ns)
{
}
#else
extern void sched_clock_tick(void);
extern void sched_clock_idle_sleep_event(void);
extern void sched_clock_idle_wakeup_event(u64 delta_ns);
#endif

/*
 * For kernel-internal use: high-speed (but slightly incorrect) per-cpu
 * clock constructed from sched_clock():
 */
extern unsigned long long cpu_clock(int cpu);
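/*
 * Illustrative sketch: timing a section with cpu_clock().  Values are in
 * nanoseconds and only locally consistent, so take both samples on the
 * same CPU; do_measured_work() is hypothetical.
 *
 *	int cpu = get_cpu();
 *	u64 t0 = cpu_clock(cpu);
 *	do_measured_work();
 *	pr_info("took %llu ns\n", cpu_clock(cpu) - t0);
 *	put_cpu();
 */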

extern unsigned long long
task_sched_runtime(struct task_struct *task);
extern unsigned long long thread_group_sched_runtime(struct task_struct *task);

/* sched_exec is called by processes performing an exec */
#ifdef CONFIG_SMP
extern void sched_exec(void);
#else
#define sched_exec()   {}
#endif

extern void sched_clock_idle_sleep_event(void);
extern void sched_clock_idle_wakeup_event(u64 delta_ns);

#ifdef CONFIG_HOTPLUG_CPU
extern void idle_task_exit(void);
#else
static inline void idle_task_exit(void) {}
#endif

extern void sched_idle_next(void);

#if defined(CONFIG_NO_HZ) && defined(CONFIG_SMP)
extern void wake_up_idle_cpu(int cpu);
#else
static inline void wake_up_idle_cpu(int cpu) { }
#endif

extern unsigned int sysctl_sched_latency;
extern unsigned int sysctl_sched_min_granularity;
extern unsigned int sysctl_sched_wakeup_granularity;
extern unsigned int sysctl_sched_shares_ratelimit;
extern unsigned int sysctl_sched_shares_thresh;
extern unsigned int sysctl_sched_child_runs_first;
#ifdef CONFIG_SCHED_DEBUG
extern unsigned int sysctl_sched_features;
extern unsigned int sysctl_sched_migration_cost;
extern unsigned int sysctl_sched_nr_migrate;
extern unsigned int sysctl_sched_time_avg;
extern unsigned int sysctl_timer_migration;

int sched_nr_latency_handler(struct ctl_table *table, int write,
		struct file *file, void __user *buffer, size_t *length,
		loff_t *ppos);
#endif
#ifdef CONFIG_SCHED_DEBUG
static inline unsigned int get_sysctl_timer_migration(void)
{
	return sysctl_timer_migration;
}
#else
static inline unsigned int get_sysctl_timer_migration(void)
{
	return 1;
}
#endif
extern unsigned int sysctl_sched_rt_period;
extern int sysctl_sched_rt_runtime;

int sched_rt_handler(struct ctl_table *table, int write,
		struct file *filp, void __user *buffer, size_t *lenp,
		loff_t *ppos);

extern unsigned int sysctl_sched_compat_yield;

#ifdef CONFIG_RT_MUTEXES
extern int rt_mutex_getprio(struct task_struct *p);
extern void rt_mutex_setprio(struct task_struct *p, int prio);
extern void rt_mutex_adjust_pi(struct task_struct *p);
#else
static inline int rt_mutex_getprio(struct task_struct *p)
{
	return p->normal_prio;
}
# define rt_mutex_adjust_pi(p)		do { } while (0)
#endif

extern void set_user_nice(struct task_struct *p, long nice);
extern int task_prio(const struct task_struct *p);
extern int task_nice(const struct task_struct *p);
extern int can_nice(const struct task_struct *p, const int nice);
extern int task_curr(const struct task_struct *p);
extern int idle_cpu(int cpu);
extern int sched_setscheduler(struct task_struct *, int, struct sched_param *);
extern int sched_setscheduler_nocheck(struct task_struct *, int,
				      struct sched_param *);
extern struct task_struct *idle_task(int cpu);
extern struct task_struct *curr_task(int cpu);
extern void set_curr_task(int cpu, struct task_struct *p);

void yield(void);

/*
 * The default (Linux) execution domain.
 */
extern struct exec_domain	default_exec_domain;

union thread_union {
	struct thread_info thread_info;
	unsigned long stack[THREAD_SIZE/sizeof(long)];
};

#ifndef __HAVE_ARCH_KSTACK_END
static inline int kstack_end(void *addr)
{
	/* Reliable end of stack detection:
	 * Some APM bios versions misalign the stack
	 */
	return !(((unsigned long)addr+sizeof(void*)-1) & (THREAD_SIZE-sizeof(void*)));
}
#endif

extern union thread_union init_thread_union;
extern struct task_struct init_task;

extern struct   mm_struct init_mm;

extern struct pid_namespace init_pid_ns;

/*
 * find a task by one of its numerical ids
 *
 * find_task_by_pid_ns():
 *      finds a task by its pid in the specified namespace
 * find_task_by_vpid():
 *      finds a task by its virtual pid
 *
 * see also find_vpid() etc in include/linux/pid.h
 */

extern struct task_struct *find_task_by_vpid(pid_t nr);
extern struct task_struct *find_task_by_pid_ns(pid_t nr,
		struct pid_namespace *ns);
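/*
 * Illustrative sketch: resolving a pid received from userspace.  The
 * lookup result is only stable under rcu_read_lock() unless a reference
 * is taken with get_task_struct().
 *
 *	rcu_read_lock();
 *	p = find_task_by_vpid(nr);
 *	if (p)
 *		get_task_struct(p);
 *	rcu_read_unlock();
 */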

extern void __set_special_pids(struct pid *pid);

/* per-UID process charging. */
extern struct user_struct * alloc_uid(struct user_namespace *, uid_t);
static inline struct user_struct *get_uid(struct user_struct *u)
{
	atomic_inc(&u->__count);
	return u;
}
extern void free_uid(struct user_struct *);
extern void release_uids(struct user_namespace *ns);

#include <asm/current.h>

extern void do_timer(unsigned long ticks);

extern int wake_up_state(struct task_struct *tsk, unsigned int state);
extern int wake_up_process(struct task_struct *tsk);
extern void wake_up_new_task(struct task_struct *tsk,
				unsigned long clone_flags);
#ifdef CONFIG_SMP
 extern void kick_process(struct task_struct *tsk);
#else
 static inline void kick_process(struct task_struct *tsk) { }
#endif
extern void sched_fork(struct task_struct *p, int clone_flags);
extern void sched_dead(struct task_struct *p);

extern void proc_caches_init(void);
extern void flush_signals(struct task_struct *);
extern void __flush_signals(struct task_struct *);
extern void ignore_signals(struct task_struct *);
extern void flush_signal_handlers(struct task_struct *, int force_default);
extern int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info);

static inline int dequeue_signal_lock(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&tsk->sighand->siglock, flags);
	ret = dequeue_signal(tsk, mask, info);
	spin_unlock_irqrestore(&tsk->sighand->siglock, flags);

	return ret;
}	

extern void block_all_signals(int (*notifier)(void *priv), void *priv,
			      sigset_t *mask);
extern void unblock_all_signals(void);
extern void release_task(struct task_struct * p);
extern int send_sig_info(int, struct siginfo *, struct task_struct *);
extern int force_sigsegv(int, struct task_struct *);
extern int force_sig_info(int, struct siginfo *, struct task_struct *);
extern int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp);
extern int kill_pid_info(int sig, struct siginfo *info, struct pid *pid);
extern int kill_pid_info_as_uid(int, struct siginfo *, struct pid *, uid_t, uid_t, u32);
extern int kill_pgrp(struct pid *pid, int sig, int priv);
extern int kill_pid(struct pid *pid, int sig, int priv);
extern int kill_proc_info(int, struct siginfo *, pid_t);
extern int do_notify_parent(struct task_struct *, int);
extern void force_sig(int, struct task_struct *);
extern void force_sig_specific(int, struct task_struct *);
extern int send_sig(int, struct task_struct *, int);
extern void zap_other_threads(struct task_struct *p);
extern struct sigqueue *sigqueue_alloc(void);
extern void sigqueue_free(struct sigqueue *);
extern int send_sigqueue(struct sigqueue *,  struct task_struct *, int group);
extern int do_sigaction(int, struct k_sigaction *, struct k_sigaction *);
extern int do_sigaltstack(const stack_t __user *, stack_t __user *, unsigned long);

static inline int kill_cad_pid(int sig, int priv)
{
	return kill_pid(cad_pid, sig, priv);
}

/* These can be the second arg to send_sig_info/send_group_sig_info.  */
#define SEND_SIG_NOINFO ((struct siginfo *) 0)
#define SEND_SIG_PRIV	((struct siginfo *) 1)
#define SEND_SIG_FORCED	((struct siginfo *) 2)

static inline int is_si_special(const struct siginfo *info)
{
	return info <= SEND_SIG_FORCED;
}

/* True if we are on the alternate signal stack.  */

static inline int on_sig_stack(unsigned long sp)
{
	return (sp - current->sas_ss_sp < current->sas_ss_size);
}

static inline int sas_ss_flags(unsigned long sp)
{
	return (current->sas_ss_size == 0 ? SS_DISABLE
		: on_sig_stack(sp) ? SS_ONSTACK : 0);
}

/*
 * Routines for handling mm_structs
 */
extern struct mm_struct * mm_alloc(void);

/* mmdrop drops the mm and the page tables */
extern void __mmdrop(struct mm_struct *);
static inline void mmdrop(struct mm_struct * mm)
{
	if (unlikely(atomic_dec_and_test(&mm->mm_count)))
		__mmdrop(mm);
}

/* mmput gets rid of the mappings and all user-space */
extern void mmput(struct mm_struct *);
/* Grab a reference to a task's mm, if it is not already going away */
extern struct mm_struct *get_task_mm(struct task_struct *task);
/* Remove the current task's stale references to the old mm_struct */
extern void mm_release(struct task_struct *, struct mm_struct *);
/* Allocate a new mm structure and copy contents from tsk->mm */
extern struct mm_struct *dup_mm(struct task_struct *tsk);
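/*
 * Illustrative sketch: borrowing another task's mm.  get_task_mm()
 * returns NULL if the task has no user address space or is exiting;
 * inspect_vm_usage() is hypothetical.
 *
 *	struct mm_struct *mm = get_task_mm(task);
 *
 *	if (mm) {
 *		inspect_vm_usage(mm);	e.g. look at mm->total_vm
 *		mmput(mm);
 *	}
 */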

extern int copy_thread(unsigned long, unsigned long, unsigned long,
			struct task_struct *, struct pt_regs *);
extern void flush_thread(void);
extern void exit_thread(void);

extern void exit_files(struct task_struct *);
extern void __cleanup_signal(struct signal_struct *);
extern void __cleanup_sighand(struct sighand_struct *);

extern void exit_itimers(struct signal_struct *);
extern void flush_itimer_signals(void);

extern NORET_TYPE void do_group_exit(int);

extern void daemonize(const char *, ...);
extern int allow_signal(int);
extern int disallow_signal(int);

extern int do_execve(char *, char __user * __user *, char __user * __user *, struct pt_regs *);
extern long do_fork(unsigned long, unsigned long, struct pt_regs *, unsigned long, int __user *, int __user *);
struct task_struct *fork_idle(int);

extern void set_task_comm(struct task_struct *tsk, char *from);
extern char *get_task_comm(char *to, struct task_struct *tsk);

#ifdef CONFIG_SMP
extern void wait_task_context_switch(struct task_struct *p);
extern unsigned long wait_task_inactive(struct task_struct *, long match_state);
#else
static inline void wait_task_context_switch(struct task_struct *p) {}
static inline unsigned long wait_task_inactive(struct task_struct *p,
					       long match_state)
{
	return 1;
}
#endif

#define next_task(p) \
	list_entry_rcu((p)->tasks.next, struct task_struct, tasks)

#define for_each_process(p) \
	for (p = &init_task ; (p = next_task(p)) != &init_task ; )

extern bool current_is_single_threaded(void);

/*
 * Careful: do_each_thread/while_each_thread is a double loop so
 *          'break' will not work as expected - use goto instead.
 */
#define do_each_thread(g, t) \
	for (g = t = &init_task ; (g = t = next_task(g)) != &init_task ; ) do

#define while_each_thread(g, t) \
	while ((t = next_thread(t)) != g)
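/*
 * Illustrative sketch of the goto pattern the comment above asks for;
 * wanted() is a hypothetical predicate and the walk is assumed to run
 * under the usual tasklist/RCU protection.
 *
 *	do_each_thread(g, t) {
 *		if (wanted(t))
 *			goto found;
 *	} while_each_thread(g, t);
 *	return NULL;
 * found:
 *	return t;
 */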

/* de_thread depends on thread_group_leader not being a pid based check */
#define thread_group_leader(p)	(p == p->group_leader)

/* Due to the insanities of de_thread it is possible for a process
 * to have the pid of the thread group leader without actually being
 * the thread group leader.  For iteration through the pids in proc
 * all we care about is that we have a task with the appropriate
 * pid; we don't actually care if we have the right task.
 */
static inline int has_group_leader_pid(struct task_struct *p)
{
	return p->pid == p->tgid;
}

static inline
int same_thread_group(struct task_struct *p1, struct task_struct *p2)
{
	return p1->tgid == p2->tgid;
}

static inline struct task_struct *next_thread(const struct task_struct *p)
{
	return list_entry_rcu(p->thread_group.next,
			      struct task_struct, thread_group);
}

static inline int thread_group_empty(struct task_struct *p)
{
	return list_empty(&p->thread_group);
}

#define delay_group_leader(p) \
		(thread_group_leader(p) && !thread_group_empty(p))

static inline int task_detached(struct task_struct *p)
{
	return p->exit_signal == -1;
}

/*
 * Protects ->fs, ->files, ->mm, ->group_info, ->comm, keyring
 * subscriptions and synchronises with wait4().  Also used in procfs.  Also
 * pins the final release of task.io_context.  Also protects ->cpuset and
 * ->cgroup.subsys[].
 *
 * Nests both inside and outside of read_lock(&tasklist_lock).
 * It must not be nested with write_lock_irq(&tasklist_lock),
 * neither inside nor outside.
 */
static inline void task_lock(struct task_struct *p)
{
	spin_lock(&p->alloc_lock);
}

static inline void task_unlock(struct task_struct *p)
{
	spin_unlock(&p->alloc_lock);
}
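/*
 * Illustrative sketch: task_lock() is what keeps a peek at another
 * task's ->mm or ->comm stable against exec and exit; note_mm_user()
 * is hypothetical and must not sleep while the lock is held.
 *
 *	task_lock(p);
 *	if (p->mm)
 *		note_mm_user(p->mm);
 *	task_unlock(p);
 */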

extern struct sighand_struct *lock_task_sighand(struct task_struct *tsk,
							unsigned long *flags);

static inline void unlock_task_sighand(struct task_struct *tsk,
						unsigned long *flags)
{
	spin_unlock_irqrestore(&tsk->sighand->siglock, *flags);
}
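/*
 * Illustrative sketch: lock_task_sighand() returns NULL once the task
 * has released its signal handlers, so the result must be checked;
 * inspect_pending_signals() is hypothetical.
 *
 *	unsigned long flags;
 *
 *	if (lock_task_sighand(tsk, &flags)) {
 *		inspect_pending_signals(tsk);
 *		unlock_task_sighand(tsk, &flags);
 *	}
 */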

#ifndef __HAVE_THREAD_FUNCTIONS

#define task_thread_info(task)	((struct thread_info *)(task)->stack)
#define task_stack_page(task)	((task)->stack)

static inline void setup_thread_stack(struct task_struct *p, struct task_struct *org)
{
	*task_thread_info(p) = *task_thread_info(org);
	task_thread_info(p)->task = p;
}

static inline unsigned long *end_of_stack(struct task_struct *p)
{
	return (unsigned long *)(task_thread_info(p) + 1);
}

#endif

static inline int object_is_on_stack(void *obj)
{
	void *stack = task_stack_page(current);

	return (obj >= stack) && (obj < (stack + THREAD_SIZE));
}

extern void thread_info_cache_init(void);

#ifdef CONFIG_DEBUG_STACK_USAGE
static inline unsigned long stack_not_used(struct task_struct *p)
{
	unsigned long *n = end_of_stack(p);

	do { 	/* Skip over canary */
		n++;
	} while (!*n);

	return (unsigned long)n - (unsigned long)end_of_stack(p);
}
#endif

/* set thread flags in other task's structures
 * - see asm/thread_info.h for TIF_xxxx flags available
 */
static inline void set_tsk_thread_flag(struct task_struct *tsk, int flag)
{
	set_ti_thread_flag(task_thread_info(tsk), flag);
}

static inline void clear_tsk_thread_flag(struct task_struct *tsk, int flag)
{
	clear_ti_thread_flag(task_thread_info(tsk), flag);
}

static inline int test_and_set_tsk_thread_flag(struct task_struct *tsk, int flag)
{
	return test_and_set_ti_thread_flag(task_thread_info(tsk), flag);
}

static inline int test_and_clear_tsk_thread_flag(struct task_struct *tsk, int flag)
{
	return test_and_clear_ti_thread_flag(task_thread_info(tsk), flag);
}

static inline int test_tsk_thread_flag(struct task_struct *tsk, int flag)
{
	return test_ti_thread_flag(task_thread_info(tsk), flag);
}

static inline void set_tsk_need_resched(struct task_struct *tsk)
{
	set_tsk_thread_flag(tsk,TIF_NEED_RESCHED);
}

static inline void clear_tsk_need_resched(struct task_struct *tsk)
{
	clear_tsk_thread_flag(tsk,TIF_NEED_RESCHED);
}

static inline int test_tsk_need_resched(struct task_struct *tsk)
{
	return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED));
}

static inline int restart_syscall(void)
{
	set_tsk_thread_flag(current, TIF_SIGPENDING);
	return -ERESTARTNOINTR;
}

static inline int signal_pending(struct task_struct *p)
{
	return unlikely(test_tsk_thread_flag(p,TIF_SIGPENDING));
}

extern int __fatal_signal_pending(struct task_struct *p);

static inline int fatal_signal_pending(struct task_struct *p)
{
	return signal_pending(p) && __fatal_signal_pending(p);
}

static inline int signal_pending_state(long state, struct task_struct *p)
{
	if (!(state & (TASK_INTERRUPTIBLE | TASK_WAKEKILL)))
		return 0;
	if (!signal_pending(p))
		return 0;

	return (state & TASK_INTERRUPTIBLE) || __fatal_signal_pending(p);
}

static inline int need_resched(void)
{
	return unlikely(test_thread_flag(TIF_NEED_RESCHED));
}

/*
 * cond_resched() and cond_resched_lock(): latency reduction via
 * explicit rescheduling in places that are safe. The return
 * value indicates whether a reschedule was done in fact.
 * cond_resched_lock() will drop the spinlock before scheduling,
 * cond_resched_softirq() will enable bhs before scheduling.
 */
extern int _cond_resched(void);

#define cond_resched() ({			\
	__might_sleep(__FILE__, __LINE__, 0);	\
	_cond_resched();			\
})

extern int __cond_resched_lock(spinlock_t *lock);

#ifdef CONFIG_PREEMPT
#define PREEMPT_LOCK_OFFSET	PREEMPT_OFFSET
#else
#define PREEMPT_LOCK_OFFSET	0
#endif

#define cond_resched_lock(lock) ({				\
	__might_sleep(__FILE__, __LINE__, PREEMPT_LOCK_OFFSET);	\
	__cond_resched_lock(lock);				\
})

extern int __cond_resched_softirq(void);

#define cond_resched_softirq() ({				\
	__might_sleep(__FILE__, __LINE__, SOFTIRQ_OFFSET);	\
	__cond_resched_softirq();				\
})
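/*
 * Illustrative sketch: sprinkling cond_resched() into a long loop keeps
 * latency down without giving up the CPU unconditionally; process_item()
 * is hypothetical per-item work.
 *
 *	for (i = 0; i < nr_items; i++) {
 *		process_item(i);
 *		cond_resched();
 *	}
 */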

/*
 * Does a critical section need to be broken due to another
 * task waiting?: (technically does not depend on CONFIG_PREEMPT,
 * but a general need for low latency)
 */
static inline int spin_needbreak(spinlock_t *lock)
{
#ifdef CONFIG_PREEMPT
	return spin_is_contended(lock);
#else
	return 0;
#endif
}

/*
 * Thread group CPU time accounting.
 */
void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times);
void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times);

static inline void thread_group_cputime_init(struct signal_struct *sig)
{
	sig->cputimer.cputime = INIT_CPUTIME;
	spin_lock_init(&sig->cputimer.lock);
	sig->cputimer.running = 0;
}

static inline void thread_group_cputime_free(struct signal_struct *sig)
{
}
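/*
 * Illustrative sketch: summing CPU time over a whole thread group.
 *
 *	struct task_cputime times;
 *	cputime_t total;
 *
 *	thread_group_cputime(tsk, &times);
 *	total = cputime_add(times.utime, times.stime);
 *	(times.sum_exec_runtime holds the summed runtime in nanoseconds)
 */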

/*
 * Reevaluate whether the task has signals pending delivery.
 * Wake the task if so.
 * This is required every time the blocked sigset_t changes.
 * callers must hold sighand->siglock.
 */
extern void recalc_sigpending_and_wake(struct task_struct *t);
extern void recalc_sigpending(void);

extern void signal_wake_up(struct task_struct *t, int resume_stopped);

/*
 * Wrappers for p->thread_info->cpu access. No-op on UP.
 */
#ifdef CONFIG_SMP

static inline unsigned int task_cpu(const struct task_struct *p)
{
	return task_thread_info(p)->cpu;
}

extern void set_task_cpu(struct task_struct *p, unsigned int cpu);

#else

static inline unsigned int task_cpu(const struct task_struct *p)
{
	return 0;
}

static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
{
}

#endif /* CONFIG_SMP */

extern void arch_pick_mmap_layout(struct mm_struct *mm);

#ifdef CONFIG_TRACING
extern void
__trace_special(void *__tr, void *__data,
		unsigned long arg1, unsigned long arg2, unsigned long arg3);
#else
static inline void
__trace_special(void *__tr, void *__data,
		unsigned long arg1, unsigned long arg2, unsigned long arg3)
{
}
#endif

extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask);
extern long sched_getaffinity(pid_t pid, struct cpumask *mask);

extern void normalize_rt_tasks(void);

#ifdef CONFIG_GROUP_SCHED

extern struct task_group init_task_group;
#ifdef CONFIG_USER_SCHED
extern struct task_group root_task_group;
extern void set_tg_uid(struct user_struct *user);
#endif

extern struct task_group *sched_create_group(struct task_group *parent);
extern void sched_destroy_group(struct task_group *tg);
extern void sched_move_task(struct task_struct *tsk);
#ifdef CONFIG_FAIR_GROUP_SCHED
extern int sched_group_set_shares(struct task_group *tg, unsigned long shares);
extern unsigned long sched_group_shares(struct task_group *tg);
#endif
#ifdef CONFIG_RT_GROUP_SCHED
extern int sched_group_set_rt_runtime(struct task_group *tg,
				      long rt_runtime_us);
extern long sched_group_rt_runtime(struct task_group *tg);
extern int sched_group_set_rt_period(struct task_group *tg,
				      long rt_period_us);
extern long sched_group_rt_period(struct task_group *tg);
extern int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk);
#endif
#endif

extern int task_can_switch_user(struct user_struct *up,
					struct task_struct *tsk);

#ifdef CONFIG_TASK_XACCT
static inline void add_rchar(struct task_struct *tsk, ssize_t amt)
{
	tsk->ioac.rchar += amt;
}

static inline void add_wchar(struct task_struct *tsk, ssize_t amt)
{
	tsk->ioac.wchar += amt;
}

static inline void inc_syscr(struct task_struct *tsk)
{
	tsk->ioac.syscr++;
}

static inline void inc_syscw(struct task_struct *tsk)
{
	tsk->ioac.syscw++;
}
#else
static inline void add_rchar(struct task_struct *tsk, ssize_t amt)
{
}

static inline void add_wchar(struct task_struct *tsk, ssize_t amt)
{
}

static inline void inc_syscr(struct task_struct *tsk)
{
}

static inline void inc_syscw(struct task_struct *tsk)
{
}
#endif

#ifndef TASK_SIZE_OF
#define TASK_SIZE_OF(tsk)	TASK_SIZE
#endif

/*
 * Call the function if the target task is executing on a CPU right now:
 */
extern void task_oncpu_function_call(struct task_struct *p,
				     void (*func) (void *info), void *info);


#ifdef CONFIG_MM_OWNER
extern void mm_update_next_owner(struct mm_struct *mm);
extern void mm_init_owner(struct mm_struct *mm, struct task_struct *p);
#else
static inline void mm_update_next_owner(struct mm_struct *mm)
{
}

static inline void mm_init_owner(struct mm_struct *mm, struct task_struct *p)
{
}
#endif /* CONFIG_MM_OWNER */

#define TASK_STATE_TO_CHAR_STR "RSDTtZX"

#endif /* __KERNEL__ */

#endif