sched.h 84.1 KB
Newer Older
L
Linus Torvalds 已提交
1 2 3
#ifndef _LINUX_SCHED_H
#define _LINUX_SCHED_H

4
#include <uapi/linux/sched.h>
5

6 7
#include <linux/sched/prio.h>

8 9 10 11 12

struct sched_param {
	int sched_priority;
};

L
Linus Torvalds 已提交
13 14 15 16 17 18 19 20
#include <asm/param.h>	/* for HZ */

#include <linux/capability.h>
#include <linux/threads.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/timex.h>
#include <linux/jiffies.h>
21
#include <linux/plist.h>
L
Linus Torvalds 已提交
22 23 24 25 26
#include <linux/rbtree.h>
#include <linux/thread_info.h>
#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/nodemask.h>
27
#include <linux/mm_types.h>
28
#include <linux/preempt_mask.h>
L
Linus Torvalds 已提交
29 30 31 32 33 34 35 36 37 38 39 40 41

#include <asm/page.h>
#include <asm/ptrace.h>
#include <asm/cputime.h>

#include <linux/smp.h>
#include <linux/sem.h>
#include <linux/signal.h>
#include <linux/compiler.h>
#include <linux/completion.h>
#include <linux/pid.h>
#include <linux/percpu.h>
#include <linux/topology.h>
P
Peter Zijlstra 已提交
42
#include <linux/proportions.h>
L
Linus Torvalds 已提交
43
#include <linux/seccomp.h>
I
Ingo Molnar 已提交
44
#include <linux/rcupdate.h>
45
#include <linux/rculist.h>
I
Ingo Molnar 已提交
46
#include <linux/rtmutex.h>
L
Linus Torvalds 已提交
47

48 49 50 51 52
#include <linux/time.h>
#include <linux/param.h>
#include <linux/resource.h>
#include <linux/timer.h>
#include <linux/hrtimer.h>
53
#include <linux/task_io_accounting.h>
A
Arjan van de Ven 已提交
54
#include <linux/latencytop.h>
55
#include <linux/cred.h>
P
Peter Zijlstra 已提交
56
#include <linux/llist.h>
57
#include <linux/uidgid.h>
58
#include <linux/gfp.h>
59 60

#include <asm/processor.h>
H
H. J. Lu 已提交
61

62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102
#define SCHED_ATTR_SIZE_VER0	48	/* sizeof first published struct */

/*
 * Extended scheduling parameters data structure.
 *
 * This is needed because the original struct sched_param can not be
 * altered without introducing ABI issues with legacy applications
 * (e.g., in sched_getparam()).
 *
 * However, the possibility of specifying more than just a priority for
 * the tasks may be useful for a wide variety of application fields, e.g.,
 * multimedia, streaming, automation and control, and many others.
 *
 * This variant (sched_attr) is meant at describing a so-called
 * sporadic time-constrained task. In such model a task is specified by:
 *  - the activation period or minimum instance inter-arrival time;
 *  - the maximum (or average, depending on the actual scheduling
 *    discipline) computation time of all instances, a.k.a. runtime;
 *  - the deadline (relative to the actual activation time) of each
 *    instance.
 * Very briefly, a periodic (sporadic) task asks for the execution of
 * some specific computation --which is typically called an instance--
 * (at most) every period. Moreover, each instance typically lasts no more
 * than the runtime and must be completed by time instant t equal to
 * the instance activation time + the deadline.
 *
 * This is reflected by the actual fields of the sched_attr structure:
 *
 *  @size		size of the structure, for fwd/bwd compat.
 *
 *  @sched_policy	task's scheduling policy
 *  @sched_flags	for customizing the scheduler behaviour
 *  @sched_nice		task's nice value      (SCHED_NORMAL/BATCH)
 *  @sched_priority	task's static priority (SCHED_FIFO/RR)
 *  @sched_deadline	representative of the task's deadline
 *  @sched_runtime	representative of the task's runtime
 *  @sched_period	representative of the task's period
 *
 * Given this task model, there are a multiplicity of scheduling algorithms
 * and policies, that can be used to ensure all the tasks will make their
 * timing constraints.
103 104 105 106
 *
 * As of now, the SCHED_DEADLINE policy (sched_dl scheduling class) is the
 * only user of this new interface. More information about the algorithm
 * available in the scheduling class file or in Documentation/.
107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125
 */
struct sched_attr {
	u32 size;

	u32 sched_policy;
	u64 sched_flags;

	/* SCHED_NORMAL, SCHED_BATCH */
	s32 sched_nice;

	/* SCHED_FIFO, SCHED_RR */
	u32 sched_priority;

	/* SCHED_DEADLINE */
	u64 sched_runtime;
	u64 sched_deadline;
	u64 sched_period;
};

L
Linus Torvalds 已提交
126
struct exec_domain;
127
struct futex_pi_state;
128
struct robust_list_head;
129
struct bio_list;
130
struct fs_struct;
131
struct perf_event_context;
132
struct blk_plug;
133
struct filename;
L
Linus Torvalds 已提交
134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151

/*
 * List of flags we want to share for kernel threads,
 * if only because they are not used by them anyway.
 */
#define CLONE_KERNEL	(CLONE_FS | CLONE_FILES | CLONE_SIGHAND)

/*
 * These are the constant used to fake the fixed-point load-average
 * counting. Some notes:
 *  - 11 bit fractions expand to 22 bits by the multiplies: this gives
 *    a load-average precision of 10 bits integer + 11 bits fractional
 *  - if you want to count load-averages more often, you need more
 *    precision, or rounding will get you. With 2-second counting freq,
 *    the EXP_n values would be 1981, 2034 and 2043 if still using only
 *    11 bit fractions.
 */
extern unsigned long avenrun[];		/* Load averages */
152
extern void get_avenrun(unsigned long *loads, unsigned long offset, int shift);
L
Linus Torvalds 已提交
153 154 155

#define FSHIFT		11		/* nr of bits of precision */
#define FIXED_1		(1<<FSHIFT)	/* 1.0 as fixed-point */
156
#define LOAD_FREQ	(5*HZ+1)	/* 5 sec intervals */
L
Linus Torvalds 已提交
157 158 159 160 161 162 163 164 165 166 167 168 169 170 171
#define EXP_1		1884		/* 1/exp(5sec/1min) as fixed-point */
#define EXP_5		2014		/* 1/exp(5sec/5min) */
#define EXP_15		2037		/* 1/exp(5sec/15min) */

#define CALC_LOAD(load,exp,n) \
	load *= exp; \
	load += n*(FIXED_1-exp); \
	load >>= FSHIFT;

extern unsigned long total_forks;
extern int nr_threads;
DECLARE_PER_CPU(unsigned long, process_counts);
extern int nr_processes(void);
extern unsigned long nr_running(void);
extern unsigned long nr_iowait(void);
172
extern unsigned long nr_iowait_cpu(int cpu);
173 174 175
extern unsigned long this_cpu_load(void);


176
extern void calc_global_load(unsigned long ticks);
177
extern void update_cpu_load_nohz(void);
L
Linus Torvalds 已提交
178

179 180
extern unsigned long get_parent_ip(unsigned long addr);

181 182
extern void dump_cpu_task(int cpu);

I
Ingo Molnar 已提交
183 184
struct seq_file;
struct cfs_rq;
185
struct task_group;
I
Ingo Molnar 已提交
186 187 188 189
#ifdef CONFIG_SCHED_DEBUG
extern void proc_sched_show_task(struct task_struct *p, struct seq_file *m);
extern void proc_sched_set_task(struct task_struct *p);
extern void
190
print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq);
I
Ingo Molnar 已提交
191
#endif
L
Linus Torvalds 已提交
192

193 194 195 196 197 198 199 200 201 202
/*
 * Task state bitmask. NOTE! These bits are also
 * encoded in fs/proc/array.c: get_task_state().
 *
 * We have two separate sets of flags: task->state
 * is about runnability, while task->exit_state are
 * about the task exiting. Confusing, but this way
 * modifying one set can't modify the other one by
 * mistake.
 */
L
Linus Torvalds 已提交
203 204 205
#define TASK_RUNNING		0
#define TASK_INTERRUPTIBLE	1
#define TASK_UNINTERRUPTIBLE	2
M
Matthew Wilcox 已提交
206 207
#define __TASK_STOPPED		4
#define __TASK_TRACED		8
208 209 210 211
/* in tsk->exit_state */
#define EXIT_ZOMBIE		16
#define EXIT_DEAD		32
/* in tsk->state again */
212
#define TASK_DEAD		64
M
Matthew Wilcox 已提交
213
#define TASK_WAKEKILL		128
P
Peter Zijlstra 已提交
214
#define TASK_WAKING		256
215 216
#define TASK_PARKED		512
#define TASK_STATE_MAX		1024
M
Matthew Wilcox 已提交
217

218
#define TASK_STATE_TO_CHAR_STR "RSDTtZXxKWP"
219

220 221
extern char ___assert_task_state[1 - 2*!!(
		sizeof(TASK_STATE_TO_CHAR_STR)-1 != ilog2(TASK_STATE_MAX)+1)];
M
Matthew Wilcox 已提交
222 223 224 225 226

/* Convenience macros for the sake of set_task_state */
#define TASK_KILLABLE		(TASK_WAKEKILL | TASK_UNINTERRUPTIBLE)
#define TASK_STOPPED		(TASK_WAKEKILL | __TASK_STOPPED)
#define TASK_TRACED		(TASK_WAKEKILL | __TASK_TRACED)
L
Linus Torvalds 已提交
227

228 229
/* Convenience macros for the sake of wake_up */
#define TASK_NORMAL		(TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE)
M
Matthew Wilcox 已提交
230
#define TASK_ALL		(TASK_NORMAL | __TASK_STOPPED | __TASK_TRACED)
231 232 233

/* get_task_state() */
#define TASK_REPORT		(TASK_RUNNING | TASK_INTERRUPTIBLE | \
M
Matthew Wilcox 已提交
234
				 TASK_UNINTERRUPTIBLE | __TASK_STOPPED | \
235
				 __TASK_TRACED | EXIT_ZOMBIE | EXIT_DEAD)
236

M
Matthew Wilcox 已提交
237 238
#define task_is_traced(task)	((task->state & __TASK_TRACED) != 0)
#define task_is_stopped(task)	((task->state & __TASK_STOPPED) != 0)
239
#define task_is_stopped_or_traced(task)	\
M
Matthew Wilcox 已提交
240
			((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0)
241
#define task_contributes_to_load(task)	\
242
				((task->state & TASK_UNINTERRUPTIBLE) != 0 && \
T
Tejun Heo 已提交
243
				 (task->flags & PF_FROZEN) == 0)
L
Linus Torvalds 已提交
244 245 246 247 248 249

#define __set_task_state(tsk, state_value)		\
	do { (tsk)->state = (state_value); } while (0)
#define set_task_state(tsk, state_value)		\
	set_mb((tsk)->state, (state_value))

250 251 252 253 254 255 256 257 258 259 260
/*
 * set_current_state() includes a barrier so that the write of current->state
 * is correctly serialised wrt the caller's subsequent test of whether to
 * actually sleep:
 *
 *	set_current_state(TASK_UNINTERRUPTIBLE);
 *	if (do_i_need_to_sleep())
 *		schedule();
 *
 * If the caller does not need such serialisation then use __set_current_state()
 */
L
Linus Torvalds 已提交
261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279
#define __set_current_state(state_value)			\
	do { current->state = (state_value); } while (0)
#define set_current_state(state_value)		\
	set_mb(current->state, (state_value))

/* Task command name length */
#define TASK_COMM_LEN 16

#include <linux/spinlock.h>

/*
 * This serializes "schedule()" and also protects
 * the run-queue from deletions/modifications (but
 * _adding_ to the beginning of the run-queue has
 * a separate lock).
 */
extern rwlock_t tasklist_lock;
extern spinlock_t mmlist_lock;

280
struct task_struct;
L
Linus Torvalds 已提交
281

282 283 284 285
#ifdef CONFIG_PROVE_RCU
extern int lockdep_tasklist_lock_is_held(void);
#endif /* #ifdef CONFIG_PROVE_RCU */

L
Linus Torvalds 已提交
286 287
extern void sched_init(void);
extern void sched_init_smp(void);
288
extern asmlinkage void schedule_tail(struct task_struct *prev);
289
extern void init_idle(struct task_struct *idle, int cpu);
I
Ingo Molnar 已提交
290
extern void init_idle_bootup_task(struct task_struct *idle);
L
Linus Torvalds 已提交
291

292
extern int runqueue_is_locked(int cpu);
I
Ingo Molnar 已提交
293

294
#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
295
extern void nohz_balance_enter_idle(int cpu);
296
extern void set_cpu_sd_state_idle(void);
297
extern int get_nohz_timer_target(void);
298
#else
299
static inline void nohz_balance_enter_idle(int cpu) { }
300
static inline void set_cpu_sd_state_idle(void) { }
301
#endif
L
Linus Torvalds 已提交
302

I
Ingo Molnar 已提交
303
/*
I
Ingo Molnar 已提交
304
 * Only dump TASK_* tasks. (0 for all tasks)
I
Ingo Molnar 已提交
305 306 307 308 309
 */
extern void show_state_filter(unsigned long state_filter);

static inline void show_state(void)
{
I
Ingo Molnar 已提交
310
	show_state_filter(0);
I
Ingo Molnar 已提交
311 312
}

L
Linus Torvalds 已提交
313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329
extern void show_regs(struct pt_regs *);

/*
 * TASK is a pointer to the task whose backtrace we want to see (or NULL for current
 * task), SP is the stack pointer of the first frame that should be shown in the back
 * trace (or NULL if the entire call-chain of the task should be shown).
 */
extern void show_stack(struct task_struct *task, unsigned long *sp);

void io_schedule(void);
long io_schedule_timeout(long timeout);

extern void cpu_init (void);
extern void trap_init(void);
extern void update_process_times(int user);
extern void scheduler_tick(void);

330 331
extern void sched_show_task(struct task_struct *p);

332
#ifdef CONFIG_LOCKUP_DETECTOR
I
Ingo Molnar 已提交
333
extern void touch_softlockup_watchdog(void);
334
extern void touch_softlockup_watchdog_sync(void);
335
extern void touch_all_softlockup_watchdogs(void);
336 337 338
extern int proc_dowatchdog_thresh(struct ctl_table *table, int write,
				  void __user *buffer,
				  size_t *lenp, loff_t *ppos);
I
Ingo Molnar 已提交
339
extern unsigned int  softlockup_panic;
340
void lockup_detector_init(void);
I
Ingo Molnar 已提交
341 342 343 344
#else
static inline void touch_softlockup_watchdog(void)
{
}
345 346 347
static inline void touch_softlockup_watchdog_sync(void)
{
}
348 349 350
static inline void touch_all_softlockup_watchdogs(void)
{
}
351 352 353
static inline void lockup_detector_init(void)
{
}
I
Ingo Molnar 已提交
354 355
#endif

356 357 358 359 360 361 362 363
#ifdef CONFIG_DETECT_HUNG_TASK
void reset_hung_task_detector(void);
#else
static inline void reset_hung_task_detector(void)
{
}
#endif

L
Linus Torvalds 已提交
364 365
/* Attach to any functions which should be ignored in wchan output. */
#define __sched		__attribute__((__section__(".sched.text")))
366 367 368 369

/* Linker adds these: start and end of __sched functions */
extern char __sched_text_start[], __sched_text_end[];

L
Linus Torvalds 已提交
370 371 372 373
/* Is this address in the __sched functions? */
extern int in_sched_functions(unsigned long addr);

#define	MAX_SCHEDULE_TIMEOUT	LONG_MAX
374
extern signed long schedule_timeout(signed long timeout);
375
extern signed long schedule_timeout_interruptible(signed long timeout);
M
Matthew Wilcox 已提交
376
extern signed long schedule_timeout_killable(signed long timeout);
377
extern signed long schedule_timeout_uninterruptible(signed long timeout);
L
Linus Torvalds 已提交
378
asmlinkage void schedule(void);
379
extern void schedule_preempt_disabled(void);
L
Linus Torvalds 已提交
380

S
Serge E. Hallyn 已提交
381
struct nsproxy;
382
struct user_namespace;
L
Linus Torvalds 已提交
383

384 385
#ifdef CONFIG_MMU
extern void arch_pick_mmap_layout(struct mm_struct *mm);
L
Linus Torvalds 已提交
386 387 388 389 390 391 392
extern unsigned long
arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
		       unsigned long, unsigned long);
extern unsigned long
arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
			  unsigned long len, unsigned long pgoff,
			  unsigned long flags);
393 394 395
#else
static inline void arch_pick_mmap_layout(struct mm_struct *mm) {}
#endif
L
Linus Torvalds 已提交
396

397 398 399 400
#define SUID_DUMP_DISABLE	0	/* No setuid dumping */
#define SUID_DUMP_USER		1	/* Dump as user of process */
#define SUID_DUMP_ROOT		2	/* Dump as root */

401
/* mm flags */
H
Hugh Dickins 已提交
402

403
/* for SUID_DUMP_* above */
404
#define MMF_DUMPABLE_BITS 2
H
Hugh Dickins 已提交
405
#define MMF_DUMPABLE_MASK ((1 << MMF_DUMPABLE_BITS) - 1)
406

407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423
extern void set_dumpable(struct mm_struct *mm, int value);
/*
 * This returns the actual value of the suid_dumpable flag. For things
 * that are using this for checking for privilege transitions, it must
 * test against SUID_DUMP_USER rather than treating it as a boolean
 * value.
 */
static inline int __get_dumpable(unsigned long mm_flags)
{
	return mm_flags & MMF_DUMPABLE_MASK;
}

static inline int get_dumpable(struct mm_struct *mm)
{
	return __get_dumpable(mm->flags);
}

424 425 426 427 428
/* coredump filter bits */
#define MMF_DUMP_ANON_PRIVATE	2
#define MMF_DUMP_ANON_SHARED	3
#define MMF_DUMP_MAPPED_PRIVATE	4
#define MMF_DUMP_MAPPED_SHARED	5
R
Roland McGrath 已提交
429
#define MMF_DUMP_ELF_HEADERS	6
430 431
#define MMF_DUMP_HUGETLB_PRIVATE 7
#define MMF_DUMP_HUGETLB_SHARED  8
H
Hugh Dickins 已提交
432

433
#define MMF_DUMP_FILTER_SHIFT	MMF_DUMPABLE_BITS
434
#define MMF_DUMP_FILTER_BITS	7
435 436 437
#define MMF_DUMP_FILTER_MASK \
	(((1 << MMF_DUMP_FILTER_BITS) - 1) << MMF_DUMP_FILTER_SHIFT)
#define MMF_DUMP_FILTER_DEFAULT \
438
	((1 << MMF_DUMP_ANON_PRIVATE) |	(1 << MMF_DUMP_ANON_SHARED) |\
439 440 441 442 443 444 445
	 (1 << MMF_DUMP_HUGETLB_PRIVATE) | MMF_DUMP_MASK_DEFAULT_ELF)

#ifdef CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS
# define MMF_DUMP_MASK_DEFAULT_ELF	(1 << MMF_DUMP_ELF_HEADERS)
#else
# define MMF_DUMP_MASK_DEFAULT_ELF	0
#endif
H
Hugh Dickins 已提交
446 447
					/* leave room for more dump flags */
#define MMF_VM_MERGEABLE	16	/* KSM may merge identical pages */
A
Andrea Arcangeli 已提交
448
#define MMF_VM_HUGEPAGE		17	/* set when VM_HUGEPAGE is set on vma */
449
#define MMF_EXE_FILE_CHANGED	18	/* see prctl_set_mm_exe_file() */
H
Hugh Dickins 已提交
450

451 452
#define MMF_HAS_UPROBES		19	/* has uprobes */
#define MMF_RECALC_UPROBES	20	/* MMF_HAS_UPROBES can be wrong */
453

H
Hugh Dickins 已提交
454
#define MMF_INIT_MASK		(MMF_DUMPABLE_MASK | MMF_DUMP_FILTER_MASK)
455

L
Linus Torvalds 已提交
456 457 458 459
struct sighand_struct {
	atomic_t		count;
	struct k_sigaction	action[_NSIG];
	spinlock_t		siglock;
D
Davide Libenzi 已提交
460
	wait_queue_head_t	signalfd_wqh;
L
Linus Torvalds 已提交
461 462
};

463
struct pacct_struct {
464 465
	int			ac_flag;
	long			ac_exitcode;
466
	unsigned long		ac_mem;
467 468
	cputime_t		ac_utime, ac_stime;
	unsigned long		ac_minflt, ac_majflt;
469 470
};

471 472 473
struct cpu_itimer {
	cputime_t expires;
	cputime_t incr;
474 475
	u32 error;
	u32 incr_error;
476 477
};

478 479 480 481 482 483 484 485 486 487 488 489
/**
 * struct cputime - snaphsot of system and user cputime
 * @utime: time spent in user mode
 * @stime: time spent in system mode
 *
 * Gathers a generic snapshot of user and system time.
 */
struct cputime {
	cputime_t utime;
	cputime_t stime;
};

490 491 492 493 494
/**
 * struct task_cputime - collected CPU time counts
 * @utime:		time spent in user mode, in &cputime_t units
 * @stime:		time spent in kernel mode, in &cputime_t units
 * @sum_exec_runtime:	total time spent on the CPU, in nanoseconds
495
 *
496 497 498 499 500
 * This is an extension of struct cputime that includes the total runtime
 * spent by the task from the scheduler point of view.
 *
 * As a result, this structure groups together three kinds of CPU time
 * that are tracked for threads and thread groups.  Most things considering
501 502 503 504 505 506 507 508 509 510 511 512 513
 * CPU time want to group these counts together and treat all three
 * of them in parallel.
 */
struct task_cputime {
	cputime_t utime;
	cputime_t stime;
	unsigned long long sum_exec_runtime;
};
/* Alternate field names when used to cache expirations. */
#define prof_exp	stime
#define virt_exp	utime
#define sched_exp	sum_exec_runtime

514 515
#define INIT_CPUTIME	\
	(struct task_cputime) {					\
516 517
		.utime = 0,					\
		.stime = 0,					\
518 519 520
		.sum_exec_runtime = 0,				\
	}

521 522 523 524 525 526
#ifdef CONFIG_PREEMPT_COUNT
#define PREEMPT_DISABLED	(1 + PREEMPT_ENABLED)
#else
#define PREEMPT_DISABLED	PREEMPT_ENABLED
#endif

P
Peter Zijlstra 已提交
527 528 529
/*
 * Disable preemption until the scheduler is running.
 * Reset by start_kernel()->sched_init()->init_idle().
P
Peter Zijlstra 已提交
530 531 532
 *
 * We include PREEMPT_ACTIVE to avoid cond_resched() from working
 * before the scheduler is active -- see should_resched().
P
Peter Zijlstra 已提交
533
 */
534
#define INIT_PREEMPT_COUNT	(PREEMPT_DISABLED + PREEMPT_ACTIVE)
P
Peter Zijlstra 已提交
535

536
/**
537 538 539 540 541
 * struct thread_group_cputimer - thread group interval timer counts
 * @cputime:		thread group interval timers.
 * @running:		non-zero when there are timers running and
 * 			@cputime receives updates.
 * @lock:		lock for fields in this struct.
542 543
 *
 * This structure contains the version of task_cputime, above, that is
544
 * used for thread group CPU timer calculations.
545
 */
546 547 548
struct thread_group_cputimer {
	struct task_cputime cputime;
	int running;
549
	raw_spinlock_t lock;
550 551
};

552
#include <linux/rwsem.h>
553 554
struct autogroup;

L
Linus Torvalds 已提交
555
/*
556
 * NOTE! "signal_struct" does not have its own
L
Linus Torvalds 已提交
557 558 559 560 561 562
 * locking, because a shared signal_struct always
 * implies a shared sighand_struct, so locking
 * sighand_struct is always a proper superset of
 * the locking of signal_struct.
 */
struct signal_struct {
563
	atomic_t		sigcnt;
L
Linus Torvalds 已提交
564
	atomic_t		live;
565
	int			nr_threads;
566
	struct list_head	thread_head;
L
Linus Torvalds 已提交
567 568 569 570

	wait_queue_head_t	wait_chldexit;	/* for wait4() */

	/* current thread group signal load-balancing target: */
571
	struct task_struct	*curr_target;
L
Linus Torvalds 已提交
572 573 574 575 576 577 578 579 580 581 582 583

	/* shared signal handling: */
	struct sigpending	shared_pending;

	/* thread group exit support */
	int			group_exit_code;
	/* overloaded:
	 * - notify group_exit_task when ->count is equal to notify_count
	 * - everyone except group_exit_task is stopped during signal delivery
	 *   of fatal signals, group_exit_task processes the signal.
	 */
	int			notify_count;
584
	struct task_struct	*group_exit_task;
L
Linus Torvalds 已提交
585 586 587 588 589

	/* thread group stop support, overloads group_exit_code too */
	int			group_stop_count;
	unsigned int		flags; /* see SIGNAL_* flags below */

590 591 592 593 594 595 596 597 598 599 600 601
	/*
	 * PR_SET_CHILD_SUBREAPER marks a process, like a service
	 * manager, to re-parent orphan (double-forking) child processes
	 * to this process instead of 'init'. The service manager is
	 * able to receive SIGCHLD signals and is able to investigate
	 * the process until it calls wait(). All children of this
	 * process will inherit a flag if they should look for a
	 * child_subreaper process at exit.
	 */
	unsigned int		is_child_subreaper:1;
	unsigned int		has_child_subreaper:1;

L
Linus Torvalds 已提交
602
	/* POSIX.1b Interval Timers */
603 604
	int			posix_timer_id;
	struct list_head	posix_timers;
L
Linus Torvalds 已提交
605 606

	/* ITIMER_REAL timer for the process */
607
	struct hrtimer real_timer;
608
	struct pid *leader_pid;
609
	ktime_t it_real_incr;
L
Linus Torvalds 已提交
610

611 612 613 614 615 616
	/*
	 * ITIMER_PROF and ITIMER_VIRTUAL timers for the process, we use
	 * CPUCLOCK_PROF and CPUCLOCK_VIRT for indexing array as these
	 * values are defined to 0 and 1 respectively
	 */
	struct cpu_itimer it[2];
L
Linus Torvalds 已提交
617

618
	/*
619 620
	 * Thread group totals for process CPU timers.
	 * See thread_group_cputimer(), et al, for details.
621
	 */
622
	struct thread_group_cputimer cputimer;
623 624 625 626 627 628

	/* Earliest-expiration cache. */
	struct task_cputime cputime_expires;

	struct list_head cpu_timers[3];

629
	struct pid *tty_old_pgrp;
630

L
Linus Torvalds 已提交
631 632 633 634 635
	/* boolean value for session group leader */
	int leader;

	struct tty_struct *tty; /* NULL if no tty */

636 637 638
#ifdef CONFIG_SCHED_AUTOGROUP
	struct autogroup *autogroup;
#endif
L
Linus Torvalds 已提交
639 640 641 642 643 644
	/*
	 * Cumulative resource counters for dead threads in the group,
	 * and for reaped dead child processes forked by this group.
	 * Live threads maintain their own counters and add to these
	 * in __exit_signal, except for the group leader.
	 */
645
	cputime_t utime, stime, cutime, cstime;
646 647
	cputime_t gtime;
	cputime_t cgtime;
648
#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
649
	struct cputime prev_cputime;
650
#endif
L
Linus Torvalds 已提交
651 652
	unsigned long nvcsw, nivcsw, cnvcsw, cnivcsw;
	unsigned long min_flt, maj_flt, cmin_flt, cmaj_flt;
653
	unsigned long inblock, oublock, cinblock, coublock;
J
Jiri Pirko 已提交
654
	unsigned long maxrss, cmaxrss;
655
	struct task_io_accounting ioac;
L
Linus Torvalds 已提交
656

657 658 659 660 661 662 663 664
	/*
	 * Cumulative ns of schedule CPU time fo dead threads in the
	 * group, not including a zombie group leader, (This only differs
	 * from jiffies_to_ns(utime + stime) if sched_clock uses something
	 * other than jiffies.)
	 */
	unsigned long long sum_sched_runtime;

L
Linus Torvalds 已提交
665 666 667 668 669 670 671 672 673 674 675
	/*
	 * We don't bother to synchronize most readers of this at all,
	 * because there is no reader checking a limit that actually needs
	 * to get both rlim_cur and rlim_max atomically, and either one
	 * alone is a single word that can safely be read normally.
	 * getrlimit/setrlimit use task_lock(current->group_leader) to
	 * protect this instead of the siglock, because they really
	 * have no need to disable irqs.
	 */
	struct rlimit rlim[RLIM_NLIMITS];

676 677 678
#ifdef CONFIG_BSD_PROCESS_ACCT
	struct pacct_struct pacct;	/* per-process accounting information */
#endif
679 680 681
#ifdef CONFIG_TASKSTATS
	struct taskstats *stats;
#endif
M
Miloslav Trmac 已提交
682 683
#ifdef CONFIG_AUDIT
	unsigned audit_tty;
684
	unsigned audit_tty_log_passwd;
M
Miloslav Trmac 已提交
685 686
	struct tty_audit_buf *tty_audit_buf;
#endif
687 688
#ifdef CONFIG_CGROUPS
	/*
689 690 691 692 693 694 695
	 * group_rwsem prevents new tasks from entering the threadgroup and
	 * member tasks from exiting,a more specifically, setting of
	 * PF_EXITING.  fork and exit paths are protected with this rwsem
	 * using threadgroup_change_begin/end().  Users which require
	 * threadgroup to remain stable should use threadgroup_[un]lock()
	 * which also takes care of exec path.  Currently, cgroup is the
	 * only user.
696
	 */
697
	struct rw_semaphore group_rwsem;
698
#endif
699

700
	oom_flags_t oom_flags;
701 702 703
	short oom_score_adj;		/* OOM kill score adjustment */
	short oom_score_adj_min;	/* OOM kill score adjustment min value.
					 * Only settable by CAP_SYS_RESOURCE. */
704 705 706 707

	struct mutex cred_guard_mutex;	/* guard against foreign influences on
					 * credential calculations
					 * (notably. ptrace) */
L
Linus Torvalds 已提交
708 709 710 711 712 713
};

/*
 * Bits in flags field of signal_struct.
 */
#define SIGNAL_STOP_STOPPED	0x00000001 /* job control stop in effect */
714 715
#define SIGNAL_STOP_CONTINUED	0x00000002 /* SIGCONT since WCONTINUED reap */
#define SIGNAL_GROUP_EXIT	0x00000004 /* group exit in progress */
716
#define SIGNAL_GROUP_COREDUMP	0x00000008 /* coredump in progress */
717 718 719 720 721 722
/*
 * Pending notifications to parent.
 */
#define SIGNAL_CLD_STOPPED	0x00000010
#define SIGNAL_CLD_CONTINUED	0x00000020
#define SIGNAL_CLD_MASK		(SIGNAL_CLD_STOPPED|SIGNAL_CLD_CONTINUED)
L
Linus Torvalds 已提交
723

724 725
#define SIGNAL_UNKILLABLE	0x00000040 /* for init: ignore fatal signals */

726 727 728 729 730 731 732
/* If true, all threads except ->group_exit_task have pending SIGKILL */
static inline int signal_group_exit(const struct signal_struct *sig)
{
	return	(sig->flags & SIGNAL_GROUP_EXIT) ||
		(sig->group_exit_task != NULL);
}

L
Linus Torvalds 已提交
733 734 735 736 737 738 739 740
/*
 * Some day this will be a full-fledged user tracking system..
 */
struct user_struct {
	atomic_t __count;	/* reference count */
	atomic_t processes;	/* How many processes does this user have? */
	atomic_t files;		/* How many open files does this user have? */
	atomic_t sigpending;	/* How many pending signals does this user have? */
741
#ifdef CONFIG_INOTIFY_USER
R
Robert Love 已提交
742 743 744
	atomic_t inotify_watches; /* How many inotify watches does this user have? */
	atomic_t inotify_devs;	/* How many inotify devs does this user have opened? */
#endif
745 746 747
#ifdef CONFIG_FANOTIFY
	atomic_t fanotify_listeners;
#endif
748
#ifdef CONFIG_EPOLL
749
	atomic_long_t epoll_watches; /* The number of file descriptors currently watched */
750
#endif
A
Alexey Dobriyan 已提交
751
#ifdef CONFIG_POSIX_MQUEUE
L
Linus Torvalds 已提交
752 753
	/* protected by mq_lock	*/
	unsigned long mq_bytes;	/* How many bytes can be allocated to mqueue? */
A
Alexey Dobriyan 已提交
754
#endif
L
Linus Torvalds 已提交
755 756 757 758 759 760 761 762
	unsigned long locked_shm; /* How many pages of mlocked shm ? */

#ifdef CONFIG_KEYS
	struct key *uid_keyring;	/* UID specific keyring */
	struct key *session_keyring;	/* UID's default session keyring */
#endif

	/* Hash table maintenance information */
P
Pavel Emelyanov 已提交
763
	struct hlist_node uidhash_node;
764
	kuid_t uid;
765

766
#ifdef CONFIG_PERF_EVENTS
767 768
	atomic_long_t locked_vm;
#endif
L
Linus Torvalds 已提交
769 770
};

771
extern int uids_sysfs_init(void);
772

773
extern struct user_struct *find_user(kuid_t);
L
Linus Torvalds 已提交
774 775 776 777

extern struct user_struct root_user;
#define INIT_USER (&root_user)

778

L
Linus Torvalds 已提交
779 780 781
struct backing_dev_info;
struct reclaim_state;

782
#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
L
Linus Torvalds 已提交
783 784
struct sched_info {
	/* cumulative counters */
785
	unsigned long pcount;	      /* # of times run on this cpu */
786
	unsigned long long run_delay; /* time spent waiting on a runqueue */
L
Linus Torvalds 已提交
787 788

	/* timestamps */
789 790
	unsigned long long last_arrival,/* when we last ran on a cpu */
			   last_queued;	/* when we were last queued to run */
L
Linus Torvalds 已提交
791
};
792
#endif /* defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT) */
L
Linus Torvalds 已提交
793

794 795 796 797 798 799 800 801 802 803 804 805 806 807
#ifdef CONFIG_TASK_DELAY_ACCT
struct task_delay_info {
	spinlock_t	lock;
	unsigned int	flags;	/* Private per-task flags */

	/* For each stat XXX, add following, aligned appropriately
	 *
	 * struct timespec XXX_start, XXX_end;
	 * u64 XXX_delay;
	 * u32 XXX_count;
	 *
	 * Atomicity of updates to XXX_delay, XXX_count protected by
	 * single lock above (split into XXX_lock if contention is an issue).
	 */
808 809 810 811 812 813 814 815 816 817 818 819 820

	/*
	 * XXX_count is incremented on every XXX operation, the delay
	 * associated with the operation is added to XXX_delay.
	 * XXX_delay contains the accumulated delay time in nanoseconds.
	 */
	struct timespec blkio_start, blkio_end;	/* Shared by blkio, swapin */
	u64 blkio_delay;	/* wait for sync block io completion */
	u64 swapin_delay;	/* wait for swapin block io completion */
	u32 blkio_count;	/* total count of the number of sync block */
				/* io operations performed */
	u32 swapin_count;	/* total count of the number of swapin block */
				/* io operations performed */
821 822 823 824

	struct timespec freepages_start, freepages_end;
	u64 freepages_delay;	/* wait for memory reclaim */
	u32 freepages_count;	/* total count of memory reclaim */
825
};
826 827 828 829 830 831 832 833 834 835 836
#endif	/* CONFIG_TASK_DELAY_ACCT */

static inline int sched_info_on(void)
{
#ifdef CONFIG_SCHEDSTATS
	return 1;
#elif defined(CONFIG_TASK_DELAY_ACCT)
	extern int delayacct_on;
	return delayacct_on;
#else
	return 0;
837
#endif
838
}
839

I
Ingo Molnar 已提交
840 841 842 843 844
enum cpu_idle_type {
	CPU_IDLE,
	CPU_NOT_IDLE,
	CPU_NEWLY_IDLE,
	CPU_MAX_IDLE_TYPES
L
Linus Torvalds 已提交
845 846
};

847 848 849 850 851
/*
 * Increase resolution of cpu_power calculations
 */
#define SCHED_POWER_SHIFT	10
#define SCHED_POWER_SCALE	(1L << SCHED_POWER_SHIFT)
L
Linus Torvalds 已提交
852

853 854 855
/*
 * sched-domains (multiprocessor balancing) declarations:
 */
856
#ifdef CONFIG_SMP
P
Peter Zijlstra 已提交
857 858 859 860
#define SD_LOAD_BALANCE		0x0001	/* Do load balancing on this domain. */
#define SD_BALANCE_NEWIDLE	0x0002	/* Balance when about to become idle */
#define SD_BALANCE_EXEC		0x0004	/* Balance on exec */
#define SD_BALANCE_FORK		0x0008	/* Balance on fork, clone */
861
#define SD_BALANCE_WAKE		0x0010  /* Balance on wakeup */
P
Peter Zijlstra 已提交
862 863 864 865
#define SD_WAKE_AFFINE		0x0020	/* Wake task to waking CPU */
#define SD_SHARE_CPUPOWER	0x0080	/* Domain members share cpu power */
#define SD_SHARE_PKG_RESOURCES	0x0200	/* Domain members share cpu pkg resources */
#define SD_SERIALIZE		0x0400	/* Only a single load balancing instance */
866
#define SD_ASYM_PACKING		0x0800  /* Place busy groups earlier in the domain */
P
Peter Zijlstra 已提交
867
#define SD_PREFER_SIBLING	0x1000	/* Prefer to place tasks in a sibling domain */
868
#define SD_OVERLAP		0x2000	/* sched_domains of this level overlap */
869
#define SD_NUMA			0x4000	/* cross-node balancing */
870

871 872
extern int __weak arch_sd_sibiling_asym_packing(void);

873 874 875 876 877 878 879 880
struct sched_domain_attr {
	int relax_domain_level;
};

#define SD_ATTR_INIT	(struct sched_domain_attr) {	\
	.relax_domain_level = -1,			\
}

881 882
extern int sched_domain_level_max;

883 884
struct sched_group;

L
Linus Torvalds 已提交
885 886 887
struct sched_domain {
	/* These fields must be setup */
	struct sched_domain *parent;	/* top domain must be null terminated */
888
	struct sched_domain *child;	/* bottom domain must be null terminated */
L
Linus Torvalds 已提交
889 890 891 892 893 894
	struct sched_group *groups;	/* the balancing groups of the domain */
	unsigned long min_interval;	/* Minimum balance interval ms */
	unsigned long max_interval;	/* Maximum balance interval ms */
	unsigned int busy_factor;	/* less balancing by factor if busy */
	unsigned int imbalance_pct;	/* No balance until over watermark */
	unsigned int cache_nice_tries;	/* Leave cache hot tasks for # tries */
N
Nick Piggin 已提交
895 896 897 898
	unsigned int busy_idx;
	unsigned int idle_idx;
	unsigned int newidle_idx;
	unsigned int wake_idx;
N
Nick Piggin 已提交
899
	unsigned int forkexec_idx;
P
Peter Zijlstra 已提交
900
	unsigned int smt_gain;
V
Vincent Guittot 已提交
901 902

	int nohz_idle;			/* NOHZ IDLE status */
L
Linus Torvalds 已提交
903
	int flags;			/* See SD_* */
904
	int level;
L
Linus Torvalds 已提交
905 906 907 908 909 910

	/* Runtime fields. */
	unsigned long last_balance;	/* init to jiffies. units in jiffies */
	unsigned int balance_interval;	/* initialise to 1. units in ms. */
	unsigned int nr_balance_failed; /* initialise to 0 */

911
	/* idle_balance() stats */
912
	u64 max_newidle_lb_cost;
913
	unsigned long next_decay_max_lb_cost;
P
Peter Zijlstra 已提交
914

L
Linus Torvalds 已提交
915 916
#ifdef CONFIG_SCHEDSTATS
	/* load_balance() stats */
917 918 919 920 921 922 923 924
	unsigned int lb_count[CPU_MAX_IDLE_TYPES];
	unsigned int lb_failed[CPU_MAX_IDLE_TYPES];
	unsigned int lb_balanced[CPU_MAX_IDLE_TYPES];
	unsigned int lb_imbalance[CPU_MAX_IDLE_TYPES];
	unsigned int lb_gained[CPU_MAX_IDLE_TYPES];
	unsigned int lb_hot_gained[CPU_MAX_IDLE_TYPES];
	unsigned int lb_nobusyg[CPU_MAX_IDLE_TYPES];
	unsigned int lb_nobusyq[CPU_MAX_IDLE_TYPES];
L
Linus Torvalds 已提交
925 926

	/* Active load balancing */
927 928 929
	unsigned int alb_count;
	unsigned int alb_failed;
	unsigned int alb_pushed;
L
Linus Torvalds 已提交
930

931
	/* SD_BALANCE_EXEC stats */
932 933 934
	unsigned int sbe_count;
	unsigned int sbe_balanced;
	unsigned int sbe_pushed;
L
Linus Torvalds 已提交
935

936
	/* SD_BALANCE_FORK stats */
937 938 939
	unsigned int sbf_count;
	unsigned int sbf_balanced;
	unsigned int sbf_pushed;
940

L
Linus Torvalds 已提交
941
	/* try_to_wake_up() stats */
942 943 944
	unsigned int ttwu_wake_remote;
	unsigned int ttwu_move_affine;
	unsigned int ttwu_move_balance;
L
Linus Torvalds 已提交
945
#endif
946 947 948
#ifdef CONFIG_SCHED_DEBUG
	char *name;
#endif
949 950 951 952
	union {
		void *private;		/* used during construction */
		struct rcu_head rcu;	/* used during destruction */
	};
953

954
	unsigned int span_weight;
955 956 957 958 959 960 961 962
	/*
	 * Span of all CPUs in this domain.
	 *
	 * NOTE: this field is variable length. (Allocated dynamically
	 * by attaching extra space to the end of the structure,
	 * depending on how many CPUs the kernel has booted up with)
	 */
	unsigned long span[0];
L
Linus Torvalds 已提交
963 964
};

965 966
static inline struct cpumask *sched_domain_span(struct sched_domain *sd)
{
967
	return to_cpumask(sd->span);
968 969
}

970
extern void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
971
				    struct sched_domain_attr *dattr_new);
P
Paul Jackson 已提交
972

973 974 975 976
/* Allocate an array of sched domains, for partition_sched_domains(). */
cpumask_var_t *alloc_sched_domains(unsigned int ndoms);
void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms);

977 978
bool cpus_share_cache(int this_cpu, int that_cpu);

979
#else /* CONFIG_SMP */
L
Linus Torvalds 已提交
980

981
struct sched_domain_attr;
982

983
static inline void
984
partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
985 986
			struct sched_domain_attr *dattr_new)
{
987
}
988 989 990 991 992 993

static inline bool cpus_share_cache(int this_cpu, int that_cpu)
{
	return true;
}

994
#endif	/* !CONFIG_SMP */
L
Linus Torvalds 已提交
995

996

L
Linus Torvalds 已提交
997 998 999
struct io_context;			/* See blkdev.h */


1000
#ifdef ARCH_HAS_PREFETCH_SWITCH_STACK
1001
extern void prefetch_stack(struct task_struct *t);
1002 1003 1004
#else
static inline void prefetch_stack(struct task_struct *t) { }
#endif
L
Linus Torvalds 已提交
1005 1006 1007

struct audit_context;		/* See audit.c */
struct mempolicy;
1008
struct pipe_inode_info;
1009
struct uts_namespace;
L
Linus Torvalds 已提交
1010

I
Ingo Molnar 已提交
1011
struct load_weight {
1012 1013
	unsigned long weight;
	u32 inv_weight;
I
Ingo Molnar 已提交
1014 1015
};

1016 1017 1018
struct sched_avg {
	/*
	 * These sums represent an infinite geometric series and so are bound
1019
	 * above by 1024/(1-y).  Thus we only need a u32 to store them for all
1020 1021 1022 1023
	 * choices of y < 1-2^(-32)*1024.
	 */
	u32 runnable_avg_sum, runnable_avg_period;
	u64 last_runnable_update;
1024
	s64 decay_count;
1025
	unsigned long load_avg_contrib;
1026 1027
};

1028
#ifdef CONFIG_SCHEDSTATS
1029
struct sched_statistics {
I
Ingo Molnar 已提交
1030
	u64			wait_start;
1031
	u64			wait_max;
1032 1033
	u64			wait_count;
	u64			wait_sum;
1034 1035
	u64			iowait_count;
	u64			iowait_sum;
1036

I
Ingo Molnar 已提交
1037 1038
	u64			sleep_start;
	u64			sleep_max;
1039 1040 1041
	s64			sum_sleep_runtime;

	u64			block_start;
I
Ingo Molnar 已提交
1042 1043
	u64			block_max;
	u64			exec_max;
I
Ingo Molnar 已提交
1044
	u64			slice_max;
1045 1046 1047 1048 1049 1050 1051 1052 1053 1054 1055 1056 1057 1058 1059 1060

	u64			nr_migrations_cold;
	u64			nr_failed_migrations_affine;
	u64			nr_failed_migrations_running;
	u64			nr_failed_migrations_hot;
	u64			nr_forced_migrations;

	u64			nr_wakeups;
	u64			nr_wakeups_sync;
	u64			nr_wakeups_migrate;
	u64			nr_wakeups_local;
	u64			nr_wakeups_remote;
	u64			nr_wakeups_affine;
	u64			nr_wakeups_affine_attempts;
	u64			nr_wakeups_passive;
	u64			nr_wakeups_idle;
1061 1062 1063 1064 1065 1066 1067 1068 1069 1070 1071 1072 1073 1074 1075 1076 1077 1078
};
#endif

struct sched_entity {
	struct load_weight	load;		/* for load-balancing */
	struct rb_node		run_node;
	struct list_head	group_node;
	unsigned int		on_rq;

	u64			exec_start;
	u64			sum_exec_runtime;
	u64			vruntime;
	u64			prev_sum_exec_runtime;

	u64			nr_migrations;

#ifdef CONFIG_SCHEDSTATS
	struct sched_statistics statistics;
1079 1080
#endif

I
Ingo Molnar 已提交
1081
#ifdef CONFIG_FAIR_GROUP_SCHED
P
Peter Zijlstra 已提交
1082
	int			depth;
I
Ingo Molnar 已提交
1083 1084 1085 1086 1087 1088
	struct sched_entity	*parent;
	/* rq on which this entity is (to be) queued: */
	struct cfs_rq		*cfs_rq;
	/* rq "owned" by this entity/group: */
	struct cfs_rq		*my_q;
#endif
1089

1090
#ifdef CONFIG_SMP
1091
	/* Per-entity load-tracking */
1092 1093
	struct sched_avg	avg;
#endif
I
Ingo Molnar 已提交
1094
};
1095

P
Peter Zijlstra 已提交
1096 1097
struct sched_rt_entity {
	struct list_head run_list;
1098
	unsigned long timeout;
1099
	unsigned long watchdog_stamp;
1100
	unsigned int time_slice;
P
Peter Zijlstra 已提交
1101

1102
	struct sched_rt_entity *back;
1103
#ifdef CONFIG_RT_GROUP_SCHED
P
Peter Zijlstra 已提交
1104 1105 1106 1107 1108 1109
	struct sched_rt_entity	*parent;
	/* rq on which this entity is (to be) queued: */
	struct rt_rq		*rt_rq;
	/* rq "owned" by this entity/group: */
	struct rt_rq		*my_q;
#endif
P
Peter Zijlstra 已提交
1110 1111
};

1112 1113 1114 1115 1116 1117 1118 1119 1120 1121
struct sched_dl_entity {
	struct rb_node	rb_node;

	/*
	 * Original scheduling parameters. Copied here from sched_attr
	 * during sched_setscheduler2(), they will remain the same until
	 * the next sched_setscheduler2().
	 */
	u64 dl_runtime;		/* maximum runtime for each instance	*/
	u64 dl_deadline;	/* relative deadline of each instance	*/
1122
	u64 dl_period;		/* separation of two instances (period) */
1123
	u64 dl_bw;		/* dl_runtime / dl_deadline		*/
1124 1125 1126 1127 1128 1129 1130 1131 1132 1133 1134 1135 1136 1137 1138 1139 1140 1141 1142 1143

	/*
	 * Actual scheduling parameters. Initialized with the values above,
	 * they are continously updated during task execution. Note that
	 * the remaining runtime could be < 0 in case we are in overrun.
	 */
	s64 runtime;		/* remaining runtime for this instance	*/
	u64 deadline;		/* absolute deadline for this instance	*/
	unsigned int flags;	/* specifying the scheduler behaviour	*/

	/*
	 * Some bool flags:
	 *
	 * @dl_throttled tells if we exhausted the runtime. If so, the
	 * task has to wait for a replenishment to be performed at the
	 * next firing of dl_timer.
	 *
	 * @dl_new tells if a new instance arrived. If so we must
	 * start executing it with full runtime and reset its absolute
	 * deadline;
1144 1145 1146 1147
	 *
	 * @dl_boosted tells if we are boosted due to DI. If so we are
	 * outside bandwidth enforcement mechanism (but only until we
	 * exit the critical section).
1148
	 */
1149
	int dl_throttled, dl_new, dl_boosted;
1150 1151 1152 1153 1154 1155 1156

	/*
	 * Bandwidth enforcement timer. Each -deadline task has its
	 * own bandwidth to be enforced, thus we need one timer per task.
	 */
	struct hrtimer dl_timer;
};
1157

1158 1159
struct rcu_node;

P
Peter Zijlstra 已提交
1160 1161 1162
enum perf_event_task_context {
	perf_invalid_context = -1,
	perf_hw_context = 0,
1163
	perf_sw_context,
P
Peter Zijlstra 已提交
1164 1165 1166
	perf_nr_task_contexts,
};

L
Linus Torvalds 已提交
1167 1168
struct task_struct {
	volatile long state;	/* -1 unrunnable, 0 runnable, >0 stopped */
R
Roman Zippel 已提交
1169
	void *stack;
L
Linus Torvalds 已提交
1170
	atomic_t usage;
1171 1172
	unsigned int flags;	/* per process flags, defined below */
	unsigned int ptrace;
L
Linus Torvalds 已提交
1173

1174
#ifdef CONFIG_SMP
P
Peter Zijlstra 已提交
1175
	struct llist_node wake_entry;
P
Peter Zijlstra 已提交
1176
	int on_cpu;
1177 1178 1179
	struct task_struct *last_wakee;
	unsigned long wakee_flips;
	unsigned long wakee_flip_decay_ts;
1180 1181

	int wake_cpu;
1182
#endif
P
Peter Zijlstra 已提交
1183
	int on_rq;
1184

1185
	int prio, static_prio, normal_prio;
1186
	unsigned int rt_priority;
1187
	const struct sched_class *sched_class;
I
Ingo Molnar 已提交
1188
	struct sched_entity se;
P
Peter Zijlstra 已提交
1189
	struct sched_rt_entity rt;
P
Peter Zijlstra 已提交
1190 1191 1192
#ifdef CONFIG_CGROUP_SCHED
	struct task_group *sched_task_group;
#endif
1193
	struct sched_dl_entity dl;
L
Linus Torvalds 已提交
1194

1195 1196 1197 1198 1199
#ifdef CONFIG_PREEMPT_NOTIFIERS
	/* list of struct preempt_notifier: */
	struct hlist_head preempt_notifiers;
#endif

1200
#ifdef CONFIG_BLK_DEV_IO_TRACE
1201
	unsigned int btrace_seq;
1202
#endif
L
Linus Torvalds 已提交
1203

1204
	unsigned int policy;
1205
	int nr_cpus_allowed;
L
Linus Torvalds 已提交
1206 1207
	cpumask_t cpus_allowed;

P
Paul E. McKenney 已提交
1208
#ifdef CONFIG_PREEMPT_RCU
P
Paul E. McKenney 已提交
1209
	int rcu_read_lock_nesting;
1210 1211
	char rcu_read_unlock_special;
	struct list_head rcu_node_entry;
P
Paul E. McKenney 已提交
1212 1213 1214
#endif /* #ifdef CONFIG_PREEMPT_RCU */
#ifdef CONFIG_TREE_PREEMPT_RCU
	struct rcu_node *rcu_blocked_node;
1215
#endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */
1216 1217 1218
#ifdef CONFIG_RCU_BOOST
	struct rt_mutex *rcu_boost_mutex;
#endif /* #ifdef CONFIG_RCU_BOOST */
P
Paul E. McKenney 已提交
1219

1220
#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
L
Linus Torvalds 已提交
1221 1222 1223 1224
	struct sched_info sched_info;
#endif

	struct list_head tasks;
1225
#ifdef CONFIG_SMP
1226
	struct plist_node pushable_tasks;
1227
	struct rb_node pushable_dl_tasks;
1228
#endif
L
Linus Torvalds 已提交
1229 1230

	struct mm_struct *mm, *active_mm;
1231 1232 1233
#ifdef CONFIG_COMPAT_BRK
	unsigned brk_randomized:1;
#endif
1234 1235 1236
#if defined(SPLIT_RSS_COUNTING)
	struct task_rss_stat	rss_stat;
#endif
L
Linus Torvalds 已提交
1237
/* task state */
1238
	int exit_state;
L
Linus Torvalds 已提交
1239 1240
	int exit_code, exit_signal;
	int pdeath_signal;  /*  The signal sent when the parent dies  */
1241
	unsigned int jobctl;	/* JOBCTL_*, siglock protected */
1242 1243

	/* Used for emulating ABI behavior of previous Linux versions */
1244
	unsigned int personality;
1245

1246 1247
	unsigned in_execve:1;	/* Tell the LSMs that the process is doing an
				 * execve */
1248 1249
	unsigned in_iowait:1;

1250 1251
	/* task may not gain privileges */
	unsigned no_new_privs:1;
1252 1253 1254

	/* Revert to default priority/policy when forking */
	unsigned sched_reset_on_fork:1;
1255
	unsigned sched_contributes_to_load:1;
1256

L
Linus Torvalds 已提交
1257 1258
	pid_t pid;
	pid_t tgid;
1259

1260
#ifdef CONFIG_CC_STACKPROTECTOR
1261 1262
	/* Canary value for the -fstack-protector gcc feature */
	unsigned long stack_canary;
1263
#endif
1264
	/*
L
Linus Torvalds 已提交
1265
	 * pointers to (original) parent process, youngest child, younger sibling,
1266
	 * older sibling, respectively.  (p->father can be replaced with
R
Roland McGrath 已提交
1267
	 * p->real_parent->pid)
L
Linus Torvalds 已提交
1268
	 */
1269 1270
	struct task_struct __rcu *real_parent; /* real parent process */
	struct task_struct __rcu *parent; /* recipient of SIGCHLD, wait4() reports */
L
Linus Torvalds 已提交
1271
	/*
R
Roland McGrath 已提交
1272
	 * children/sibling forms the list of my natural children
L
Linus Torvalds 已提交
1273 1274 1275 1276 1277
	 */
	struct list_head children;	/* list of my children */
	struct list_head sibling;	/* linkage in my parent's children list */
	struct task_struct *group_leader;	/* threadgroup leader */

R
Roland McGrath 已提交
1278 1279 1280 1281 1282 1283 1284 1285
	/*
	 * ptraced is the list of tasks this task is using ptrace on.
	 * This includes both natural children and PTRACE_ATTACH targets.
	 * p->ptrace_entry is p's link on the p->parent->ptraced list.
	 */
	struct list_head ptraced;
	struct list_head ptrace_entry;

L
Linus Torvalds 已提交
1286
	/* PID/PID hash table linkage. */
1287
	struct pid_link pids[PIDTYPE_MAX];
O
Oleg Nesterov 已提交
1288
	struct list_head thread_group;
1289
	struct list_head thread_node;
L
Linus Torvalds 已提交
1290 1291 1292 1293 1294

	struct completion *vfork_done;		/* for vfork() */
	int __user *set_child_tid;		/* CLONE_CHILD_SETTID */
	int __user *clear_child_tid;		/* CLONE_CHILD_CLEARTID */

1295
	cputime_t utime, stime, utimescaled, stimescaled;
1296
	cputime_t gtime;
1297
#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
1298
	struct cputime prev_cputime;
1299 1300 1301 1302 1303 1304 1305 1306 1307
#endif
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
	seqlock_t vtime_seqlock;
	unsigned long long vtime_snap;
	enum {
		VTIME_SLEEPING = 0,
		VTIME_USER,
		VTIME_SYS,
	} vtime_snap_whence;
1308
#endif
L
Linus Torvalds 已提交
1309
	unsigned long nvcsw, nivcsw; /* context switch counts */
1310 1311
	struct timespec start_time; 		/* monotonic time */
	struct timespec real_start_time;	/* boot based time */
L
Linus Torvalds 已提交
1312 1313 1314
/* mm fault and swap info: this can arguably be seen as either mm-specific or thread-specific */
	unsigned long min_flt, maj_flt;

1315
	struct task_cputime cputime_expires;
L
Linus Torvalds 已提交
1316 1317 1318
	struct list_head cpu_timers[3];

/* process credentials */
A
Arnd Bergmann 已提交
1319
	const struct cred __rcu *real_cred; /* objective and real subjective task
1320
					 * credentials (COW) */
A
Arnd Bergmann 已提交
1321
	const struct cred __rcu *cred;	/* effective (overridable) subjective task
1322
					 * credentials (COW) */
1323 1324 1325
	char comm[TASK_COMM_LEN]; /* executable name excluding path
				     - access with [gs]et_task_comm (which lock
				       it with task_lock())
1326
				     - initialized normally by setup_new_exec */
L
Linus Torvalds 已提交
1327 1328
/* file system info */
	int link_count, total_link_count;
1329
#ifdef CONFIG_SYSVIPC
L
Linus Torvalds 已提交
1330 1331
/* ipc stuff */
	struct sysv_sem sysvsem;
1332
#endif
1333
#ifdef CONFIG_DETECT_HUNG_TASK
1334 1335 1336
/* hung task detection */
	unsigned long last_switch_count;
#endif
L
Linus Torvalds 已提交
1337 1338 1339 1340 1341 1342
/* CPU-specific state of this task */
	struct thread_struct thread;
/* filesystem information */
	struct fs_struct *fs;
/* open file information */
	struct files_struct *files;
1343
/* namespaces */
S
Serge E. Hallyn 已提交
1344
	struct nsproxy *nsproxy;
L
Linus Torvalds 已提交
1345 1346 1347 1348 1349
/* signal handlers */
	struct signal_struct *signal;
	struct sighand_struct *sighand;

	sigset_t blocked, real_blocked;
1350
	sigset_t saved_sigmask;	/* restored if set_restore_sigmask() was used */
L
Linus Torvalds 已提交
1351 1352 1353 1354 1355 1356 1357
	struct sigpending pending;

	unsigned long sas_ss_sp;
	size_t sas_ss_size;
	int (*notifier)(void *priv);
	void *notifier_data;
	sigset_t *notifier_mask;
1358
	struct callback_head *task_works;
1359

L
Linus Torvalds 已提交
1360
	struct audit_context *audit_context;
A
Al Viro 已提交
1361
#ifdef CONFIG_AUDITSYSCALL
1362
	kuid_t loginuid;
1363
	unsigned int sessionid;
A
Al Viro 已提交
1364
#endif
1365
	struct seccomp seccomp;
L
Linus Torvalds 已提交
1366 1367 1368 1369

/* Thread group tracking */
   	u32 parent_exec_id;
   	u32 self_exec_id;
1370 1371
/* Protection of (de-)allocation: mm, files, fs, tty, keyrings, mems_allowed,
 * mempolicy */
L
Linus Torvalds 已提交
1372 1373
	spinlock_t alloc_lock;

1374
	/* Protection of the PI data structures: */
1375
	raw_spinlock_t pi_lock;
1376

I
Ingo Molnar 已提交
1377 1378
#ifdef CONFIG_RT_MUTEXES
	/* PI waiters blocked on a rt_mutex held by this task */
1379 1380
	struct rb_root pi_waiters;
	struct rb_node *pi_waiters_leftmost;
I
Ingo Molnar 已提交
1381 1382
	/* Deadlock detection and priority inheritance handling */
	struct rt_mutex_waiter *pi_blocked_on;
1383 1384
	/* Top pi_waiters task */
	struct task_struct *pi_top_task;
I
Ingo Molnar 已提交
1385 1386
#endif

1387 1388 1389 1390
#ifdef CONFIG_DEBUG_MUTEXES
	/* mutex deadlock detection */
	struct mutex_waiter *blocked_on;
#endif
1391 1392 1393 1394
#ifdef CONFIG_TRACE_IRQFLAGS
	unsigned int irq_events;
	unsigned long hardirq_enable_ip;
	unsigned long hardirq_disable_ip;
1395
	unsigned int hardirq_enable_event;
1396
	unsigned int hardirq_disable_event;
1397 1398
	int hardirqs_enabled;
	int hardirq_context;
1399 1400
	unsigned long softirq_disable_ip;
	unsigned long softirq_enable_ip;
1401
	unsigned int softirq_disable_event;
1402
	unsigned int softirq_enable_event;
1403
	int softirqs_enabled;
1404 1405
	int softirq_context;
#endif
I
Ingo Molnar 已提交
1406
#ifdef CONFIG_LOCKDEP
1407
# define MAX_LOCK_DEPTH 48UL
I
Ingo Molnar 已提交
1408 1409 1410
	u64 curr_chain_key;
	int lockdep_depth;
	unsigned int lockdep_recursion;
1411
	struct held_lock held_locks[MAX_LOCK_DEPTH];
1412
	gfp_t lockdep_reclaim_gfp;
I
Ingo Molnar 已提交
1413
#endif
1414

L
Linus Torvalds 已提交
1415 1416 1417
/* journalling filesystem info */
	void *journal_info;

1418
/* stacked block device info */
1419
	struct bio_list *bio_list;
1420

1421 1422 1423 1424 1425
#ifdef CONFIG_BLOCK
/* stack plugging */
	struct blk_plug *plug;
#endif

L
Linus Torvalds 已提交
1426 1427 1428 1429 1430 1431 1432 1433 1434
/* VM state */
	struct reclaim_state *reclaim_state;

	struct backing_dev_info *backing_dev_info;

	struct io_context *io_context;

	unsigned long ptrace_message;
	siginfo_t *last_siginfo; /* For ptrace use.  */
1435
	struct task_io_accounting ioac;
1436
#if defined(CONFIG_TASK_XACCT)
L
Linus Torvalds 已提交
1437 1438
	u64 acct_rss_mem1;	/* accumulated rss usage */
	u64 acct_vm_mem1;	/* accumulated virtual memory usage */
1439
	cputime_t acct_timexpd;	/* stime + utime since last update */
L
Linus Torvalds 已提交
1440 1441
#endif
#ifdef CONFIG_CPUSETS
1442
	nodemask_t mems_allowed;	/* Protected by alloc_lock */
1443
	seqcount_t mems_allowed_seq;	/* Seqence no to catch updates */
1444
	int cpuset_mem_spread_rotor;
1445
	int cpuset_slab_spread_rotor;
L
Linus Torvalds 已提交
1446
#endif
1447
#ifdef CONFIG_CGROUPS
1448
	/* Control Group info protected by css_set_lock */
A
Arnd Bergmann 已提交
1449
	struct css_set __rcu *cgroups;
1450 1451
	/* cg_list protected by css_set_lock and tsk->alloc_lock */
	struct list_head cg_list;
1452
#endif
1453
#ifdef CONFIG_FUTEX
1454
	struct robust_list_head __user *robust_list;
1455 1456 1457
#ifdef CONFIG_COMPAT
	struct compat_robust_list_head __user *compat_robust_list;
#endif
1458 1459
	struct list_head pi_state_list;
	struct futex_pi_state *pi_state_cache;
1460
#endif
1461
#ifdef CONFIG_PERF_EVENTS
P
Peter Zijlstra 已提交
1462
	struct perf_event_context *perf_event_ctxp[perf_nr_task_contexts];
1463 1464
	struct mutex perf_event_mutex;
	struct list_head perf_event_list;
1465
#endif
1466
#ifdef CONFIG_NUMA
1467
	struct mempolicy *mempolicy;	/* Protected by alloc_lock */
1468
	short il_next;
1469
	short pref_node_fork;
1470
#endif
1471 1472 1473
#ifdef CONFIG_NUMA_BALANCING
	int numa_scan_seq;
	unsigned int numa_scan_period;
1474
	unsigned int numa_scan_period_max;
1475
	int numa_preferred_nid;
1476
	unsigned long numa_migrate_retry;
1477
	u64 node_stamp;			/* migration stamp  */
1478 1479
	u64 last_task_numa_placement;
	u64 last_sum_exec_runtime;
1480
	struct callback_head numa_work;
1481

1482 1483 1484
	struct list_head numa_entry;
	struct numa_group *numa_group;

1485 1486 1487 1488 1489
	/*
	 * Exponential decaying average of faults on a per-node basis.
	 * Scheduling placement decisions are made based on the these counts.
	 * The values remain static for the duration of a PTE scan
	 */
1490
	unsigned long *numa_faults_memory;
1491
	unsigned long total_numa_faults;
1492 1493 1494

	/*
	 * numa_faults_buffer records faults per node during the current
1495 1496
	 * scan window. When the scan completes, the counts in
	 * numa_faults_memory decay and these values are copied.
1497
	 */
1498
	unsigned long *numa_faults_buffer_memory;
1499

1500 1501 1502 1503 1504 1505 1506
	/*
	 * Track the nodes the process was running on when a NUMA hinting
	 * fault was incurred.
	 */
	unsigned long *numa_faults_cpu;
	unsigned long *numa_faults_buffer_cpu;

1507 1508 1509 1510 1511 1512 1513 1514
	/*
	 * numa_faults_locality tracks if faults recorded during the last
	 * scan window were remote/local. The task scan period is adapted
	 * based on the locality of the faults with different weights
	 * depending on whether they were shared or private faults
	 */
	unsigned long numa_faults_locality[2];

I
Ingo Molnar 已提交
1515
	unsigned long numa_pages_migrated;
1516 1517
#endif /* CONFIG_NUMA_BALANCING */

I
Ingo Molnar 已提交
1518
	struct rcu_head rcu;
1519 1520 1521 1522 1523

	/*
	 * cache last used pipe for splice
	 */
	struct pipe_inode_info *splice_pipe;
1524 1525 1526

	struct page_frag task_frag;

1527 1528
#ifdef	CONFIG_TASK_DELAY_ACCT
	struct task_delay_info *delays;
1529 1530 1531
#endif
#ifdef CONFIG_FAULT_INJECTION
	int make_it_fail;
1532
#endif
1533 1534 1535 1536 1537 1538
	/*
	 * when (nr_dirtied >= nr_dirtied_pause), it's time to call
	 * balance_dirty_pages() for some dirty throttling pause
	 */
	int nr_dirtied;
	int nr_dirtied_pause;
1539
	unsigned long dirty_paused_when; /* start of a write-and-pause period */
1540

A
Arjan van de Ven 已提交
1541 1542 1543 1544
#ifdef CONFIG_LATENCYTOP
	int latency_record_count;
	struct latency_record latency_record[LT_SAVECOUNT];
#endif
1545 1546 1547 1548 1549 1550
	/*
	 * time slack values; these are used to round up poll() and
	 * select() etc timeout values. These are in nanoseconds.
	 */
	unsigned long timer_slack_ns;
	unsigned long default_timer_slack_ns;
1551

1552
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	/* Index of current stored address in ret_stack */
	int curr_ret_stack;
	/* Stack of return addresses for return function tracing */
	struct ftrace_ret_stack	*ret_stack;
	/* time stamp for last schedule */
	unsigned long long ftrace_timestamp;
	/*
	 * Number of functions that haven't been traced
	 * because of depth overrun.
	 */
	atomic_t trace_overrun;
	/* Pause for the tracing */
	atomic_t tracing_graph_pause;
#endif
#ifdef CONFIG_TRACING
	/* state flags for use by tracers */
	unsigned long trace;
	/* bitmask and counter of trace recursion */
	unsigned long trace_recursion;
#endif /* CONFIG_TRACING */
#ifdef CONFIG_MEMCG /* memcg uses this to do batch job */
	struct memcg_batch_info {
		int do_batch;	/* incremented when batch uncharge started */
		struct mem_cgroup *memcg; /* target memcg of uncharge */
		unsigned long nr_pages;	/* uncharged usage */
		unsigned long memsw_nr_pages; /* uncharged mem+swap usage */
	} memcg_batch;
	unsigned int memcg_kmem_skip_account;
	struct memcg_oom_info {
		struct mem_cgroup *memcg;
		gfp_t gfp_mask;
		int order;
		unsigned int may_oom:1;
	} memcg_oom;
#endif
#ifdef CONFIG_UPROBES
	struct uprobe_task *utask;
#endif
#if defined(CONFIG_BCACHE) || defined(CONFIG_BCACHE_MODULE)
	unsigned int	sequential_io;
	unsigned int	sequential_io_avg;
#endif
};

/* Future-safe accessor for struct task_struct's cpus_allowed. */
#define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
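/*
 * Illustrative sketch (not part of this header): iterating the CPUs a
 * task may run on through the accessor, so callers keep working if the
 * underlying field is ever renamed.
 *
 *	int cpu;
 *
 *	for_each_cpu(cpu, tsk_cpus_allowed(p))
 *		pr_info("task may run on CPU %d\n", cpu);
 */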

#define TNF_MIGRATED	0x01
#define TNF_NO_GROUP	0x02
#define TNF_SHARED	0x04
#define TNF_FAULT_LOCAL	0x08

#ifdef CONFIG_NUMA_BALANCING
extern void task_numa_fault(int last_node, int node, int pages, int flags);
extern pid_t task_numa_group_id(struct task_struct *p);
extern void set_numabalancing_state(bool enabled);
extern void task_numa_free(struct task_struct *p);
extern bool should_numa_migrate_memory(struct task_struct *p, struct page *page,
					int src_nid, int dst_cpu);
#else
static inline void task_numa_fault(int last_node, int node, int pages,
				   int flags)
{
}
static inline pid_t task_numa_group_id(struct task_struct *p)
{
	return 0;
}
static inline void set_numabalancing_state(bool enabled)
{
}
static inline void task_numa_free(struct task_struct *p)
{
}
static inline bool should_numa_migrate_memory(struct task_struct *p,
				struct page *page, int src_nid, int dst_cpu)
{
	return true;
}
#endif

static inline struct pid *task_pid(struct task_struct *task)
{
	return task->pids[PIDTYPE_PID].pid;
}

static inline struct pid *task_tgid(struct task_struct *task)
{
	return task->group_leader->pids[PIDTYPE_PID].pid;
}

/*
 * Without the tasklist or rcu lock it is not safe to dereference
 * the result of task_pgrp/task_session, even if task == current;
 * we can race with another thread doing sys_setsid/sys_setpgid.
 */
static inline struct pid *task_pgrp(struct task_struct *task)
{
	return task->group_leader->pids[PIDTYPE_PGID].pid;
}

static inline struct pid *task_session(struct task_struct *task)
{
	return task->group_leader->pids[PIDTYPE_SID].pid;
}

struct pid_namespace;

/*
 * the helpers to get the task's different pids as they are seen
 * from various namespaces
 *
 * task_xid_nr()     : global id, i.e. the id seen from the init namespace;
 * task_xid_vnr()    : virtual id, i.e. the id seen from the pid namespace of
 *                     current.
 * task_xid_nr_ns()  : id seen from the ns specified;
 *
 * set_task_vxid()   : assigns a virtual id to a task;
 *
 * see also pid_nr() etc in include/linux/pid.h
 */
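/*
 * Illustrative sketch of the convention above, reporting one task's pid
 * from three viewpoints ("p" and "ns" are assumed valid):
 *
 *	pid_t global = task_pid_nr(p);		// id in the init namespace
 *	pid_t virt   = task_pid_vnr(p);		// id in current's pid ns
 *	pid_t in_ns  = task_pid_nr_ns(p, ns);	// id in an explicit ns
 */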
pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type,
			struct pid_namespace *ns);

static inline pid_t task_pid_nr(struct task_struct *tsk)
{
	return tsk->pid;
}

static inline pid_t task_pid_nr_ns(struct task_struct *tsk,
					struct pid_namespace *ns)
{
	return __task_pid_nr_ns(tsk, PIDTYPE_PID, ns);
}

static inline pid_t task_pid_vnr(struct task_struct *tsk)
{
	return __task_pid_nr_ns(tsk, PIDTYPE_PID, NULL);
}


static inline pid_t task_tgid_nr(struct task_struct *tsk)
{
	return tsk->tgid;
}

pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns);

static inline pid_t task_tgid_vnr(struct task_struct *tsk)
{
	return pid_vnr(task_tgid(tsk));
}


static inline pid_t task_pgrp_nr_ns(struct task_struct *tsk,
					struct pid_namespace *ns)
{
	return __task_pid_nr_ns(tsk, PIDTYPE_PGID, ns);
}

static inline pid_t task_pgrp_vnr(struct task_struct *tsk)
{
	return __task_pid_nr_ns(tsk, PIDTYPE_PGID, NULL);
}


static inline pid_t task_session_nr_ns(struct task_struct *tsk,
					struct pid_namespace *ns)
{
	return __task_pid_nr_ns(tsk, PIDTYPE_SID, ns);
}

static inline pid_t task_session_vnr(struct task_struct *tsk)
{
	return __task_pid_nr_ns(tsk, PIDTYPE_SID, NULL);
}

/* obsolete, do not use */
static inline pid_t task_pgrp_nr(struct task_struct *tsk)
{
	return task_pgrp_nr_ns(tsk, &init_pid_ns);
}

/**
 * pid_alive - check that a task structure is not stale
 * @p: Task structure to be checked.
 *
 * Test if a process is not yet dead (at most zombie state)
 * If pid_alive fails, then pointers within the task structure
 * can be stale and must not be dereferenced.
 *
 * Return: 1 if the process is alive. 0 otherwise.
 */
static inline int pid_alive(struct task_struct *p)
{
	return p->pids[PIDTYPE_PID].pid != NULL;
}
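/*
 * Illustrative sketch: pid_alive() is only meaningful while the task
 * cannot be released, e.g. inside an RCU read-side critical section.
 *
 *	rcu_read_lock();
 *	if (pid_alive(p))
 *		ppid = task_tgid_nr(rcu_dereference(p->real_parent));
 *	rcu_read_unlock();
 */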

/**
 * is_global_init - check if a task structure is init
 * @tsk: Task structure to be checked.
 *
 * Check if a task structure is the first user space task the kernel created.
 *
 * Return: 1 if the task structure is init. 0 otherwise.
 */
static inline int is_global_init(struct task_struct *tsk)
{
	return tsk->pid == 1;
}

extern struct pid *cad_pid;

extern void free_task(struct task_struct *tsk);
#define get_task_struct(tsk) do { atomic_inc(&(tsk)->usage); } while(0)

extern void __put_task_struct(struct task_struct *t);

static inline void put_task_struct(struct task_struct *t)
{
	if (atomic_dec_and_test(&t->usage))
		__put_task_struct(t);
}
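/*
 * Illustrative sketch: pinning a task over a blocking operation with the
 * usage refcount; do_something_slow() is a hypothetical helper.
 *
 *	get_task_struct(tsk);
 *	do_something_slow(tsk);
 *	put_task_struct(tsk);		// may free tsk here
 */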

#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
extern void task_cputime(struct task_struct *t,
			 cputime_t *utime, cputime_t *stime);
extern void task_cputime_scaled(struct task_struct *t,
				cputime_t *utimescaled, cputime_t *stimescaled);
extern cputime_t task_gtime(struct task_struct *t);
#else
static inline void task_cputime(struct task_struct *t,
				cputime_t *utime, cputime_t *stime)
{
	if (utime)
		*utime = t->utime;
	if (stime)
		*stime = t->stime;
}

static inline void task_cputime_scaled(struct task_struct *t,
				       cputime_t *utimescaled,
				       cputime_t *stimescaled)
{
	if (utimescaled)
		*utimescaled = t->utimescaled;
	if (stimescaled)
		*stimescaled = t->stimescaled;
}

static inline cputime_t task_gtime(struct task_struct *t)
{
	return t->gtime;
}
#endif
extern void task_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st);
extern void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st);
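/*
 * Illustrative sketch: sampling a task's accounted CPU time. With
 * CONFIG_VIRT_CPU_ACCOUNTING_GEN the out-of-line variants are used;
 * otherwise the inline fallbacks above simply copy the fields.
 *
 *	cputime_t utime, stime;
 *
 *	task_cputime(p, &utime, &stime);
 */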

/*
 * Per process flags
 */
#define PF_EXITING	0x00000004	/* getting shut down */
#define PF_EXITPIDONE	0x00000008	/* pi exit done on shut down */
#define PF_VCPU		0x00000010	/* I'm a virtual CPU */
#define PF_WQ_WORKER	0x00000020	/* I'm a workqueue worker */
#define PF_FORKNOEXEC	0x00000040	/* forked but didn't exec */
#define PF_MCE_PROCESS  0x00000080      /* process policy on mce errors */
#define PF_SUPERPRIV	0x00000100	/* used super-user privileges */
#define PF_DUMPCORE	0x00000200	/* dumped core */
#define PF_SIGNALED	0x00000400	/* killed by a signal */
#define PF_MEMALLOC	0x00000800	/* Allocating memory */
#define PF_NPROC_EXCEEDED 0x00001000	/* set_user noticed that RLIMIT_NPROC was exceeded */
#define PF_USED_MATH	0x00002000	/* if unset the fpu must be initialized before use */
#define PF_USED_ASYNC	0x00004000	/* used async_schedule*(), used by module init */
#define PF_NOFREEZE	0x00008000	/* this thread should not be frozen */
#define PF_FROZEN	0x00010000	/* frozen for system suspend */
#define PF_FSTRANS	0x00020000	/* inside a filesystem transaction */
#define PF_KSWAPD	0x00040000	/* I am kswapd */
#define PF_MEMALLOC_NOIO 0x00080000	/* Allocating memory without IO involved */
#define PF_LESS_THROTTLE 0x00100000	/* Throttle me less: I clean memory */
#define PF_KTHREAD	0x00200000	/* I am a kernel thread */
#define PF_RANDOMIZE	0x00400000	/* randomize virtual address space */
#define PF_SWAPWRITE	0x00800000	/* Allowed to write to swap */
#define PF_SPREAD_PAGE	0x01000000	/* Spread page cache over cpuset */
#define PF_SPREAD_SLAB	0x02000000	/* Spread some slab caches over cpuset */
#define PF_NO_SETAFFINITY 0x04000000	/* Userland is not allowed to meddle with cpus_allowed */
#define PF_MCE_EARLY    0x08000000      /* Early kill for mce process policy */
#define PF_MEMPOLICY	0x10000000	/* Non-default NUMA mempolicy */
#define PF_MUTEX_TESTER	0x20000000	/* Thread belongs to the rt mutex tester */
#define PF_FREEZER_SKIP	0x40000000	/* Freezer should not count it as freezable */
#define PF_SUSPEND_TASK 0x80000000      /* this thread called freeze_processes and should not be frozen */

/*
 * Only the _current_ task can read/write to tsk->flags, but other
 * tasks can access tsk->flags in readonly mode for example
 * with tsk_used_math (like during threaded core dumping).
 * There is however an exception to this rule during ptrace
 * or during fork: the ptracer task is allowed to write to the
 * child->flags of its traced child (same goes for fork, the parent
 * can write to the child->flags), because we're guaranteed the
 * child is not running and in turn not changing child->flags
 * at the same time the parent does it.
 */
#define clear_stopped_child_used_math(child) do { (child)->flags &= ~PF_USED_MATH; } while (0)
#define set_stopped_child_used_math(child) do { (child)->flags |= PF_USED_MATH; } while (0)
#define clear_used_math() clear_stopped_child_used_math(current)
#define set_used_math() set_stopped_child_used_math(current)
#define conditional_stopped_child_used_math(condition, child) \
	do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= (condition) ? PF_USED_MATH : 0; } while (0)
#define conditional_used_math(condition) \
	conditional_stopped_child_used_math(condition, current)
#define copy_to_stopped_child_used_math(child) \
	do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= current->flags & PF_USED_MATH; } while (0)
/* NOTE: this will return 0 or PF_USED_MATH, it will never return 1 */
#define tsk_used_math(p) ((p)->flags & PF_USED_MATH)
#define used_math() tsk_used_math(current)
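/*
 * Illustrative sketch: the helpers above gate FPU state handling;
 * copy_fpu_state() is a hypothetical arch helper, not part of this file.
 *
 *	if (tsk_used_math(child))
 *		copy_fpu_state(child, buf);
 */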

/* __GFP_IO isn't allowed if PF_MEMALLOC_NOIO is set in current->flags */
static inline gfp_t memalloc_noio_flags(gfp_t flags)
{
	if (unlikely(current->flags & PF_MEMALLOC_NOIO))
		flags &= ~__GFP_IO;
	return flags;
}

static inline unsigned int memalloc_noio_save(void)
{
	unsigned int flags = current->flags & PF_MEMALLOC_NOIO;
	current->flags |= PF_MEMALLOC_NOIO;
	return flags;
}

static inline void memalloc_noio_restore(unsigned int flags)
{
	current->flags = (current->flags & ~PF_MEMALLOC_NOIO) | flags;
}
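/*
 * Illustrative sketch: code on the memory-reclaim path can bracket a
 * section so allocations implicitly lose __GFP_IO, avoiding recursion
 * into I/O; the kmalloc() here is just a placeholder allocation.
 *
 *	unsigned int noio_flags = memalloc_noio_save();
 *	buf = kmalloc(size, GFP_KERNEL);	// treated as !__GFP_IO
 *	memalloc_noio_restore(noio_flags);
 */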

/*
 * task->jobctl flags
 */
#define JOBCTL_STOP_SIGMASK	0xffff	/* signr of the last group stop */

#define JOBCTL_STOP_DEQUEUED_BIT 16	/* stop signal dequeued */
#define JOBCTL_STOP_PENDING_BIT	17	/* task should stop for group stop */
#define JOBCTL_STOP_CONSUME_BIT	18	/* consume group stop count */
#define JOBCTL_TRAP_STOP_BIT	19	/* trap for STOP */
#define JOBCTL_TRAP_NOTIFY_BIT	20	/* trap for NOTIFY */
#define JOBCTL_TRAPPING_BIT	21	/* switching to TRACED */
#define JOBCTL_LISTENING_BIT	22	/* ptracer is listening for events */

#define JOBCTL_STOP_DEQUEUED	(1 << JOBCTL_STOP_DEQUEUED_BIT)
#define JOBCTL_STOP_PENDING	(1 << JOBCTL_STOP_PENDING_BIT)
#define JOBCTL_STOP_CONSUME	(1 << JOBCTL_STOP_CONSUME_BIT)
#define JOBCTL_TRAP_STOP	(1 << JOBCTL_TRAP_STOP_BIT)
#define JOBCTL_TRAP_NOTIFY	(1 << JOBCTL_TRAP_NOTIFY_BIT)
#define JOBCTL_TRAPPING		(1 << JOBCTL_TRAPPING_BIT)
#define JOBCTL_LISTENING	(1 << JOBCTL_LISTENING_BIT)

#define JOBCTL_TRAP_MASK	(JOBCTL_TRAP_STOP | JOBCTL_TRAP_NOTIFY)
#define JOBCTL_PENDING_MASK	(JOBCTL_STOP_PENDING | JOBCTL_TRAP_MASK)

extern bool task_set_jobctl_pending(struct task_struct *task,
				    unsigned int mask);
extern void task_clear_jobctl_trapping(struct task_struct *task);
extern void task_clear_jobctl_pending(struct task_struct *task,
				      unsigned int mask);

#ifdef CONFIG_PREEMPT_RCU

#define RCU_READ_UNLOCK_BLOCKED (1 << 0) /* blocked while in RCU read-side. */
#define RCU_READ_UNLOCK_NEED_QS (1 << 1) /* RCU core needs CPU response. */

static inline void rcu_copy_process(struct task_struct *p)
{
	p->rcu_read_lock_nesting = 0;
	p->rcu_read_unlock_special = 0;
#ifdef CONFIG_TREE_PREEMPT_RCU
	p->rcu_blocked_node = NULL;
#endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */
#ifdef CONFIG_RCU_BOOST
	p->rcu_boost_mutex = NULL;
#endif /* #ifdef CONFIG_RCU_BOOST */
	INIT_LIST_HEAD(&p->rcu_node_entry);
}

#else

static inline void rcu_copy_process(struct task_struct *p)
{
}

#endif

static inline void tsk_restore_flags(struct task_struct *task,
				unsigned long orig_flags, unsigned long flags)
{
	task->flags &= ~flags;
	task->flags |= orig_flags & flags;
}
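/*
 * Illustrative sketch: temporarily granting PF_MEMALLOC, then restoring
 * only that bit so concurrent changes to other flags are preserved;
 * do_reclaim_work() is a hypothetical helper.
 *
 *	unsigned long pflags = current->flags;
 *
 *	current->flags |= PF_MEMALLOC;
 *	do_reclaim_work();
 *	tsk_restore_flags(current, pflags, PF_MEMALLOC);
 */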

#ifdef CONFIG_SMP
extern void do_set_cpus_allowed(struct task_struct *p,
			       const struct cpumask *new_mask);

extern int set_cpus_allowed_ptr(struct task_struct *p,
				const struct cpumask *new_mask);
#else
static inline void do_set_cpus_allowed(struct task_struct *p,
				      const struct cpumask *new_mask)
{
}
static inline int set_cpus_allowed_ptr(struct task_struct *p,
				       const struct cpumask *new_mask)
{
	if (!cpumask_test_cpu(0, new_mask))
		return -EINVAL;
	return 0;
}
#endif
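/*
 * Illustrative sketch: pinning a kernel thread to a single CPU. Note
 * that on !SMP the stub above only accepts masks containing CPU 0.
 *
 *	if (set_cpus_allowed_ptr(p, cpumask_of(cpu)))
 *		pr_warn("could not pin task to CPU %d\n", cpu);
 */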

#ifdef CONFIG_NO_HZ_COMMON
void calc_load_enter_idle(void);
void calc_load_exit_idle(void);
#else
static inline void calc_load_enter_idle(void) { }
static inline void calc_load_exit_idle(void) { }
#endif /* CONFIG_NO_HZ_COMMON */

#ifndef CONFIG_CPUMASK_OFFSTACK
static inline int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
{
	return set_cpus_allowed_ptr(p, &new_mask);
}
#endif

/*
 * Do not use outside of architecture code which knows its limitations.
 *
 * sched_clock() has no promise of monotonicity or bounded drift between
 * CPUs; using it directly (which you should not) requires disabling IRQs.
 *
 * Please use one of the three interfaces below.
 */
extern unsigned long long notrace sched_clock(void);
/*
 * See the comment in kernel/sched/clock.c
 */
extern u64 cpu_clock(int cpu);
extern u64 local_clock(void);
extern u64 sched_clock_cpu(int cpu);


extern void sched_clock_init(void);

#ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
static inline void sched_clock_tick(void)
{
}

static inline void sched_clock_idle_sleep_event(void)
{
}

static inline void sched_clock_idle_wakeup_event(u64 delta_ns)
{
}
#else
/*
 * Architectures can set this to 1 if they have specified
 * CONFIG_HAVE_UNSTABLE_SCHED_CLOCK in their arch Kconfig,
 * but then during bootup it turns out that sched_clock()
 * is reliable after all:
 */
extern int sched_clock_stable(void);
extern void set_sched_clock_stable(void);
extern void clear_sched_clock_stable(void);

extern void sched_clock_tick(void);
extern void sched_clock_idle_sleep_event(void);
extern void sched_clock_idle_wakeup_event(u64 delta_ns);
#endif

#ifdef CONFIG_IRQ_TIME_ACCOUNTING
/*
 * An interface for runtime opt-in of IRQ time accounting based on
 * sched_clock. The explicit opt-in avoids a performance penalty on
 * systems with slow sched_clocks.
 */
extern void enable_sched_clock_irqtime(void);
extern void disable_sched_clock_irqtime(void);
#else
static inline void enable_sched_clock_irqtime(void) {}
static inline void disable_sched_clock_irqtime(void) {}
#endif

extern unsigned long long
task_sched_runtime(struct task_struct *task);

/* sched_exec is called by processes performing an exec */
#ifdef CONFIG_SMP
extern void sched_exec(void);
#else
#define sched_exec()   {}
#endif

#ifdef CONFIG_HOTPLUG_CPU
extern void idle_task_exit(void);
#else
static inline void idle_task_exit(void) {}
#endif

#if defined(CONFIG_NO_HZ_COMMON) && defined(CONFIG_SMP)
extern void wake_up_nohz_cpu(int cpu);
#else
static inline void wake_up_nohz_cpu(int cpu) { }
#endif

#ifdef CONFIG_NO_HZ_FULL
extern bool sched_can_stop_tick(void);
extern u64 scheduler_tick_max_deferment(void);
#else
static inline bool sched_can_stop_tick(void) { return false; }
#endif

#ifdef CONFIG_SCHED_AUTOGROUP
extern void sched_autogroup_create_attach(struct task_struct *p);
extern void sched_autogroup_detach(struct task_struct *p);
extern void sched_autogroup_fork(struct signal_struct *sig);
extern void sched_autogroup_exit(struct signal_struct *sig);
#ifdef CONFIG_PROC_FS
extern void proc_sched_autogroup_show_task(struct task_struct *p, struct seq_file *m);
extern int proc_sched_autogroup_set_nice(struct task_struct *p, int nice);
#endif
#else
static inline void sched_autogroup_create_attach(struct task_struct *p) { }
static inline void sched_autogroup_detach(struct task_struct *p) { }
static inline void sched_autogroup_fork(struct signal_struct *sig) { }
static inline void sched_autogroup_exit(struct signal_struct *sig) { }
#endif

extern bool yield_to(struct task_struct *p, bool preempt);
extern void set_user_nice(struct task_struct *p, long nice);
extern int task_prio(const struct task_struct *p);
/**
 * task_nice - return the nice value of a given task.
 * @p: the task in question.
 *
 * Return: The nice value [ -20 ... 0 ... 19 ].
 */
static inline int task_nice(const struct task_struct *p)
{
	return PRIO_TO_NICE((p)->static_prio);
}
extern int can_nice(const struct task_struct *p, const int nice);
extern int task_curr(const struct task_struct *p);
extern int idle_cpu(int cpu);
extern int sched_setscheduler(struct task_struct *, int,
			      const struct sched_param *);
extern int sched_setscheduler_nocheck(struct task_struct *, int,
				      const struct sched_param *);
extern int sched_setattr(struct task_struct *,
			 const struct sched_attr *);
extern struct task_struct *idle_task(int cpu);
/**
 * is_idle_task - is the specified task an idle task?
 * @p: the task in question.
 *
 * Return: 1 if @p is an idle task. 0 otherwise.
 */
static inline bool is_idle_task(const struct task_struct *p)
{
	return p->pid == 0;
}
extern struct task_struct *curr_task(int cpu);
extern void set_curr_task(int cpu, struct task_struct *p);

void yield(void);

/*
 * The default (Linux) execution domain.
 */
extern struct exec_domain	default_exec_domain;

union thread_union {
	struct thread_info thread_info;
	unsigned long stack[THREAD_SIZE/sizeof(long)];
};

#ifndef __HAVE_ARCH_KSTACK_END
static inline int kstack_end(void *addr)
{
	/* Reliable end of stack detection:
	 * Some APM bios versions misalign the stack
	 */
	return !(((unsigned long)addr+sizeof(void*)-1) & (THREAD_SIZE-sizeof(void*)));
}
#endif

extern union thread_union init_thread_union;
extern struct task_struct init_task;

extern struct   mm_struct init_mm;

extern struct pid_namespace init_pid_ns;

/*
 * find a task by one of its numerical ids
 *
 * find_task_by_pid_ns():
 *      finds a task by its pid in the specified namespace
 * find_task_by_vpid():
 *      finds a task by its virtual pid
 *
 * see also find_vpid() etc in include/linux/pid.h
 */

extern struct task_struct *find_task_by_vpid(pid_t nr);
extern struct task_struct *find_task_by_pid_ns(pid_t nr,
		struct pid_namespace *ns);

/* per-UID process charging. */
extern struct user_struct * alloc_uid(kuid_t);
static inline struct user_struct *get_uid(struct user_struct *u)
{
	atomic_inc(&u->__count);
	return u;
}
extern void free_uid(struct user_struct *);

#include <asm/current.h>

extern void xtime_update(unsigned long ticks);

extern int wake_up_state(struct task_struct *tsk, unsigned int state);
extern int wake_up_process(struct task_struct *tsk);
extern void wake_up_new_task(struct task_struct *tsk);
#ifdef CONFIG_SMP
 extern void kick_process(struct task_struct *tsk);
#else
 static inline void kick_process(struct task_struct *tsk) { }
#endif
extern int sched_fork(unsigned long clone_flags, struct task_struct *p);
extern void sched_dead(struct task_struct *p);

extern void proc_caches_init(void);
extern void flush_signals(struct task_struct *);
extern void __flush_signals(struct task_struct *);
extern void ignore_signals(struct task_struct *);
extern void flush_signal_handlers(struct task_struct *, int force_default);
extern int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info);

static inline int dequeue_signal_lock(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&tsk->sighand->siglock, flags);
	ret = dequeue_signal(tsk, mask, info);
	spin_unlock_irqrestore(&tsk->sighand->siglock, flags);

	return ret;
}

extern void block_all_signals(int (*notifier)(void *priv), void *priv,
			      sigset_t *mask);
extern void unblock_all_signals(void);
extern void release_task(struct task_struct * p);
extern int send_sig_info(int, struct siginfo *, struct task_struct *);
extern int force_sigsegv(int, struct task_struct *);
extern int force_sig_info(int, struct siginfo *, struct task_struct *);
extern int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp);
extern int kill_pid_info(int sig, struct siginfo *info, struct pid *pid);
extern int kill_pid_info_as_cred(int, struct siginfo *, struct pid *,
				const struct cred *, u32);
extern int kill_pgrp(struct pid *pid, int sig, int priv);
extern int kill_pid(struct pid *pid, int sig, int priv);
extern int kill_proc_info(int, struct siginfo *, pid_t);
extern __must_check bool do_notify_parent(struct task_struct *, int);
extern void __wake_up_parent(struct task_struct *p, struct task_struct *parent);
extern void force_sig(int, struct task_struct *);
extern int send_sig(int, struct task_struct *, int);
extern int zap_other_threads(struct task_struct *p);
extern struct sigqueue *sigqueue_alloc(void);
extern void sigqueue_free(struct sigqueue *);
extern int send_sigqueue(struct sigqueue *,  struct task_struct *, int group);
extern int do_sigaction(int, struct k_sigaction *, struct k_sigaction *);

static inline void restore_saved_sigmask(void)
{
	if (test_and_clear_restore_sigmask())
		__set_current_blocked(&current->saved_sigmask);
}

static inline sigset_t *sigmask_to_save(void)
{
	sigset_t *res = &current->blocked;
	if (unlikely(test_restore_sigmask()))
		res = &current->saved_sigmask;
	return res;
}

static inline int kill_cad_pid(int sig, int priv)
{
	return kill_pid(cad_pid, sig, priv);
}

/* These can be the second arg to send_sig_info/send_group_sig_info.  */
#define SEND_SIG_NOINFO ((struct siginfo *) 0)
#define SEND_SIG_PRIV	((struct siginfo *) 1)
#define SEND_SIG_FORCED	((struct siginfo *) 2)

/*
 * True if we are on the alternate signal stack.
 */
static inline int on_sig_stack(unsigned long sp)
{
#ifdef CONFIG_STACK_GROWSUP
	return sp >= current->sas_ss_sp &&
		sp - current->sas_ss_sp < current->sas_ss_size;
#else
	return sp > current->sas_ss_sp &&
		sp - current->sas_ss_sp <= current->sas_ss_size;
#endif
}

static inline int sas_ss_flags(unsigned long sp)
{
	return (current->sas_ss_size == 0 ? SS_DISABLE
		: on_sig_stack(sp) ? SS_ONSTACK : 0);
}

static inline unsigned long sigsp(unsigned long sp, struct ksignal *ksig)
{
	if (unlikely((ksig->ka.sa.sa_flags & SA_ONSTACK)) && ! sas_ss_flags(sp))
#ifdef CONFIG_STACK_GROWSUP
		return current->sas_ss_sp;
#else
		return current->sas_ss_sp + current->sas_ss_size;
#endif
	return sp;
}

/*
 * Routines for handling mm_structs
 */
extern struct mm_struct * mm_alloc(void);

/* mmdrop drops the mm and the page tables */
extern void __mmdrop(struct mm_struct *);
static inline void mmdrop(struct mm_struct * mm)
{
	if (unlikely(atomic_dec_and_test(&mm->mm_count)))
		__mmdrop(mm);
}

/* mmput gets rid of the mappings and all user-space */
extern void mmput(struct mm_struct *);
/* Grab a reference to a task's mm, if it is not already going away */
extern struct mm_struct *get_task_mm(struct task_struct *task);
/*
 * Grab a reference to a task's mm, if it is not already going away
 * and ptrace_may_access with the mode parameter passed to it
 * succeeds.
 */
extern struct mm_struct *mm_access(struct task_struct *task, unsigned int mode);
/* Remove the current tasks stale references to the old mm_struct */
extern void mm_release(struct task_struct *, struct mm_struct *);

extern int copy_thread(unsigned long, unsigned long, unsigned long,
			struct task_struct *);
extern void flush_thread(void);
extern void exit_thread(void);

extern void exit_files(struct task_struct *);
extern void __cleanup_sighand(struct sighand_struct *);

extern void exit_itimers(struct signal_struct *);
extern void flush_itimer_signals(void);

extern void do_group_exit(int);

extern int allow_signal(int);
extern int disallow_signal(int);

extern int do_execve(struct filename *,
		     const char __user * const __user *,
		     const char __user * const __user *);
extern long do_fork(unsigned long, unsigned long, unsigned long, int __user *, int __user *);
struct task_struct *fork_idle(int);
extern pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);

extern void set_task_comm(struct task_struct *tsk, char *from);
extern char *get_task_comm(char *to, struct task_struct *tsk);

#ifdef CONFIG_SMP
void scheduler_ipi(void);
extern unsigned long wait_task_inactive(struct task_struct *, long match_state);
#else
static inline void scheduler_ipi(void) { }
static inline unsigned long wait_task_inactive(struct task_struct *p,
					       long match_state)
{
	return 1;
}
#endif

#define next_task(p) \
	list_entry_rcu((p)->tasks.next, struct task_struct, tasks)

#define for_each_process(p) \
	for (p = &init_task ; (p = next_task(p)) != &init_task ; )

extern bool current_is_single_threaded(void);

/*
 * Careful: do_each_thread/while_each_thread is a double loop so
 *          'break' will not work as expected - use goto instead.
 */
#define do_each_thread(g, t) \
	for (g = t = &init_task ; (g = t = next_task(g)) != &init_task ; ) do

#define while_each_thread(g, t) \
	while ((t = next_thread(t)) != g)

#define __for_each_thread(signal, t)	\
	list_for_each_entry_rcu(t, &(signal)->thread_head, thread_node)

#define for_each_thread(p, t)		\
	__for_each_thread((p)->signal, t)

/* Careful: this is a double loop, 'break' won't work as expected. */
#define for_each_process_thread(p, t)	\
	for_each_process(p) for_each_thread(p, t)
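/*
 * Illustrative sketch: the iterators above walk RCU-protected lists, so
 * hold rcu_read_lock() (or tasklist_lock) across the walk.
 *
 *	rcu_read_lock();
 *	for_each_process_thread(p, t)
 *		nr++;
 *	rcu_read_unlock();
 */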

static inline int get_nr_threads(struct task_struct *tsk)
{
	return tsk->signal->nr_threads;
}

static inline bool thread_group_leader(struct task_struct *p)
{
	return p->exit_signal >= 0;
}

/* Due to the insanities of de_thread it is possible for a process
 * to have the pid of the thread group leader without actually being
 * the thread group leader.  For iteration through the pids in proc
 * all we care about is that we have a task with the appropriate
 * pid, we don't actually care if we have the right task.
 */
static inline bool has_group_leader_pid(struct task_struct *p)
{
	return task_pid(p) == p->signal->leader_pid;
}

static inline
bool same_thread_group(struct task_struct *p1, struct task_struct *p2)
{
	return p1->signal == p2->signal;
}

static inline struct task_struct *next_thread(const struct task_struct *p)
{
	return list_entry_rcu(p->thread_group.next,
			      struct task_struct, thread_group);
}

static inline int thread_group_empty(struct task_struct *p)
{
	return list_empty(&p->thread_group);
}
}

#define delay_group_leader(p) \
		(thread_group_leader(p) && !thread_group_empty(p))

/*
 * Protects ->fs, ->files, ->mm, ->group_info, ->comm, keyring
 * subscriptions and synchronises with wait4().  Also used in procfs.  Also
 * pins the final release of task.io_context.  Also protects ->cpuset and
 * ->cgroup.subsys[]. And ->vfork_done.
 *
 * Nests both inside and outside of read_lock(&tasklist_lock).
 * It must not be nested with write_lock_irq(&tasklist_lock),
 * neither inside nor outside.
 */
static inline void task_lock(struct task_struct *p)
{
	spin_lock(&p->alloc_lock);
}

static inline void task_unlock(struct task_struct *p)
{
	spin_unlock(&p->alloc_lock);
}
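/*
 * Illustrative sketch: taking a stable look at another task's ->mm, one
 * of the fields task_lock() protects (see get_task_mm()).
 *
 *	task_lock(p);
 *	mm = p->mm;
 *	if (mm)
 *		atomic_inc(&mm->mm_users);
 *	task_unlock(p);
 */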

extern struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
							unsigned long *flags);

static inline struct sighand_struct *lock_task_sighand(struct task_struct *tsk,
						       unsigned long *flags)
{
	struct sighand_struct *ret;

	ret = __lock_task_sighand(tsk, flags);
	(void)__cond_lock(&tsk->sighand->siglock, ret);
	return ret;
}

static inline void unlock_task_sighand(struct task_struct *tsk,
						unsigned long *flags)
{
	spin_unlock_irqrestore(&tsk->sighand->siglock, *flags);
}
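/*
 * Illustrative sketch: lock_task_sighand() fails with NULL if the task
 * is being released, so the result must be checked before unlocking.
 *
 *	unsigned long flags;
 *
 *	if (lock_task_sighand(tsk, &flags)) {
 *		... ->sighand and siglock are stable here ...
 *		unlock_task_sighand(tsk, &flags);
 *	}
 */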

#ifdef CONFIG_CGROUPS
static inline void threadgroup_change_begin(struct task_struct *tsk)
{
	down_read(&tsk->signal->group_rwsem);
}
static inline void threadgroup_change_end(struct task_struct *tsk)
{
	up_read(&tsk->signal->group_rwsem);
}

/**
 * threadgroup_lock - lock threadgroup
 * @tsk: member task of the threadgroup to lock
 *
 * Lock the threadgroup @tsk belongs to.  No new task is allowed to enter
 * and member tasks aren't allowed to exit (as indicated by PF_EXITING) or
 * change ->group_leader/pid.  This is useful for cases where the threadgroup
 * needs to stay stable across blockable operations.
 *
 * fork and exit paths explicitly call threadgroup_change_{begin|end}() for
 * synchronization.  While held, no new task will be added to threadgroup
 * and no existing live task will have its PF_EXITING set.
 *
 * de_thread() does threadgroup_change_{begin|end}() when a non-leader
 * sub-thread becomes a new leader.
 */
static inline void threadgroup_lock(struct task_struct *tsk)
{
	down_write(&tsk->signal->group_rwsem);
}

/**
 * threadgroup_unlock - unlock threadgroup
 * @tsk: member task of the threadgroup to unlock
 *
 * Reverse threadgroup_lock().
 */
static inline void threadgroup_unlock(struct task_struct *tsk)
{
	up_write(&tsk->signal->group_rwsem);
}
#else
static inline void threadgroup_change_begin(struct task_struct *tsk) {}
static inline void threadgroup_change_end(struct task_struct *tsk) {}
static inline void threadgroup_lock(struct task_struct *tsk) {}
static inline void threadgroup_unlock(struct task_struct *tsk) {}
#endif
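/*
 * Illustrative sketch: an operation that needs a stable set of threads
 * (as cgroup attach does) brackets the walk with the heavy lock;
 * migrate_one_task() is a hypothetical helper.
 *
 *	threadgroup_lock(leader);
 *	for_each_thread(leader, t)
 *		migrate_one_task(t);
 *	threadgroup_unlock(leader);
 */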

#ifndef __HAVE_THREAD_FUNCTIONS

#define task_thread_info(task)	((struct thread_info *)(task)->stack)
#define task_stack_page(task)	((task)->stack)

static inline void setup_thread_stack(struct task_struct *p, struct task_struct *org)
{
	*task_thread_info(p) = *task_thread_info(org);
	task_thread_info(p)->task = p;
}

static inline unsigned long *end_of_stack(struct task_struct *p)
{
	return (unsigned long *)(task_thread_info(p) + 1);
}

#endif

static inline int object_is_on_stack(void *obj)
{
	void *stack = task_stack_page(current);

	return (obj >= stack) && (obj < (stack + THREAD_SIZE));
}
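/*
 * Illustrative sketch: only current's stack pages count, so a local
 * tests true while a heap object tests false.
 *
 *	int local;
 *	WARN_ON(!object_is_on_stack(&local));
 */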

extern void thread_info_cache_init(void);

#ifdef CONFIG_DEBUG_STACK_USAGE
static inline unsigned long stack_not_used(struct task_struct *p)
{
	unsigned long *n = end_of_stack(p);

	do { 	/* Skip over canary */
		n++;
	} while (!*n);

	return (unsigned long)n - (unsigned long)end_of_stack(p);
}
#endif

/* set thread flags in other task's structures
 * - see asm/thread_info.h for TIF_xxxx flags available
 */
static inline void set_tsk_thread_flag(struct task_struct *tsk, int flag)
{
	set_ti_thread_flag(task_thread_info(tsk), flag);
}

static inline void clear_tsk_thread_flag(struct task_struct *tsk, int flag)
{
	clear_ti_thread_flag(task_thread_info(tsk), flag);
}

static inline int test_and_set_tsk_thread_flag(struct task_struct *tsk, int flag)
{
	return test_and_set_ti_thread_flag(task_thread_info(tsk), flag);
}

static inline int test_and_clear_tsk_thread_flag(struct task_struct *tsk, int flag)
{
	return test_and_clear_ti_thread_flag(task_thread_info(tsk), flag);
}

static inline int test_tsk_thread_flag(struct task_struct *tsk, int flag)
{
	return test_ti_thread_flag(task_thread_info(tsk), flag);
}

static inline void set_tsk_need_resched(struct task_struct *tsk)
{
	set_tsk_thread_flag(tsk,TIF_NEED_RESCHED);
}

static inline void clear_tsk_need_resched(struct task_struct *tsk)
{
	clear_tsk_thread_flag(tsk,TIF_NEED_RESCHED);
}

static inline int test_tsk_need_resched(struct task_struct *tsk)
{
	return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED));
}

static inline int restart_syscall(void)
{
	set_tsk_thread_flag(current, TIF_SIGPENDING);
	return -ERESTARTNOINTR;
}

static inline int signal_pending(struct task_struct *p)
{
	return unlikely(test_tsk_thread_flag(p,TIF_SIGPENDING));
}

static inline int __fatal_signal_pending(struct task_struct *p)
{
	return unlikely(sigismember(&p->pending.signal, SIGKILL));
}

static inline int fatal_signal_pending(struct task_struct *p)
{
	return signal_pending(p) && __fatal_signal_pending(p);
}

static inline int signal_pending_state(long state, struct task_struct *p)
{
	if (!(state & (TASK_INTERRUPTIBLE | TASK_WAKEKILL)))
		return 0;
	if (!signal_pending(p))
		return 0;

	return (state & TASK_INTERRUPTIBLE) || __fatal_signal_pending(p);
}

/*
 * cond_resched() and cond_resched_lock(): latency reduction via
 * explicit rescheduling in places that are safe. The return
 * value indicates whether a reschedule was done in fact.
 * cond_resched_lock() will drop the spinlock before scheduling,
 * cond_resched_softirq() will enable bhs before scheduling.
 */
extern int _cond_resched(void);

#define cond_resched() ({			\
	__might_sleep(__FILE__, __LINE__, 0);	\
	_cond_resched();			\
})

extern int __cond_resched_lock(spinlock_t *lock);

#ifdef CONFIG_PREEMPT_COUNT
#define PREEMPT_LOCK_OFFSET	PREEMPT_OFFSET
#else
#define PREEMPT_LOCK_OFFSET	0
#endif

#define cond_resched_lock(lock) ({				\
	__might_sleep(__FILE__, __LINE__, PREEMPT_LOCK_OFFSET);	\
	__cond_resched_lock(lock);				\
})
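/*
 * Illustrative sketch: bounding latency while scanning a long list under
 * a spinlock; cond_resched_lock() drops and retakes "lock" if a
 * reschedule is due, so the list must be revalidated afterwards.
 *
 *	spin_lock(&lock);
 *	list_for_each_entry(e, &head, node) {
 *		process(e);			// hypothetical helper
 *		if (cond_resched_lock(&lock))
 *			goto restart;		// list may have changed
 *	}
 *	spin_unlock(&lock);
 */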

extern int __cond_resched_softirq(void);

#define cond_resched_softirq() ({					\
	__might_sleep(__FILE__, __LINE__, SOFTIRQ_DISABLE_OFFSET);	\
	__cond_resched_softirq();					\
})

static inline void cond_resched_rcu(void)
{
#if defined(CONFIG_DEBUG_ATOMIC_SLEEP) || !defined(CONFIG_PREEMPT_RCU)
	rcu_read_unlock();
	cond_resched();
	rcu_read_lock();
#endif
}

/*
 * Does a critical section need to be broken due to another
 * task waiting?: (technically does not depend on CONFIG_PREEMPT,
 * but a general need for low latency)
 */
static inline int spin_needbreak(spinlock_t *lock)
{
#ifdef CONFIG_PREEMPT
	return spin_is_contended(lock);
#else
	return 0;
#endif
}

/*
 * Idle thread specific functions to determine the need_resched
 * polling state. We have two versions, one based on TS_POLLING in
 * thread_info.status and one based on TIF_POLLING_NRFLAG in
 * thread_info.flags
 */
#ifdef TS_POLLING
static inline int tsk_is_polling(struct task_struct *p)
{
	return task_thread_info(p)->status & TS_POLLING;
}
static inline void __current_set_polling(void)
{
	current_thread_info()->status |= TS_POLLING;
}

static inline bool __must_check current_set_polling_and_test(void)
{
	__current_set_polling();

	/*
	 * Polling state must be visible before we test NEED_RESCHED,
	 * paired by resched_task()
	 */
	smp_mb();

	return unlikely(tif_need_resched());
}

static inline void __current_clr_polling(void)
{
	current_thread_info()->status &= ~TS_POLLING;
}

static inline bool __must_check current_clr_polling_and_test(void)
{
	__current_clr_polling();

	/*
	 * Polling state must be visible before we test NEED_RESCHED,
	 * paired by resched_task()
	 */
	smp_mb();

	return unlikely(tif_need_resched());
}
#elif defined(TIF_POLLING_NRFLAG)
static inline int tsk_is_polling(struct task_struct *p)
{
	return test_tsk_thread_flag(p, TIF_POLLING_NRFLAG);
}

static inline void __current_set_polling(void)
{
	set_thread_flag(TIF_POLLING_NRFLAG);
}

static inline bool __must_check current_set_polling_and_test(void)
{
	__current_set_polling();

	/*
	 * Polling state must be visible before we test NEED_RESCHED,
	 * paired by resched_task()
	 *
	 * XXX: assumes set/clear bit are identical barrier wise.
	 */
	smp_mb__after_clear_bit();

	return unlikely(tif_need_resched());
}

static inline void __current_clr_polling(void)
{
	clear_thread_flag(TIF_POLLING_NRFLAG);
}

static inline bool __must_check current_clr_polling_and_test(void)
{
	__current_clr_polling();

	/*
	 * Polling state must be visible before we test NEED_RESCHED,
	 * paired by resched_task()
	 */
	smp_mb__after_clear_bit();

	return unlikely(tif_need_resched());
}

#else
static inline int tsk_is_polling(struct task_struct *p) { return 0; }
static inline void __current_set_polling(void) { }
static inline void __current_clr_polling(void) { }

static inline bool __must_check current_set_polling_and_test(void)
{
	return unlikely(tif_need_resched());
}
static inline bool __must_check current_clr_polling_and_test(void)
{
	return unlikely(tif_need_resched());
}
#endif

static inline void current_clr_polling(void)
{
	__current_clr_polling();

	/*
	 * Ensure we check TIF_NEED_RESCHED after we clear the polling bit.
	 * Once the bit is cleared, we'll get IPIs with every new
	 * TIF_NEED_RESCHED and the IPI handler, scheduler_ipi(), will also
	 * fold.
	 */
	smp_mb(); /* paired with resched_task() */

	preempt_fold_need_resched();
}

static __always_inline bool need_resched(void)
{
	return unlikely(tif_need_resched());
}

/*
 * Thread group CPU time accounting.
 */
void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times);
void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times);

static inline void thread_group_cputime_init(struct signal_struct *sig)
{
	raw_spin_lock_init(&sig->cputimer.lock);
}

/*
 * Reevaluate whether the task has signals pending delivery.
 * Wake the task if so.
 * This is required every time the blocked sigset_t changes.
 * callers must hold sighand->siglock.
 */
extern void recalc_sigpending_and_wake(struct task_struct *t);
extern void recalc_sigpending(void);

extern void signal_wake_up_state(struct task_struct *t, unsigned int state);

static inline void signal_wake_up(struct task_struct *t, bool resume)
{
	signal_wake_up_state(t, resume ? TASK_WAKEKILL : 0);
}
static inline void ptrace_signal_wake_up(struct task_struct *t, bool resume)
{
	signal_wake_up_state(t, resume ? __TASK_TRACED : 0);
}

/*
 * Wrappers for p->thread_info->cpu access. No-op on UP.
 */
#ifdef CONFIG_SMP

static inline unsigned int task_cpu(const struct task_struct *p)
{
	return task_thread_info(p)->cpu;
}

static inline int task_node(const struct task_struct *p)
{
	return cpu_to_node(task_cpu(p));
}

extern void set_task_cpu(struct task_struct *p, unsigned int cpu);

#else

static inline unsigned int task_cpu(const struct task_struct *p)
{
	return 0;
}

static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
{
}

#endif /* CONFIG_SMP */

extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask);
extern long sched_getaffinity(pid_t pid, struct cpumask *mask);

#ifdef CONFIG_CGROUP_SCHED
extern struct task_group root_task_group;
#endif /* CONFIG_CGROUP_SCHED */

extern int task_can_switch_user(struct user_struct *up,
					struct task_struct *tsk);

#ifdef CONFIG_TASK_XACCT
static inline void add_rchar(struct task_struct *tsk, ssize_t amt)
{
	tsk->ioac.rchar += amt;
}

static inline void add_wchar(struct task_struct *tsk, ssize_t amt)
{
	tsk->ioac.wchar += amt;
}

static inline void inc_syscr(struct task_struct *tsk)
{
	tsk->ioac.syscr++;
}

static inline void inc_syscw(struct task_struct *tsk)
{
	tsk->ioac.syscw++;
}
#else
static inline void add_rchar(struct task_struct *tsk, ssize_t amt)
{
}

static inline void add_wchar(struct task_struct *tsk, ssize_t amt)
{
}

static inline void inc_syscr(struct task_struct *tsk)
{
}

static inline void inc_syscw(struct task_struct *tsk)
{
}
#endif

#ifndef TASK_SIZE_OF
#define TASK_SIZE_OF(tsk)	TASK_SIZE
#endif

#ifdef CONFIG_MM_OWNER
extern void mm_update_next_owner(struct mm_struct *mm);
extern void mm_init_owner(struct mm_struct *mm, struct task_struct *p);
#else
static inline void mm_update_next_owner(struct mm_struct *mm)
{
}

static inline void mm_init_owner(struct mm_struct *mm, struct task_struct *p)
{
}
#endif /* CONFIG_MM_OWNER */

static inline unsigned long task_rlimit(const struct task_struct *tsk,
		unsigned int limit)
{
	return ACCESS_ONCE(tsk->signal->rlim[limit].rlim_cur);
}

static inline unsigned long task_rlimit_max(const struct task_struct *tsk,
		unsigned int limit)
{
	return ACCESS_ONCE(tsk->signal->rlim[limit].rlim_max);
}

static inline unsigned long rlimit(unsigned int limit)
{
	return task_rlimit(current, limit);
}

static inline unsigned long rlimit_max(unsigned int limit)
{
	return task_rlimit_max(current, limit);
}
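/*
 * Illustrative sketch: validating a request against current's soft
 * limit; the helpers read the rlimit words without taking any lock.
 *
 *	if (len > rlimit(RLIMIT_MEMLOCK))
 *		return -ENOMEM;
 */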

#endif