#ifndef _LINUX_SCHED_H
#define _LINUX_SCHED_H

#include <uapi/linux/sched.h>

#include <linux/sched/prio.h>

#include <linux/capability.h>
#include <linux/mutex.h>
#include <linux/plist.h>
#include <linux/mm_types_task.h>
#include <asm/ptrace.h>

#include <linux/sem.h>
#include <linux/shm.h>
#include <linux/signal.h>
#include <linux/signal_types.h>
#include <linux/pid.h>
#include <linux/seccomp.h>
#include <linux/rculist.h>
#include <linux/rtmutex.h>

#include <linux/resource.h>
#include <linux/hrtimer.h>
#include <linux/kcov.h>
#include <linux/task_io_accounting.h>
#include <linux/latencytop.h>
#include <linux/cred.h>
#include <linux/gfp.h>
#include <linux/topology.h>
#include <linux/magic.h>
#include <linux/cgroup-defs.h>

#include <asm/current.h>

/* task_struct member predeclarations: */
struct audit_context;
struct autogroup;
struct backing_dev_info;
struct bio_list;
struct blk_plug;
struct cfs_rq;
struct filename;
struct fs_struct;
struct futex_pi_state;
struct io_context;
struct mempolicy;
struct nameidata;
struct nsproxy;
struct perf_event_context;
struct pid_namespace;
struct pipe_inode_info;
struct rcu_node;
struct reclaim_state;
struct robust_list_head;
struct sched_attr;
struct sched_param;
struct seq_file;
struct sighand_struct;
struct signal_struct;
struct task_delay_info;
struct task_group;
struct task_struct;
struct uts_namespace;

/*
 * Task state bitmask. NOTE! These bits are also
 * encoded in fs/proc/array.c: get_task_state().
 *
 * We have two separate sets of flags: task->state
 * is about runnability, while task->exit_state is
 * about the task exiting. Confusing, but this way
 * modifying one set can't modify the other one by
 * mistake.
 */
#define TASK_RUNNING		0
#define TASK_INTERRUPTIBLE	1
#define TASK_UNINTERRUPTIBLE	2
#define __TASK_STOPPED		4
#define __TASK_TRACED		8
/* in tsk->exit_state */
#define EXIT_DEAD		16
#define EXIT_ZOMBIE		32
#define EXIT_TRACE		(EXIT_ZOMBIE | EXIT_DEAD)
/* in tsk->state again */
#define TASK_DEAD		64
#define TASK_WAKEKILL		128
#define TASK_WAKING		256
#define TASK_PARKED		512
#define TASK_NOLOAD		1024
#define TASK_NEW		2048
#define TASK_STATE_MAX		4096

#define TASK_STATE_TO_CHAR_STR "RSDTtXZxKWPNn"

/* Convenience macros for the sake of set_current_state */
#define TASK_KILLABLE		(TASK_WAKEKILL | TASK_UNINTERRUPTIBLE)
#define TASK_STOPPED		(TASK_WAKEKILL | __TASK_STOPPED)
#define TASK_TRACED		(TASK_WAKEKILL | __TASK_TRACED)

#define TASK_IDLE		(TASK_UNINTERRUPTIBLE | TASK_NOLOAD)

/* Convenience macros for the sake of wake_up */
#define TASK_NORMAL		(TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE)
#define TASK_ALL		(TASK_NORMAL | __TASK_STOPPED | __TASK_TRACED)

/* get_task_state() */
#define TASK_REPORT		(TASK_RUNNING | TASK_INTERRUPTIBLE | \
				 TASK_UNINTERRUPTIBLE | __TASK_STOPPED | \
				 __TASK_TRACED | EXIT_ZOMBIE | EXIT_DEAD)

#define task_is_traced(task)	((task->state & __TASK_TRACED) != 0)
#define task_is_stopped(task)	((task->state & __TASK_STOPPED) != 0)
#define task_is_stopped_or_traced(task)	\
			((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0)
#define task_contributes_to_load(task)	\
				((task->state & TASK_UNINTERRUPTIBLE) != 0 && \
				 (task->flags & PF_FROZEN) == 0 && \
				 (task->state & TASK_NOLOAD) == 0)
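
/*
 * Illustrative sketch (not taken from this header): a minimal killable
 * wait loop built from the state bits above. A task in TASK_KILLABLE
 * sleeps like TASK_UNINTERRUPTIBLE but still reacts to fatal signals;
 * "done" stands in for whatever condition is being waited on:
 *
 *	for (;;) {
 *		set_current_state(TASK_KILLABLE);
 *		if (done || fatal_signal_pending(current))
 *			break;
 *		schedule();
 *	}
 *	__set_current_state(TASK_RUNNING);
 */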
#ifdef CONFIG_DEBUG_ATOMIC_SLEEP

#define __set_current_state(state_value)			\
	do {							\
		current->task_state_change = _THIS_IP_;		\
		current->state = (state_value);			\
	} while (0)
#define set_current_state(state_value)				\
	do {							\
		current->task_state_change = _THIS_IP_;		\
		smp_store_mb(current->state, (state_value));	\
	} while (0)

#else
/*
 * set_current_state() includes a barrier so that the write of current->state
 * is correctly serialised wrt the caller's subsequent test of whether to
 * actually sleep:
 *
 *   for (;;) {
 *	set_current_state(TASK_UNINTERRUPTIBLE);
 *	if (!need_sleep)
 *		break;
 *
 *	schedule();
 *   }
 *   __set_current_state(TASK_RUNNING);
 *
 * If the caller does not need such serialisation (because, for instance, the
 * condition test and condition change and wakeup are under the same lock) then
 * use __set_current_state().
 *
 * The above is typically ordered against the wakeup, which does:
 *
 *	need_sleep = false;
 *	wake_up_state(p, TASK_UNINTERRUPTIBLE);
 *
 * Where wake_up_state() (and all other wakeup primitives) imply enough
 * barriers to order the store of the variable against wakeup.
 *
 * Wakeup will do: if (@state & p->state) p->state = TASK_RUNNING, that is,
 * once it observes the TASK_UNINTERRUPTIBLE store the waking CPU can issue a
 * TASK_RUNNING store which can collide with __set_current_state(TASK_RUNNING).
 *
 * This is obviously fine, since they both store the exact same value.
 *
 * Also see the comments of try_to_wake_up().
 */
#define __set_current_state(state_value)		\
	do { current->state = (state_value); } while (0)
#define set_current_state(state_value)			\
	smp_store_mb(current->state, (state_value))

#endif

/* Task command name length */
#define TASK_COMM_LEN 16

extern void sched_init(void);
extern void sched_init_smp(void);

extern cpumask_var_t cpu_isolated_map;

extern int runqueue_is_locked(int cpu);

extern void cpu_init(void);
extern void trap_init(void);
extern void update_process_times(int user);
extern void scheduler_tick(void);

#define	MAX_SCHEDULE_TIMEOUT	LONG_MAX
extern signed long schedule_timeout(signed long timeout);
extern signed long schedule_timeout_interruptible(signed long timeout);
extern signed long schedule_timeout_killable(signed long timeout);
extern signed long schedule_timeout_uninterruptible(signed long timeout);
extern signed long schedule_timeout_idle(signed long timeout);
asmlinkage void schedule(void);
extern void schedule_preempt_disabled(void);

extern int __must_check io_schedule_prepare(void);
extern void io_schedule_finish(int token);
extern long io_schedule_timeout(long timeout);
extern void io_schedule(void);

/**
 * struct prev_cputime - snapshot of system and user cputime
 * @utime: time spent in user mode
 * @stime: time spent in system mode
 * @lock: protects the above two fields
 *
 * Stores previous user/system time values such that we can guarantee
 * monotonicity.
 */
struct prev_cputime {
#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
	u64 utime;
	u64 stime;
	raw_spinlock_t lock;
#endif
};

/**
 * struct task_cputime - collected CPU time counts
 * @utime:		time spent in user mode, in nanoseconds
 * @stime:		time spent in kernel mode, in nanoseconds
 * @sum_exec_runtime:	total time spent on the CPU, in nanoseconds
 *
 * This structure groups together three kinds of CPU time that are tracked for
 * threads and thread groups.  Most things considering CPU time want to group
 * these counts together and treat all three of them in parallel.
 */
struct task_cputime {
	u64 utime;
	u64 stime;
	unsigned long long sum_exec_runtime;
};

/* Alternate field names when used to cache expirations. */
#define virt_exp	utime
#define prof_exp	stime
#define sched_exp	sum_exec_runtime

#include <linux/rwsem.h>

#ifdef CONFIG_SCHED_INFO
struct sched_info {
	/* cumulative counters */
	unsigned long pcount;	      /* # of times run on this cpu */
	unsigned long long run_delay; /* time spent waiting on a runqueue */

	/* timestamps */
	unsigned long long last_arrival,/* when we last ran on a cpu */
			   last_queued;	/* when we were last queued to run */
};
#endif /* CONFIG_SCHED_INFO */

/*
 * Integer metrics need fixed point arithmetic, e.g., sched/fair
 * has a few: load, load_avg, util_avg, freq, and capacity.
 *
 * We define a basic fixed point arithmetic range, and then formalize
 * all these metrics based on that basic range.
 */
# define SCHED_FIXEDPOINT_SHIFT	10
# define SCHED_FIXEDPOINT_SCALE	(1L << SCHED_FIXEDPOINT_SHIFT)
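
/*
 * Worked example (illustrative, not taken from this header): with
 * SCHED_FIXEDPOINT_SHIFT == 10, the value 1.0 is represented as
 * SCHED_FIXEDPOINT_SCALE (1024), a 75% ratio is
 * (3 * SCHED_FIXEDPOINT_SCALE) / 4 == 768, and the product of two
 * fixed-point values must be shifted back down:
 *
 *	product = (a * b) >> SCHED_FIXEDPOINT_SHIFT;
 */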

#ifdef ARCH_HAS_PREFETCH_SWITCH_STACK
extern void prefetch_stack(struct task_struct *t);
#else
static inline void prefetch_stack(struct task_struct *t) { }
#endif

struct load_weight {
	unsigned long weight;
	u32 inv_weight;
};

/*
 * The load_avg/util_avg accumulates an infinite geometric series
 * (see __update_load_avg() in kernel/sched/fair.c).
 *
 * [load_avg definition]
 *
 *   load_avg = runnable% * scale_load_down(load)
 *
 * where runnable% is the time ratio that a sched_entity is runnable.
 * For cfs_rq, it is the aggregated load_avg of all runnable and
 * blocked sched_entities.
 *
 * load_avg may also take frequency scaling into account:
 *
 *   load_avg = runnable% * scale_load_down(load) * freq%
 *
 * where freq% is the CPU frequency normalized to the highest frequency.
 *
 * [util_avg definition]
 *
 *   util_avg = running% * SCHED_CAPACITY_SCALE
 *
 * where running% is the time ratio that a sched_entity is running on
 * a CPU. For cfs_rq, it is the aggregated util_avg of all runnable
 * and blocked sched_entities.
 *
 * util_avg may also factor frequency scaling and CPU capacity scaling:
 *
 *   util_avg = running% * SCHED_CAPACITY_SCALE * freq% * capacity%
 *
 * where freq% is the same as above, and capacity% is the CPU capacity
 * normalized to the greatest capacity (due to uarch differences, etc).
 *
 * N.B., the above ratios (runnable%, running%, freq%, and capacity%)
 * themselves are in the range of [0, 1]. To do fixed point arithmetic,
 * we therefore scale them to as large a range as necessary. This is for
 * example reflected by util_avg's SCHED_CAPACITY_SCALE.
 *
 * [Overflow issue]
 *
 * The 64-bit load_sum can have 4353082796 (=2^64/47742/88761) entities
 * with the highest load (=88761), always runnable on a single cfs_rq,
 * and should not overflow as the number already hits PID_MAX_LIMIT.
 *
 * For all other cases (including 32-bit kernels), struct load_weight's
 * weight will overflow first before we do, because:
 *
 *    Max(load_avg) <= Max(load.weight)
 *
 * Then it is the load_weight's responsibility to consider overflow
 * issues.
 */
struct sched_avg {
	u64 last_update_time, load_sum;
	u32 util_sum, period_contrib;
	unsigned long load_avg, util_avg;
};
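
/*
 * Worked example (illustrative, using the definitions in the comment
 * above): a task that is running 50% of the time on an otherwise idle CPU
 * converges towards util_avg = 0.5 * SCHED_CAPACITY_SCALE = 512, and a
 * nice-0 task (scale_load_down(load) == 1024) that is runnable 100% of
 * the time converges towards load_avg = 1024.
 */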

#ifdef CONFIG_SCHEDSTATS
struct sched_statistics {
	u64			wait_start;
	u64			wait_max;
	u64			wait_count;
	u64			wait_sum;
	u64			iowait_count;
	u64			iowait_sum;

	u64			sleep_start;
	u64			sleep_max;
	s64			sum_sleep_runtime;

	u64			block_start;
	u64			block_max;
	u64			exec_max;
	u64			slice_max;

	u64			nr_migrations_cold;
	u64			nr_failed_migrations_affine;
	u64			nr_failed_migrations_running;
	u64			nr_failed_migrations_hot;
	u64			nr_forced_migrations;

	u64			nr_wakeups;
	u64			nr_wakeups_sync;
	u64			nr_wakeups_migrate;
	u64			nr_wakeups_local;
	u64			nr_wakeups_remote;
	u64			nr_wakeups_affine;
	u64			nr_wakeups_affine_attempts;
	u64			nr_wakeups_passive;
	u64			nr_wakeups_idle;
};
#endif

struct sched_entity {
	struct load_weight	load;		/* for load-balancing */
	struct rb_node		run_node;
	struct list_head	group_node;
	unsigned int		on_rq;

	u64			exec_start;
	u64			sum_exec_runtime;
	u64			vruntime;
	u64			prev_sum_exec_runtime;

	u64			nr_migrations;

#ifdef CONFIG_SCHEDSTATS
	struct sched_statistics statistics;
#endif

#ifdef CONFIG_FAIR_GROUP_SCHED
	int			depth;
	struct sched_entity	*parent;
	/* rq on which this entity is (to be) queued: */
	struct cfs_rq		*cfs_rq;
	/* rq "owned" by this entity/group: */
	struct cfs_rq		*my_q;
#endif

#ifdef CONFIG_SMP
	/*
	 * Per entity load average tracking.
	 *
	 * Put into separate cache line so it does not
	 * collide with read-mostly values above.
	 */
	struct sched_avg	avg ____cacheline_aligned_in_smp;
#endif
};

struct sched_rt_entity {
	struct list_head run_list;
	unsigned long timeout;
	unsigned long watchdog_stamp;
	unsigned int time_slice;
	unsigned short on_rq;
	unsigned short on_list;

	struct sched_rt_entity *back;
#ifdef CONFIG_RT_GROUP_SCHED
	struct sched_rt_entity	*parent;
	/* rq on which this entity is (to be) queued: */
	struct rt_rq		*rt_rq;
	/* rq "owned" by this entity/group: */
	struct rt_rq		*my_q;
#endif
};

struct sched_dl_entity {
	struct rb_node	rb_node;

	/*
	 * Original scheduling parameters. Copied here from sched_attr
	 * during sched_setattr(), they will remain the same until
	 * the next sched_setattr().
	 */
	u64 dl_runtime;		/* maximum runtime for each instance	*/
	u64 dl_deadline;	/* relative deadline of each instance	*/
	u64 dl_period;		/* separation of two instances (period) */
	u64 dl_bw;		/* dl_runtime / dl_deadline		*/

	/*
	 * Actual scheduling parameters. Initialized with the values above,
	 * they are continuously updated during task execution. Note that
	 * the remaining runtime could be < 0 in case we are in overrun.
	 */
	s64 runtime;		/* remaining runtime for this instance	*/
	u64 deadline;		/* absolute deadline for this instance	*/
	unsigned int flags;	/* specifying the scheduler behaviour	*/

	/*
	 * Some bool flags:
	 *
	 * @dl_throttled tells if we exhausted the runtime. If so, the
	 * task has to wait for a replenishment to be performed at the
	 * next firing of dl_timer.
	 *
	 * @dl_boosted tells if we are boosted due to DI. If so we are
	 * outside bandwidth enforcement mechanism (but only until we
	 * exit the critical section);
	 *
	 * @dl_yielded tells if task gave up the cpu before consuming
	 * all its available runtime during the last job.
	 */
	int dl_throttled, dl_boosted, dl_yielded;

	/*
	 * Bandwidth enforcement timer. Each -deadline task has its
	 * own bandwidth to be enforced, thus we need one timer per task.
	 */
	struct hrtimer dl_timer;
};
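
/*
 * Illustrative parameters (example values, not taken from this header): a
 * task needing 10ms of CPU every 100ms, with each instance due 50ms after
 * its release, could be described as
 *
 *	dl_runtime  =  10 * 1000 * 1000;	(10ms in ns)
 *	dl_deadline =  50 * 1000 * 1000;	(50ms in ns)
 *	dl_period   = 100 * 1000 * 1000;	(100ms in ns)
 *
 * which satisfies the required dl_runtime <= dl_deadline <= dl_period.
 */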

union rcu_special {
	struct {
		u8 blocked;
		u8 need_qs;
		u8 exp_need_qs;
		u8 pad;	/* Otherwise the compiler can store garbage here. */
	} b; /* Bits. */
	u32 s; /* Set of bits. */
};

enum perf_event_task_context {
	perf_invalid_context = -1,
	perf_hw_context = 0,
	perf_sw_context,
	perf_nr_task_contexts,
};

struct wake_q_node {
	struct wake_q_node *next;
};

/* Track pages that require TLB flushes */
struct tlbflush_unmap_batch {
	/*
	 * Each bit set is a CPU that potentially has a TLB entry for one of
	 * the PFNs being flushed. See set_tlb_ubc_flush_pending().
	 */
	struct cpumask cpumask;

	/* True if any bit in cpumask is set */
	bool flush_required;

	/*
	 * If true then the PTE was dirty when unmapped. The entry must be
	 * flushed before IO is initiated or a stale TLB entry potentially
	 * allows an update without redirtying the page.
	 */
	bool writable;
};

struct task_struct {
#ifdef CONFIG_THREAD_INFO_IN_TASK
	/*
	 * For reasons of header soup (see current_thread_info()), this
	 * must be the first element of task_struct.
	 */
	struct thread_info thread_info;
#endif
	volatile long state;	/* -1 unrunnable, 0 runnable, >0 stopped */
	void *stack;
	atomic_t usage;
	unsigned int flags;	/* per process flags, defined below */
	unsigned int ptrace;

#ifdef CONFIG_SMP
	struct llist_node wake_entry;
	int on_cpu;
#ifdef CONFIG_THREAD_INFO_IN_TASK
	unsigned int cpu;	/* current CPU */
#endif
	unsigned int wakee_flips;
	unsigned long wakee_flip_decay_ts;
	struct task_struct *last_wakee;

	int wake_cpu;
#endif
	int on_rq;

	int prio, static_prio, normal_prio;
	unsigned int rt_priority;
	const struct sched_class *sched_class;
	struct sched_entity se;
	struct sched_rt_entity rt;
#ifdef CONFIG_CGROUP_SCHED
	struct task_group *sched_task_group;
#endif
	struct sched_dl_entity dl;

#ifdef CONFIG_PREEMPT_NOTIFIERS
	/* list of struct preempt_notifier: */
	struct hlist_head preempt_notifiers;
#endif

#ifdef CONFIG_BLK_DEV_IO_TRACE
	unsigned int btrace_seq;
#endif

	unsigned int policy;
	int nr_cpus_allowed;
	cpumask_t cpus_allowed;

#ifdef CONFIG_PREEMPT_RCU
	int rcu_read_lock_nesting;
	union rcu_special rcu_read_unlock_special;
	struct list_head rcu_node_entry;
	struct rcu_node *rcu_blocked_node;
#endif /* #ifdef CONFIG_PREEMPT_RCU */
#ifdef CONFIG_TASKS_RCU
	unsigned long rcu_tasks_nvcsw;
	bool rcu_tasks_holdout;
	struct list_head rcu_tasks_holdout_list;
	int rcu_tasks_idle_cpu;
#endif /* #ifdef CONFIG_TASKS_RCU */

#ifdef CONFIG_SCHED_INFO
	struct sched_info sched_info;
#endif

	struct list_head tasks;
#ifdef CONFIG_SMP
	struct plist_node pushable_tasks;
	struct rb_node pushable_dl_tasks;
#endif

	struct mm_struct *mm, *active_mm;

	/* Per-thread vma caching: */
	struct vmacache vmacache;

#if defined(SPLIT_RSS_COUNTING)
	struct task_rss_stat	rss_stat;
#endif
/* task state */
	int exit_state;
	int exit_code, exit_signal;
	int pdeath_signal;  /*  The signal sent when the parent dies  */
	unsigned long jobctl;	/* JOBCTL_*, siglock protected */

	/* Used for emulating ABI behavior of previous Linux versions */
	unsigned int personality;

	/* scheduler bits, serialized by scheduler locks */
	unsigned sched_reset_on_fork:1;
	unsigned sched_contributes_to_load:1;
	unsigned sched_migrated:1;
	unsigned sched_remote_wakeup:1;
	unsigned :0; /* force alignment to the next boundary */

	/* unserialized, strictly 'current' */
	unsigned in_execve:1; /* bit to tell LSMs we're in execve */
	unsigned in_iowait:1;
#if !defined(TIF_RESTORE_SIGMASK)
	unsigned restore_sigmask:1;
#endif
#ifdef CONFIG_MEMCG
	unsigned memcg_may_oom:1;
#ifndef CONFIG_SLOB
	unsigned memcg_kmem_skip_account:1;
#endif
#endif
#ifdef CONFIG_COMPAT_BRK
	unsigned brk_randomized:1;
#endif

	unsigned long atomic_flags; /* Flags needing atomic access. */

	struct restart_block restart_block;

	pid_t pid;
	pid_t tgid;

#ifdef CONFIG_CC_STACKPROTECTOR
	/* Canary value for the -fstack-protector gcc feature */
	unsigned long stack_canary;
#endif
	/*
	 * pointers to (original) parent process, youngest child, younger sibling,
	 * older sibling, respectively.  (p->father can be replaced with
	 * p->real_parent->pid)
	 */
	struct task_struct __rcu *real_parent; /* real parent process */
	struct task_struct __rcu *parent; /* recipient of SIGCHLD, wait4() reports */
	/*
	 * children/sibling forms the list of my natural children
	 */
	struct list_head children;	/* list of my children */
	struct list_head sibling;	/* linkage in my parent's children list */
	struct task_struct *group_leader;	/* threadgroup leader */

	/*
	 * ptraced is the list of tasks this task is using ptrace on.
	 * This includes both natural children and PTRACE_ATTACH targets.
	 * p->ptrace_entry is p's link on the p->parent->ptraced list.
	 */
	struct list_head ptraced;
	struct list_head ptrace_entry;

	/* PID/PID hash table linkage. */
	struct pid_link pids[PIDTYPE_MAX];
	struct list_head thread_group;
	struct list_head thread_node;

	struct completion *vfork_done;		/* for vfork() */
	int __user *set_child_tid;		/* CLONE_CHILD_SETTID */
	int __user *clear_child_tid;		/* CLONE_CHILD_CLEARTID */

	u64 utime, stime;
#ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
	u64 utimescaled, stimescaled;
#endif
	u64 gtime;
	struct prev_cputime prev_cputime;
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
	seqcount_t vtime_seqcount;
	unsigned long long vtime_snap;
	enum {
		/* Task is sleeping or running in a CPU with VTIME inactive */
		VTIME_INACTIVE = 0,
		/* Task runs in userspace in a CPU with VTIME active */
		VTIME_USER,
		/* Task runs in kernelspace in a CPU with VTIME active */
		VTIME_SYS,
	} vtime_snap_whence;
#endif

#ifdef CONFIG_NO_HZ_FULL
	atomic_t tick_dep_mask;
#endif
	unsigned long nvcsw, nivcsw; /* context switch counts */
	u64 start_time;		/* monotonic time in nsec */
	u64 real_start_time;	/* boot based time in nsec */
/* mm fault and swap info: this can arguably be seen as either mm-specific or thread-specific */
	unsigned long min_flt, maj_flt;

#ifdef CONFIG_POSIX_TIMERS
	struct task_cputime cputime_expires;
	struct list_head cpu_timers[3];
#endif

/* process credentials */
	const struct cred __rcu *ptracer_cred; /* Tracer's credentials at attach */
	const struct cred __rcu *real_cred; /* objective and real subjective task
					 * credentials (COW) */
	const struct cred __rcu *cred;	/* effective (overridable) subjective task
					 * credentials (COW) */
	char comm[TASK_COMM_LEN]; /* executable name excluding path
				     - access with [gs]et_task_comm (which lock
				       it with task_lock())
				     - initialized normally by setup_new_exec */
/* file system info */
	struct nameidata *nameidata;
#ifdef CONFIG_SYSVIPC
/* ipc stuff */
	struct sysv_sem sysvsem;
	struct sysv_shm sysvshm;
#endif
#ifdef CONFIG_DETECT_HUNG_TASK
/* hung task detection */
	unsigned long last_switch_count;
#endif
/* filesystem information */
	struct fs_struct *fs;
/* open file information */
	struct files_struct *files;
/* namespaces */
	struct nsproxy *nsproxy;
/* signal handlers */
	struct signal_struct *signal;
	struct sighand_struct *sighand;

	sigset_t blocked, real_blocked;
	sigset_t saved_sigmask;	/* restored if set_restore_sigmask() was used */
	struct sigpending pending;

	unsigned long sas_ss_sp;
	size_t sas_ss_size;
	unsigned sas_ss_flags;

	struct callback_head *task_works;

	struct audit_context *audit_context;
#ifdef CONFIG_AUDITSYSCALL
	kuid_t loginuid;
	unsigned int sessionid;
#endif
	struct seccomp seccomp;

/* Thread group tracking */
   	u32 parent_exec_id;
   	u32 self_exec_id;
/* Protection of (de-)allocation: mm, files, fs, tty, keyrings, mems_allowed,
 * mempolicy */
	spinlock_t alloc_lock;

	/* Protection of the PI data structures: */
	raw_spinlock_t pi_lock;

	struct wake_q_node wake_q;

#ifdef CONFIG_RT_MUTEXES
	/* PI waiters blocked on a rt_mutex held by this task */
	struct rb_root pi_waiters;
	struct rb_node *pi_waiters_leftmost;
	/* Deadlock detection and priority inheritance handling */
	struct rt_mutex_waiter *pi_blocked_on;
#endif

#ifdef CONFIG_DEBUG_MUTEXES
	/* mutex deadlock detection */
	struct mutex_waiter *blocked_on;
#endif
#ifdef CONFIG_TRACE_IRQFLAGS
	unsigned int irq_events;
	unsigned long hardirq_enable_ip;
	unsigned long hardirq_disable_ip;
	unsigned int hardirq_enable_event;
	unsigned int hardirq_disable_event;
	int hardirqs_enabled;
	int hardirq_context;
	unsigned long softirq_disable_ip;
	unsigned long softirq_enable_ip;
	unsigned int softirq_disable_event;
	unsigned int softirq_enable_event;
	int softirqs_enabled;
	int softirq_context;
#endif
#ifdef CONFIG_LOCKDEP
# define MAX_LOCK_DEPTH 48UL
	u64 curr_chain_key;
	int lockdep_depth;
	unsigned int lockdep_recursion;
	struct held_lock held_locks[MAX_LOCK_DEPTH];
	gfp_t lockdep_reclaim_gfp;
#endif
#ifdef CONFIG_UBSAN
	unsigned int in_ubsan;
#endif

/* journalling filesystem info */
	void *journal_info;

/* stacked block device info */
	struct bio_list *bio_list;

#ifdef CONFIG_BLOCK
/* stack plugging */
	struct blk_plug *plug;
#endif

/* VM state */
	struct reclaim_state *reclaim_state;

	struct backing_dev_info *backing_dev_info;

	struct io_context *io_context;

	unsigned long ptrace_message;
	siginfo_t *last_siginfo; /* For ptrace use.  */
	struct task_io_accounting ioac;
#if defined(CONFIG_TASK_XACCT)
	u64 acct_rss_mem1;	/* accumulated rss usage */
	u64 acct_vm_mem1;	/* accumulated virtual memory usage */
	u64 acct_timexpd;	/* stime + utime since last update */
#endif
#ifdef CONFIG_CPUSETS
	nodemask_t mems_allowed;	/* Protected by alloc_lock */
	seqcount_t mems_allowed_seq;	/* Sequence no to catch updates */
	int cpuset_mem_spread_rotor;
	int cpuset_slab_spread_rotor;
#endif
#ifdef CONFIG_CGROUPS
	/* Control Group info protected by css_set_lock */
	struct css_set __rcu *cgroups;
	/* cg_list protected by css_set_lock and tsk->alloc_lock */
	struct list_head cg_list;
#endif
#ifdef CONFIG_INTEL_RDT_A
	int closid;
#endif
#ifdef CONFIG_FUTEX
	struct robust_list_head __user *robust_list;
#ifdef CONFIG_COMPAT
	struct compat_robust_list_head __user *compat_robust_list;
#endif
	struct list_head pi_state_list;
	struct futex_pi_state *pi_state_cache;
#endif
#ifdef CONFIG_PERF_EVENTS
	struct perf_event_context *perf_event_ctxp[perf_nr_task_contexts];
	struct mutex perf_event_mutex;
	struct list_head perf_event_list;
#endif
#ifdef CONFIG_DEBUG_PREEMPT
	unsigned long preempt_disable_ip;
#endif
#ifdef CONFIG_NUMA
	struct mempolicy *mempolicy;	/* Protected by alloc_lock */
	short il_next;
	short pref_node_fork;
#endif
#ifdef CONFIG_NUMA_BALANCING
	int numa_scan_seq;
	unsigned int numa_scan_period;
	unsigned int numa_scan_period_max;
	int numa_preferred_nid;
	unsigned long numa_migrate_retry;
	u64 node_stamp;			/* migration stamp  */
	u64 last_task_numa_placement;
	u64 last_sum_exec_runtime;
	struct callback_head numa_work;

	struct list_head numa_entry;
	struct numa_group *numa_group;

	/*
	 * numa_faults is an array split into four regions:
	 * faults_memory, faults_cpu, faults_memory_buffer, faults_cpu_buffer
	 * in this precise order.
	 *
	 * faults_memory: Exponential decaying average of faults on a per-node
	 * basis. Scheduling placement decisions are made based on these
	 * counts. The values remain static for the duration of a PTE scan.
	 * faults_cpu: Track the nodes the process was running on when a NUMA
	 * hinting fault was incurred.
	 * faults_memory_buffer and faults_cpu_buffer: Record faults per node
	 * during the current scan window. When the scan completes, the counts
	 * in faults_memory and faults_cpu decay and these values are copied.
	 */
	unsigned long *numa_faults;
	unsigned long total_numa_faults;

	/*
	 * numa_faults_locality tracks if faults recorded during the last
	 * scan window were remote/local or failed to migrate. The task scan
	 * period is adapted based on the locality of the faults with different
	 * weights depending on whether they were shared or private faults
	 */
	unsigned long numa_faults_locality[3];

	unsigned long numa_pages_migrated;
#endif /* CONFIG_NUMA_BALANCING */

#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
	struct tlbflush_unmap_batch tlb_ubc;
#endif

	struct rcu_head rcu;

	/*
	 * cache last used pipe for splice
	 */
	struct pipe_inode_info *splice_pipe;

	struct page_frag task_frag;

#ifdef CONFIG_TASK_DELAY_ACCT
	struct task_delay_info		*delays;
#endif

#ifdef CONFIG_FAULT_INJECTION
	int make_it_fail;
#endif
	/*
	 * when (nr_dirtied >= nr_dirtied_pause), it's time to call
	 * balance_dirty_pages() for some dirty throttling pause
	 */
	int nr_dirtied;
	int nr_dirtied_pause;
	unsigned long dirty_paused_when; /* start of a write-and-pause period */

#ifdef CONFIG_LATENCYTOP
	int latency_record_count;
	struct latency_record latency_record[LT_SAVECOUNT];
#endif
	/*
	 * time slack values; these are used to round up poll() and
	 * select() etc timeout values. These are in nanoseconds.
	 */
	u64 timer_slack_ns;
	u64 default_timer_slack_ns;

#ifdef CONFIG_KASAN
	unsigned int kasan_depth;
#endif
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	/* Index of current stored address in ret_stack */
	int curr_ret_stack;
	/* Stack of return addresses for return function tracing */
	struct ftrace_ret_stack	*ret_stack;
	/* time stamp for last schedule */
	unsigned long long ftrace_timestamp;
	/*
	 * Number of functions that haven't been traced
	 * because of depth overrun.
	 */
	atomic_t trace_overrun;
	/* Pause for the tracing */
	atomic_t tracing_graph_pause;
#endif
#ifdef CONFIG_TRACING
	/* state flags for use by tracers */
	unsigned long trace;
	/* bitmask and counter of trace recursion */
	unsigned long trace_recursion;
#endif /* CONFIG_TRACING */
#ifdef CONFIG_KCOV
	/* Coverage collection mode enabled for this task (0 if disabled). */
	enum kcov_mode kcov_mode;
	/* Size of the kcov_area. */
	unsigned	kcov_size;
	/* Buffer for coverage collection. */
	void		*kcov_area;
	/* kcov descriptor wired with this task or NULL. */
	struct kcov	*kcov;
#endif
#ifdef CONFIG_MEMCG
	struct mem_cgroup *memcg_in_oom;
	gfp_t memcg_oom_gfp_mask;
	int memcg_oom_order;

	/* number of pages to reclaim on returning to userland */
	unsigned int memcg_nr_pages_over_high;
#endif
#ifdef CONFIG_UPROBES
	struct uprobe_task *utask;
#endif
#if defined(CONFIG_BCACHE) || defined(CONFIG_BCACHE_MODULE)
	unsigned int	sequential_io;
	unsigned int	sequential_io_avg;
#endif
#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
	unsigned long	task_state_change;
#endif
	int pagefault_disabled;
#ifdef CONFIG_MMU
	struct task_struct *oom_reaper_list;
#endif
#ifdef CONFIG_VMAP_STACK
	struct vm_struct *stack_vm_area;
#endif
#ifdef CONFIG_THREAD_INFO_IN_TASK
	/* A live task holds one reference. */
	atomic_t stack_refcount;
#endif
/* CPU-specific state of this task */
	struct thread_struct thread;
/*
 * WARNING: on x86, 'thread_struct' contains a variable-sized
 * structure.  It *MUST* be at the end of 'task_struct'.
 *
 * Do not put anything below here!
 */
};

static inline struct pid *task_pid(struct task_struct *task)
{
	return task->pids[PIDTYPE_PID].pid;
}

static inline struct pid *task_tgid(struct task_struct *task)
{
	return task->group_leader->pids[PIDTYPE_PID].pid;
}

/*
 * Without tasklist or rcu lock it is not safe to dereference
 * the result of task_pgrp/task_session even if task == current,
 * we can race with another thread doing sys_setsid/sys_setpgid.
 */
static inline struct pid *task_pgrp(struct task_struct *task)
{
	return task->group_leader->pids[PIDTYPE_PGID].pid;
}

static inline struct pid *task_session(struct task_struct *task)
{
	return task->group_leader->pids[PIDTYPE_SID].pid;
}

/*
 * the helpers to get the task's different pids as they are seen
 * from various namespaces
 *
 * task_xid_nr()     : global id, i.e. the id seen from the init namespace;
 * task_xid_vnr()    : virtual id, i.e. the id seen from the pid namespace of
 *                     current.
 * task_xid_nr_ns()  : id seen from the ns specified;
 *
 * set_task_vxid()   : assigns a virtual id to a task;
 *
 * see also pid_nr() etc in include/linux/pid.h
 */
pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type,
			struct pid_namespace *ns);

static inline pid_t task_pid_nr(struct task_struct *tsk)
{
	return tsk->pid;
}

static inline pid_t task_pid_nr_ns(struct task_struct *tsk,
					struct pid_namespace *ns)
{
	return __task_pid_nr_ns(tsk, PIDTYPE_PID, ns);
}

static inline pid_t task_pid_vnr(struct task_struct *tsk)
{
	return __task_pid_nr_ns(tsk, PIDTYPE_PID, NULL);
}
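
/*
 * Example (illustrative): the global id and the id seen from the task's
 * own pid namespace can differ, e.g. for a task inside a container:
 *
 *	pr_info("global pid %d, in-ns pid %d\n",
 *		task_pid_nr(tsk), task_pid_vnr(tsk));
 */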


static inline pid_t task_tgid_nr(struct task_struct *tsk)
{
	return tsk->tgid;
}

pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns);

static inline pid_t task_tgid_vnr(struct task_struct *tsk)
{
	return pid_vnr(task_tgid(tsk));
}


static inline int pid_alive(const struct task_struct *p);
static inline pid_t task_ppid_nr_ns(const struct task_struct *tsk, struct pid_namespace *ns)
{
	pid_t pid = 0;

	rcu_read_lock();
	if (pid_alive(tsk))
		pid = task_tgid_nr_ns(rcu_dereference(tsk->real_parent), ns);
	rcu_read_unlock();

	return pid;
}

static inline pid_t task_ppid_nr(const struct task_struct *tsk)
{
	return task_ppid_nr_ns(tsk, &init_pid_ns);
}

static inline pid_t task_pgrp_nr_ns(struct task_struct *tsk,
					struct pid_namespace *ns)
{
	return __task_pid_nr_ns(tsk, PIDTYPE_PGID, ns);
}

static inline pid_t task_pgrp_vnr(struct task_struct *tsk)
{
	return __task_pid_nr_ns(tsk, PIDTYPE_PGID, NULL);
}


static inline pid_t task_session_nr_ns(struct task_struct *tsk,
					struct pid_namespace *ns)
{
	return __task_pid_nr_ns(tsk, PIDTYPE_SID, ns);
}

static inline pid_t task_session_vnr(struct task_struct *tsk)
{
	return __task_pid_nr_ns(tsk, PIDTYPE_SID, NULL);
}

/* obsolete, do not use */
static inline pid_t task_pgrp_nr(struct task_struct *tsk)
{
	return task_pgrp_nr_ns(tsk, &init_pid_ns);
}

/**
 * pid_alive - check that a task structure is not stale
 * @p: Task structure to be checked.
 *
 * Test if a process is not yet dead (at most zombie state)
 * If pid_alive fails, then pointers within the task structure
 * can be stale and must not be dereferenced.
 *
 * Return: 1 if the process is alive. 0 otherwise.
 */
static inline int pid_alive(const struct task_struct *p)
{
	return p->pids[PIDTYPE_PID].pid != NULL;
}

/**
 * is_global_init - check if a task structure is init. Since init
 * is free to have sub-threads, we need to check tgid.
 * @tsk: Task structure to be checked.
 *
 * Check if a task structure is the first user space task the kernel created.
 *
 * Return: 1 if the task structure is init. 0 otherwise.
 */
static inline int is_global_init(struct task_struct *tsk)
{
	return task_tgid_nr(tsk) == 1;
}

extern struct pid *cad_pid;

extern void free_task(struct task_struct *tsk);
#define get_task_struct(tsk) do { atomic_inc(&(tsk)->usage); } while(0)

extern void __put_task_struct(struct task_struct *t);

static inline void put_task_struct(struct task_struct *t)
{
	if (atomic_dec_and_test(&t->usage))
		__put_task_struct(t);
}
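
/*
 * Illustrative lifetime rule (sketch, not taken from this header): take a
 * reference before publishing a task pointer to code that may outlive the
 * task, and drop it when done:
 *
 *	get_task_struct(p);
 *	...
 *	put_task_struct(p);
 */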

struct task_struct *task_rcu_dereference(struct task_struct **ptask);
struct task_struct *try_get_task_struct(struct task_struct **ptask);

/*
 * Per process flags
 */
#define PF_IDLE		0x00000002	/* I am an IDLE thread */
#define PF_EXITING	0x00000004	/* getting shut down */
#define PF_EXITPIDONE	0x00000008	/* pi exit done on shut down */
#define PF_VCPU		0x00000010	/* I'm a virtual CPU */
#define PF_WQ_WORKER	0x00000020	/* I'm a workqueue worker */
#define PF_FORKNOEXEC	0x00000040	/* forked but didn't exec */
#define PF_MCE_PROCESS  0x00000080      /* process policy on mce errors */
#define PF_SUPERPRIV	0x00000100	/* used super-user privileges */
#define PF_DUMPCORE	0x00000200	/* dumped core */
#define PF_SIGNALED	0x00000400	/* killed by a signal */
#define PF_MEMALLOC	0x00000800	/* Allocating memory */
#define PF_NPROC_EXCEEDED 0x00001000	/* set_user noticed that RLIMIT_NPROC was exceeded */
#define PF_USED_MATH	0x00002000	/* if unset the fpu must be initialized before use */
#define PF_USED_ASYNC	0x00004000	/* used async_schedule*(), used by module init */
#define PF_NOFREEZE	0x00008000	/* this thread should not be frozen */
#define PF_FROZEN	0x00010000	/* frozen for system suspend */
#define PF_FSTRANS	0x00020000	/* inside a filesystem transaction */
#define PF_KSWAPD	0x00040000	/* I am kswapd */
#define PF_MEMALLOC_NOIO 0x00080000	/* Allocating memory without IO involved */
#define PF_LESS_THROTTLE 0x00100000	/* Throttle me less: I clean memory */
#define PF_KTHREAD	0x00200000	/* I am a kernel thread */
#define PF_RANDOMIZE	0x00400000	/* randomize virtual address space */
#define PF_SWAPWRITE	0x00800000	/* Allowed to write to swap */
#define PF_NO_SETAFFINITY 0x04000000	/* Userland is not allowed to meddle with cpus_allowed */
#define PF_MCE_EARLY    0x08000000      /* Early kill for mce process policy */
#define PF_MUTEX_TESTER	0x20000000	/* Thread belongs to the rt mutex tester */
#define PF_FREEZER_SKIP	0x40000000	/* Freezer should not count it as freezable */
#define PF_SUSPEND_TASK 0x80000000      /* this thread called freeze_processes and should not be frozen */

/*
 * Only the _current_ task can read/write to tsk->flags, but other
 * tasks can access tsk->flags in readonly mode for example
 * with tsk_used_math (like during threaded core dumping).
 * There is however an exception to this rule during ptrace
 * or during fork: the ptracer task is allowed to write to the
 * child->flags of its traced child (same goes for fork, the parent
 * can write to the child->flags), because we're guaranteed the
 * child is not running and in turn not changing child->flags
 * at the same time the parent does it.
 */
#define clear_stopped_child_used_math(child) do { (child)->flags &= ~PF_USED_MATH; } while (0)
#define set_stopped_child_used_math(child) do { (child)->flags |= PF_USED_MATH; } while (0)
#define clear_used_math() clear_stopped_child_used_math(current)
#define set_used_math() set_stopped_child_used_math(current)
#define conditional_stopped_child_used_math(condition, child) \
	do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= (condition) ? PF_USED_MATH : 0; } while (0)
#define conditional_used_math(condition) \
	conditional_stopped_child_used_math(condition, current)
#define copy_to_stopped_child_used_math(child) \
	do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= current->flags & PF_USED_MATH; } while (0)
/* NOTE: this will return 0 or PF_USED_MATH, it will never return 1 */
#define tsk_used_math(p) ((p)->flags & PF_USED_MATH)
#define used_math() tsk_used_math(current)

/* Per-process atomic flags. */
#define PFA_NO_NEW_PRIVS 0	/* May not gain new privileges. */
#define PFA_SPREAD_PAGE  1      /* Spread page cache over cpuset */
#define PFA_SPREAD_SLAB  2      /* Spread some slab caches over cpuset */
#define PFA_LMK_WAITING  3      /* Lowmemorykiller is waiting */


#define TASK_PFA_TEST(name, func)					\
	static inline bool task_##func(struct task_struct *p)		\
	{ return test_bit(PFA_##name, &p->atomic_flags); }
#define TASK_PFA_SET(name, func)					\
	static inline void task_set_##func(struct task_struct *p)	\
	{ set_bit(PFA_##name, &p->atomic_flags); }
#define TASK_PFA_CLEAR(name, func)					\
	static inline void task_clear_##func(struct task_struct *p)	\
	{ clear_bit(PFA_##name, &p->atomic_flags); }

TASK_PFA_TEST(NO_NEW_PRIVS, no_new_privs)
TASK_PFA_SET(NO_NEW_PRIVS, no_new_privs)

TASK_PFA_TEST(SPREAD_PAGE, spread_page)
TASK_PFA_SET(SPREAD_PAGE, spread_page)
TASK_PFA_CLEAR(SPREAD_PAGE, spread_page)

TASK_PFA_TEST(SPREAD_SLAB, spread_slab)
TASK_PFA_SET(SPREAD_SLAB, spread_slab)
TASK_PFA_CLEAR(SPREAD_SLAB, spread_slab)

TASK_PFA_TEST(LMK_WAITING, lmk_waiting)
TASK_PFA_SET(LMK_WAITING, lmk_waiting)
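
/*
 * Illustrative expansion: TASK_PFA_TEST(NO_NEW_PRIVS, no_new_privs)
 * above generates
 *
 *	static inline bool task_no_new_privs(struct task_struct *p)
 *	{ return test_bit(PFA_NO_NEW_PRIVS, &p->atomic_flags); }
 *
 * so callers simply write: if (task_no_new_privs(current)) ...
 */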

static inline void tsk_restore_flags(struct task_struct *task,
				unsigned long orig_flags, unsigned long flags)
{
	task->flags &= ~flags;
	task->flags |= orig_flags & flags;
}
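
/*
 * Typical use (illustrative sketch; do_allocation() is a hypothetical
 * stand-in): set a flag for the duration of an operation and restore its
 * previous value afterwards:
 *
 *	unsigned long pflags = current->flags;
 *
 *	current->flags |= PF_MEMALLOC_NOIO;
 *	do_allocation();
 *	tsk_restore_flags(current, pflags, PF_MEMALLOC_NOIO);
 */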

extern int cpuset_cpumask_can_shrink(const struct cpumask *cur,
				     const struct cpumask *trial);
extern int task_can_attach(struct task_struct *p,
			   const struct cpumask *cs_cpus_allowed);
#ifdef CONFIG_SMP
extern void do_set_cpus_allowed(struct task_struct *p,
			       const struct cpumask *new_mask);

extern int set_cpus_allowed_ptr(struct task_struct *p,
				const struct cpumask *new_mask);
#else
static inline void do_set_cpus_allowed(struct task_struct *p,
				      const struct cpumask *new_mask)
{
}
static inline int set_cpus_allowed_ptr(struct task_struct *p,
				       const struct cpumask *new_mask)
{
	if (!cpumask_test_cpu(0, new_mask))
		return -EINVAL;
	return 0;
}
#endif

#ifndef cpu_relax_yield
#define cpu_relax_yield() cpu_relax()
#endif

/* sched_exec is called by processes performing an exec */
#ifdef CONFIG_SMP
extern void sched_exec(void);
#else
#define sched_exec()   {}
#endif

extern int yield_to(struct task_struct *p, bool preempt);
extern void set_user_nice(struct task_struct *p, long nice);
extern int task_prio(const struct task_struct *p);
/**
 * task_nice - return the nice value of a given task.
 * @p: the task in question.
 *
 * Return: The nice value [ -20 ... 0 ... 19 ].
 */
static inline int task_nice(const struct task_struct *p)
{
	return PRIO_TO_NICE((p)->static_prio);
}
extern int can_nice(const struct task_struct *p, const int nice);
extern int task_curr(const struct task_struct *p);
extern int idle_cpu(int cpu);
extern int sched_setscheduler(struct task_struct *, int,
			      const struct sched_param *);
extern int sched_setscheduler_nocheck(struct task_struct *, int,
				      const struct sched_param *);
extern int sched_setattr(struct task_struct *,
			 const struct sched_attr *);
extern struct task_struct *idle_task(int cpu);
/**
 * is_idle_task - is the specified task an idle task?
 * @p: the task in question.
 *
 * Return: 1 if @p is an idle task. 0 otherwise.
 */
static inline bool is_idle_task(const struct task_struct *p)
{
	return !!(p->flags & PF_IDLE);
}
extern struct task_struct *curr_task(int cpu);
extern void ia64_set_curr_task(int cpu, struct task_struct *p);

void yield(void);

union thread_union {
#ifndef CONFIG_THREAD_INFO_IN_TASK
	struct thread_info thread_info;
#endif
	unsigned long stack[THREAD_SIZE/sizeof(long)];
};

#ifdef CONFIG_THREAD_INFO_IN_TASK
static inline struct thread_info *task_thread_info(struct task_struct *task)
{
	return &task->thread_info;
}
#elif !defined(__HAVE_THREAD_FUNCTIONS)
# define task_thread_info(task)	((struct thread_info *)(task)->stack)
#endif

#ifndef __HAVE_ARCH_KSTACK_END
static inline int kstack_end(void *addr)
{
	/* Reliable end of stack detection:
	 * Some APM bios versions misalign the stack
	 */
	return !(((unsigned long)addr+sizeof(void*)-1) & (THREAD_SIZE-sizeof(void*)));
}
#endif

extern struct pid_namespace init_pid_ns;

/*
 * find a task by one of its numerical ids
 *
 * find_task_by_pid_ns():
 *      finds a task by its pid in the specified namespace
 * find_task_by_vpid():
 *      finds a task by its virtual pid
 *
 * see also find_vpid() etc in include/linux/pid.h
 */

extern struct task_struct *find_task_by_vpid(pid_t nr);
extern struct task_struct *find_task_by_pid_ns(pid_t nr,
		struct pid_namespace *ns);

extern int wake_up_state(struct task_struct *tsk, unsigned int state);
extern int wake_up_process(struct task_struct *tsk);
extern void wake_up_new_task(struct task_struct *tsk);
#ifdef CONFIG_SMP
 extern void kick_process(struct task_struct *tsk);
#else
 static inline void kick_process(struct task_struct *tsk) { }
#endif

extern void exit_files(struct task_struct *);

extern void exit_itimers(struct signal_struct *);

extern int do_execve(struct filename *,
		     const char __user * const __user *,
		     const char __user * const __user *);
extern int do_execveat(int, struct filename *,
		       const char __user * const __user *,
		       const char __user * const __user *,
		       int);

extern void __set_task_comm(struct task_struct *tsk, const char *from, bool exec);
static inline void set_task_comm(struct task_struct *tsk, const char *from)
{
	__set_task_comm(tsk, from, false);
}
extern char *get_task_comm(char *to, struct task_struct *tsk);

#ifdef CONFIG_SMP
void scheduler_ipi(void);
extern unsigned long wait_task_inactive(struct task_struct *, long match_state);
#else
static inline void scheduler_ipi(void) { }
static inline unsigned long wait_task_inactive(struct task_struct *p,
					       long match_state)
{
	return 1;
}
#endif

/* set thread flags in other task's structures
 * - see asm/thread_info.h for TIF_xxxx flags available
 */
static inline void set_tsk_thread_flag(struct task_struct *tsk, int flag)
{
	set_ti_thread_flag(task_thread_info(tsk), flag);
}

static inline void clear_tsk_thread_flag(struct task_struct *tsk, int flag)
{
	clear_ti_thread_flag(task_thread_info(tsk), flag);
}

static inline int test_and_set_tsk_thread_flag(struct task_struct *tsk, int flag)
{
	return test_and_set_ti_thread_flag(task_thread_info(tsk), flag);
}

static inline int test_and_clear_tsk_thread_flag(struct task_struct *tsk, int flag)
{
	return test_and_clear_ti_thread_flag(task_thread_info(tsk), flag);
}

static inline int test_tsk_thread_flag(struct task_struct *tsk, int flag)
{
	return test_ti_thread_flag(task_thread_info(tsk), flag);
}

static inline void set_tsk_need_resched(struct task_struct *tsk)
{
	set_tsk_thread_flag(tsk,TIF_NEED_RESCHED);
}

static inline void clear_tsk_need_resched(struct task_struct *tsk)
{
	clear_tsk_thread_flag(tsk,TIF_NEED_RESCHED);
}

static inline int test_tsk_need_resched(struct task_struct *tsk)
{
	return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED));
}

/*
 * cond_resched() and cond_resched_lock(): latency reduction via
 * explicit rescheduling in places that are safe. The return
 * value indicates whether a reschedule was done in fact.
 * cond_resched_lock() will drop the spinlock before scheduling,
 * cond_resched_softirq() will enable bhs before scheduling.
 */
#ifndef CONFIG_PREEMPT
extern int _cond_resched(void);
#else
static inline int _cond_resched(void) { return 0; }
#endif

#define cond_resched() ({			\
	___might_sleep(__FILE__, __LINE__, 0);	\
	_cond_resched();			\
})
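
/*
 * Typical use (illustrative sketch; process_item() is a hypothetical
 * stand-in): break up a long-running loop in process context so other
 * tasks can be scheduled on non-preemptible kernels:
 *
 *	for (i = 0; i < nr_items; i++) {
 *		process_item(i);
 *		cond_resched();
 *	}
 */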
extern int __cond_resched_lock(spinlock_t *lock);

#define cond_resched_lock(lock) ({				\
	___might_sleep(__FILE__, __LINE__, PREEMPT_LOCK_OFFSET);\
	__cond_resched_lock(lock);				\
})

extern int __cond_resched_softirq(void);

#define cond_resched_softirq() ({					\
	___might_sleep(__FILE__, __LINE__, SOFTIRQ_DISABLE_OFFSET);	\
	__cond_resched_softirq();					\
})

static inline void cond_resched_rcu(void)
{
#if defined(CONFIG_DEBUG_ATOMIC_SLEEP) || !defined(CONFIG_PREEMPT_RCU)
	rcu_read_unlock();
	cond_resched();
	rcu_read_lock();
#endif
}

L
Linus Torvalds 已提交
1514 1515
/*
 * Does a critical section need to be broken due to another
N
Nick Piggin 已提交
1516 1517
 * task waiting?: (technically does not depend on CONFIG_PREEMPT,
 * but a general need for low latency)
L
Linus Torvalds 已提交
1518
 */
N
Nick Piggin 已提交
1519
static inline int spin_needbreak(spinlock_t *lock)
L
Linus Torvalds 已提交
1520
{
N
Nick Piggin 已提交
1521 1522 1523
#ifdef CONFIG_PREEMPT
	return spin_is_contended(lock);
#else
L
	return 0;
#endif

1528 1529 1530 1531 1532
static __always_inline bool need_resched(void)
{
	return unlikely(tif_need_resched());
}

L
/*
 * Wrappers for p->thread_info->cpu access. No-op on UP.
 */
#ifdef CONFIG_SMP

static inline unsigned int task_cpu(const struct task_struct *p)
{
#ifdef CONFIG_THREAD_INFO_IN_TASK
	return p->cpu;
#else
	return task_thread_info(p)->cpu;
#endif

I
static inline int task_node(const struct task_struct *p)
{
	return cpu_to_node(task_cpu(p));
}

extern void set_task_cpu(struct task_struct *p, unsigned int cpu);

#else

static inline unsigned int task_cpu(const struct task_struct *p)
{
	return 0;
}

static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
{
}

#endif /* CONFIG_SMP */

/*
 * In order to reduce various lock holder preemption latencies provide an
 * interface to see if a vCPU is currently running or not.
 *
 * This allows us to terminate optimistic spin loops and block, analogous to
 * the native optimistic spin heuristic of testing if the lock owner task is
 * running or not.
 */
#ifndef vcpu_is_preempted
# define vcpu_is_preempted(cpu)	false
#endif
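
/*
 * Illustrative use (sketch; "owner" and "lock->owner" are hypothetical):
 * an optimistic spin loop can stop spinning once the lock owner's vCPU is
 * known to be preempted, since the owner cannot make progress until the
 * hypervisor reschedules it:
 *
 *	while (READ_ONCE(lock->owner) == owner &&
 *	       !vcpu_is_preempted(task_cpu(owner)))
 *		cpu_relax();
 */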

extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask);
extern long sched_getaffinity(pid_t pid, struct cpumask *mask);

extern int task_can_switch_user(struct user_struct *up,
					struct task_struct *tsk);

#ifndef TASK_SIZE_OF
#define TASK_SIZE_OF(tsk)	TASK_SIZE
#endif

#endif