#ifndef _LINUX_SCHED_H
#define _LINUX_SCHED_H

/*
 * Define 'struct task_struct' and provide the main scheduler
 * APIs (schedule(), wakeup variants, etc.)
 */

#include <uapi/linux/sched.h>

#include <asm/current.h>

#include <linux/pid.h>
#include <linux/sem.h>
#include <linux/shm.h>
#include <linux/kcov.h>
#include <linux/mutex.h>
#include <linux/plist.h>
#include <linux/hrtimer.h>
#include <linux/seccomp.h>
#include <linux/nodemask.h>
#include <linux/rcupdate.h>
#include <linux/resource.h>
#include <linux/latencytop.h>
#include <linux/sched/prio.h>
#include <linux/signal_types.h>
#include <linux/mm_types_task.h>
#include <linux/task_io_accounting.h>

/* task_struct member predeclarations (sorted alphabetically): */
struct audit_context;
struct backing_dev_info;
struct bio_list;
struct blk_plug;
struct cfs_rq;
struct fs_struct;
struct futex_pi_state;
struct io_context;
struct mempolicy;
struct nameidata;
struct nsproxy;
struct perf_event_context;
struct pid_namespace;
struct pipe_inode_info;
struct rcu_node;
struct reclaim_state;
struct robust_list_head;
struct sched_attr;
struct sched_param;
struct seq_file;
struct sighand_struct;
struct signal_struct;
struct task_delay_info;
struct task_group;

/*
 * Task state bitmask. NOTE! These bits are also
 * encoded in fs/proc/array.c: get_task_state().
 *
 * We have two separate sets of flags: task->state
 * is about runnability, while task->exit_state is
 * about the task exiting. Confusing, but this way
 * modifying one set can't modify the other one by
 * mistake.
 */

/* Used in tsk->state: */
#define TASK_RUNNING			0x0000
#define TASK_INTERRUPTIBLE		0x0001
#define TASK_UNINTERRUPTIBLE		0x0002
#define __TASK_STOPPED			0x0004
#define __TASK_TRACED			0x0008
/* Used in tsk->exit_state: */
#define EXIT_DEAD			0x0010
#define EXIT_ZOMBIE			0x0020
#define EXIT_TRACE			(EXIT_ZOMBIE | EXIT_DEAD)
/* Used in tsk->state again: */
#define TASK_DEAD			0x0040
#define TASK_WAKEKILL			0x0080
#define TASK_WAKING			0x0100
#define TASK_PARKED			0x0200
#define TASK_NOLOAD			0x0400
#define TASK_NEW			0x0800
#define TASK_STATE_MAX			0x1000

/* Convenience macros for the sake of set_current_state: */
#define TASK_KILLABLE			(TASK_WAKEKILL | TASK_UNINTERRUPTIBLE)
#define TASK_STOPPED			(TASK_WAKEKILL | __TASK_STOPPED)
#define TASK_TRACED			(TASK_WAKEKILL | __TASK_TRACED)

#define TASK_IDLE			(TASK_UNINTERRUPTIBLE | TASK_NOLOAD)

/* Convenience macros for the sake of wake_up(): */
#define TASK_NORMAL			(TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE)
#define TASK_ALL			(TASK_NORMAL | __TASK_STOPPED | __TASK_TRACED)

/* get_task_state(): */
#define TASK_REPORT			(TASK_RUNNING | TASK_INTERRUPTIBLE | \
					 TASK_UNINTERRUPTIBLE | __TASK_STOPPED | \
					 __TASK_TRACED | EXIT_DEAD | EXIT_ZOMBIE)

#define task_is_traced(task)		((task->state & __TASK_TRACED) != 0)

#define task_is_stopped(task)		((task->state & __TASK_STOPPED) != 0)

#define task_is_stopped_or_traced(task)	((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0)

#define task_contributes_to_load(task)	((task->state & TASK_UNINTERRUPTIBLE) != 0 && \
					 (task->flags & PF_FROZEN) == 0 && \
					 (task->state & TASK_NOLOAD) == 0)

#ifdef CONFIG_DEBUG_ATOMIC_SLEEP

#define __set_current_state(state_value)			\
	do {							\
		current->task_state_change = _THIS_IP_;		\
		current->state = (state_value);			\
	} while (0)
#define set_current_state(state_value)				\
	do {							\
		current->task_state_change = _THIS_IP_;		\
		smp_store_mb(current->state, (state_value));	\
	} while (0)

#else
/*
 * set_current_state() includes a barrier so that the write of current->state
 * is correctly serialised wrt the caller's subsequent test of whether to
 * actually sleep:
 *
 *   for (;;) {
 *	set_current_state(TASK_UNINTERRUPTIBLE);
 *	if (!need_sleep)
 *		break;
 *
 *	schedule();
 *   }
 *   __set_current_state(TASK_RUNNING);
 *
 * If the caller does not need such serialisation (because, for instance, the
 * condition test and condition change and wakeup are under the same lock) then
 * use __set_current_state().
 *
 * The above is typically ordered against the wakeup, which does:
 *
 *	need_sleep = false;
 *	wake_up_state(p, TASK_UNINTERRUPTIBLE);
 *
 * Where wake_up_state() (and all other wakeup primitives) imply enough
 * barriers to order the store of the variable against wakeup.
 *
 * Wakeup will do: if (@state & p->state) p->state = TASK_RUNNING, that is,
 * once it observes the TASK_UNINTERRUPTIBLE store the waking CPU can issue a
 * TASK_RUNNING store which can collide with __set_current_state(TASK_RUNNING).
 *
 * This is obviously fine, since they both store the exact same value.
 *
 * Also see the comments of try_to_wake_up().
 */
#define __set_current_state(state_value) do { current->state = (state_value); } while (0)
#define set_current_state(state_value)	 smp_store_mb(current->state, (state_value))
#endif
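
/*
 * Illustrative sketch, not part of the original header: the same wait
 * loop as in the comment above, but with TASK_KILLABLE so that only
 * fatal signals interrupt the wait ('done' stands in for the wait
 * condition; fatal_signal_pending() is assumed from
 * <linux/sched/signal.h>):
 *
 *	for (;;) {
 *		set_current_state(TASK_KILLABLE);
 *		if (done || fatal_signal_pending(current))
 *			break;
 *		schedule();
 *	}
 *	__set_current_state(TASK_RUNNING);
 */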

/* Task command name length: */
#define TASK_COMM_LEN			16

extern cpumask_var_t			cpu_isolated_map;

extern void scheduler_tick(void);

#define	MAX_SCHEDULE_TIMEOUT		LONG_MAX

extern long schedule_timeout(long timeout);
extern long schedule_timeout_interruptible(long timeout);
extern long schedule_timeout_killable(long timeout);
extern long schedule_timeout_uninterruptible(long timeout);
extern long schedule_timeout_idle(long timeout);
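
/*
 * Illustrative sketch, not part of the original header: a bounded,
 * interruptible sleep of roughly two seconds.  schedule_timeout()
 * returns the jiffies remaining if the task was woken early and 0 on
 * full timeout; schedule_timeout_interruptible() bundles the state
 * change with the sleep:
 *
 *	set_current_state(TASK_INTERRUPTIBLE);
 *	remaining = schedule_timeout(2 * HZ);
 */
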
asmlinkage void schedule(void);
extern void schedule_preempt_disabled(void);

extern int __must_check io_schedule_prepare(void);
extern void io_schedule_finish(int token);
extern long io_schedule_timeout(long timeout);
extern void io_schedule(void);
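
/*
 * Illustrative sketch, not part of the original header:
 * io_schedule_prepare() and io_schedule_finish() bracket a blocking
 * region so that the sleep is accounted as I/O wait:
 *
 *	int token = io_schedule_prepare();
 *
 *	wait_for_completion(&done);	(or any other blocking call)
 *	io_schedule_finish(token);
 */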

/**
 * struct prev_cputime - snapshot of system and user cputime
 * @utime: time spent in user mode
 * @stime: time spent in system mode
 * @lock: protects the above two fields
 *
 * Stores previous user/system time values such that we can guarantee
 * monotonicity.
 */
struct prev_cputime {
#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
	u64				utime;
	u64				stime;
	raw_spinlock_t			lock;
#endif
};

/**
 * struct task_cputime - collected CPU time counts
 * @utime:		time spent in user mode, in nanoseconds
 * @stime:		time spent in kernel mode, in nanoseconds
 * @sum_exec_runtime:	total time spent on the CPU, in nanoseconds
 *
 * This structure groups together three kinds of CPU time that are tracked for
 * threads and thread groups.  Most things considering CPU time want to group
 * these counts together and treat all three of them in parallel.
 */
struct task_cputime {
	u64				utime;
	u64				stime;
	unsigned long long		sum_exec_runtime;
};

/* Alternate field names when used on cache expirations: */
#define virt_exp			utime
#define prof_exp			stime
#define sched_exp			sum_exec_runtime

enum vtime_state {
	/* Task is sleeping or running in a CPU with VTIME inactive: */
	VTIME_INACTIVE = 0,
	/* Task runs in userspace in a CPU with VTIME active: */
	VTIME_USER,
	/* Task runs in kernelspace in a CPU with VTIME active: */
	VTIME_SYS,
};

struct vtime {
	seqcount_t		seqcount;
	unsigned long long	starttime;
	enum vtime_state	state;
	u64			utime;
	u64			stime;
	u64			gtime;
};

struct sched_info {
#ifdef CONFIG_SCHED_INFO
	/* Cumulative counters: */

	/* # of times we have run on this CPU: */
	unsigned long			pcount;

	/* Time spent waiting on a runqueue: */
	unsigned long long		run_delay;

	/* Timestamps: */

	/* When did we last run on a CPU? */
	unsigned long long		last_arrival;

	/* When were we last queued to run? */
	unsigned long long		last_queued;

#endif /* CONFIG_SCHED_INFO */
};

/*
 * Integer metrics need fixed point arithmetic, e.g., sched/fair
 * has a few: load, load_avg, util_avg, freq, and capacity.
 *
 * We define a basic fixed point arithmetic range, and then formalize
 * all these metrics based on that basic range.
 */
# define SCHED_FIXEDPOINT_SHIFT		10
# define SCHED_FIXEDPOINT_SCALE		(1L << SCHED_FIXEDPOINT_SHIFT)
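
/*
 * Illustrative example, not part of the original header: with the range
 * above, a ratio of 50% is stored as 0.5 * 1024 = 512, and the product
 * of two fixed point ratios must be shifted back down:
 *
 *	product = (a * b) >> SCHED_FIXEDPOINT_SHIFT;
 */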

struct load_weight {
	unsigned long			weight;
	u32				inv_weight;
};

/*
 * The load_avg/util_avg accumulates an infinite geometric series
 * (see __update_load_avg() in kernel/sched/fair.c).
 *
 * [load_avg definition]
 *
 *   load_avg = runnable% * scale_load_down(load)
 *
 * where runnable% is the time ratio that a sched_entity is runnable.
 * For cfs_rq, it is the aggregated load_avg of all runnable and
 * blocked sched_entities.
 *
 * load_avg may also take frequency scaling into account:
 *
 *   load_avg = runnable% * scale_load_down(load) * freq%
 *
 * where freq% is the CPU frequency normalized to the highest frequency.
 *
 * [util_avg definition]
 *
 *   util_avg = running% * SCHED_CAPACITY_SCALE
 *
 * where running% is the time ratio that a sched_entity is running on
 * a CPU. For cfs_rq, it is the aggregated util_avg of all runnable
 * and blocked sched_entities.
 *
 * util_avg may also factor frequency scaling and CPU capacity scaling:
 *
 *   util_avg = running% * SCHED_CAPACITY_SCALE * freq% * capacity%
 *
 * where freq% is the same as above, and capacity% is the CPU capacity
 * normalized to the greatest capacity (due to uarch differences, etc).
 *
 * N.B., the above ratios (runnable%, running%, freq%, and capacity%)
 * themselves are in the range of [0, 1]. To do fixed point arithmetic,
 * we therefore scale them to as large a range as necessary. This is for
 * example reflected by util_avg's SCHED_CAPACITY_SCALE.
 *
 * [Overflow issue]
 *
 * The 64-bit load_sum can have 4353082796 (=2^64/47742/88761) entities
 * with the highest load (=88761), always runnable on a single cfs_rq,
 * and should not overflow as the number already hits PID_MAX_LIMIT.
 *
 * For all other cases (including 32-bit kernels), struct load_weight's
 * weight will overflow first before we do, because:
 *
 *    Max(load_avg) <= Max(load.weight)
 *
 * Then it is the load_weight's responsibility to consider overflow
 * issues.
329
 */
struct sched_avg {
	u64				last_update_time;
	u64				load_sum;
	u32				util_sum;
	u32				period_contrib;
	unsigned long			load_avg;
	unsigned long			util_avg;
};

struct sched_statistics {
#ifdef CONFIG_SCHEDSTATS
	u64				wait_start;
	u64				wait_max;
	u64				wait_count;
	u64				wait_sum;
	u64				iowait_count;
	u64				iowait_sum;

	u64				sleep_start;
	u64				sleep_max;
	s64				sum_sleep_runtime;

	u64				block_start;
	u64				block_max;
	u64				exec_max;
	u64				slice_max;

	u64				nr_migrations_cold;
	u64				nr_failed_migrations_affine;
	u64				nr_failed_migrations_running;
	u64				nr_failed_migrations_hot;
	u64				nr_forced_migrations;

	u64				nr_wakeups;
	u64				nr_wakeups_sync;
	u64				nr_wakeups_migrate;
	u64				nr_wakeups_local;
	u64				nr_wakeups_remote;
	u64				nr_wakeups_affine;
	u64				nr_wakeups_affine_attempts;
	u64				nr_wakeups_passive;
	u64				nr_wakeups_idle;
#endif
};

struct sched_entity {
	/* For load-balancing: */
	struct load_weight		load;
	struct rb_node			run_node;
	struct list_head		group_node;
	unsigned int			on_rq;

	u64				exec_start;
	u64				sum_exec_runtime;
	u64				vruntime;
	u64				prev_sum_exec_runtime;

	u64				nr_migrations;

	struct sched_statistics		statistics;

#ifdef CONFIG_FAIR_GROUP_SCHED
	int				depth;
	struct sched_entity		*parent;
	/* rq on which this entity is (to be) queued: */
	struct cfs_rq			*cfs_rq;
	/* rq "owned" by this entity/group: */
	struct cfs_rq			*my_q;
#endif

#ifdef CONFIG_SMP
	/*
	 * Per entity load average tracking.
	 *
	 * Put into separate cache line so it does not
	 * collide with read-mostly values above.
	 */
	struct sched_avg		avg ____cacheline_aligned_in_smp;
#endif
};

struct sched_rt_entity {
	struct list_head		run_list;
	unsigned long			timeout;
	unsigned long			watchdog_stamp;
	unsigned int			time_slice;
	unsigned short			on_rq;
	unsigned short			on_list;

	struct sched_rt_entity		*back;
#ifdef CONFIG_RT_GROUP_SCHED
	struct sched_rt_entity		*parent;
	/* rq on which this entity is (to be) queued: */
	struct rt_rq			*rt_rq;
	/* rq "owned" by this entity/group: */
	struct rt_rq			*my_q;
#endif
} __randomize_layout;

struct sched_dl_entity {
	struct rb_node			rb_node;

	/*
	 * Original scheduling parameters. Copied here from sched_attr
	 * during sched_setattr(), they will remain the same until
	 * the next sched_setattr().
	 */
	u64				dl_runtime;	/* Maximum runtime for each instance	*/
	u64				dl_deadline;	/* Relative deadline of each instance	*/
	u64				dl_period;	/* Separation of two instances (period) */
	u64				dl_bw;		/* dl_runtime / dl_period		*/
	u64				dl_density;	/* dl_runtime / dl_deadline		*/

	/*
	 * Actual scheduling parameters. Initialized with the values above,
	 * they are continuously updated during task execution. Note that
	 * the remaining runtime could be < 0 in case we are in overrun.
	 */
	s64				runtime;	/* Remaining runtime for this instance	*/
	u64				deadline;	/* Absolute deadline for this instance	*/
	unsigned int			flags;		/* Specifying the scheduler behaviour	*/

	/*
	 * Some bool flags:
	 *
	 * @dl_throttled tells if we exhausted the runtime. If so, the
	 * task has to wait for a replenishment to be performed at the
	 * next firing of dl_timer.
	 *
	 * @dl_boosted tells if we are boosted due to deadline inheritance
	 * (DI). If so we are outside the bandwidth enforcement mechanism
	 * (but only until we exit the critical section);
	 *
	 * @dl_yielded tells if task gave up the CPU before consuming
	 * all its available runtime during the last job.
	 *
	 * @dl_non_contending tells if the task is inactive while still
	 * contributing to the active utilization. In other words, it
	 * indicates if the inactive timer has been armed and its handler
	 * has not been executed yet. This flag is useful to avoid race
	 * conditions between the inactive timer handler and the wakeup
	 * code.
	 */
	int				dl_throttled;
	int				dl_boosted;
	int				dl_yielded;
	int				dl_non_contending;

	/*
	 * Bandwidth enforcement timer. Each -deadline task has its
	 * own bandwidth to be enforced, thus we need one timer per task.
	 */
	struct hrtimer			dl_timer;

	/*
	 * Inactive timer, responsible for decreasing the active utilization
	 * at the "0-lag time". When a -deadline task blocks, it contributes
	 * to GRUB's active utilization until the "0-lag time", hence a
	 * timer is needed to decrease the active utilization at the correct
	 * time.
	 */
	struct hrtimer			inactive_timer;
};
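
/*
 * Illustrative sketch, not part of the original header: from user space
 * the dl_runtime/dl_deadline/dl_period parameters above are supplied in
 * nanoseconds via the sched_setattr() system call, e.g. 10ms of runtime
 * every 100ms:
 *
 *	struct sched_attr attr = {
 *		.size		= sizeof(attr),
 *		.sched_policy	= SCHED_DEADLINE,
 *		.sched_runtime	=  10 * 1000 * 1000,
 *		.sched_deadline	= 100 * 1000 * 1000,
 *		.sched_period	= 100 * 1000 * 1000,
 *	};
 */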

union rcu_special {
	struct {
		u8			blocked;
		u8			need_qs;
		u8			exp_need_qs;

		/* Otherwise the compiler can store garbage here: */
		u8			pad;
	} b; /* Bits. */
	u32 s; /* Set of bits. */
};

enum perf_event_task_context {
	perf_invalid_context = -1,
	perf_hw_context = 0,
	perf_sw_context,
	perf_nr_task_contexts,
};

struct wake_q_node {
	struct wake_q_node *next;
};

struct task_struct {
#ifdef CONFIG_THREAD_INFO_IN_TASK
	/*
	 * For reasons of header soup (see current_thread_info()), this
	 * must be the first element of task_struct.
	 */
	struct thread_info		thread_info;
#endif
	/* -1 unrunnable, 0 runnable, >0 stopped: */
	volatile long			state;

	/*
	 * This begins the randomizable portion of task_struct. Only
	 * scheduling-critical items should be added above here.
	 */
	randomized_struct_fields_start

	void				*stack;
	atomic_t			usage;
	/* Per task flags (PF_*), defined further below: */
	unsigned int			flags;
	unsigned int			ptrace;

#ifdef CONFIG_SMP
	struct llist_node		wake_entry;
	int				on_cpu;
#ifdef CONFIG_THREAD_INFO_IN_TASK
	/* Current CPU: */
	unsigned int			cpu;
#endif
	unsigned int			wakee_flips;
	unsigned long			wakee_flip_decay_ts;
	struct task_struct		*last_wakee;

	int				wake_cpu;
#endif
	int				on_rq;

	int				prio;
	int				static_prio;
	int				normal_prio;
	unsigned int			rt_priority;

	const struct sched_class	*sched_class;
	struct sched_entity		se;
	struct sched_rt_entity		rt;
#ifdef CONFIG_CGROUP_SCHED
	struct task_group		*sched_task_group;
#endif
	struct sched_dl_entity		dl;

#ifdef CONFIG_PREEMPT_NOTIFIERS
	/* List of struct preempt_notifier: */
	struct hlist_head		preempt_notifiers;
#endif

#ifdef CONFIG_BLK_DEV_IO_TRACE
	unsigned int			btrace_seq;
#endif

	unsigned int			policy;
	int				nr_cpus_allowed;
	cpumask_t			cpus_allowed;

#ifdef CONFIG_PREEMPT_RCU
	int				rcu_read_lock_nesting;
	union rcu_special		rcu_read_unlock_special;
	struct list_head		rcu_node_entry;
	struct rcu_node			*rcu_blocked_node;
#endif /* #ifdef CONFIG_PREEMPT_RCU */

#ifdef CONFIG_TASKS_RCU
	unsigned long			rcu_tasks_nvcsw;
	u8				rcu_tasks_holdout;
	u8				rcu_tasks_idx;
	int				rcu_tasks_idle_cpu;
	struct list_head		rcu_tasks_holdout_list;
#endif /* #ifdef CONFIG_TASKS_RCU */

	struct sched_info		sched_info;

	struct list_head		tasks;
#ifdef CONFIG_SMP
	struct plist_node		pushable_tasks;
	struct rb_node			pushable_dl_tasks;
#endif

	struct mm_struct		*mm;
	struct mm_struct		*active_mm;

	/* Per-thread vma caching: */
	struct vmacache			vmacache;

#ifdef SPLIT_RSS_COUNTING
	struct task_rss_stat		rss_stat;
#endif
	int				exit_state;
	int				exit_code;
	int				exit_signal;
	/* The signal sent when the parent dies: */
	int				pdeath_signal;
	/* JOBCTL_*, siglock protected: */
	unsigned long			jobctl;

	/* Used for emulating ABI behavior of previous Linux versions: */
	unsigned int			personality;

	/* Scheduler bits, serialized by scheduler locks: */
	unsigned			sched_reset_on_fork:1;
	unsigned			sched_contributes_to_load:1;
	unsigned			sched_migrated:1;
	unsigned			sched_remote_wakeup:1;
	/* Force alignment to the next boundary: */
	unsigned			:0;

	/* Unserialized, strictly 'current' */

	/* Bit to tell LSMs we're in execve(): */
	unsigned			in_execve:1;
	unsigned			in_iowait:1;
#ifndef TIF_RESTORE_SIGMASK
	unsigned			restore_sigmask:1;
#endif
#ifdef CONFIG_MEMCG
	unsigned			memcg_may_oom:1;
#ifndef CONFIG_SLOB
	unsigned			memcg_kmem_skip_account:1;
#endif
#endif
#ifdef CONFIG_COMPAT_BRK
	unsigned			brk_randomized:1;
#endif
#ifdef CONFIG_CGROUPS
	/* disallow userland-initiated cgroup migration */
	unsigned			no_cgroup_migration:1;
#endif

	unsigned long			atomic_flags; /* Flags requiring atomic access. */

	struct restart_block		restart_block;

	pid_t				pid;
	pid_t				tgid;

#ifdef CONFIG_CC_STACKPROTECTOR
	/* Canary value for the -fstack-protector GCC feature: */
	unsigned long			stack_canary;
#endif
	/*
	 * Pointers to the (original) parent process, youngest child, younger sibling,
	 * older sibling, respectively.  (p->father can be replaced with
	 * p->real_parent->pid)
	 */

	/* Real parent process: */
	struct task_struct __rcu	*real_parent;

	/* Recipient of SIGCHLD, wait4() reports: */
	struct task_struct __rcu	*parent;

	/*
	 * Children/sibling form the list of natural children:
	 */
	struct list_head		children;
	struct list_head		sibling;
	struct task_struct		*group_leader;

	/*
	 * 'ptraced' is the list of tasks this task is using ptrace() on.
	 *
	 * This includes both natural children and PTRACE_ATTACH targets.
	 * 'ptrace_entry' is this task's link on the p->parent->ptraced list.
	 */
	struct list_head		ptraced;
	struct list_head		ptrace_entry;

	/* PID/PID hash table linkage. */
	struct pid_link			pids[PIDTYPE_MAX];
	struct list_head		thread_group;
	struct list_head		thread_node;

	struct completion		*vfork_done;

	/* CLONE_CHILD_SETTID: */
	int __user			*set_child_tid;

	/* CLONE_CHILD_CLEARTID: */
	int __user			*clear_child_tid;

	u64				utime;
	u64				stime;
#ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
	u64				utimescaled;
	u64				stimescaled;
#endif
	u64				gtime;
	struct prev_cputime		prev_cputime;
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
	struct vtime			vtime;
#endif

#ifdef CONFIG_NO_HZ_FULL
	atomic_t			tick_dep_mask;
#endif
	/* Context switch counts: */
	unsigned long			nvcsw;
	unsigned long			nivcsw;

	/* Monotonic time in nsecs: */
	u64				start_time;

	/* Boot based time in nsecs: */
	u64				real_start_time;

	/* MM fault and swap info: this can arguably be seen as either mm-specific or thread-specific: */
	unsigned long			min_flt;
	unsigned long			maj_flt;

#ifdef CONFIG_POSIX_TIMERS
	struct task_cputime		cputime_expires;
	struct list_head		cpu_timers[3];
#endif

	/* Process credentials: */

	/* Tracer's credentials at attach: */
	const struct cred __rcu		*ptracer_cred;

	/* Objective and real subjective task credentials (COW): */
	const struct cred __rcu		*real_cred;

	/* Effective (overridable) subjective task credentials (COW): */
	const struct cred __rcu		*cred;

	/*
	 * executable name, excluding path.
	 *
	 * - normally initialized by setup_new_exec()
	 * - access it with [gs]et_task_comm()
	 * - lock it with task_lock()
	 */
	char				comm[TASK_COMM_LEN];

	struct nameidata		*nameidata;

#ifdef CONFIG_SYSVIPC
	struct sysv_sem			sysvsem;
	struct sysv_shm			sysvshm;
#endif
#ifdef CONFIG_DETECT_HUNG_TASK
	unsigned long			last_switch_count;
#endif
	/* Filesystem information: */
	struct fs_struct		*fs;

	/* Open file information: */
	struct files_struct		*files;

	/* Namespaces: */
	struct nsproxy			*nsproxy;

	/* Signal handlers: */
	struct signal_struct		*signal;
	struct sighand_struct		*sighand;
	sigset_t			blocked;
	sigset_t			real_blocked;
	/* Restored if set_restore_sigmask() was used: */
	sigset_t			saved_sigmask;
	struct sigpending		pending;
	unsigned long			sas_ss_sp;
	size_t				sas_ss_size;
	unsigned int			sas_ss_flags;

	struct callback_head		*task_works;

	struct audit_context		*audit_context;
#ifdef CONFIG_AUDITSYSCALL
	kuid_t				loginuid;
	unsigned int			sessionid;
#endif
	struct seccomp			seccomp;

	/* Thread group tracking: */
	u32				parent_exec_id;
	u32				self_exec_id;

	/* Protection against (de-)allocation: mm, files, fs, tty, keyrings, mems_allowed, mempolicy: */
	spinlock_t			alloc_lock;

	/* Protection of the PI data structures: */
	raw_spinlock_t			pi_lock;

	struct wake_q_node		wake_q;

#ifdef CONFIG_RT_MUTEXES
	/* PI waiters blocked on a rt_mutex held by this task: */
	struct rb_root_cached		pi_waiters;
	/* Updated under owner's pi_lock and rq lock */
	struct task_struct		*pi_top_task;
	/* Deadlock detection and priority inheritance handling: */
	struct rt_mutex_waiter		*pi_blocked_on;
#endif

#ifdef CONFIG_DEBUG_MUTEXES
	/* Mutex deadlock detection: */
	struct mutex_waiter		*blocked_on;
#endif

#ifdef CONFIG_TRACE_IRQFLAGS
	unsigned int			irq_events;
	unsigned long			hardirq_enable_ip;
	unsigned long			hardirq_disable_ip;
	unsigned int			hardirq_enable_event;
	unsigned int			hardirq_disable_event;
	int				hardirqs_enabled;
	int				hardirq_context;
	unsigned long			softirq_disable_ip;
	unsigned long			softirq_enable_ip;
	unsigned int			softirq_disable_event;
	unsigned int			softirq_enable_event;
	int				softirqs_enabled;
	int				softirq_context;
#endif

#ifdef CONFIG_LOCKDEP
# define MAX_LOCK_DEPTH			48UL
	u64				curr_chain_key;
	int				lockdep_depth;
	unsigned int			lockdep_recursion;
	struct held_lock		held_locks[MAX_LOCK_DEPTH];
#endif

#ifdef CONFIG_LOCKDEP_CROSSRELEASE
#define MAX_XHLOCKS_NR 64UL
	struct hist_lock *xhlocks; /* Crossrelease history locks */
	unsigned int xhlock_idx;
	/* For restoring at history boundaries */
	unsigned int xhlock_idx_hist[XHLOCK_CTX_NR];
	unsigned int hist_id;
	/* For overwrite check at each context exit */
	unsigned int hist_id_save[XHLOCK_CTX_NR];
#endif

#ifdef CONFIG_UBSAN
	unsigned int			in_ubsan;
#endif

	/* Journalling filesystem info: */
	void				*journal_info;

	/* Stacked block device info: */
	struct bio_list			*bio_list;

#ifdef CONFIG_BLOCK
	/* Stack plugging: */
	struct blk_plug			*plug;
#endif

	/* VM state: */
	struct reclaim_state		*reclaim_state;

	struct backing_dev_info		*backing_dev_info;

	struct io_context		*io_context;

	/* Ptrace state: */
	unsigned long			ptrace_message;
	siginfo_t			*last_siginfo;

	struct task_io_accounting	ioac;
#ifdef CONFIG_TASK_XACCT
	/* Accumulated RSS usage: */
	u64				acct_rss_mem1;
	/* Accumulated virtual memory usage: */
	u64				acct_vm_mem1;
	/* stime + utime since last update: */
	u64				acct_timexpd;
#endif
#ifdef CONFIG_CPUSETS
	/* Protected by ->alloc_lock: */
	nodemask_t			mems_allowed;
	/* Sequence number to catch updates: */
	seqcount_t			mems_allowed_seq;
	int				cpuset_mem_spread_rotor;
	int				cpuset_slab_spread_rotor;
#endif
#ifdef CONFIG_CGROUPS
	/* Control Group info protected by css_set_lock: */
	struct css_set __rcu		*cgroups;
	/* cg_list protected by css_set_lock and tsk->alloc_lock: */
	struct list_head		cg_list;
#endif
#ifdef CONFIG_INTEL_RDT
	u32				closid;
	u32				rmid;
#endif
#ifdef CONFIG_FUTEX
	struct robust_list_head __user	*robust_list;
#ifdef CONFIG_COMPAT
	struct compat_robust_list_head __user *compat_robust_list;
#endif
	struct list_head		pi_state_list;
	struct futex_pi_state		*pi_state_cache;
#endif
#ifdef CONFIG_PERF_EVENTS
	struct perf_event_context	*perf_event_ctxp[perf_nr_task_contexts];
	struct mutex			perf_event_mutex;
	struct list_head		perf_event_list;
#endif
#ifdef CONFIG_DEBUG_PREEMPT
	unsigned long			preempt_disable_ip;
#endif
#ifdef CONFIG_NUMA
	/* Protected by alloc_lock: */
	struct mempolicy		*mempolicy;
	short				il_prev;
	short				pref_node_fork;
#endif
#ifdef CONFIG_NUMA_BALANCING
	int				numa_scan_seq;
	unsigned int			numa_scan_period;
	unsigned int			numa_scan_period_max;
	int				numa_preferred_nid;
	unsigned long			numa_migrate_retry;
	/* Migration stamp: */
	u64				node_stamp;
	u64				last_task_numa_placement;
	u64				last_sum_exec_runtime;
	struct callback_head		numa_work;

	struct list_head		numa_entry;
	struct numa_group		*numa_group;

	/*
	 * numa_faults is an array split into four regions:
	 * faults_memory, faults_cpu, faults_memory_buffer, faults_cpu_buffer
	 * in this precise order.
	 *
	 * faults_memory: Exponential decaying average of faults on a per-node
	 * basis. Scheduling placement decisions are made based on these
	 * counts. The values remain static for the duration of a PTE scan.
	 * faults_cpu: Track the nodes the process was running on when a NUMA
	 * hinting fault was incurred.
	 * faults_memory_buffer and faults_cpu_buffer: Record faults per node
	 * during the current scan window. When the scan completes, the counts
	 * in faults_memory and faults_cpu decay and these values are copied.
	 */
	unsigned long			*numa_faults;
	unsigned long			total_numa_faults;

	/*
	 * numa_faults_locality tracks if faults recorded during the last
	 * scan window were remote/local or failed to migrate. The task scan
	 * period is adapted based on the locality of the faults with different
	 * weights depending on whether they were shared or private faults
	 */
	unsigned long			numa_faults_locality[3];

	unsigned long			numa_pages_migrated;
#endif /* CONFIG_NUMA_BALANCING */

	struct tlbflush_unmap_batch	tlb_ubc;

	struct rcu_head			rcu;

	/* Cache last used pipe for splice(): */
	struct pipe_inode_info		*splice_pipe;

	struct page_frag		task_frag;

#ifdef CONFIG_TASK_DELAY_ACCT
	struct task_delay_info		*delays;
#endif

#ifdef CONFIG_FAULT_INJECTION
	int				make_it_fail;
	unsigned int			fail_nth;
#endif
	/*
	 * When (nr_dirtied >= nr_dirtied_pause), it's time to call
	 * balance_dirty_pages() for a dirty throttling pause:
	 */
	int				nr_dirtied;
	int				nr_dirtied_pause;
	/* Start of a write-and-pause period: */
	unsigned long			dirty_paused_when;

#ifdef CONFIG_LATENCYTOP
	int				latency_record_count;
	struct latency_record		latency_record[LT_SAVECOUNT];
#endif
	/*
	 * Time slack values; these are used to round up poll() and
	 * select() etc timeout values. These are in nanoseconds.
	 */
	u64				timer_slack_ns;
	u64				default_timer_slack_ns;

#ifdef CONFIG_KASAN
	unsigned int			kasan_depth;
#endif

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	/* Index of current stored address in ret_stack: */
	int				curr_ret_stack;

	/* Stack of return addresses for return function tracing: */
	struct ftrace_ret_stack		*ret_stack;

	/* Timestamp for last schedule: */
	unsigned long long		ftrace_timestamp;

	/*
	 * Number of functions that haven't been traced
	 * because of depth overrun:
	 */
	atomic_t			trace_overrun;

	/* Pause tracing: */
	atomic_t			tracing_graph_pause;
#endif

#ifdef CONFIG_TRACING
	/* State flags for use by tracers: */
	unsigned long			trace;

	/* Bitmask and counter of trace recursion: */
	unsigned long			trace_recursion;
#endif /* CONFIG_TRACING */

#ifdef CONFIG_KCOV
	/* Coverage collection mode enabled for this task (0 if disabled): */
	enum kcov_mode			kcov_mode;

	/* Size of the kcov_area: */
	unsigned int			kcov_size;

	/* Buffer for coverage collection: */
	void				*kcov_area;

	/* KCOV descriptor wired with this task or NULL: */
	struct kcov			*kcov;
#endif

#ifdef CONFIG_MEMCG
	struct mem_cgroup		*memcg_in_oom;
	gfp_t				memcg_oom_gfp_mask;
	int				memcg_oom_order;

	/* Number of pages to reclaim on returning to userland: */
	unsigned int			memcg_nr_pages_over_high;
#endif

#ifdef CONFIG_UPROBES
	struct uprobe_task		*utask;
#endif
#if defined(CONFIG_BCACHE) || defined(CONFIG_BCACHE_MODULE)
	unsigned int			sequential_io;
	unsigned int			sequential_io_avg;
#endif
#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
	unsigned long			task_state_change;
#endif
	int				pagefault_disabled;
#ifdef CONFIG_MMU
	struct task_struct		*oom_reaper_list;
#endif
#ifdef CONFIG_VMAP_STACK
	struct vm_struct		*stack_vm_area;
#endif
#ifdef CONFIG_THREAD_INFO_IN_TASK
	/* A live task holds one reference: */
	atomic_t			stack_refcount;
#endif
#ifdef CONFIG_LIVEPATCH
	int				patch_state;
#endif
#ifdef CONFIG_SECURITY
	/* Used by LSM modules for access restriction: */
	void				*security;
#endif

	/*
	 * New fields for task_struct should be added above here, so that
	 * they are included in the randomized portion of task_struct.
	 */
	randomized_struct_fields_end

	/* CPU-specific state of this task: */
	struct thread_struct		thread;

	/*
	 * WARNING: on x86, 'thread_struct' contains a variable-sized
	 * structure.  It *MUST* be at the end of 'task_struct'.
	 *
	 * Do not put anything below here!
	 */
};

static inline struct pid *task_pid(struct task_struct *task)
{
	return task->pids[PIDTYPE_PID].pid;
}

static inline struct pid *task_tgid(struct task_struct *task)
{
	return task->group_leader->pids[PIDTYPE_PID].pid;
}

/*
 * Without tasklist or RCU lock it is not safe to dereference
 * the result of task_pgrp/task_session even if task == current,
 * we can race with another thread doing sys_setsid/sys_setpgid.
 */
static inline struct pid *task_pgrp(struct task_struct *task)
{
	return task->group_leader->pids[PIDTYPE_PGID].pid;
}

static inline struct pid *task_session(struct task_struct *task)
{
	return task->group_leader->pids[PIDTYPE_SID].pid;
}

/*
 * the helpers to get the task's different pids as they are seen
 * from various namespaces
 *
 * task_xid_nr()     : global id, i.e. the id seen from the init namespace;
 * task_xid_vnr()    : virtual id, i.e. the id seen from the pid namespace of
 *                     current.
 * task_xid_nr_ns()  : id seen from the ns specified;
 *
 * see also pid_nr() etc in include/linux/pid.h
 */
pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type, struct pid_namespace *ns);

static inline pid_t task_pid_nr(struct task_struct *tsk)
{
	return tsk->pid;
}

static inline pid_t task_pid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
{
	return __task_pid_nr_ns(tsk, PIDTYPE_PID, ns);
}

static inline pid_t task_pid_vnr(struct task_struct *tsk)
{
	return __task_pid_nr_ns(tsk, PIDTYPE_PID, NULL);
}
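
/*
 * Illustrative example, not part of the original header: for a task
 * inside a child PID namespace the two views usually differ:
 *
 *	pid_t global = task_pid_nr(tsk);	id in the init namespace
 *	pid_t local  = task_pid_vnr(tsk);	id inside tsk's namespace
 */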


static inline pid_t task_tgid_nr(struct task_struct *tsk)
{
	return tsk->tgid;
}

/**
 * pid_alive - check that a task structure is not stale
 * @p: Task structure to be checked.
 *
 * Test if a process is not yet dead (at most zombie state)
 * If pid_alive fails, then pointers within the task structure
 * can be stale and must not be dereferenced.
 *
 * Return: 1 if the process is alive. 0 otherwise.
 */
static inline int pid_alive(const struct task_struct *p)
{
	return p->pids[PIDTYPE_PID].pid != NULL;
}

static inline pid_t task_pgrp_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
{
	return __task_pid_nr_ns(tsk, PIDTYPE_PGID, ns);
}

static inline pid_t task_pgrp_vnr(struct task_struct *tsk)
{
	return __task_pid_nr_ns(tsk, PIDTYPE_PGID, NULL);
}


static inline pid_t task_session_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
{
	return __task_pid_nr_ns(tsk, PIDTYPE_SID, ns);
}

static inline pid_t task_session_vnr(struct task_struct *tsk)
{
	return __task_pid_nr_ns(tsk, PIDTYPE_SID, NULL);
}

static inline pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
{
	return __task_pid_nr_ns(tsk, __PIDTYPE_TGID, ns);
}

static inline pid_t task_tgid_vnr(struct task_struct *tsk)
{
	return __task_pid_nr_ns(tsk, __PIDTYPE_TGID, NULL);
}

static inline pid_t task_ppid_nr_ns(const struct task_struct *tsk, struct pid_namespace *ns)
{
	pid_t pid = 0;

	rcu_read_lock();
	if (pid_alive(tsk))
		pid = task_tgid_nr_ns(rcu_dereference(tsk->real_parent), ns);
	rcu_read_unlock();

	return pid;
}

static inline pid_t task_ppid_nr(const struct task_struct *tsk)
{
	return task_ppid_nr_ns(tsk, &init_pid_ns);
}

/* Obsolete, do not use: */
static inline pid_t task_pgrp_nr(struct task_struct *tsk)
{
	return task_pgrp_nr_ns(tsk, &init_pid_ns);
}

static inline unsigned int __get_task_state(struct task_struct *tsk)
{
	unsigned int tsk_state = READ_ONCE(tsk->state);
	unsigned int state = (tsk_state | tsk->exit_state) & TASK_REPORT;

	if (tsk_state == TASK_PARKED)
		state = TASK_INTERRUPTIBLE;

	return fls(state);
}

static inline char __task_state_to_char(unsigned int state)
{
	static const char state_char[] = "RSDTtXZ";

	BUILD_BUG_ON(1 + ilog2(TASK_REPORT) != sizeof(state_char) - 2);

	return state_char[state];
}

static inline char task_state_to_char(struct task_struct *tsk)
{
	return __task_state_to_char(__get_task_state(tsk));
}
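
/*
 * Illustrative example, not part of the original header: the returned
 * character is the state letter shown in /proc/<pid>/stat and by ps,
 * indexed from "RSDTtXZ" above:
 *
 *	char c = task_state_to_char(tsk);	e.g. 'S' while sleeping
 */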

/**
 * is_global_init - check if a task structure is init. Since init
 * is free to have sub-threads we need to check tgid.
 * @tsk: Task structure to be checked.
 *
 * Check if a task structure is the first user space task the kernel created.
 *
 * Return: 1 if the task structure is init. 0 otherwise.
 */
static inline int is_global_init(struct task_struct *tsk)
{
	return task_tgid_nr(tsk) == 1;
}

extern struct pid *cad_pid;

/*
 * Per process flags
 */
#define PF_IDLE			0x00000002	/* I am an IDLE thread */
#define PF_EXITING		0x00000004	/* Getting shut down */
#define PF_EXITPIDONE		0x00000008	/* PI exit done on shut down */
#define PF_VCPU			0x00000010	/* I'm a virtual CPU */
#define PF_WQ_WORKER		0x00000020	/* I'm a workqueue worker */
#define PF_FORKNOEXEC		0x00000040	/* Forked but didn't exec */
#define PF_MCE_PROCESS		0x00000080      /* Process policy on mce errors */
#define PF_SUPERPRIV		0x00000100	/* Used super-user privileges */
#define PF_DUMPCORE		0x00000200	/* Dumped core */
#define PF_SIGNALED		0x00000400	/* Killed by a signal */
#define PF_MEMALLOC		0x00000800	/* Allocating memory */
#define PF_NPROC_EXCEEDED	0x00001000	/* set_user() noticed that RLIMIT_NPROC was exceeded */
#define PF_USED_MATH		0x00002000	/* If unset the fpu must be initialized before use */
#define PF_USED_ASYNC		0x00004000	/* Used async_schedule*(), used by module init */
#define PF_NOFREEZE		0x00008000	/* This thread should not be frozen */
#define PF_FROZEN		0x00010000	/* Frozen for system suspend */
#define PF_KSWAPD		0x00020000	/* I am kswapd */
#define PF_MEMALLOC_NOFS	0x00040000	/* All allocation requests will inherit GFP_NOFS */
#define PF_MEMALLOC_NOIO	0x00080000	/* All allocation requests will inherit GFP_NOIO */
#define PF_LESS_THROTTLE	0x00100000	/* Throttle me less: I clean memory */
#define PF_KTHREAD		0x00200000	/* I am a kernel thread */
#define PF_RANDOMIZE		0x00400000	/* Randomize virtual address space */
#define PF_SWAPWRITE		0x00800000	/* Allowed to write to swap */
#define PF_NO_SETAFFINITY	0x04000000	/* Userland is not allowed to meddle with cpus_allowed */
#define PF_MCE_EARLY		0x08000000      /* Early kill for mce process policy */
#define PF_MUTEX_TESTER		0x20000000	/* Thread belongs to the rt mutex tester */
#define PF_FREEZER_SKIP		0x40000000	/* Freezer should not count it as freezable */
#define PF_SUSPEND_TASK		0x80000000      /* This thread called freeze_processes() and should not be frozen */

/*
 * Only the _current_ task can read/write to tsk->flags, but other
 * tasks can access tsk->flags in readonly mode for example
 * with tsk_used_math (like during threaded core dumping).
 * There is however an exception to this rule during ptrace
 * or during fork: the ptracer task is allowed to write to the
 * child->flags of its traced child (same goes for fork, the parent
 * can write to the child->flags), because we're guaranteed the
 * child is not running and in turn not changing child->flags
 * at the same time the parent does it.
 */
#define clear_stopped_child_used_math(child)	do { (child)->flags &= ~PF_USED_MATH; } while (0)
#define set_stopped_child_used_math(child)	do { (child)->flags |= PF_USED_MATH; } while (0)
#define clear_used_math()			clear_stopped_child_used_math(current)
#define set_used_math()				set_stopped_child_used_math(current)

#define conditional_stopped_child_used_math(condition, child) \
	do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= (condition) ? PF_USED_MATH : 0; } while (0)

#define conditional_used_math(condition)	conditional_stopped_child_used_math(condition, current)

#define copy_to_stopped_child_used_math(child) \
	do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= current->flags & PF_USED_MATH; } while (0)

/* NOTE: this will return 0 or PF_USED_MATH, it will never return 1 */
#define tsk_used_math(p)			((p)->flags & PF_USED_MATH)
#define used_math()				tsk_used_math(current)

static inline bool is_percpu_thread(void)
{
#ifdef CONFIG_SMP
	return (current->flags & PF_NO_SETAFFINITY) &&
		(current->nr_cpus_allowed  == 1);
#else
	return true;
#endif
}

/* Per-process atomic flags. */
#define PFA_NO_NEW_PRIVS		0	/* May not gain new privileges. */
#define PFA_SPREAD_PAGE			1	/* Spread page cache over cpuset */
#define PFA_SPREAD_SLAB			2	/* Spread some slab caches over cpuset */


#define TASK_PFA_TEST(name, func)					\
	static inline bool task_##func(struct task_struct *p)		\
	{ return test_bit(PFA_##name, &p->atomic_flags); }

#define TASK_PFA_SET(name, func)					\
	static inline void task_set_##func(struct task_struct *p)	\
	{ set_bit(PFA_##name, &p->atomic_flags); }

#define TASK_PFA_CLEAR(name, func)					\
	static inline void task_clear_##func(struct task_struct *p)	\
	{ clear_bit(PFA_##name, &p->atomic_flags); }

TASK_PFA_TEST(NO_NEW_PRIVS, no_new_privs)
TASK_PFA_SET(NO_NEW_PRIVS, no_new_privs)

TASK_PFA_TEST(SPREAD_PAGE, spread_page)
TASK_PFA_SET(SPREAD_PAGE, spread_page)
TASK_PFA_CLEAR(SPREAD_PAGE, spread_page)

TASK_PFA_TEST(SPREAD_SLAB, spread_slab)
TASK_PFA_SET(SPREAD_SLAB, spread_slab)
TASK_PFA_CLEAR(SPREAD_SLAB, spread_slab)
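
/*
 * Illustrative expansion, not part of the original header:
 * TASK_PFA_TEST(NO_NEW_PRIVS, no_new_privs) above generates
 *
 *	static inline bool task_no_new_privs(struct task_struct *p)
 *	{ return test_bit(PFA_NO_NEW_PRIVS, &p->atomic_flags); }
 *
 * Note that NO_NEW_PRIVS deliberately has no TASK_PFA_CLEAR() instance:
 * once set, the flag is never cleared.
 */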

static inline void
current_restore_flags(unsigned long orig_flags, unsigned long flags)
{
	current->flags &= ~flags;
	current->flags |= orig_flags & flags;
}
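
/*
 * Illustrative sketch, not part of the original header: save, set and
 * later restore a PF_* flag around a sensitive region (this is the
 * pattern behind helpers such as memalloc_nofs_save()/restore()):
 *
 *	unsigned long pflags = current->flags;
 *
 *	current->flags |= PF_MEMALLOC_NOFS;
 *	(... allocations here will not recurse into filesystem reclaim ...)
 *	current_restore_flags(pflags, PF_MEMALLOC_NOFS);
 */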

extern int cpuset_cpumask_can_shrink(const struct cpumask *cur, const struct cpumask *trial);
extern int task_can_attach(struct task_struct *p, const struct cpumask *cs_cpus_allowed);
#ifdef CONFIG_SMP
extern void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask);
extern int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask);
#else
static inline void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
{
}
static inline int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
{
	if (!cpumask_test_cpu(0, new_mask))
		return -EINVAL;
	return 0;
}
#endif

#ifndef cpu_relax_yield
#define cpu_relax_yield() cpu_relax()
#endif

extern int yield_to(struct task_struct *p, bool preempt);
extern void set_user_nice(struct task_struct *p, long nice);
extern int task_prio(const struct task_struct *p);

/**
 * task_nice - return the nice value of a given task.
 * @p: the task in question.
 *
 * Return: The nice value [ -20 ... 0 ... 19 ].
 */
static inline int task_nice(const struct task_struct *p)
{
	return PRIO_TO_NICE((p)->static_prio);
}

extern int can_nice(const struct task_struct *p, const int nice);
extern int task_curr(const struct task_struct *p);
extern int idle_cpu(int cpu);
extern int sched_setscheduler(struct task_struct *, int, const struct sched_param *);
extern int sched_setscheduler_nocheck(struct task_struct *, int, const struct sched_param *);
extern int sched_setattr(struct task_struct *, const struct sched_attr *);
extern struct task_struct *idle_task(int cpu);

/**
 * is_idle_task - is the specified task an idle task?
 * @p: the task in question.
 *
 * Return: 1 if @p is an idle task. 0 otherwise.
 */
static inline bool is_idle_task(const struct task_struct *p)
{
	return !!(p->flags & PF_IDLE);
}

extern struct task_struct *curr_task(int cpu);
extern void ia64_set_curr_task(int cpu, struct task_struct *p);

void yield(void);

union thread_union {
#ifndef CONFIG_THREAD_INFO_IN_TASK
	struct thread_info thread_info;
#endif
	unsigned long stack[THREAD_SIZE/sizeof(long)];
};

#ifdef CONFIG_THREAD_INFO_IN_TASK
static inline struct thread_info *task_thread_info(struct task_struct *task)
{
	return &task->thread_info;
}
#elif !defined(__HAVE_THREAD_FUNCTIONS)
# define task_thread_info(task)	((struct thread_info *)(task)->stack)
#endif

/*
 * find a task by one of its numerical ids
 *
 * find_task_by_pid_ns():
 *      finds a task by its pid in the specified namespace
 * find_task_by_vpid():
 *      finds a task by its virtual pid
 *
 * see also find_vpid() etc in include/linux/pid.h
 */

extern struct task_struct *find_task_by_vpid(pid_t nr);
extern struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns);

extern int wake_up_state(struct task_struct *tsk, unsigned int state);
extern int wake_up_process(struct task_struct *tsk);
extern void wake_up_new_task(struct task_struct *tsk);

#ifdef CONFIG_SMP
extern void kick_process(struct task_struct *tsk);
#else
static inline void kick_process(struct task_struct *tsk) { }
#endif

extern void __set_task_comm(struct task_struct *tsk, const char *from, bool exec);

static inline void set_task_comm(struct task_struct *tsk, const char *from)
{
	__set_task_comm(tsk, from, false);
}

extern char *get_task_comm(char *to, struct task_struct *tsk);
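
/*
 * Illustrative sketch, not part of the original header ("my-worker" is
 * a made-up name):
 *
 *	char comm[TASK_COMM_LEN];
 *
 *	set_task_comm(current, "my-worker");
 *	get_task_comm(comm, current);		(copies under task_lock())
 */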

#ifdef CONFIG_SMP
void scheduler_ipi(void);
extern unsigned long wait_task_inactive(struct task_struct *, long match_state);
#else
static inline void scheduler_ipi(void) { }
static inline unsigned long wait_task_inactive(struct task_struct *p, long match_state)
{
	return 1;
}
#endif

/*
 * Set thread flags in other task's structures.
 * See asm/thread_info.h for TIF_xxxx flags available:
 */
static inline void set_tsk_thread_flag(struct task_struct *tsk, int flag)
{
	set_ti_thread_flag(task_thread_info(tsk), flag);
}

static inline void clear_tsk_thread_flag(struct task_struct *tsk, int flag)
{
	clear_ti_thread_flag(task_thread_info(tsk), flag);
}

static inline int test_and_set_tsk_thread_flag(struct task_struct *tsk, int flag)
{
	return test_and_set_ti_thread_flag(task_thread_info(tsk), flag);
}

static inline int test_and_clear_tsk_thread_flag(struct task_struct *tsk, int flag)
{
	return test_and_clear_ti_thread_flag(task_thread_info(tsk), flag);
}

static inline int test_tsk_thread_flag(struct task_struct *tsk, int flag)
{
	return test_ti_thread_flag(task_thread_info(tsk), flag);
}

static inline void set_tsk_need_resched(struct task_struct *tsk)
{
	set_tsk_thread_flag(tsk,TIF_NEED_RESCHED);
}

static inline void clear_tsk_need_resched(struct task_struct *tsk)
{
	clear_tsk_thread_flag(tsk,TIF_NEED_RESCHED);
}

static inline int test_tsk_need_resched(struct task_struct *tsk)
{
	return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED));
}

/*
 * cond_resched() and cond_resched_lock(): latency reduction via
 * explicit rescheduling in places that are safe. The return
 * value indicates whether a reschedule was done in fact.
 * cond_resched_lock() will drop the spinlock before scheduling,
 * cond_resched_softirq() will enable bhs before scheduling.
 */
#ifndef CONFIG_PREEMPT
extern int _cond_resched(void);
#else
static inline int _cond_resched(void) { return 0; }
#endif

#define cond_resched() ({			\
	___might_sleep(__FILE__, __LINE__, 0);	\
	_cond_resched();			\
})

extern int __cond_resched_lock(spinlock_t *lock);

#define cond_resched_lock(lock) ({				\
	___might_sleep(__FILE__, __LINE__, PREEMPT_LOCK_OFFSET);\
	__cond_resched_lock(lock);				\
})

extern int __cond_resched_softirq(void);

#define cond_resched_softirq() ({					\
	___might_sleep(__FILE__, __LINE__, SOFTIRQ_DISABLE_OFFSET);	\
	__cond_resched_softirq();					\
})
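
/*
 * Illustrative sketch, not part of the original header: a long-running
 * loop in process context stays latency-friendly by offering to
 * reschedule on each iteration (process_item() is a made-up helper):
 *
 *	for (i = 0; i < nr_items; i++) {
 *		process_item(i);
 *		cond_resched();
 *	}
 */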

static inline void cond_resched_rcu(void)
{
#if defined(CONFIG_DEBUG_ATOMIC_SLEEP) || !defined(CONFIG_PREEMPT_RCU)
	rcu_read_unlock();
	cond_resched();
	rcu_read_lock();
#endif
}

L
 * Does a critical section need to be broken due to another
N
 * but a general need for low latency)
L
N
L
N
	return spin_is_contended(lock);
#else
L
N
L

1610 1611 1612 1613 1614
static __always_inline bool need_resched(void)
{
	return unlikely(tif_need_resched());
}

/*
 * Wrappers for p->thread_info->cpu access. No-op on UP.
 */
#ifdef CONFIG_SMP

static inline unsigned int task_cpu(const struct task_struct *p)
{
#ifdef CONFIG_THREAD_INFO_IN_TASK
	return p->cpu;
#else
	return task_thread_info(p)->cpu;
#endif
}

extern void set_task_cpu(struct task_struct *p, unsigned int cpu);

#else

static inline unsigned int task_cpu(const struct task_struct *p)
{
	return 0;
}

static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
{
}

#endif /* CONFIG_SMP */

/*
 * In order to reduce various lock holder preemption latencies provide an
 * interface to see if a vCPU is currently running or not.
 *
 * This allows us to terminate optimistic spin loops and block, analogous to
 * the native optimistic spin heuristic of testing if the lock owner task is
 * running or not.
 */
#ifndef vcpu_is_preempted
# define vcpu_is_preempted(cpu)	false
#endif
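
/*
 * Illustrative sketch, not part of the original header ('lock' and
 * 'owner' are stand-ins): an optimistic spin loop can stop spinning as
 * soon as the lock owner's vCPU has been preempted by the hypervisor:
 *
 *	while (READ_ONCE(lock->owner) == owner) {
 *		if (need_resched() || vcpu_is_preempted(task_cpu(owner)))
 *			break;
 *		cpu_relax();
 *	}
 */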

extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask);
extern long sched_getaffinity(pid_t pid, struct cpumask *mask);

#ifndef TASK_SIZE_OF
#define TASK_SIZE_OF(tsk)	TASK_SIZE
#endif

#endif