#ifndef _LINUX_SCHED_H
#define _LINUX_SCHED_H

/*
 * Define 'struct task_struct' and provide the main scheduler
 * APIs (schedule(), wakeup variants, etc.)
 */

#include <uapi/linux/sched.h>

#include <asm/current.h>

#include <linux/pid.h>
#include <linux/sem.h>
#include <linux/shm.h>
#include <linux/kcov.h>
#include <linux/mutex.h>
#include <linux/plist.h>
#include <linux/hrtimer.h>
#include <linux/seccomp.h>
#include <linux/nodemask.h>
#include <linux/rcupdate.h>
#include <linux/resource.h>
#include <linux/latencytop.h>
#include <linux/sched/prio.h>
#include <linux/signal_types.h>
#include <linux/mm_types_task.h>
#include <linux/task_io_accounting.h>

/* task_struct member predeclarations (sorted alphabetically): */
struct audit_context;
struct backing_dev_info;
struct bio_list;
struct blk_plug;
struct cfs_rq;
struct fs_struct;
struct futex_pi_state;
struct io_context;
struct mempolicy;
struct nameidata;
struct nsproxy;
struct perf_event_context;
struct pid_namespace;
struct pipe_inode_info;
struct rcu_node;
struct reclaim_state;
struct robust_list_head;
struct sched_attr;
struct sched_param;
struct seq_file;
struct sighand_struct;
struct signal_struct;
struct task_delay_info;
struct task_group;

/*
 * Task state bitmask. NOTE! These bits are also
 * encoded in fs/proc/array.c: get_task_state().
 *
 * We have two separate sets of flags: task->state
 * is about runnability, while task->exit_state is
 * about the task exiting. Confusing, but this way
 * modifying one set can't modify the other one by
 * mistake.
 */

/* Used in tsk->state: */
#define TASK_RUNNING			0x0000
#define TASK_INTERRUPTIBLE		0x0001
#define TASK_UNINTERRUPTIBLE		0x0002
#define __TASK_STOPPED			0x0004
#define __TASK_TRACED			0x0008
/* Used in tsk->exit_state: */
#define EXIT_DEAD			0x0010
#define EXIT_ZOMBIE			0x0020
#define EXIT_TRACE			(EXIT_ZOMBIE | EXIT_DEAD)
/* Used in tsk->state again: */
#define TASK_PARKED			0x0040
#define TASK_DEAD			0x0080
#define TASK_WAKEKILL			0x0100
#define TASK_WAKING			0x0200
#define TASK_NOLOAD			0x0400
#define TASK_NEW			0x0800
#define TASK_STATE_MAX			0x1000

/* Convenience macros for the sake of set_current_state(): */
#define TASK_KILLABLE			(TASK_WAKEKILL | TASK_UNINTERRUPTIBLE)
#define TASK_STOPPED			(TASK_WAKEKILL | __TASK_STOPPED)
#define TASK_TRACED			(TASK_WAKEKILL | __TASK_TRACED)

#define TASK_IDLE			(TASK_UNINTERRUPTIBLE | TASK_NOLOAD)

/* Convenience macros for the sake of wake_up(): */
#define TASK_NORMAL			(TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE)
#define TASK_ALL			(TASK_NORMAL | __TASK_STOPPED | __TASK_TRACED)

/* get_task_state(): */
#define TASK_REPORT			(TASK_RUNNING | TASK_INTERRUPTIBLE | \
					 TASK_UNINTERRUPTIBLE | __TASK_STOPPED | \
					 __TASK_TRACED | EXIT_DEAD | EXIT_ZOMBIE | \
					 TASK_PARKED)

#define task_is_traced(task)		((task->state & __TASK_TRACED) != 0)

#define task_is_stopped(task)		((task->state & __TASK_STOPPED) != 0)

#define task_is_stopped_or_traced(task)	((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0)

#define task_contributes_to_load(task)	((task->state & TASK_UNINTERRUPTIBLE) != 0 && \
					 (task->flags & PF_FROZEN) == 0 && \
					 (task->state & TASK_NOLOAD) == 0)
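
/*
 * Illustrative sketch (not part of the original header): a "killable"
 * wait built from the composed states above.  TASK_KILLABLE sleeps like
 * TASK_UNINTERRUPTIBLE but is still woken by fatal signals:
 *
 *	for (;;) {
 *		set_current_state(TASK_KILLABLE);
 *		if (done)
 *			break;
 *		if (fatal_signal_pending(current)) {
 *			__set_current_state(TASK_RUNNING);
 *			return -ERESTARTSYS;
 *		}
 *		schedule();
 *	}
 *	__set_current_state(TASK_RUNNING);
 *
 * 'done' stands in for whatever condition the caller is waiting on.
 */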

#ifdef CONFIG_DEBUG_ATOMIC_SLEEP

#define __set_current_state(state_value)			\
	do {							\
		current->task_state_change = _THIS_IP_;		\
		current->state = (state_value);			\
	} while (0)
#define set_current_state(state_value)				\
	do {							\
		current->task_state_change = _THIS_IP_;		\
		smp_store_mb(current->state, (state_value));	\
	} while (0)

#else
/*
 * set_current_state() includes a barrier so that the write of current->state
 * is correctly serialised wrt the caller's subsequent test of whether to
 * actually sleep:
 *
 *   for (;;) {
 *	set_current_state(TASK_UNINTERRUPTIBLE);
 *	if (!need_sleep)
 *		break;
 *
 *	schedule();
 *   }
 *   __set_current_state(TASK_RUNNING);
 *
 * If the caller does not need such serialisation (because, for instance, the
 * condition test and condition change and wakeup are under the same lock) then
 * use __set_current_state().
 *
 * The above is typically ordered against the wakeup, which does:
 *
 *	need_sleep = false;
 *	wake_up_state(p, TASK_UNINTERRUPTIBLE);
 *
 * Where wake_up_state() (and all other wakeup primitives) imply enough
 * barriers to order the store of the variable against wakeup.
 *
 * Wakeup will do: if (@state & p->state) p->state = TASK_RUNNING, that is,
 * once it observes the TASK_UNINTERRUPTIBLE store the waking CPU can issue a
 * TASK_RUNNING store which can collide with __set_current_state(TASK_RUNNING).
 *
 * This is obviously fine, since they both store the exact same value.
 *
 * Also see the comments of try_to_wake_up().
 */
#define __set_current_state(state_value) do { current->state = (state_value); } while (0)
#define set_current_state(state_value)	 smp_store_mb(current->state, (state_value))
#endif
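
/*
 * Illustrative pairing for the above (sketch, not from the original
 * header): the waking side stores the condition first and then issues
 * the wakeup, relying on the barriers described in the comment:
 *
 *	need_sleep = false;
 *	wake_up_process(p);
 *
 * wake_up_process(p) is equivalent to wake_up_state(p, TASK_NORMAL).
 */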

/* Task command name length: */
#define TASK_COMM_LEN			16

extern void scheduler_tick(void);

#define	MAX_SCHEDULE_TIMEOUT		LONG_MAX

extern long schedule_timeout(long timeout);
extern long schedule_timeout_interruptible(long timeout);
extern long schedule_timeout_killable(long timeout);
extern long schedule_timeout_uninterruptible(long timeout);
extern long schedule_timeout_idle(long timeout);
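
/*
 * Illustrative use (sketch): sleep for roughly 100ms, waking early if
 * a signal arrives.  msecs_to_jiffies() comes from <linux/jiffies.h>:
 *
 *	set_current_state(TASK_INTERRUPTIBLE);
 *	remaining = schedule_timeout(msecs_to_jiffies(100));
 *
 * A non-zero 'remaining' means the sleep ended early (here: a signal).
 * The schedule_timeout_*() variants above fold the state setting in.
 */
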
asmlinkage void schedule(void);
extern void schedule_preempt_disabled(void);

extern int __must_check io_schedule_prepare(void);
extern void io_schedule_finish(int token);
extern long io_schedule_timeout(long timeout);
extern void io_schedule(void);
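
/*
 * Illustrative use of the prepare/finish pair (sketch): code that may
 * block several times can account the whole region as I/O wait:
 *
 *	int token = io_schedule_prepare();
 *
 *	(one or more blocking calls, e.g. schedule() or mutex_lock())
 *
 *	io_schedule_finish(token);
 *
 * io_schedule() itself is the simple wrapper for a single block.
 */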

/**
 * struct prev_cputime - snapshot of system and user cputime
 * @utime: time spent in user mode
 * @stime: time spent in system mode
 * @lock: protects the above two fields
 *
 * Stores previous user/system time values such that we can guarantee
 * monotonicity.
 */
struct prev_cputime {
#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
	u64				utime;
	u64				stime;
	raw_spinlock_t			lock;
#endif
};

/**
 * struct task_cputime - collected CPU time counts
 * @utime:		time spent in user mode, in nanoseconds
 * @stime:		time spent in kernel mode, in nanoseconds
 * @sum_exec_runtime:	total time spent on the CPU, in nanoseconds
 *
 * This structure groups together three kinds of CPU time that are tracked for
 * threads and thread groups.  Most things considering CPU time want to group
 * these counts together and treat all three of them in parallel.
 */
struct task_cputime {
	u64				utime;
	u64				stime;
	unsigned long long		sum_exec_runtime;
};

/* Alternate field names when used on cache expirations: */
#define virt_exp			utime
#define prof_exp			stime
#define sched_exp			sum_exec_runtime

enum vtime_state {
	/* Task is sleeping or running in a CPU with VTIME inactive: */
	VTIME_INACTIVE = 0,
	/* Task runs in userspace in a CPU with VTIME active: */
	VTIME_USER,
	/* Task runs in kernelspace in a CPU with VTIME active: */
	VTIME_SYS,
};

struct vtime {
	seqcount_t		seqcount;
	unsigned long long	starttime;
	enum vtime_state	state;
	u64			utime;
	u64			stime;
	u64			gtime;
};

struct sched_info {
#ifdef CONFIG_SCHED_INFO
	/* Cumulative counters: */

	/* # of times we have run on this CPU: */
	unsigned long			pcount;

	/* Time spent waiting on a runqueue: */
	unsigned long long		run_delay;

	/* Timestamps: */

	/* When did we last run on a CPU? */
	unsigned long long		last_arrival;

	/* When were we last queued to run? */
	unsigned long long		last_queued;

#endif /* CONFIG_SCHED_INFO */
};

/*
 * Integer metrics need fixed point arithmetic, e.g., sched/fair
 * has a few: load, load_avg, util_avg, freq, and capacity.
 *
 * We define a basic fixed point arithmetic range, and then formalize
 * all these metrics based on that basic range.
 */
# define SCHED_FIXEDPOINT_SHIFT		10
# define SCHED_FIXEDPOINT_SCALE		(1L << SCHED_FIXEDPOINT_SHIFT)
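
/*
 * Worked example (illustrative): with SCHED_FIXEDPOINT_SHIFT == 10 the
 * real value 1.0 is represented as 1024, so 0.5 becomes 512, and a
 * product of two fixed-point values needs one renormalizing shift:
 *
 *	result = (a * b) >> SCHED_FIXEDPOINT_SHIFT;
 */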

struct load_weight {
	unsigned long			weight;
	u32				inv_weight;
};

/*
 * The load_avg/util_avg accumulates an infinite geometric series
 * (see __update_load_avg() in kernel/sched/fair.c).
 *
 * [load_avg definition]
 *
 *   load_avg = runnable% * scale_load_down(load)
 *
 * where runnable% is the time ratio that a sched_entity is runnable.
 * For cfs_rq, it is the aggregated load_avg of all runnable and
 * blocked sched_entities.
 *
 * load_avg may also take frequency scaling into account:
 *
 *   load_avg = runnable% * scale_load_down(load) * freq%
 *
 * where freq% is the CPU frequency normalized to the highest frequency.
 *
 * [util_avg definition]
 *
 *   util_avg = running% * SCHED_CAPACITY_SCALE
 *
 * where running% is the time ratio that a sched_entity is running on
 * a CPU. For cfs_rq, it is the aggregated util_avg of all runnable
 * and blocked sched_entities.
 *
 * util_avg may also factor frequency scaling and CPU capacity scaling:
 *
 *   util_avg = running% * SCHED_CAPACITY_SCALE * freq% * capacity%
 *
 * where freq% is the same as above, and capacity% is the CPU capacity
 * normalized to the greatest capacity (due to uarch differences, etc).
 *
 * N.B., the above ratios (runnable%, running%, freq%, and capacity%)
 * themselves are in the range of [0, 1]. To do fixed point arithmetic,
 * we therefore scale them to as large a range as necessary. This is for
 * example reflected by util_avg's SCHED_CAPACITY_SCALE.
 *
 * [Overflow issue]
 *
 * The 64-bit load_sum can have 4353082796 (=2^64/47742/88761) entities
 * with the highest load (=88761), always runnable on a single cfs_rq,
 * and should not overflow as the number already hits PID_MAX_LIMIT.
 *
 * For all other cases (including 32-bit kernels), struct load_weight's
 * weight will overflow first before we do, because:
 *
 *    Max(load_avg) <= Max(load.weight)
 *
 * Then it is the load_weight's responsibility to consider overflow
 * issues.
 */
struct sched_avg {
	u64				last_update_time;
	u64				load_sum;
	u64				runnable_load_sum;
	u32				util_sum;
	u32				period_contrib;
	unsigned long			load_avg;
	unsigned long			runnable_load_avg;
	unsigned long			util_avg;
};
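
/*
 * Worked example (illustrative): per the definitions above, a task that
 * is running 25% of the time, at full frequency and on the CPU with the
 * greatest capacity, settles at
 *
 *	util_avg = 0.25 * SCHED_CAPACITY_SCALE = 256
 *
 * assuming the usual SCHED_CAPACITY_SCALE of 1024.
 */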

struct sched_statistics {
#ifdef CONFIG_SCHEDSTATS
	u64				wait_start;
	u64				wait_max;
	u64				wait_count;
	u64				wait_sum;
	u64				iowait_count;
	u64				iowait_sum;

	u64				sleep_start;
	u64				sleep_max;
	s64				sum_sleep_runtime;

	u64				block_start;
	u64				block_max;
	u64				exec_max;
	u64				slice_max;

	u64				nr_migrations_cold;
	u64				nr_failed_migrations_affine;
	u64				nr_failed_migrations_running;
	u64				nr_failed_migrations_hot;
	u64				nr_forced_migrations;

	u64				nr_wakeups;
	u64				nr_wakeups_sync;
	u64				nr_wakeups_migrate;
	u64				nr_wakeups_local;
	u64				nr_wakeups_remote;
	u64				nr_wakeups_affine;
	u64				nr_wakeups_affine_attempts;
	u64				nr_wakeups_passive;
	u64				nr_wakeups_idle;
#endif
};

struct sched_entity {
	/* For load-balancing: */
	struct load_weight		load;
	unsigned long			runnable_weight;
	struct rb_node			run_node;
	struct list_head		group_node;
	unsigned int			on_rq;

	u64				exec_start;
	u64				sum_exec_runtime;
	u64				vruntime;
	u64				prev_sum_exec_runtime;

	u64				nr_migrations;

	struct sched_statistics		statistics;

#ifdef CONFIG_FAIR_GROUP_SCHED
	int				depth;
	struct sched_entity		*parent;
	/* rq on which this entity is (to be) queued: */
	struct cfs_rq			*cfs_rq;
	/* rq "owned" by this entity/group: */
	struct cfs_rq			*my_q;
#endif

#ifdef CONFIG_SMP
	/*
	 * Per entity load average tracking.
	 *
	 * Put into separate cache line so it does not
	 * collide with read-mostly values above.
	 */
	struct sched_avg		avg ____cacheline_aligned_in_smp;
#endif
};

struct sched_rt_entity {
	struct list_head		run_list;
	unsigned long			timeout;
	unsigned long			watchdog_stamp;
	unsigned int			time_slice;
	unsigned short			on_rq;
	unsigned short			on_list;

	struct sched_rt_entity		*back;
#ifdef CONFIG_RT_GROUP_SCHED
	struct sched_rt_entity		*parent;
	/* rq on which this entity is (to be) queued: */
	struct rt_rq			*rt_rq;
	/* rq "owned" by this entity/group: */
	struct rt_rq			*my_q;
#endif
} __randomize_layout;

struct sched_dl_entity {
	struct rb_node			rb_node;

	/*
	 * Original scheduling parameters. Copied here from sched_attr
	 * during sched_setattr(), they will remain the same until
	 * the next sched_setattr().
	 */
	u64				dl_runtime;	/* Maximum runtime for each instance	*/
	u64				dl_deadline;	/* Relative deadline of each instance	*/
	u64				dl_period;	/* Separation of two instances (period) */
	u64				dl_bw;		/* dl_runtime / dl_period		*/
	u64				dl_density;	/* dl_runtime / dl_deadline		*/

	/*
	 * Actual scheduling parameters. Initialized with the values above,
	 * they are continuously updated during task execution. Note that
	 * the remaining runtime could be < 0 in case we are in overrun.
	 */
	s64				runtime;	/* Remaining runtime for this instance	*/
	u64				deadline;	/* Absolute deadline for this instance	*/
	unsigned int			flags;		/* Specifying the scheduler behaviour	*/

	/*
	 * Some bool flags:
	 *
	 * @dl_throttled tells if we exhausted the runtime. If so, the
	 * task has to wait for a replenishment to be performed at the
	 * next firing of dl_timer.
	 *
	 * @dl_boosted tells if we are boosted due to deadline inheritance
	 * (DI). If so we are outside the bandwidth enforcement mechanism
	 * (but only until we exit the critical section);
	 *
	 * @dl_yielded tells if task gave up the CPU before consuming
	 * all its available runtime during the last job.
	 *
	 * @dl_non_contending tells if the task is inactive while still
	 * contributing to the active utilization. In other words, it
	 * indicates if the inactive timer has been armed and its handler
	 * has not been executed yet. This flag is useful to avoid race
	 * conditions between the inactive timer handler and the wakeup
	 * code.
	 */
	int				dl_throttled      : 1;
	int				dl_boosted        : 1;
	int				dl_yielded        : 1;
	int				dl_non_contending : 1;

	/*
	 * Bandwidth enforcement timer. Each -deadline task has its
	 * own bandwidth to be enforced, thus we need one timer per task.
	 */
	struct hrtimer			dl_timer;

	/*
	 * Inactive timer, responsible for decreasing the active utilization
	 * at the "0-lag time". When a -deadline task blocks, it contributes
	 * to GRUB's active utilization until the "0-lag time", hence a
	 * timer is needed to decrease the active utilization at the correct
	 * time.
	 */
	struct hrtimer			inactive_timer;
};
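
/*
 * Illustrative example (userspace sketch, not part of this header): the
 * dl_runtime/dl_deadline/dl_period fields above are filled in from a
 * sched_setattr(2) call, here a 10ms budget every 100ms (all values in
 * nanoseconds):
 *
 *	struct sched_attr attr = {
 *		.size		= sizeof(attr),
 *		.sched_policy	= SCHED_DEADLINE,
 *		.sched_runtime	=  10 * 1000 * 1000,
 *		.sched_deadline	= 100 * 1000 * 1000,
 *		.sched_period	= 100 * 1000 * 1000,
 *	};
 *
 *	sched_setattr(0, &attr, 0);
 */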

union rcu_special {
	struct {
		u8			blocked;
		u8			need_qs;
		u8			exp_need_qs;

		/* Otherwise the compiler can store garbage here: */
		u8			pad;
	} b; /* Bits. */
	u32 s; /* Set of bits. */
};

enum perf_event_task_context {
	perf_invalid_context = -1,
	perf_hw_context = 0,
	perf_sw_context,
	perf_nr_task_contexts,
};

struct wake_q_node {
	struct wake_q_node *next;
};

struct task_struct {
#ifdef CONFIG_THREAD_INFO_IN_TASK
	/*
	 * For reasons of header soup (see current_thread_info()), this
	 * must be the first element of task_struct.
	 */
	struct thread_info		thread_info;
#endif
	/* -1 unrunnable, 0 runnable, >0 stopped: */
	volatile long			state;

	/*
	 * This begins the randomizable portion of task_struct. Only
	 * scheduling-critical items should be added above here.
	 */
	randomized_struct_fields_start

	void				*stack;
	atomic_t			usage;
	/* Per task flags (PF_*), defined further below: */
	unsigned int			flags;
	unsigned int			ptrace;

#ifdef CONFIG_SMP
	struct llist_node		wake_entry;
	int				on_cpu;
#ifdef CONFIG_THREAD_INFO_IN_TASK
	/* Current CPU: */
	unsigned int			cpu;
#endif
	unsigned int			wakee_flips;
	unsigned long			wakee_flip_decay_ts;
	struct task_struct		*last_wakee;

	int				wake_cpu;
#endif
	int				on_rq;

	int				prio;
	int				static_prio;
	int				normal_prio;
	unsigned int			rt_priority;

	const struct sched_class	*sched_class;
	struct sched_entity		se;
	struct sched_rt_entity		rt;
#ifdef CONFIG_CGROUP_SCHED
	struct task_group		*sched_task_group;
#endif
	struct sched_dl_entity		dl;

#ifdef CONFIG_PREEMPT_NOTIFIERS
	/* List of struct preempt_notifier: */
	struct hlist_head		preempt_notifiers;
#endif

#ifdef CONFIG_BLK_DEV_IO_TRACE
	unsigned int			btrace_seq;
#endif

	unsigned int			policy;
	int				nr_cpus_allowed;
	cpumask_t			cpus_allowed;

#ifdef CONFIG_PREEMPT_RCU
	int				rcu_read_lock_nesting;
	union rcu_special		rcu_read_unlock_special;
	struct list_head		rcu_node_entry;
	struct rcu_node			*rcu_blocked_node;
#endif /* #ifdef CONFIG_PREEMPT_RCU */

#ifdef CONFIG_TASKS_RCU
	unsigned long			rcu_tasks_nvcsw;
	u8				rcu_tasks_holdout;
	u8				rcu_tasks_idx;
	int				rcu_tasks_idle_cpu;
	struct list_head		rcu_tasks_holdout_list;
#endif /* #ifdef CONFIG_TASKS_RCU */

	struct sched_info		sched_info;

	struct list_head		tasks;
#ifdef CONFIG_SMP
	struct plist_node		pushable_tasks;
	struct rb_node			pushable_dl_tasks;
#endif

	struct mm_struct		*mm;
	struct mm_struct		*active_mm;

	/* Per-thread vma caching: */
	struct vmacache			vmacache;

#ifdef SPLIT_RSS_COUNTING
	struct task_rss_stat		rss_stat;
#endif
	int				exit_state;
	int				exit_code;
	int				exit_signal;
	/* The signal sent when the parent dies: */
	int				pdeath_signal;
	/* JOBCTL_*, siglock protected: */
	unsigned long			jobctl;

	/* Used for emulating ABI behavior of previous Linux versions: */
	unsigned int			personality;

	/* Scheduler bits, serialized by scheduler locks: */
	unsigned			sched_reset_on_fork:1;
	unsigned			sched_contributes_to_load:1;
	unsigned			sched_migrated:1;
	unsigned			sched_remote_wakeup:1;
	/* Force alignment to the next boundary: */
	unsigned			:0;

	/* Unserialized, strictly 'current' */

	/* Bit to tell LSMs we're in execve(): */
	unsigned			in_execve:1;
	unsigned			in_iowait:1;
#ifndef TIF_RESTORE_SIGMASK
	unsigned			restore_sigmask:1;
#endif
#ifdef CONFIG_MEMCG
	unsigned			memcg_may_oom:1;
#ifndef CONFIG_SLOB
	unsigned			memcg_kmem_skip_account:1;
#endif
#endif
#ifdef CONFIG_COMPAT_BRK
	unsigned			brk_randomized:1;
#endif
#ifdef CONFIG_CGROUPS
	/* disallow userland-initiated cgroup migration */
	unsigned			no_cgroup_migration:1;
#endif

	unsigned long			atomic_flags; /* Flags requiring atomic access. */

	struct restart_block		restart_block;

	pid_t				pid;
	pid_t				tgid;

#ifdef CONFIG_CC_STACKPROTECTOR
	/* Canary value for the -fstack-protector GCC feature: */
	unsigned long			stack_canary;
#endif
	/*
	 * Pointers to the (original) parent process, youngest child, younger sibling,
	 * older sibling, respectively.  (p->father can be replaced with
	 * p->real_parent->pid)
	 */

	/* Real parent process: */
	struct task_struct __rcu	*real_parent;

	/* Recipient of SIGCHLD, wait4() reports: */
	struct task_struct __rcu	*parent;

	/*
	 * Children/sibling form the list of natural children:
	 */
	struct list_head		children;
	struct list_head		sibling;
	struct task_struct		*group_leader;

	/*
	 * 'ptraced' is the list of tasks this task is using ptrace() on.
	 *
	 * This includes both natural children and PTRACE_ATTACH targets.
	 * 'ptrace_entry' is this task's link on the p->parent->ptraced list.
	 */
	struct list_head		ptraced;
	struct list_head		ptrace_entry;

	/* PID/PID hash table linkage. */
	struct pid_link			pids[PIDTYPE_MAX];
	struct list_head		thread_group;
	struct list_head		thread_node;

	struct completion		*vfork_done;

	/* CLONE_CHILD_SETTID: */
	int __user			*set_child_tid;

	/* CLONE_CHILD_CLEARTID: */
	int __user			*clear_child_tid;

	u64				utime;
	u64				stime;
#ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
	u64				utimescaled;
	u64				stimescaled;
#endif
	u64				gtime;
	struct prev_cputime		prev_cputime;
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
	struct vtime			vtime;
#endif

#ifdef CONFIG_NO_HZ_FULL
	atomic_t			tick_dep_mask;
#endif
	/* Context switch counts: */
	unsigned long			nvcsw;
	unsigned long			nivcsw;

	/* Monotonic time in nsecs: */
	u64				start_time;

	/* Boot based time in nsecs: */
	u64				real_start_time;

	/* MM fault and swap info: this can arguably be seen as either mm-specific or thread-specific: */
	unsigned long			min_flt;
	unsigned long			maj_flt;

#ifdef CONFIG_POSIX_TIMERS
	struct task_cputime		cputime_expires;
	struct list_head		cpu_timers[3];
#endif

	/* Process credentials: */

	/* Tracer's credentials at attach: */
	const struct cred __rcu		*ptracer_cred;

	/* Objective and real subjective task credentials (COW): */
	const struct cred __rcu		*real_cred;

	/* Effective (overridable) subjective task credentials (COW): */
	const struct cred __rcu		*cred;

	/*
	 * executable name, excluding path.
	 *
	 * - normally initialized setup_new_exec()
	 * - access it with [gs]et_task_comm()
	 * - lock it with task_lock()
	 */
	char				comm[TASK_COMM_LEN];

	struct nameidata		*nameidata;

#ifdef CONFIG_SYSVIPC
	struct sysv_sem			sysvsem;
	struct sysv_shm			sysvshm;
#endif
#ifdef CONFIG_DETECT_HUNG_TASK
	unsigned long			last_switch_count;
#endif
	/* Filesystem information: */
	struct fs_struct		*fs;

	/* Open file information: */
	struct files_struct		*files;

	/* Namespaces: */
	struct nsproxy			*nsproxy;

	/* Signal handlers: */
	struct signal_struct		*signal;
	struct sighand_struct		*sighand;
	sigset_t			blocked;
	sigset_t			real_blocked;
	/* Restored if set_restore_sigmask() was used: */
	sigset_t			saved_sigmask;
	struct sigpending		pending;
	unsigned long			sas_ss_sp;
	size_t				sas_ss_size;
	unsigned int			sas_ss_flags;

	struct callback_head		*task_works;

	struct audit_context		*audit_context;
#ifdef CONFIG_AUDITSYSCALL
	kuid_t				loginuid;
	unsigned int			sessionid;
#endif
	struct seccomp			seccomp;

	/* Thread group tracking: */
	u32				parent_exec_id;
	u32				self_exec_id;

	/* Protection against (de-)allocation: mm, files, fs, tty, keyrings, mems_allowed, mempolicy: */
	spinlock_t			alloc_lock;

	/* Protection of the PI data structures: */
	raw_spinlock_t			pi_lock;

	struct wake_q_node		wake_q;

#ifdef CONFIG_RT_MUTEXES
	/* PI waiters blocked on a rt_mutex held by this task: */
	struct rb_root_cached		pi_waiters;
	/* Updated under owner's pi_lock and rq lock */
	struct task_struct		*pi_top_task;
	/* Deadlock detection and priority inheritance handling: */
	struct rt_mutex_waiter		*pi_blocked_on;
#endif

#ifdef CONFIG_DEBUG_MUTEXES
	/* Mutex deadlock detection: */
	struct mutex_waiter		*blocked_on;
#endif

#ifdef CONFIG_TRACE_IRQFLAGS
	unsigned int			irq_events;
	unsigned long			hardirq_enable_ip;
	unsigned long			hardirq_disable_ip;
	unsigned int			hardirq_enable_event;
	unsigned int			hardirq_disable_event;
	int				hardirqs_enabled;
	int				hardirq_context;
	unsigned long			softirq_disable_ip;
	unsigned long			softirq_enable_ip;
	unsigned int			softirq_disable_event;
	unsigned int			softirq_enable_event;
	int				softirqs_enabled;
	int				softirq_context;
#endif

#ifdef CONFIG_LOCKDEP
# define MAX_LOCK_DEPTH			48UL
	u64				curr_chain_key;
	int				lockdep_depth;
	unsigned int			lockdep_recursion;
	struct held_lock		held_locks[MAX_LOCK_DEPTH];
#endif

#ifdef CONFIG_LOCKDEP_CROSSRELEASE
#define MAX_XHLOCKS_NR 64UL
	struct hist_lock *xhlocks; /* Crossrelease history locks */
	unsigned int xhlock_idx;
	/* For restoring at history boundaries */
	unsigned int xhlock_idx_hist[XHLOCK_CTX_NR];
	unsigned int hist_id;
	/* For overwrite check at each context exit */
	unsigned int hist_id_save[XHLOCK_CTX_NR];
#endif

#ifdef CONFIG_UBSAN
	unsigned int			in_ubsan;
#endif

	/* Journalling filesystem info: */
	void				*journal_info;

	/* Stacked block device info: */
	struct bio_list			*bio_list;

#ifdef CONFIG_BLOCK
	/* Stack plugging: */
	struct blk_plug			*plug;
#endif

	/* VM state: */
	struct reclaim_state		*reclaim_state;

	struct backing_dev_info		*backing_dev_info;

	struct io_context		*io_context;

	/* Ptrace state: */
	unsigned long			ptrace_message;
	siginfo_t			*last_siginfo;

	struct task_io_accounting	ioac;
#ifdef CONFIG_TASK_XACCT
	/* Accumulated RSS usage: */
	u64				acct_rss_mem1;
	/* Accumulated virtual memory usage: */
	u64				acct_vm_mem1;
	/* stime + utime since last update: */
	u64				acct_timexpd;
#endif
#ifdef CONFIG_CPUSETS
	/* Protected by ->alloc_lock: */
	nodemask_t			mems_allowed;
	/* Sequence number to catch updates: */
	seqcount_t			mems_allowed_seq;
	int				cpuset_mem_spread_rotor;
	int				cpuset_slab_spread_rotor;
#endif
#ifdef CONFIG_CGROUPS
	/* Control Group info protected by css_set_lock: */
	struct css_set __rcu		*cgroups;
	/* cg_list protected by css_set_lock and tsk->alloc_lock: */
	struct list_head		cg_list;
#endif
#ifdef CONFIG_INTEL_RDT
	u32				closid;
	u32				rmid;
#endif
#ifdef CONFIG_FUTEX
	struct robust_list_head __user	*robust_list;
#ifdef CONFIG_COMPAT
	struct compat_robust_list_head __user *compat_robust_list;
#endif
	struct list_head		pi_state_list;
	struct futex_pi_state		*pi_state_cache;
#endif
#ifdef CONFIG_PERF_EVENTS
	struct perf_event_context	*perf_event_ctxp[perf_nr_task_contexts];
	struct mutex			perf_event_mutex;
	struct list_head		perf_event_list;
#endif
#ifdef CONFIG_DEBUG_PREEMPT
	unsigned long			preempt_disable_ip;
#endif
#ifdef CONFIG_NUMA
	/* Protected by alloc_lock: */
	struct mempolicy		*mempolicy;
	short				il_prev;
	short				pref_node_fork;
#endif
#ifdef CONFIG_NUMA_BALANCING
	int				numa_scan_seq;
	unsigned int			numa_scan_period;
	unsigned int			numa_scan_period_max;
	int				numa_preferred_nid;
	unsigned long			numa_migrate_retry;
	/* Migration stamp: */
	u64				node_stamp;
	u64				last_task_numa_placement;
	u64				last_sum_exec_runtime;
	struct callback_head		numa_work;

	struct list_head		numa_entry;
	struct numa_group		*numa_group;

	/*
	 * numa_faults is an array split into four regions:
	 * faults_memory, faults_cpu, faults_memory_buffer, faults_cpu_buffer
	 * in this precise order.
	 *
	 * faults_memory: Exponential decaying average of faults on a per-node
	 * basis. Scheduling placement decisions are made based on these
	 * counts. The values remain static for the duration of a PTE scan.
	 * faults_cpu: Track the nodes the process was running on when a NUMA
	 * hinting fault was incurred.
	 * faults_memory_buffer and faults_cpu_buffer: Record faults per node
	 * during the current scan window. When the scan completes, the counts
	 * in faults_memory and faults_cpu decay and these values are copied.
	 */
	unsigned long			*numa_faults;
	unsigned long			total_numa_faults;

	/*
	 * numa_faults_locality tracks if faults recorded during the last
	 * scan window were remote/local or failed to migrate. The task scan
	 * period is adapted based on the locality of the faults with different
	 * weights depending on whether they were shared or private faults.
	 */
	unsigned long			numa_faults_locality[3];

	unsigned long			numa_pages_migrated;
#endif /* CONFIG_NUMA_BALANCING */

	struct tlbflush_unmap_batch	tlb_ubc;

	struct rcu_head			rcu;

	/* Cache last used pipe for splice(): */
	struct pipe_inode_info		*splice_pipe;

	struct page_frag		task_frag;

#ifdef CONFIG_TASK_DELAY_ACCT
	struct task_delay_info		*delays;
#endif

#ifdef CONFIG_FAULT_INJECTION
	int				make_it_fail;
	unsigned int			fail_nth;
#endif
	/*
	 * When (nr_dirtied >= nr_dirtied_pause), it's time to call
	 * balance_dirty_pages() for a dirty throttling pause:
	 */
	int				nr_dirtied;
	int				nr_dirtied_pause;
	/* Start of a write-and-pause period: */
	unsigned long			dirty_paused_when;

#ifdef CONFIG_LATENCYTOP
	int				latency_record_count;
	struct latency_record		latency_record[LT_SAVECOUNT];
#endif
	/*
	 * Time slack values; these are used to round up poll() and
	 * select() etc timeout values. These are in nanoseconds.
	 */
	u64				timer_slack_ns;
	u64				default_timer_slack_ns;

#ifdef CONFIG_KASAN
	unsigned int			kasan_depth;
#endif

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	/* Index of current stored address in ret_stack: */
	int				curr_ret_stack;

	/* Stack of return addresses for return function tracing: */
	struct ftrace_ret_stack		*ret_stack;

	/* Timestamp for last schedule: */
	unsigned long long		ftrace_timestamp;

	/*
	 * Number of functions that haven't been traced
	 * because of depth overrun:
	 */
	atomic_t			trace_overrun;

	/* Pause tracing: */
	atomic_t			tracing_graph_pause;
#endif

#ifdef CONFIG_TRACING
	/* State flags for use by tracers: */
	unsigned long			trace;

	/* Bitmask and counter of trace recursion: */
	unsigned long			trace_recursion;
#endif /* CONFIG_TRACING */

#ifdef CONFIG_KCOV
	/* Coverage collection mode enabled for this task (0 if disabled): */
	enum kcov_mode			kcov_mode;

	/* Size of the kcov_area: */
	unsigned int			kcov_size;

	/* Buffer for coverage collection: */
	void				*kcov_area;

	/* KCOV descriptor wired with this task or NULL: */
	struct kcov			*kcov;
#endif

#ifdef CONFIG_MEMCG
	struct mem_cgroup		*memcg_in_oom;
	gfp_t				memcg_oom_gfp_mask;
	int				memcg_oom_order;

	/* Number of pages to reclaim on returning to userland: */
	unsigned int			memcg_nr_pages_over_high;
#endif

#ifdef CONFIG_UPROBES
	struct uprobe_task		*utask;
#endif
#if defined(CONFIG_BCACHE) || defined(CONFIG_BCACHE_MODULE)
	unsigned int			sequential_io;
	unsigned int			sequential_io_avg;
#endif
#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
	unsigned long			task_state_change;
#endif
	int				pagefault_disabled;
#ifdef CONFIG_MMU
	struct task_struct		*oom_reaper_list;
#endif
#ifdef CONFIG_VMAP_STACK
	struct vm_struct		*stack_vm_area;
#endif
#ifdef CONFIG_THREAD_INFO_IN_TASK
	/* A live task holds one reference: */
	atomic_t			stack_refcount;
#endif
#ifdef CONFIG_LIVEPATCH
	int				patch_state;
#endif
#ifdef CONFIG_SECURITY
	/* Used by LSM modules for access restriction: */
	void				*security;
#endif

	/*
	 * New fields for task_struct should be added above here, so that
	 * they are included in the randomized portion of task_struct.
	 */
	randomized_struct_fields_end

	/* CPU-specific state of this task: */
	struct thread_struct		thread;

	/*
	 * WARNING: on x86, 'thread_struct' contains a variable-sized
	 * structure.  It *MUST* be at the end of 'task_struct'.
	 *
	 * Do not put anything below here!
	 */
};

static inline struct pid *task_pid(struct task_struct *task)
{
	return task->pids[PIDTYPE_PID].pid;
}

static inline struct pid *task_tgid(struct task_struct *task)
{
	return task->group_leader->pids[PIDTYPE_PID].pid;
}

/*
 * Without tasklist or RCU lock it is not safe to dereference
 * the result of task_pgrp/task_session even if task == current;
 * we can race with another thread doing sys_setsid/sys_setpgid.
 */
static inline struct pid *task_pgrp(struct task_struct *task)
{
	return task->group_leader->pids[PIDTYPE_PGID].pid;
}

static inline struct pid *task_session(struct task_struct *task)
{
	return task->group_leader->pids[PIDTYPE_SID].pid;
}

/*
 * the helpers to get the task's different pids as they are seen
 * from various namespaces
 *
 * task_xid_nr()     : global id, i.e. the id seen from the init namespace;
 * task_xid_vnr()    : virtual id, i.e. the id seen from the pid namespace of
 *                     current.
 * task_xid_nr_ns()  : id seen from the ns specified;
 *
 * see also pid_nr() etc in include/linux/pid.h
 */
pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type, struct pid_namespace *ns);

static inline pid_t task_pid_nr(struct task_struct *tsk)
{
	return tsk->pid;
}

static inline pid_t task_pid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
{
	return __task_pid_nr_ns(tsk, PIDTYPE_PID, ns);
}

static inline pid_t task_pid_vnr(struct task_struct *tsk)
{
	return __task_pid_nr_ns(tsk, PIDTYPE_PID, NULL);
}


static inline pid_t task_tgid_nr(struct task_struct *tsk)
{
	return tsk->tgid;
}

/**
 * pid_alive - check that a task structure is not stale
 * @p: Task structure to be checked.
 *
 * Test if a process is not yet dead (at most zombie state).
 * If pid_alive fails, then pointers within the task structure
 * can be stale and must not be dereferenced.
 *
 * Return: 1 if the process is alive. 0 otherwise.
 */
static inline int pid_alive(const struct task_struct *p)
{
	return p->pids[PIDTYPE_PID].pid != NULL;
}

static inline pid_t task_pgrp_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
{
	return __task_pid_nr_ns(tsk, PIDTYPE_PGID, ns);
}

static inline pid_t task_pgrp_vnr(struct task_struct *tsk)
{
	return __task_pid_nr_ns(tsk, PIDTYPE_PGID, NULL);
}


static inline pid_t task_session_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
{
	return __task_pid_nr_ns(tsk, PIDTYPE_SID, ns);
}

static inline pid_t task_session_vnr(struct task_struct *tsk)
{
	return __task_pid_nr_ns(tsk, PIDTYPE_SID, NULL);
}

static inline pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
{
	return __task_pid_nr_ns(tsk, __PIDTYPE_TGID, ns);
}

static inline pid_t task_tgid_vnr(struct task_struct *tsk)
{
	return __task_pid_nr_ns(tsk, __PIDTYPE_TGID, NULL);
}

static inline pid_t task_ppid_nr_ns(const struct task_struct *tsk, struct pid_namespace *ns)
{
	pid_t pid = 0;

	rcu_read_lock();
	if (pid_alive(tsk))
		pid = task_tgid_nr_ns(rcu_dereference(tsk->real_parent), ns);
	rcu_read_unlock();

	return pid;
}

static inline pid_t task_ppid_nr(const struct task_struct *tsk)
{
	return task_ppid_nr_ns(tsk, &init_pid_ns);
}
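
/*
 * Illustrative use (sketch): the same task yields different numeric ids
 * depending on the observing pid namespace:
 *
 *	pid_t global  = task_pid_nr(tsk);
 *	pid_t in_ns   = task_pid_nr_ns(tsk, ns);
 *	pid_t virtual = task_pid_vnr(tsk);
 *
 * where the last is the id as seen from current's pid namespace.
 */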

/* Obsolete, do not use: */
static inline pid_t task_pgrp_nr(struct task_struct *tsk)
{
	return task_pgrp_nr_ns(tsk, &init_pid_ns);
}

#define TASK_REPORT_IDLE	(TASK_REPORT + 1)
#define TASK_REPORT_MAX		(TASK_REPORT_IDLE << 1)

static inline unsigned int task_state_index(struct task_struct *tsk)
{
	unsigned int tsk_state = READ_ONCE(tsk->state);
	unsigned int state = (tsk_state | tsk->exit_state) & TASK_REPORT;

	BUILD_BUG_ON_NOT_POWER_OF_2(TASK_REPORT_MAX);

	if (tsk_state == TASK_IDLE)
		state = TASK_REPORT_IDLE;

	return fls(state);
}

static inline char task_index_to_char(unsigned int state)
{
	static const char state_char[] = "RSDTtXZPI";

	BUILD_BUG_ON(1 + ilog2(TASK_REPORT_MAX) != sizeof(state_char) - 1);

	return state_char[state];
}

static inline char task_state_to_char(struct task_struct *tsk)
{
	return task_index_to_char(task_state_index(tsk));
}
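
/*
 * Illustrative use (sketch): this is how fs/proc derives the one-letter
 * state shown in /proc/<pid>/stat, e.g. 'S' for an interruptible sleep:
 *
 *	char state = task_state_to_char(tsk);
 */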

/**
 * is_global_init - check if a task structure is init. Since init
 * is free to have sub-threads we need to check tgid.
 * @tsk: Task structure to be checked.
 *
 * Check if a task structure is the first user space task the kernel created.
 *
 * Return: 1 if the task structure is init. 0 otherwise.
 */
static inline int is_global_init(struct task_struct *tsk)
{
	return task_tgid_nr(tsk) == 1;
}

extern struct pid *cad_pid;

/*
 * Per process flags
 */
#define PF_IDLE			0x00000002	/* I am an IDLE thread */
#define PF_EXITING		0x00000004	/* Getting shut down */
#define PF_EXITPIDONE		0x00000008	/* PI exit done on shut down */
#define PF_VCPU			0x00000010	/* I'm a virtual CPU */
#define PF_WQ_WORKER		0x00000020	/* I'm a workqueue worker */
#define PF_FORKNOEXEC		0x00000040	/* Forked but didn't exec */
#define PF_MCE_PROCESS		0x00000080      /* Process policy on mce errors */
#define PF_SUPERPRIV		0x00000100	/* Used super-user privileges */
#define PF_DUMPCORE		0x00000200	/* Dumped core */
#define PF_SIGNALED		0x00000400	/* Killed by a signal */
#define PF_MEMALLOC		0x00000800	/* Allocating memory */
#define PF_NPROC_EXCEEDED	0x00001000	/* set_user() noticed that RLIMIT_NPROC was exceeded */
#define PF_USED_MATH		0x00002000	/* If unset the fpu must be initialized before use */
#define PF_USED_ASYNC		0x00004000	/* Used async_schedule*(), used by module init */
#define PF_NOFREEZE		0x00008000	/* This thread should not be frozen */
#define PF_FROZEN		0x00010000	/* Frozen for system suspend */
#define PF_KSWAPD		0x00020000	/* I am kswapd */
#define PF_MEMALLOC_NOFS	0x00040000	/* All allocation requests will inherit GFP_NOFS */
#define PF_MEMALLOC_NOIO	0x00080000	/* All allocation requests will inherit GFP_NOIO */
#define PF_LESS_THROTTLE	0x00100000	/* Throttle me less: I clean memory */
#define PF_KTHREAD		0x00200000	/* I am a kernel thread */
#define PF_RANDOMIZE		0x00400000	/* Randomize virtual address space */
#define PF_SWAPWRITE		0x00800000	/* Allowed to write to swap */
#define PF_NO_SETAFFINITY	0x04000000	/* Userland is not allowed to meddle with cpus_allowed */
#define PF_MCE_EARLY		0x08000000      /* Early kill for mce process policy */
#define PF_MUTEX_TESTER		0x20000000	/* Thread belongs to the rt mutex tester */
#define PF_FREEZER_SKIP		0x40000000	/* Freezer should not count it as freezable */
#define PF_SUSPEND_TASK		0x80000000      /* This thread called freeze_processes() and should not be frozen */

/*
 * Only the _current_ task can read/write to tsk->flags, but other
 * tasks can access tsk->flags in readonly mode for example
 * with tsk_used_math (like during threaded core dumping).
 * There is however an exception to this rule during ptrace
 * or during fork: the ptracer task is allowed to write to the
 * child->flags of its traced child (same goes for fork, the parent
 * can write to the child->flags), because we're guaranteed the
 * child is not running and in turn not changing child->flags
 * at the same time the parent does it.
 */
#define clear_stopped_child_used_math(child)	do { (child)->flags &= ~PF_USED_MATH; } while (0)
#define set_stopped_child_used_math(child)	do { (child)->flags |= PF_USED_MATH; } while (0)
#define clear_used_math()			clear_stopped_child_used_math(current)
#define set_used_math()				set_stopped_child_used_math(current)

#define conditional_stopped_child_used_math(condition, child) \
	do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= (condition) ? PF_USED_MATH : 0; } while (0)

#define conditional_used_math(condition)	conditional_stopped_child_used_math(condition, current)

#define copy_to_stopped_child_used_math(child) \
	do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= current->flags & PF_USED_MATH; } while (0)

/* NOTE: this will return 0 or PF_USED_MATH, it will never return 1 */
#define tsk_used_math(p)			((p)->flags & PF_USED_MATH)
#define used_math()				tsk_used_math(current)

static inline bool is_percpu_thread(void)
{
#ifdef CONFIG_SMP
	return (current->flags & PF_NO_SETAFFINITY) &&
		(current->nr_cpus_allowed  == 1);
#else
	return true;
#endif
}

/* Per-process atomic flags. */
#define PFA_NO_NEW_PRIVS		0	/* May not gain new privileges. */
#define PFA_SPREAD_PAGE			1	/* Spread page cache over cpuset */
#define PFA_SPREAD_SLAB			2	/* Spread some slab caches over cpuset */

#define TASK_PFA_TEST(name, func)					\
	static inline bool task_##func(struct task_struct *p)		\
	{ return test_bit(PFA_##name, &p->atomic_flags); }

#define TASK_PFA_SET(name, func)					\
	static inline void task_set_##func(struct task_struct *p)	\
	{ set_bit(PFA_##name, &p->atomic_flags); }

#define TASK_PFA_CLEAR(name, func)					\
	static inline void task_clear_##func(struct task_struct *p)	\
	{ clear_bit(PFA_##name, &p->atomic_flags); }

TASK_PFA_TEST(NO_NEW_PRIVS, no_new_privs)
TASK_PFA_SET(NO_NEW_PRIVS, no_new_privs)

TASK_PFA_TEST(SPREAD_PAGE, spread_page)
TASK_PFA_SET(SPREAD_PAGE, spread_page)
TASK_PFA_CLEAR(SPREAD_PAGE, spread_page)

TASK_PFA_TEST(SPREAD_SLAB, spread_slab)
TASK_PFA_SET(SPREAD_SLAB, spread_slab)
TASK_PFA_CLEAR(SPREAD_SLAB, spread_slab)
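
/*
 * Illustrative expansion (sketch): TASK_PFA_TEST(NO_NEW_PRIVS,
 * no_new_privs) above generates
 *
 *	static inline bool task_no_new_privs(struct task_struct *p)
 *	{ return test_bit(PFA_NO_NEW_PRIVS, &p->atomic_flags); }
 *
 * so callers simply test task_no_new_privs(current).  Note there is
 * deliberately no TASK_PFA_CLEAR() for NO_NEW_PRIVS: once set, the
 * bit must stay set.
 */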

static inline void
current_restore_flags(unsigned long orig_flags, unsigned long flags)
{
	current->flags &= ~flags;
	current->flags |= orig_flags & flags;
}
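
/*
 * Illustrative use (sketch): save, set, and later restore one PF_ bit
 * without clobbering the rest of current->flags:
 *
 *	unsigned long pflags = current->flags;
 *
 *	current->flags |= PF_MEMALLOC_NOIO;
 *	(allocations in here inherit GFP_NOIO)
 *	current_restore_flags(pflags, PF_MEMALLOC_NOIO);
 */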

extern int cpuset_cpumask_can_shrink(const struct cpumask *cur, const struct cpumask *trial);
extern int task_can_attach(struct task_struct *p, const struct cpumask *cs_cpus_allowed);
#ifdef CONFIG_SMP
extern void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask);
extern int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask);
#else
static inline void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
{
}
static inline int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
{
	if (!cpumask_test_cpu(0, new_mask))
		return -EINVAL;
	return 0;
}
#endif

#ifndef cpu_relax_yield
#define cpu_relax_yield() cpu_relax()
#endif

extern int yield_to(struct task_struct *p, bool preempt);
extern void set_user_nice(struct task_struct *p, long nice);
extern int task_prio(const struct task_struct *p);

/**
 * task_nice - return the nice value of a given task.
 * @p: the task in question.
 *
 * Return: The nice value [ -20 ... 0 ... 19 ].
 */
static inline int task_nice(const struct task_struct *p)
{
	return PRIO_TO_NICE((p)->static_prio);
}

extern int can_nice(const struct task_struct *p, const int nice);
extern int task_curr(const struct task_struct *p);
extern int idle_cpu(int cpu);
extern int sched_setscheduler(struct task_struct *, int, const struct sched_param *);
extern int sched_setscheduler_nocheck(struct task_struct *, int, const struct sched_param *);
extern int sched_setattr(struct task_struct *, const struct sched_attr *);
extern struct task_struct *idle_task(int cpu);

/**
 * is_idle_task - is the specified task an idle task?
 * @p: the task in question.
 *
 * Return: 1 if @p is an idle task. 0 otherwise.
 */
static inline bool is_idle_task(const struct task_struct *p)
{
	return !!(p->flags & PF_IDLE);
}

extern struct task_struct *curr_task(int cpu);
extern void ia64_set_curr_task(int cpu, struct task_struct *p);

void yield(void);

union thread_union {
#ifndef CONFIG_THREAD_INFO_IN_TASK
	struct thread_info thread_info;
#endif
	unsigned long stack[THREAD_SIZE/sizeof(long)];
};

#ifdef CONFIG_THREAD_INFO_IN_TASK
static inline struct thread_info *task_thread_info(struct task_struct *task)
{
	return &task->thread_info;
}
#elif !defined(__HAVE_THREAD_FUNCTIONS)
# define task_thread_info(task)	((struct thread_info *)(task)->stack)
#endif

/*
 * find a task by one of its numerical ids
 *
 * find_task_by_pid_ns():
 *      finds a task by its pid in the specified namespace
 * find_task_by_vpid():
 *      finds a task by its virtual pid
 *
 * see also find_vpid() etc in include/linux/pid.h
 */

extern struct task_struct *find_task_by_vpid(pid_t nr);
extern struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns);

extern int wake_up_state(struct task_struct *tsk, unsigned int state);
extern int wake_up_process(struct task_struct *tsk);
extern void wake_up_new_task(struct task_struct *tsk);

#ifdef CONFIG_SMP
extern void kick_process(struct task_struct *tsk);
#else
static inline void kick_process(struct task_struct *tsk) { }
#endif

extern void __set_task_comm(struct task_struct *tsk, const char *from, bool exec);

static inline void set_task_comm(struct task_struct *tsk, const char *from)
{
	__set_task_comm(tsk, from, false);
}

extern char *get_task_comm(char *to, struct task_struct *tsk);
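
/*
 * Illustrative use (sketch): copy a task's short name into a local,
 * properly sized buffer; the helper takes the task lock internally:
 *
 *	char comm[TASK_COMM_LEN];
 *
 *	get_task_comm(comm, tsk);
 */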

#ifdef CONFIG_SMP
void scheduler_ipi(void);
extern unsigned long wait_task_inactive(struct task_struct *, long match_state);
#else
static inline void scheduler_ipi(void) { }
static inline unsigned long wait_task_inactive(struct task_struct *p, long match_state)
{
	return 1;
}
#endif

/*
 * Set thread flags in other task's structures.
 * See asm/thread_info.h for TIF_xxxx flags available:
 */
static inline void set_tsk_thread_flag(struct task_struct *tsk, int flag)
{
	set_ti_thread_flag(task_thread_info(tsk), flag);
}

static inline void clear_tsk_thread_flag(struct task_struct *tsk, int flag)
{
	clear_ti_thread_flag(task_thread_info(tsk), flag);
}

static inline int test_and_set_tsk_thread_flag(struct task_struct *tsk, int flag)
{
	return test_and_set_ti_thread_flag(task_thread_info(tsk), flag);
}

static inline int test_and_clear_tsk_thread_flag(struct task_struct *tsk, int flag)
{
	return test_and_clear_ti_thread_flag(task_thread_info(tsk), flag);
}

static inline int test_tsk_thread_flag(struct task_struct *tsk, int flag)
{
	return test_ti_thread_flag(task_thread_info(tsk), flag);
}

static inline void set_tsk_need_resched(struct task_struct *tsk)
{
	set_tsk_thread_flag(tsk, TIF_NEED_RESCHED);
}

static inline void clear_tsk_need_resched(struct task_struct *tsk)
{
	clear_tsk_thread_flag(tsk, TIF_NEED_RESCHED);
}

static inline int test_tsk_need_resched(struct task_struct *tsk)
{
	return unlikely(test_tsk_thread_flag(tsk, TIF_NEED_RESCHED));
}

/*
 * cond_resched() and cond_resched_lock(): latency reduction via
 * explicit rescheduling in places that are safe. The return
 * value indicates whether a reschedule was done in fact.
 * cond_resched_lock() will drop the spinlock before scheduling,
 * cond_resched_softirq() will enable bhs before scheduling.
 */
#ifndef CONFIG_PREEMPT
extern int _cond_resched(void);
#else
static inline int _cond_resched(void) { return 0; }
#endif

#define cond_resched() ({			\
	___might_sleep(__FILE__, __LINE__, 0);	\
	_cond_resched();			\
})

extern int __cond_resched_lock(spinlock_t *lock);

#define cond_resched_lock(lock) ({				\
	___might_sleep(__FILE__, __LINE__, PREEMPT_LOCK_OFFSET);\
	__cond_resched_lock(lock);				\
})

extern int __cond_resched_softirq(void);

#define cond_resched_softirq() ({					\
	___might_sleep(__FILE__, __LINE__, SOFTIRQ_DISABLE_OFFSET);	\
	__cond_resched_softirq();					\
})

static inline void cond_resched_rcu(void)
{
#if defined(CONFIG_DEBUG_ATOMIC_SLEEP) || !defined(CONFIG_PREEMPT_RCU)
	rcu_read_unlock();
	cond_resched();
	rcu_read_lock();
#endif
}
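
/*
 * Illustrative use (sketch): break up a long-running loop so that, on
 * non-preemptible kernels, other runnable tasks still get the CPU:
 *
 *	for (i = 0; i < nr_items; i++) {
 *		process(items[i]);
 *		cond_resched();
 *	}
 */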

/*
 * Does a critical section need to be broken due to another
 * task waiting? (technically does not depend on CONFIG_PREEMPT,
 * but a general need for low latency)
 */
static inline int spin_needbreak(spinlock_t *lock)
{
#ifdef CONFIG_PREEMPT
	return spin_is_contended(lock);
#else
	return 0;
#endif
}

static __always_inline bool need_resched(void)
{
	return unlikely(tif_need_resched());
}

/*
 * Wrappers for p->thread_info->cpu access. No-op on UP.
 */
#ifdef CONFIG_SMP

static inline unsigned int task_cpu(const struct task_struct *p)
{
#ifdef CONFIG_THREAD_INFO_IN_TASK
	return p->cpu;
#else
	return task_thread_info(p)->cpu;
#endif
}

extern void set_task_cpu(struct task_struct *p, unsigned int cpu);

#else

static inline unsigned int task_cpu(const struct task_struct *p)
{
	return 0;
}

static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
{
}

#endif /* CONFIG_SMP */

/*
 * In order to reduce various lock holder preemption latencies provide an
 * interface to see if a vCPU is currently running or not.
 *
 * This allows us to terminate optimistic spin loops and block, analogous to
 * the native optimistic spin heuristic of testing if the lock owner task is
 * running or not.
 */
#ifndef vcpu_is_preempted
# define vcpu_is_preempted(cpu)	false
#endif

extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask);
extern long sched_getaffinity(pid_t pid, struct cpumask *mask);

#ifndef TASK_SIZE_OF
#define TASK_SIZE_OF(tsk)	TASK_SIZE
#endif

#endif