/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SCHED_H
#define _LINUX_SCHED_H

/*
 * Define 'struct task_struct' and provide the main scheduler
 * APIs (schedule(), wakeup variants, etc.)
 */

#include <uapi/linux/sched.h>

#include <asm/current.h>

#include <linux/pid.h>
#include <linux/sem.h>
#include <linux/shm.h>
#include <linux/kcov.h>
#include <linux/mutex.h>
#include <linux/plist.h>
#include <linux/hrtimer.h>
#include <linux/seccomp.h>
#include <linux/nodemask.h>
#include <linux/rcupdate.h>
#include <linux/refcount.h>
#include <linux/resource.h>
#include <linux/latencytop.h>
#include <linux/sched/prio.h>
#include <linux/sched/types.h>
#include <linux/signal_types.h>
#include <linux/mm_types_task.h>
#include <linux/task_io_accounting.h>
#include <linux/posix-timers.h>
#include <linux/rseq.h>

/* task_struct member predeclarations (sorted alphabetically): */
struct audit_context;
struct backing_dev_info;
struct bio_list;
struct blk_plug;
struct capture_control;
struct cfs_rq;
struct fs_struct;
struct futex_pi_state;
struct io_context;
struct mempolicy;
struct nameidata;
struct nsproxy;
struct perf_event_context;
struct pid_namespace;
struct pipe_inode_info;
struct rcu_node;
struct reclaim_state;
struct robust_list_head;
struct root_domain;
struct rq;
struct sched_attr;
struct sched_param;
struct seq_file;
struct sighand_struct;
struct signal_struct;
struct task_delay_info;
struct task_group;

/*
 * Task state bitmask. NOTE! These bits are also
 * encoded in fs/proc/array.c: get_task_state().
 *
 * We have two separate sets of flags: task->state
 * is about runnability, while task->exit_state are
 * about the task exiting. Confusing, but this way
 * modifying one set can't modify the other one by
 * mistake.
 */

/* Used in tsk->state: */
#define TASK_RUNNING			0x0000
#define TASK_INTERRUPTIBLE		0x0001
#define TASK_UNINTERRUPTIBLE		0x0002
#define __TASK_STOPPED			0x0004
#define __TASK_TRACED			0x0008
/* Used in tsk->exit_state: */
#define EXIT_DEAD			0x0010
#define EXIT_ZOMBIE			0x0020
#define EXIT_TRACE			(EXIT_ZOMBIE | EXIT_DEAD)
/* Used in tsk->state again: */
#define TASK_PARKED			0x0040
#define TASK_DEAD			0x0080
#define TASK_WAKEKILL			0x0100
#define TASK_WAKING			0x0200
#define TASK_NOLOAD			0x0400
#define TASK_NEW			0x0800
#define TASK_STATE_MAX			0x1000

/* Convenience macros for the sake of set_current_state: */
#define TASK_KILLABLE			(TASK_WAKEKILL | TASK_UNINTERRUPTIBLE)
#define TASK_STOPPED			(TASK_WAKEKILL | __TASK_STOPPED)
#define TASK_TRACED			(TASK_WAKEKILL | __TASK_TRACED)

#define TASK_IDLE			(TASK_UNINTERRUPTIBLE | TASK_NOLOAD)

/* Convenience macros for the sake of wake_up(): */
#define TASK_NORMAL			(TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE)

/* get_task_state(): */
#define TASK_REPORT			(TASK_RUNNING | TASK_INTERRUPTIBLE | \
					 TASK_UNINTERRUPTIBLE | __TASK_STOPPED | \
					 __TASK_TRACED | EXIT_DEAD | EXIT_ZOMBIE | \
					 TASK_PARKED)

#define task_is_traced(task)		((task->state & __TASK_TRACED) != 0)

#define task_is_stopped(task)		((task->state & __TASK_STOPPED) != 0)

#define task_is_stopped_or_traced(task)	((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0)

#define task_contributes_to_load(task)	((task->state & TASK_UNINTERRUPTIBLE) != 0 && \
					 (task->flags & PF_FROZEN) == 0 && \
					 (task->state & TASK_NOLOAD) == 0)
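
/*
 * Informal example: the composite states above combine these bits, e.g.
 * TASK_KILLABLE sleeps uninterruptibly except for fatal signals:
 *
 *	set_current_state(TASK_KILLABLE);
 *	...
 *	schedule();
 *
 * Helpers such as wait_event_killable() wrap this pattern.
 */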

#ifdef CONFIG_DEBUG_ATOMIC_SLEEP

/*
 * Special states are those that do not use the normal wait-loop pattern. See
 * the comment with set_special_state().
 */
#define is_special_task_state(state)				\
	((state) & (__TASK_STOPPED | __TASK_TRACED | TASK_PARKED | TASK_DEAD))

#define __set_current_state(state_value)			\
	do {							\
		WARN_ON_ONCE(is_special_task_state(state_value));\
		current->task_state_change = _THIS_IP_;		\
		current->state = (state_value);			\
	} while (0)

#define set_current_state(state_value)				\
	do {							\
		WARN_ON_ONCE(is_special_task_state(state_value));\
		current->task_state_change = _THIS_IP_;		\
		smp_store_mb(current->state, (state_value));	\
	} while (0)

#define set_special_state(state_value)					\
	do {								\
		unsigned long flags; /* may shadow */			\
		WARN_ON_ONCE(!is_special_task_state(state_value));	\
		raw_spin_lock_irqsave(&current->pi_lock, flags);	\
		current->task_state_change = _THIS_IP_;			\
		current->state = (state_value);				\
		raw_spin_unlock_irqrestore(&current->pi_lock, flags);	\
	} while (0)
#else
/*
 * set_current_state() includes a barrier so that the write of current->state
 * is correctly serialised wrt the caller's subsequent test of whether to
 * actually sleep:
 *
 *   for (;;) {
 *	set_current_state(TASK_UNINTERRUPTIBLE);
 *	if (!need_sleep)
 *		break;
 *
 *	schedule();
 *   }
 *   __set_current_state(TASK_RUNNING);
 *
 * If the caller does not need such serialisation (because, for instance, the
 * condition test and condition change and wakeup are under the same lock) then
 * use __set_current_state().
 *
 * The above is typically ordered against the wakeup, which does:
 *
 *   need_sleep = false;
 *   wake_up_state(p, TASK_UNINTERRUPTIBLE);
 *
 * where wake_up_state() executes a full memory barrier before accessing the
 * task state.
 *
 * Wakeup will do: if (@state & p->state) p->state = TASK_RUNNING, that is,
 * once it observes the TASK_UNINTERRUPTIBLE store the waking CPU can issue a
 * TASK_RUNNING store which can collide with __set_current_state(TASK_RUNNING).
 *
 * However, with slightly different timing the wakeup TASK_RUNNING store can
 * also collide with the TASK_UNINTERRUPTIBLE store. Losing that store is not
 * a problem either because that will result in one extra go around the loop
 * and our @cond test will save the day.
 *
 * Also see the comments of try_to_wake_up().
 */
#define __set_current_state(state_value)				\
	current->state = (state_value)

#define set_current_state(state_value)					\
	smp_store_mb(current->state, (state_value))

/*
 * set_special_state() should be used for those states when the blocking task
 * can not use the regular condition based wait-loop. In that case we must
 * serialize against wakeups such that any possible in-flight TASK_RUNNING stores
 * will not collide with our state change.
 */
#define set_special_state(state_value)					\
	do {								\
		unsigned long flags; /* may shadow */			\
		raw_spin_lock_irqsave(&current->pi_lock, flags);	\
		current->state = (state_value);				\
		raw_spin_unlock_irqrestore(&current->pi_lock, flags);	\
	} while (0)
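
/*
 * For example, do_signal_stop() enters TASK_STOPPED and ptrace_stop()
 * enters TASK_TRACED via set_special_state(), since neither follows the
 * wait-loop pattern above.
 */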

#endif

/* Task command name length: */
#define TASK_COMM_LEN			16

extern void scheduler_tick(void);

#define	MAX_SCHEDULE_TIMEOUT		LONG_MAX

extern long schedule_timeout(long timeout);
extern long schedule_timeout_interruptible(long timeout);
extern long schedule_timeout_killable(long timeout);
extern long schedule_timeout_uninterruptible(long timeout);
extern long schedule_timeout_idle(long timeout);
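
/*
 * Informal usage sketch: plain schedule_timeout() expects the caller to
 * have set the task state first, e.g. for an interruptible ~100ms wait:
 *
 *	set_current_state(TASK_INTERRUPTIBLE);
 *	remaining = schedule_timeout(msecs_to_jiffies(100));
 *
 * The schedule_timeout_*() variants above set the state themselves.
 */
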
asmlinkage void schedule(void);
extern void schedule_preempt_disabled(void);
asmlinkage void preempt_schedule_irq(void);

extern int __must_check io_schedule_prepare(void);
extern void io_schedule_finish(int token);
extern long io_schedule_timeout(long timeout);
extern void io_schedule(void);

/**
 * struct prev_cputime - snapshot of system and user cputime
 * @utime: time spent in user mode
 * @stime: time spent in system mode
 * @lock: protects the above two fields
 *
 * Stores previous user/system time values such that we can guarantee
 * monotonicity.
 */
struct prev_cputime {
#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
	u64				utime;
	u64				stime;
	raw_spinlock_t			lock;
#endif
};

enum vtime_state {
	/* Task is sleeping or running in a CPU with VTIME inactive: */
	VTIME_INACTIVE = 0,
	/* Task is idle */
	VTIME_IDLE,
	/* Task runs in kernelspace in a CPU with VTIME active: */
	VTIME_SYS,
	/* Task runs in userspace in a CPU with VTIME active: */
	VTIME_USER,
	/* Task runs as a guest in a CPU with VTIME active: */
	VTIME_GUEST,
};

struct vtime {
	seqcount_t		seqcount;
	unsigned long long	starttime;
	enum vtime_state	state;
	unsigned int		cpu;
	u64			utime;
	u64			stime;
	u64			gtime;
};

/*
 * Utilization clamp constraints.
 * @UCLAMP_MIN:	Minimum utilization
 * @UCLAMP_MAX:	Maximum utilization
 * @UCLAMP_CNT:	Utilization clamp constraints count
 */
enum uclamp_id {
	UCLAMP_MIN = 0,
	UCLAMP_MAX,
	UCLAMP_CNT
};

#ifdef CONFIG_SMP
extern struct root_domain def_root_domain;
extern struct mutex sched_domains_mutex;
#endif

struct sched_info {
#ifdef CONFIG_SCHED_INFO
	/* Cumulative counters: */

	/* # of times we have run on this CPU: */
	unsigned long			pcount;

	/* Time spent waiting on a runqueue: */
	unsigned long long		run_delay;

	/* Timestamps: */

	/* When did we last run on a CPU? */
	unsigned long long		last_arrival;

	/* When were we last queued to run? */
	unsigned long long		last_queued;

#endif /* CONFIG_SCHED_INFO */
};

/*
 * Integer metrics need fixed point arithmetic, e.g., sched/fair
 * has a few: load, load_avg, util_avg, freq, and capacity.
 *
 * We define a basic fixed point arithmetic range, and then formalize
 * all these metrics based on that basic range.
 */
# define SCHED_FIXEDPOINT_SHIFT		10
# define SCHED_FIXEDPOINT_SCALE		(1L << SCHED_FIXEDPOINT_SHIFT)

/* Increase resolution of cpu_capacity calculations */
# define SCHED_CAPACITY_SHIFT		SCHED_FIXEDPOINT_SHIFT
# define SCHED_CAPACITY_SCALE		(1L << SCHED_CAPACITY_SHIFT)

struct load_weight {
	unsigned long			weight;
	u32				inv_weight;
};

/**
 * struct util_est - Estimation utilization of FAIR tasks
 * @enqueued: instantaneous estimated utilization of a task/cpu
 * @ewma:     the Exponential Weighted Moving Average (EWMA)
 *            utilization of a task
 *
 * Support data structure to track an Exponential Weighted Moving Average
 * (EWMA) of a FAIR task's utilization. New samples are added to the moving
 * average each time a task completes an activation. Sample's weight is chosen
 * so that the EWMA will be relatively insensitive to transient changes to the
 * task's workload.
 *
 * The enqueued attribute has a slightly different meaning for tasks and cpus:
 * - task:   the task's util_avg at last task dequeue time
 * - cfs_rq: the sum of util_est.enqueued for each RUNNABLE task on that CPU
 * Thus, the util_est.enqueued of a task represents the contribution on the
 * estimated utilization of the CPU where that task is currently enqueued.
 *
 * Only for tasks do we track a moving average of the past instantaneous
 * estimated utilization. This makes it possible to absorb sporadic drops in
 * utilization of an otherwise almost periodic task.
 */
struct util_est {
	unsigned int			enqueued;
	unsigned int			ewma;
#define UTIL_EST_WEIGHT_SHIFT		2
} __attribute__((__aligned__(sizeof(u64))));
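
/*
 * Informal sketch: with UTIL_EST_WEIGHT_SHIFT == 2 the EWMA weight is
 * 1/4, and the update in kernel/sched/fair.c is roughly:
 *
 *	diff = enqueued - ewma;
 *	ewma = ((ewma << UTIL_EST_WEIGHT_SHIFT) + diff) >> UTIL_EST_WEIGHT_SHIFT;
 *
 * i.e. ewma += (enqueued - ewma) / 4 each time a task completes an
 * activation.
 */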

/*
 * The load_avg/util_avg accumulates an infinite geometric series
 * (see __update_load_avg() in kernel/sched/fair.c).
 *
 * [load_avg definition]
 *
 *   load_avg = runnable% * scale_load_down(load)
 *
 * where runnable% is the time ratio that a sched_entity is runnable.
 * For cfs_rq, it is the aggregated load_avg of all runnable and
 * blocked sched_entities.
 *
 * [util_avg definition]
 *
 *   util_avg = running% * SCHED_CAPACITY_SCALE
 *
 * where running% is the time ratio that a sched_entity is running on
 * a CPU. For cfs_rq, it is the aggregated util_avg of all runnable
 * and blocked sched_entities.
 *
 * load_avg and util_avg don't directly factor frequency scaling and CPU
 * capacity scaling. The scaling is done through the rq_clock_pelt that
 * is used for computing those signals (see update_rq_clock_pelt())
 *
 * N.B., the above ratios (runnable% and running%) themselves are in the
 * range of [0, 1]. To do fixed point arithmetic, we therefore scale them
 * to as large a range as necessary. This is for example reflected by
 * util_avg's SCHED_CAPACITY_SCALE.
 *
 * [Overflow issue]
 *
 * The 64-bit load_sum can have 4353082796 (=2^64/47742/88761) entities
 * with the highest load (=88761), always runnable on a single cfs_rq,
 * and should not overflow as the number already hits PID_MAX_LIMIT.
 *
 * For all other cases (including 32-bit kernels), struct load_weight's
 * weight will overflow first before we do, because:
 *
 *    Max(load_avg) <= Max(load.weight)
 *
 * Then it is the load_weight's responsibility to consider overflow
 * issues.
 */
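
/*
 * Worked example (informal): a nice-0 task (scale_load_down(load) == 1024)
 * that is runnable 50% of the time converges to load_avg ~= 512, and if it
 * is running 25% of the time on a full-capacity CPU its util_avg converges
 * to ~0.25 * SCHED_CAPACITY_SCALE = 256.
 */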
struct sched_avg {
	u64				last_update_time;
	u64				load_sum;
	u64				runnable_load_sum;
	u32				util_sum;
	u32				period_contrib;
	unsigned long			load_avg;
	unsigned long			runnable_load_avg;
	unsigned long			util_avg;
	struct util_est			util_est;
} ____cacheline_aligned;

struct sched_statistics {
#ifdef CONFIG_SCHEDSTATS
	u64				wait_start;
	u64				wait_max;
	u64				wait_count;
	u64				wait_sum;
	u64				iowait_count;
	u64				iowait_sum;

	u64				sleep_start;
	u64				sleep_max;
	s64				sum_sleep_runtime;

	u64				block_start;
	u64				block_max;
	u64				exec_max;
	u64				slice_max;

	u64				nr_migrations_cold;
	u64				nr_failed_migrations_affine;
	u64				nr_failed_migrations_running;
	u64				nr_failed_migrations_hot;
	u64				nr_forced_migrations;

	u64				nr_wakeups;
	u64				nr_wakeups_sync;
	u64				nr_wakeups_migrate;
	u64				nr_wakeups_local;
	u64				nr_wakeups_remote;
	u64				nr_wakeups_affine;
	u64				nr_wakeups_affine_attempts;
	u64				nr_wakeups_passive;
	u64				nr_wakeups_idle;
#endif
};

struct sched_entity {
	/* For load-balancing: */
	struct load_weight		load;
	unsigned long			runnable_weight;
	struct rb_node			run_node;
	struct list_head		group_node;
	unsigned int			on_rq;

	u64				exec_start;
	u64				sum_exec_runtime;
	u64				vruntime;
	u64				prev_sum_exec_runtime;

	u64				nr_migrations;

	struct sched_statistics		statistics;

#ifdef CONFIG_FAIR_GROUP_SCHED
	int				depth;
	struct sched_entity		*parent;
	/* rq on which this entity is (to be) queued: */
	struct cfs_rq			*cfs_rq;
	/* rq "owned" by this entity/group: */
	struct cfs_rq			*my_q;
#endif

#ifdef CONFIG_SMP
	/*
	 * Per entity load average tracking.
	 *
	 * Put into separate cache line so it does not
	 * collide with read-mostly values above.
	 */
	struct sched_avg		avg;
#endif
};

struct sched_rt_entity {
	struct list_head		run_list;
	unsigned long			timeout;
	unsigned long			watchdog_stamp;
	unsigned int			time_slice;
	unsigned short			on_rq;
	unsigned short			on_list;

	struct sched_rt_entity		*back;
#ifdef CONFIG_RT_GROUP_SCHED
	struct sched_rt_entity		*parent;
	/* rq on which this entity is (to be) queued: */
	struct rt_rq			*rt_rq;
	/* rq "owned" by this entity/group: */
	struct rt_rq			*my_q;
#endif
} __randomize_layout;

struct sched_dl_entity {
	struct rb_node			rb_node;

	/*
	 * Original scheduling parameters. Copied here from sched_attr
	 * during sched_setattr(), they will remain the same until
	 * the next sched_setattr().
	 */
	u64				dl_runtime;	/* Maximum runtime for each instance	*/
	u64				dl_deadline;	/* Relative deadline of each instance	*/
	u64				dl_period;	/* Separation of two instances (period) */
	u64				dl_bw;		/* dl_runtime / dl_period		*/
	u64				dl_density;	/* dl_runtime / dl_deadline		*/

	/*
	 * Actual scheduling parameters. Initialized with the values above,
	 * they are continuously updated during task execution. Note that
	 * the remaining runtime could be < 0 in case we are in overrun.
	 */
	s64				runtime;	/* Remaining runtime for this instance	*/
	u64				deadline;	/* Absolute deadline for this instance	*/
	unsigned int			flags;		/* Specifying the scheduler behaviour	*/

	/*
	 * Some bool flags:
	 *
	 * @dl_throttled tells if we exhausted the runtime. If so, the
	 * task has to wait for a replenishment to be performed at the
	 * next firing of dl_timer.
	 *
	 * @dl_boosted tells if we are boosted due to DI (deadline
	 * inheritance). If so we are outside the bandwidth enforcement
	 * mechanism (but only until we
	 * exit the critical section);
	 *
	 * @dl_yielded tells if task gave up the CPU before consuming
	 * all its available runtime during the last job.
	 *
	 * @dl_non_contending tells if the task is inactive while still
	 * contributing to the active utilization. In other words, it
	 * indicates if the inactive timer has been armed and its handler
	 * has not been executed yet. This flag is useful to avoid race
	 * conditions between the inactive timer handler and the wakeup
	 * code.
	 *
	 * @dl_overrun tells if the task asked to be informed about runtime
	 * overruns.
	 */
	unsigned int			dl_throttled      : 1;
	unsigned int			dl_boosted        : 1;
	unsigned int			dl_yielded        : 1;
	unsigned int			dl_non_contending : 1;
	unsigned int			dl_overrun	  : 1;

	/*
	 * Bandwidth enforcement timer. Each -deadline task has its
	 * own bandwidth to be enforced, thus we need one timer per task.
	 */
	struct hrtimer			dl_timer;

	/*
	 * Inactive timer, responsible for decreasing the active utilization
	 * at the "0-lag time". When a -deadline task blocks, it contributes
	 * to GRUB's active utilization until the "0-lag time", hence a
	 * timer is needed to decrease the active utilization at the correct
	 * time.
	 */
	struct hrtimer inactive_timer;
};

#ifdef CONFIG_UCLAMP_TASK
/* Number of utilization clamp buckets (shorter alias) */
#define UCLAMP_BUCKETS CONFIG_UCLAMP_BUCKETS_COUNT

/*
 * Utilization clamp for a scheduling entity
 * @value:		clamp value "assigned" to a se
 * @bucket_id:		bucket index corresponding to the "assigned" value
 * @active:		the se is currently refcounted in a rq's bucket
 * @user_defined:	the requested clamp value comes from user-space
 *
 * The bucket_id is the index of the clamp bucket matching the clamp value
 * which is pre-computed and stored to avoid expensive integer divisions from
 * the fast path.
 *
 * The active bit is set whenever a task has got an "effective" value assigned,
 * which can be different from the clamp value "requested" from user-space.
 * This makes it possible to know that a task is refcounted in the rq's
 * bucket corresponding to the "effective" bucket_id.
 *
 * The user_defined bit is set whenever a task has got a task-specific clamp
 * value requested from userspace, i.e. the system defaults apply to this task
 * just as a restriction. This makes it possible to relax default clamps when
 * a less restrictive task-specific value has been requested, and thus to
 * implement a "nice" semantic. For example, a task running with a 20%
 * default boost can still drop its own boosting to 0%.
 */
struct uclamp_se {
	unsigned int value		: bits_per(SCHED_CAPACITY_SCALE);
	unsigned int bucket_id		: bits_per(UCLAMP_BUCKETS);
	unsigned int active		: 1;
	unsigned int user_defined	: 1;
};
#endif /* CONFIG_UCLAMP_TASK */

union rcu_special {
	struct {
		u8			blocked;
		u8			need_qs;
		u8			exp_hint; /* Hint for performance. */
		u8			deferred_qs;
	} b; /* Bits. */
	u32 s; /* Set of bits. */
};

enum perf_event_task_context {
	perf_invalid_context = -1,
	perf_hw_context = 0,
	perf_sw_context,
	perf_nr_task_contexts,
};

struct wake_q_node {
	struct wake_q_node *next;
};

struct task_struct {
#ifdef CONFIG_THREAD_INFO_IN_TASK
	/*
	 * For reasons of header soup (see current_thread_info()), this
	 * must be the first element of task_struct.
	 */
	struct thread_info		thread_info;
#endif
	/* -1 unrunnable, 0 runnable, >0 stopped: */
	volatile long			state;

	/*
	 * This begins the randomizable portion of task_struct. Only
	 * scheduling-critical items should be added above here.
	 */
	randomized_struct_fields_start

	void				*stack;
	refcount_t			usage;
	/* Per task flags (PF_*), defined further below: */
	unsigned int			flags;
	unsigned int			ptrace;

#ifdef CONFIG_SMP
	struct llist_node		wake_entry;
	int				on_cpu;
#ifdef CONFIG_THREAD_INFO_IN_TASK
	/* Current CPU: */
	unsigned int			cpu;
#endif
	unsigned int			wakee_flips;
	unsigned long			wakee_flip_decay_ts;
	struct task_struct		*last_wakee;

	/*
	 * recent_used_cpu is initially set as the last CPU used by a task
	 * that wakes affine another task. Waker/wakee relationships can
	 * push tasks around a CPU where each wakeup moves to the next one.
	 * Tracking a recently used CPU allows a quick search for a recently
	 * used CPU that may be idle.
	 */
	int				recent_used_cpu;
	int				wake_cpu;
#endif
	int				on_rq;

	int				prio;
	int				static_prio;
	int				normal_prio;
	unsigned int			rt_priority;

	const struct sched_class	*sched_class;
	struct sched_entity		se;
	struct sched_rt_entity		rt;
#ifdef CONFIG_CGROUP_SCHED
	struct task_group		*sched_task_group;
#endif
	struct sched_dl_entity		dl;

#ifdef CONFIG_UCLAMP_TASK
	/* Clamp values requested for a scheduling entity */
	struct uclamp_se		uclamp_req[UCLAMP_CNT];
	/* Effective clamp values used for a scheduling entity */
	struct uclamp_se		uclamp[UCLAMP_CNT];
#endif

#ifdef CONFIG_PREEMPT_NOTIFIERS
	/* List of struct preempt_notifier: */
	struct hlist_head		preempt_notifiers;
#endif

#ifdef CONFIG_BLK_DEV_IO_TRACE
	unsigned int			btrace_seq;
#endif

	unsigned int			policy;
	int				nr_cpus_allowed;
	const cpumask_t			*cpus_ptr;
	cpumask_t			cpus_mask;

#ifdef CONFIG_PREEMPT_RCU
	int				rcu_read_lock_nesting;
	union rcu_special		rcu_read_unlock_special;
	struct list_head		rcu_node_entry;
	struct rcu_node			*rcu_blocked_node;
#endif /* #ifdef CONFIG_PREEMPT_RCU */

#ifdef CONFIG_TASKS_RCU
	unsigned long			rcu_tasks_nvcsw;
	u8				rcu_tasks_holdout;
	u8				rcu_tasks_idx;
	int				rcu_tasks_idle_cpu;
	struct list_head		rcu_tasks_holdout_list;
#endif /* #ifdef CONFIG_TASKS_RCU */

	struct sched_info		sched_info;

	struct list_head		tasks;
#ifdef CONFIG_SMP
	struct plist_node		pushable_tasks;
	struct rb_node			pushable_dl_tasks;
#endif

	struct mm_struct		*mm;
	struct mm_struct		*active_mm;

	/* Per-thread vma caching: */
	struct vmacache			vmacache;

#ifdef SPLIT_RSS_COUNTING
	struct task_rss_stat		rss_stat;
#endif
	int				exit_state;
	int				exit_code;
	int				exit_signal;
	/* The signal sent when the parent dies: */
	int				pdeath_signal;
	/* JOBCTL_*, siglock protected: */
	unsigned long			jobctl;

	/* Used for emulating ABI behavior of previous Linux versions: */
	unsigned int			personality;

	/* Scheduler bits, serialized by scheduler locks: */
	unsigned			sched_reset_on_fork:1;
	unsigned			sched_contributes_to_load:1;
	unsigned			sched_migrated:1;
	unsigned			sched_remote_wakeup:1;
#ifdef CONFIG_PSI
	unsigned			sched_psi_wake_requeue:1;
#endif

	/* Force alignment to the next boundary: */
	unsigned			:0;

	/* Unserialized, strictly 'current' */

	/* Bit to tell LSMs we're in execve(): */
	unsigned			in_execve:1;
	unsigned			in_iowait:1;
#ifndef TIF_RESTORE_SIGMASK
	unsigned			restore_sigmask:1;
#endif
#ifdef CONFIG_MEMCG
	unsigned			in_user_fault:1;
#endif
#ifdef CONFIG_COMPAT_BRK
	unsigned			brk_randomized:1;
#endif
#ifdef CONFIG_CGROUPS
	/* disallow userland-initiated cgroup migration */
	unsigned			no_cgroup_migration:1;
	/* task is frozen/stopped (used by the cgroup freezer) */
	unsigned			frozen:1;
#endif
#ifdef CONFIG_BLK_CGROUP
	/* to be used once the psi infrastructure lands upstream. */
	unsigned			use_memdelay:1;
#endif

	unsigned long			atomic_flags; /* Flags requiring atomic access. */

	struct restart_block		restart_block;

	pid_t				pid;
	pid_t				tgid;

#ifdef CONFIG_STACKPROTECTOR
	/* Canary value for the -fstack-protector GCC feature: */
	unsigned long			stack_canary;
#endif
	/*
	 * Pointers to the (original) parent process, youngest child, younger sibling,
	 * older sibling, respectively.  (p->father can be replaced with
	 * p->real_parent->pid)
	 */

	/* Real parent process: */
	struct task_struct __rcu	*real_parent;

	/* Recipient of SIGCHLD, wait4() reports: */
	struct task_struct __rcu	*parent;

	/*
	 * Children/sibling form the list of natural children:
	 */
	struct list_head		children;
	struct list_head		sibling;
	struct task_struct		*group_leader;

	/*
	 * 'ptraced' is the list of tasks this task is using ptrace() on.
	 *
	 * This includes both natural children and PTRACE_ATTACH targets.
	 * 'ptrace_entry' is this task's link on the p->parent->ptraced list.
	 */
	struct list_head		ptraced;
	struct list_head		ptrace_entry;

	/* PID/PID hash table linkage. */
	struct pid			*thread_pid;
	struct hlist_node		pid_links[PIDTYPE_MAX];
	struct list_head		thread_group;
	struct list_head		thread_node;

	struct completion		*vfork_done;

	/* CLONE_CHILD_SETTID: */
	int __user			*set_child_tid;

	/* CLONE_CHILD_CLEARTID: */
	int __user			*clear_child_tid;

	u64				utime;
	u64				stime;
#ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
	u64				utimescaled;
	u64				stimescaled;
#endif
	u64				gtime;
	struct prev_cputime		prev_cputime;
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
	struct vtime			vtime;
#endif

#ifdef CONFIG_NO_HZ_FULL
	atomic_t			tick_dep_mask;
#endif
	/* Context switch counts: */
	unsigned long			nvcsw;
	unsigned long			nivcsw;

	/* Monotonic time in nsecs: */
	u64				start_time;

	/* Boot based time in nsecs: */
	u64				start_boottime;

	/* MM fault and swap info: this can arguably be seen as either mm-specific or thread-specific: */
	unsigned long			min_flt;
	unsigned long			maj_flt;

	/* Empty if CONFIG_POSIX_CPUTIMERS=n */
	struct posix_cputimers		posix_cputimers;

	/* Process credentials: */

	/* Tracer's credentials at attach: */
	const struct cred __rcu		*ptracer_cred;

	/* Objective and real subjective task credentials (COW): */
	const struct cred __rcu		*real_cred;

	/* Effective (overridable) subjective task credentials (COW): */
	const struct cred __rcu		*cred;

#ifdef CONFIG_KEYS
	/* Cached requested key. */
	struct key			*cached_requested_key;
#endif

	/*
	 * executable name, excluding path.
	 *
	 * - normally initialized setup_new_exec()
	 * - access it with [gs]et_task_comm()
	 * - lock it with task_lock()
	 */
	char				comm[TASK_COMM_LEN];

	struct nameidata		*nameidata;

#ifdef CONFIG_SYSVIPC
	struct sysv_sem			sysvsem;
	struct sysv_shm			sysvshm;
#endif
#ifdef CONFIG_DETECT_HUNG_TASK
	unsigned long			last_switch_count;
	unsigned long			last_switch_time;
#endif
	/* Filesystem information: */
	struct fs_struct		*fs;

	/* Open file information: */
	struct files_struct		*files;

	/* Namespaces: */
	struct nsproxy			*nsproxy;

	/* Signal handlers: */
	struct signal_struct		*signal;
	struct sighand_struct __rcu		*sighand;
	sigset_t			blocked;
	sigset_t			real_blocked;
	/* Restored if set_restore_sigmask() was used: */
	sigset_t			saved_sigmask;
	struct sigpending		pending;
	unsigned long			sas_ss_sp;
	size_t				sas_ss_size;
	unsigned int			sas_ss_flags;

	struct callback_head		*task_works;

#ifdef CONFIG_AUDIT
#ifdef CONFIG_AUDITSYSCALL
	struct audit_context		*audit_context;
#endif
	kuid_t				loginuid;
	unsigned int			sessionid;
#endif
	struct seccomp			seccomp;

	/* Thread group tracking: */
	u32				parent_exec_id;
	u32				self_exec_id;

	/* Protection against (de-)allocation: mm, files, fs, tty, keyrings, mems_allowed, mempolicy: */
	spinlock_t			alloc_lock;

	/* Protection of the PI data structures: */
	raw_spinlock_t			pi_lock;

	struct wake_q_node		wake_q;

#ifdef CONFIG_RT_MUTEXES
	/* PI waiters blocked on a rt_mutex held by this task: */
	struct rb_root_cached		pi_waiters;
	/* Updated under owner's pi_lock and rq lock */
	struct task_struct		*pi_top_task;
	/* Deadlock detection and priority inheritance handling: */
	struct rt_mutex_waiter		*pi_blocked_on;
#endif

#ifdef CONFIG_DEBUG_MUTEXES
	/* Mutex deadlock detection: */
	struct mutex_waiter		*blocked_on;
#endif

#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
	int				non_block_count;
#endif

#ifdef CONFIG_TRACE_IRQFLAGS
	unsigned int			irq_events;
	unsigned int			hardirq_threaded;
	unsigned long			hardirq_enable_ip;
	unsigned long			hardirq_disable_ip;
	unsigned int			hardirq_enable_event;
	unsigned int			hardirq_disable_event;
	int				hardirqs_enabled;
	int				hardirq_context;
	unsigned long			softirq_disable_ip;
	unsigned long			softirq_enable_ip;
	unsigned int			softirq_disable_event;
	unsigned int			softirq_enable_event;
	int				softirqs_enabled;
	int				softirq_context;
#endif

#ifdef CONFIG_LOCKDEP
# define MAX_LOCK_DEPTH			48UL
	u64				curr_chain_key;
	int				lockdep_depth;
	unsigned int			lockdep_recursion;
	struct held_lock		held_locks[MAX_LOCK_DEPTH];
#endif

#ifdef CONFIG_UBSAN
	unsigned int			in_ubsan;
#endif

	/* Journalling filesystem info: */
	void				*journal_info;

	/* Stacked block device info: */
	struct bio_list			*bio_list;

#ifdef CONFIG_BLOCK
	/* Stack plugging: */
	struct blk_plug			*plug;
#endif

	/* VM state: */
	struct reclaim_state		*reclaim_state;

	struct backing_dev_info		*backing_dev_info;

	struct io_context		*io_context;

#ifdef CONFIG_COMPACTION
	struct capture_control		*capture_control;
#endif
	/* Ptrace state: */
	unsigned long			ptrace_message;
	kernel_siginfo_t		*last_siginfo;

	struct task_io_accounting	ioac;
#ifdef CONFIG_PSI
	/* Pressure stall state */
	unsigned int			psi_flags;
#endif
#ifdef CONFIG_TASK_XACCT
	/* Accumulated RSS usage: */
	u64				acct_rss_mem1;
	/* Accumulated virtual memory usage: */
	u64				acct_vm_mem1;
	/* stime + utime since last update: */
	u64				acct_timexpd;
#endif
#ifdef CONFIG_CPUSETS
	/* Protected by ->alloc_lock: */
	nodemask_t			mems_allowed;
	/* Sequence number to catch updates: */
	seqcount_t			mems_allowed_seq;
	int				cpuset_mem_spread_rotor;
	int				cpuset_slab_spread_rotor;
#endif
#ifdef CONFIG_CGROUPS
	/* Control Group info protected by css_set_lock: */
	struct css_set __rcu		*cgroups;
	/* cg_list protected by css_set_lock and tsk->alloc_lock: */
	struct list_head		cg_list;
#endif
#ifdef CONFIG_X86_CPU_RESCTRL
	u32				closid;
	u32				rmid;
#endif
#ifdef CONFIG_FUTEX
	struct robust_list_head __user	*robust_list;
#ifdef CONFIG_COMPAT
	struct compat_robust_list_head __user *compat_robust_list;
#endif
	struct list_head		pi_state_list;
	struct futex_pi_state		*pi_state_cache;
	struct mutex			futex_exit_mutex;
	unsigned int			futex_state;
#endif
#ifdef CONFIG_PERF_EVENTS
	struct perf_event_context	*perf_event_ctxp[perf_nr_task_contexts];
	struct mutex			perf_event_mutex;
	struct list_head		perf_event_list;
#endif
#ifdef CONFIG_DEBUG_PREEMPT
	unsigned long			preempt_disable_ip;
#endif
#ifdef CONFIG_NUMA
	/* Protected by alloc_lock: */
	struct mempolicy		*mempolicy;
	short				il_prev;
	short				pref_node_fork;
#endif
#ifdef CONFIG_NUMA_BALANCING
	int				numa_scan_seq;
	unsigned int			numa_scan_period;
	unsigned int			numa_scan_period_max;
	int				numa_preferred_nid;
	unsigned long			numa_migrate_retry;
	/* Migration stamp: */
	u64				node_stamp;
	u64				last_task_numa_placement;
	u64				last_sum_exec_runtime;
	struct callback_head		numa_work;

	/*
	 * This pointer is only modified for current in syscall and
	 * pagefault context (and for tasks being destroyed), so it can be read
	 * from any of the following contexts:
	 *  - RCU read-side critical section
	 *  - current->numa_group from everywhere
	 *  - task's runqueue locked, task not running
	 */
	struct numa_group __rcu		*numa_group;

	/*
	 * numa_faults is an array split into four regions:
	 * faults_memory, faults_cpu, faults_memory_buffer, faults_cpu_buffer
	 * in this precise order.
	 *
	 * faults_memory: Exponential decaying average of faults on a per-node
	 * basis. Scheduling placement decisions are made based on these
	 * counts. The values remain static for the duration of a PTE scan.
	 * faults_cpu: Track the nodes the process was running on when a NUMA
	 * hinting fault was incurred.
	 * faults_memory_buffer and faults_cpu_buffer: Record faults per node
	 * during the current scan window. When the scan completes, the counts
	 * in faults_memory and faults_cpu decay and these values are copied.
	 */
	unsigned long			*numa_faults;
	unsigned long			total_numa_faults;

	/*
	 * numa_faults_locality tracks if faults recorded during the last
	 * scan window were remote/local or failed to migrate. The task scan
	 * period is adapted based on the locality of the faults with different
	 * weights depending on whether they were shared or private faults
	 */
	unsigned long			numa_faults_locality[3];

	unsigned long			numa_pages_migrated;
#endif /* CONFIG_NUMA_BALANCING */

#ifdef CONFIG_RSEQ
	struct rseq __user *rseq;
	u32 rseq_sig;
	/*
	 * RmW on rseq_event_mask must be performed atomically
	 * with respect to preemption.
	 */
	unsigned long rseq_event_mask;
#endif

	struct tlbflush_unmap_batch	tlb_ubc;

	union {
		refcount_t		rcu_users;
		struct rcu_head		rcu;
	};

	/* Cache last used pipe for splice(): */
	struct pipe_inode_info		*splice_pipe;

	struct page_frag		task_frag;

#ifdef CONFIG_TASK_DELAY_ACCT
	struct task_delay_info		*delays;
#endif

#ifdef CONFIG_FAULT_INJECTION
	int				make_it_fail;
	unsigned int			fail_nth;
#endif
	/*
	 * When (nr_dirtied >= nr_dirtied_pause), it's time to call
	 * balance_dirty_pages() for a dirty throttling pause:
	 */
	int				nr_dirtied;
	int				nr_dirtied_pause;
	/* Start of a write-and-pause period: */
	unsigned long			dirty_paused_when;

#ifdef CONFIG_LATENCYTOP
	int				latency_record_count;
	struct latency_record		latency_record[LT_SAVECOUNT];
#endif
	/*
	 * Time slack values; these are used to round up poll() and
	 * select() etc timeout values. These are in nanoseconds.
	 */
	u64				timer_slack_ns;
	u64				default_timer_slack_ns;

#ifdef CONFIG_KASAN
	unsigned int			kasan_depth;
#endif

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	/* Index of current stored address in ret_stack: */
	int				curr_ret_stack;
	int				curr_ret_depth;

	/* Stack of return addresses for return function tracing: */
	struct ftrace_ret_stack		*ret_stack;

	/* Timestamp for last schedule: */
	unsigned long long		ftrace_timestamp;

	/*
	 * Number of functions that haven't been traced
	 * because of depth overrun:
	 */
	atomic_t			trace_overrun;

	/* Pause tracing: */
	atomic_t			tracing_graph_pause;
#endif

#ifdef CONFIG_TRACING
	/* State flags for use by tracers: */
	unsigned long			trace;

	/* Bitmask and counter of trace recursion: */
	unsigned long			trace_recursion;
#endif /* CONFIG_TRACING */

#ifdef CONFIG_KCOV
	/* See kernel/kcov.c for more details. */

	/* Coverage collection mode enabled for this task (0 if disabled): */
	unsigned int			kcov_mode;

	/* Size of the kcov_area: */
	unsigned int			kcov_size;

	/* Buffer for coverage collection: */
	void				*kcov_area;

	/* KCOV descriptor wired with this task or NULL: */
	struct kcov			*kcov;

	/* KCOV common handle for remote coverage collection: */
	u64				kcov_handle;

	/* KCOV sequence number: */
	int				kcov_sequence;
#endif

#ifdef CONFIG_MEMCG
	struct mem_cgroup		*memcg_in_oom;
	gfp_t				memcg_oom_gfp_mask;
	int				memcg_oom_order;

	/* Number of pages to reclaim on returning to userland: */
	unsigned int			memcg_nr_pages_over_high;

	/* Used by memcontrol for targeted memcg charge: */
	struct mem_cgroup		*active_memcg;
#endif

#ifdef CONFIG_BLK_CGROUP
	struct request_queue		*throttle_queue;
#endif

#ifdef CONFIG_UPROBES
	struct uprobe_task		*utask;
#endif
#if defined(CONFIG_BCACHE) || defined(CONFIG_BCACHE_MODULE)
	unsigned int			sequential_io;
	unsigned int			sequential_io_avg;
#endif
#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
	unsigned long			task_state_change;
#endif
	int				pagefault_disabled;
#ifdef CONFIG_MMU
	struct task_struct		*oom_reaper_list;
#endif
#ifdef CONFIG_VMAP_STACK
	struct vm_struct		*stack_vm_area;
#endif
#ifdef CONFIG_THREAD_INFO_IN_TASK
	/* A live task holds one reference: */
	refcount_t			stack_refcount;
#endif
#ifdef CONFIG_LIVEPATCH
	int patch_state;
#endif
#ifdef CONFIG_SECURITY
	/* Used by LSM modules for access restriction: */
	void				*security;
#endif

#ifdef CONFIG_GCC_PLUGIN_STACKLEAK
	unsigned long			lowest_stack;
	unsigned long			prev_lowest_stack;
#endif

	/*
	 * New fields for task_struct should be added above here, so that
	 * they are included in the randomized portion of task_struct.
	 */
	randomized_struct_fields_end

	/* CPU-specific state of this task: */
	struct thread_struct		thread;

	/*
	 * WARNING: on x86, 'thread_struct' contains a variable-sized
	 * structure.  It *MUST* be at the end of 'task_struct'.
	 *
	 * Do not put anything below here!
	 */
};

static inline struct pid *task_pid(struct task_struct *task)
{
	return task->thread_pid;
}

/*
 * the helpers to get the task's different pids as they are seen
 * from various namespaces
 *
 * task_xid_nr()     : global id, i.e. the id seen from the init namespace;
 * task_xid_vnr()    : virtual id, i.e. the id seen from the pid namespace of
 *                     current.
 * task_xid_nr_ns()  : id seen from the ns specified;
 *
 * see also pid_nr() etc in include/linux/pid.h
 */
pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type, struct pid_namespace *ns);
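
/*
 * Informal example: for the init task of a PID namespace, task_pid_vnr()
 * returns 1 while task_pid_nr() returns whatever id the task was given
 * in the initial namespace.
 */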

static inline pid_t task_pid_nr(struct task_struct *tsk)
{
	return tsk->pid;
}

static inline pid_t task_pid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
{
	return __task_pid_nr_ns(tsk, PIDTYPE_PID, ns);
}

static inline pid_t task_pid_vnr(struct task_struct *tsk)
{
	return __task_pid_nr_ns(tsk, PIDTYPE_PID, NULL);
}


static inline pid_t task_tgid_nr(struct task_struct *tsk)
{
	return tsk->tgid;
}

/**
 * pid_alive - check that a task structure is not stale
 * @p: Task structure to be checked.
 *
 * Test if a process is not yet dead (at most zombie state)
 * If pid_alive fails, then pointers within the task structure
 * can be stale and must not be dereferenced.
 *
 * Return: 1 if the process is alive. 0 otherwise.
 */
static inline int pid_alive(const struct task_struct *p)
{
	return p->thread_pid != NULL;
}

static inline pid_t task_pgrp_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
{
	return __task_pid_nr_ns(tsk, PIDTYPE_PGID, ns);
}

static inline pid_t task_pgrp_vnr(struct task_struct *tsk)
{
	return __task_pid_nr_ns(tsk, PIDTYPE_PGID, NULL);
}


static inline pid_t task_session_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
{
	return __task_pid_nr_ns(tsk, PIDTYPE_SID, ns);
}

static inline pid_t task_session_vnr(struct task_struct *tsk)
{
	return __task_pid_nr_ns(tsk, PIDTYPE_SID, NULL);
}

static inline pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
{
	return __task_pid_nr_ns(tsk, PIDTYPE_TGID, ns);
}

static inline pid_t task_tgid_vnr(struct task_struct *tsk)
{
	return __task_pid_nr_ns(tsk, PIDTYPE_TGID, NULL);
}

static inline pid_t task_ppid_nr_ns(const struct task_struct *tsk, struct pid_namespace *ns)
{
	pid_t pid = 0;

	rcu_read_lock();
	if (pid_alive(tsk))
		pid = task_tgid_nr_ns(rcu_dereference(tsk->real_parent), ns);
	rcu_read_unlock();

	return pid;
}

static inline pid_t task_ppid_nr(const struct task_struct *tsk)
{
	return task_ppid_nr_ns(tsk, &init_pid_ns);
}

/* Obsolete, do not use: */
static inline pid_t task_pgrp_nr(struct task_struct *tsk)
{
	return task_pgrp_nr_ns(tsk, &init_pid_ns);
}

#define TASK_REPORT_IDLE	(TASK_REPORT + 1)
#define TASK_REPORT_MAX		(TASK_REPORT_IDLE << 1)

static inline unsigned int task_state_index(struct task_struct *tsk)
{
	unsigned int tsk_state = READ_ONCE(tsk->state);
	unsigned int state = (tsk_state | tsk->exit_state) & TASK_REPORT;

	BUILD_BUG_ON_NOT_POWER_OF_2(TASK_REPORT_MAX);

	if (tsk_state == TASK_IDLE)
		state = TASK_REPORT_IDLE;

	return fls(state);
}

static inline char task_index_to_char(unsigned int state)
{
	static const char state_char[] = "RSDTtXZPI";

	BUILD_BUG_ON(1 + ilog2(TASK_REPORT_MAX) != sizeof(state_char) - 1);

	return state_char[state];
}

static inline char task_state_to_char(struct task_struct *tsk)
{
	return task_index_to_char(task_state_index(tsk));
}
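
/*
 * E.g. a TASK_RUNNING task yields 'R', TASK_INTERRUPTIBLE yields 'S' and
 * TASK_UNINTERRUPTIBLE yields 'D': the same letters ps(1) and
 * /proc/<pid>/stat report.
 */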

/**
 * is_global_init - check if a task structure is init
 * @tsk: Task structure to be checked.
 *
 * Check if a task structure is the first user space task the kernel created.
 * Since init is free to have sub-threads, we need to check the tgid rather
 * than the pid.
 *
 * Return: 1 if the task structure is init. 0 otherwise.
 */
static inline int is_global_init(struct task_struct *tsk)
{
	return task_tgid_nr(tsk) == 1;
}

extern struct pid *cad_pid;

/*
 * Per process flags
 */
#define PF_IDLE			0x00000002	/* I am an IDLE thread */
#define PF_EXITING		0x00000004	/* Getting shut down */
#define PF_VCPU			0x00000010	/* I'm a virtual CPU */
#define PF_WQ_WORKER		0x00000020	/* I'm a workqueue worker */
#define PF_FORKNOEXEC		0x00000040	/* Forked but didn't exec */
#define PF_MCE_PROCESS		0x00000080      /* Process policy on mce errors */
#define PF_SUPERPRIV		0x00000100	/* Used super-user privileges */
#define PF_DUMPCORE		0x00000200	/* Dumped core */
#define PF_SIGNALED		0x00000400	/* Killed by a signal */
#define PF_MEMALLOC		0x00000800	/* Allocating memory */
#define PF_NPROC_EXCEEDED	0x00001000	/* set_user() noticed that RLIMIT_NPROC was exceeded */
#define PF_USED_MATH		0x00002000	/* If unset the fpu must be initialized before use */
#define PF_USED_ASYNC		0x00004000	/* Used async_schedule*(), used by module init */
#define PF_NOFREEZE		0x00008000	/* This thread should not be frozen */
#define PF_FROZEN		0x00010000	/* Frozen for system suspend */
#define PF_KSWAPD		0x00020000	/* I am kswapd */
#define PF_MEMALLOC_NOFS	0x00040000	/* All allocation requests will inherit GFP_NOFS */
#define PF_MEMALLOC_NOIO	0x00080000	/* All allocation requests will inherit GFP_NOIO */
#define PF_LESS_THROTTLE	0x00100000	/* Throttle me less: I clean memory */
#define PF_KTHREAD		0x00200000	/* I am a kernel thread */
#define PF_RANDOMIZE		0x00400000	/* Randomize virtual address space */
#define PF_SWAPWRITE		0x00800000	/* Allowed to write to swap */
#define PF_MEMSTALL		0x01000000	/* Stalled due to lack of memory */
#define PF_UMH			0x02000000	/* I'm a usermodehelper process */
#define PF_NO_SETAFFINITY	0x04000000	/* Userland is not allowed to meddle with cpus_mask */
#define PF_MCE_EARLY		0x08000000      /* Early kill for mce process policy */
#define PF_MEMALLOC_NOCMA	0x10000000	/* All allocation requests will have __GFP_MOVABLE cleared */
#define PF_IO_WORKER		0x20000000	/* Task is an IO worker */
#define PF_FREEZER_SKIP		0x40000000	/* Freezer should not count it as freezable */
#define PF_SUSPEND_TASK		0x80000000      /* This thread called freeze_processes() and should not be frozen */

/*
 * Only the _current_ task can read/write to tsk->flags, but other
 * tasks can access tsk->flags in readonly mode for example
 * with tsk_used_math (like during threaded core dumping).
 * There is however an exception to this rule during ptrace
 * or during fork: the ptracer task is allowed to write to the
 * child->flags of its traced child (same goes for fork, the parent
 * can write to the child->flags), because we're guaranteed the
 * child is not running and in turn not changing child->flags
 * at the same time the parent does it.
 */
#define clear_stopped_child_used_math(child)	do { (child)->flags &= ~PF_USED_MATH; } while (0)
#define set_stopped_child_used_math(child)	do { (child)->flags |= PF_USED_MATH; } while (0)
#define clear_used_math()			clear_stopped_child_used_math(current)
#define set_used_math()				set_stopped_child_used_math(current)

#define conditional_stopped_child_used_math(condition, child) \
	do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= (condition) ? PF_USED_MATH : 0; } while (0)

#define conditional_used_math(condition)	conditional_stopped_child_used_math(condition, current)

#define copy_to_stopped_child_used_math(child) \
	do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= current->flags & PF_USED_MATH; } while (0)

/* NOTE: this will return 0 or PF_USED_MATH, it will never return 1 */
#define tsk_used_math(p)			((p)->flags & PF_USED_MATH)
#define used_math()				tsk_used_math(current)

static inline bool is_percpu_thread(void)
{
#ifdef CONFIG_SMP
	return (current->flags & PF_NO_SETAFFINITY) &&
		(current->nr_cpus_allowed  == 1);
#else
	return true;
#endif
}

/* Per-process atomic flags. */
#define PFA_NO_NEW_PRIVS		0	/* May not gain new privileges. */
#define PFA_SPREAD_PAGE			1	/* Spread page cache over cpuset */
#define PFA_SPREAD_SLAB			2	/* Spread some slab caches over cpuset */
#define PFA_SPEC_SSB_DISABLE		3	/* Speculative Store Bypass disabled */
#define PFA_SPEC_SSB_FORCE_DISABLE	4	/* Speculative Store Bypass force disabled*/
#define PFA_SPEC_IB_DISABLE		5	/* Indirect branch speculation restricted */
#define PFA_SPEC_IB_FORCE_DISABLE	6	/* Indirect branch speculation permanently restricted */
#define PFA_SPEC_SSB_NOEXEC		7	/* Speculative Store Bypass clear on execve() */

#define TASK_PFA_TEST(name, func)					\
	static inline bool task_##func(struct task_struct *p)		\
	{ return test_bit(PFA_##name, &p->atomic_flags); }

#define TASK_PFA_SET(name, func)					\
	static inline void task_set_##func(struct task_struct *p)	\
	{ set_bit(PFA_##name, &p->atomic_flags); }

#define TASK_PFA_CLEAR(name, func)					\
	static inline void task_clear_##func(struct task_struct *p)	\
	{ clear_bit(PFA_##name, &p->atomic_flags); }
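
/*
 * For example, TASK_PFA_TEST(SPREAD_PAGE, spread_page) expands to
 * task_spread_page(p), which tests bit PFA_SPREAD_PAGE in
 * p->atomic_flags.
 */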

TASK_PFA_TEST(NO_NEW_PRIVS, no_new_privs)
TASK_PFA_SET(NO_NEW_PRIVS, no_new_privs)

TASK_PFA_TEST(SPREAD_PAGE, spread_page)
TASK_PFA_SET(SPREAD_PAGE, spread_page)
TASK_PFA_CLEAR(SPREAD_PAGE, spread_page)

TASK_PFA_TEST(SPREAD_SLAB, spread_slab)
TASK_PFA_SET(SPREAD_SLAB, spread_slab)
TASK_PFA_CLEAR(SPREAD_SLAB, spread_slab)

TASK_PFA_TEST(SPEC_SSB_DISABLE, spec_ssb_disable)
TASK_PFA_SET(SPEC_SSB_DISABLE, spec_ssb_disable)
TASK_PFA_CLEAR(SPEC_SSB_DISABLE, spec_ssb_disable)

TASK_PFA_TEST(SPEC_SSB_NOEXEC, spec_ssb_noexec)
TASK_PFA_SET(SPEC_SSB_NOEXEC, spec_ssb_noexec)
TASK_PFA_CLEAR(SPEC_SSB_NOEXEC, spec_ssb_noexec)

TASK_PFA_TEST(SPEC_SSB_FORCE_DISABLE, spec_ssb_force_disable)
TASK_PFA_SET(SPEC_SSB_FORCE_DISABLE, spec_ssb_force_disable)

TASK_PFA_TEST(SPEC_IB_DISABLE, spec_ib_disable)
TASK_PFA_SET(SPEC_IB_DISABLE, spec_ib_disable)
TASK_PFA_CLEAR(SPEC_IB_DISABLE, spec_ib_disable)

TASK_PFA_TEST(SPEC_IB_FORCE_DISABLE, spec_ib_force_disable)
TASK_PFA_SET(SPEC_IB_FORCE_DISABLE, spec_ib_force_disable)

static inline void
current_restore_flags(unsigned long orig_flags, unsigned long flags)
{
	current->flags &= ~flags;
	current->flags |= orig_flags & flags;
}
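
/*
 * Informal sketch: save, set and restore a PF_* flag around a region:
 *
 *	unsigned int noio = current->flags & PF_MEMALLOC_NOIO;
 *
 *	current->flags |= PF_MEMALLOC_NOIO;
 *	...
 *	current_restore_flags(noio, PF_MEMALLOC_NOIO);
 *
 * The memalloc_noio_save()/memalloc_noio_restore() helpers in
 * <linux/sched/mm.h> are built this way.
 */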

extern int cpuset_cpumask_can_shrink(const struct cpumask *cur, const struct cpumask *trial);
extern int task_can_attach(struct task_struct *p, const struct cpumask *cs_cpus_allowed);
#ifdef CONFIG_SMP
extern void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask);
extern int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask);
#else
static inline void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
{
}
static inline int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
{
	if (!cpumask_test_cpu(0, new_mask))
		return -EINVAL;
	return 0;
}
#endif

extern int yield_to(struct task_struct *p, bool preempt);
extern void set_user_nice(struct task_struct *p, long nice);
extern int task_prio(const struct task_struct *p);

/**
 * task_nice - return the nice value of a given task.
 * @p: the task in question.
 *
 * Return: The nice value [ -20 ... 0 ... 19 ].
 */
static inline int task_nice(const struct task_struct *p)
{
	return PRIO_TO_NICE((p)->static_prio);
}

extern int can_nice(const struct task_struct *p, const int nice);
extern int task_curr(const struct task_struct *p);
extern int idle_cpu(int cpu);
extern int available_idle_cpu(int cpu);
extern int sched_setscheduler(struct task_struct *, int, const struct sched_param *);
extern int sched_setscheduler_nocheck(struct task_struct *, int, const struct sched_param *);
extern int sched_setattr(struct task_struct *, const struct sched_attr *);
extern int sched_setattr_nocheck(struct task_struct *, const struct sched_attr *);
extern struct task_struct *idle_task(int cpu);

/**
 * is_idle_task - is the specified task an idle task?
 * @p: the task in question.
 *
 * Return: 1 if @p is an idle task. 0 otherwise.
 */
static inline bool is_idle_task(const struct task_struct *p)
{
	return !!(p->flags & PF_IDLE);
}

extern struct task_struct *curr_task(int cpu);
extern void ia64_set_curr_task(int cpu, struct task_struct *p);

void yield(void);

union thread_union {
#ifndef CONFIG_ARCH_TASK_STRUCT_ON_STACK
	struct task_struct task;
#endif
#ifndef CONFIG_THREAD_INFO_IN_TASK
	struct thread_info thread_info;
#endif
	unsigned long stack[THREAD_SIZE/sizeof(long)];
};

#ifndef CONFIG_THREAD_INFO_IN_TASK
extern struct thread_info init_thread_info;
#endif

extern unsigned long init_stack[THREAD_SIZE / sizeof(unsigned long)];

#ifdef CONFIG_THREAD_INFO_IN_TASK
static inline struct thread_info *task_thread_info(struct task_struct *task)
{
	return &task->thread_info;
}
#elif !defined(__HAVE_THREAD_FUNCTIONS)
# define task_thread_info(task)	((struct thread_info *)(task)->stack)
#endif

/*
 * find a task by one of its numerical ids
 *
 * find_task_by_pid_ns():
 *      finds a task by its pid in the specified namespace
 * find_task_by_vpid():
 *      finds a task by its virtual pid
 *
 * see also find_vpid() etc in include/linux/pid.h
 */

extern struct task_struct *find_task_by_vpid(pid_t nr);
extern struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns);

/*
 * find a task by its virtual pid and get the task struct
 */
extern struct task_struct *find_get_task_by_vpid(pid_t nr);
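
/*
 * Illustrative sketch: the plain lookups above return the task without
 * taking a reference, so callers bracket them with RCU and pin the
 * result themselves, which is essentially what find_get_task_by_vpid()
 * does internally:
 *
 *	rcu_read_lock();
 *	p = find_task_by_vpid(nr);
 *	if (p)
 *		get_task_struct(p);
 *	rcu_read_unlock();
 */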

extern int wake_up_state(struct task_struct *tsk, unsigned int state);
extern int wake_up_process(struct task_struct *tsk);
extern void wake_up_new_task(struct task_struct *tsk);

#ifdef CONFIG_SMP
extern void kick_process(struct task_struct *tsk);
#else
static inline void kick_process(struct task_struct *tsk) { }
#endif

extern void __set_task_comm(struct task_struct *tsk, const char *from, bool exec);

static inline void set_task_comm(struct task_struct *tsk, const char *from)
{
	__set_task_comm(tsk, from, false);
}

extern char *__get_task_comm(char *to, size_t len, struct task_struct *tsk);
#define get_task_comm(buf, tsk) ({			\
	BUILD_BUG_ON(sizeof(buf) != TASK_COMM_LEN);	\
	__get_task_comm(buf, sizeof(buf), tsk);		\
})
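
/*
 * Illustrative use: the buffer passed to get_task_comm() must be a
 * real char[TASK_COMM_LEN] array or the BUILD_BUG_ON() fires, and the
 * macro returns the buffer, so it can feed printk() directly:
 *
 *	char comm[TASK_COMM_LEN];
 *
 *	pr_info("comm is %s\n", get_task_comm(comm, current));
 */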

#ifdef CONFIG_SMP
void scheduler_ipi(void);
extern unsigned long wait_task_inactive(struct task_struct *, long match_state);
#else
static inline void scheduler_ipi(void) { }
static inline unsigned long wait_task_inactive(struct task_struct *p, long match_state)
{
	return 1;
}
#endif

/*
 * Set thread flags in other task's structures.
 * See asm/thread_info.h for TIF_xxxx flags available:
 */
static inline void set_tsk_thread_flag(struct task_struct *tsk, int flag)
{
	set_ti_thread_flag(task_thread_info(tsk), flag);
}

static inline void clear_tsk_thread_flag(struct task_struct *tsk, int flag)
{
	clear_ti_thread_flag(task_thread_info(tsk), flag);
}

static inline void update_tsk_thread_flag(struct task_struct *tsk, int flag,
					  bool value)
{
	update_ti_thread_flag(task_thread_info(tsk), flag, value);
}

static inline int test_and_set_tsk_thread_flag(struct task_struct *tsk, int flag)
{
	return test_and_set_ti_thread_flag(task_thread_info(tsk), flag);
}

static inline int test_and_clear_tsk_thread_flag(struct task_struct *tsk, int flag)
{
	return test_and_clear_ti_thread_flag(task_thread_info(tsk), flag);
}

static inline int test_tsk_thread_flag(struct task_struct *tsk, int flag)
{
	return test_ti_thread_flag(task_thread_info(tsk), flag);
}

static inline void set_tsk_need_resched(struct task_struct *tsk)
{
	set_tsk_thread_flag(tsk, TIF_NEED_RESCHED);
}

static inline void clear_tsk_need_resched(struct task_struct *tsk)
{
	clear_tsk_thread_flag(tsk, TIF_NEED_RESCHED);
}

static inline int test_tsk_need_resched(struct task_struct *tsk)
{
	return unlikely(test_tsk_thread_flag(tsk, TIF_NEED_RESCHED));
}

/*
 * cond_resched() and cond_resched_lock(): latency reduction via
 * explicit rescheduling in places that are safe. The return
 * value indicates whether a reschedule was done in fact.
 * cond_resched_lock() will drop the spinlock before scheduling and
 * reacquire it before returning.
 */
#ifndef CONFIG_PREEMPTION
extern int _cond_resched(void);
#else
static inline int _cond_resched(void) { return 0; }
#endif

#define cond_resched() ({			\
	___might_sleep(__FILE__, __LINE__, 0);	\
	_cond_resched();			\
})
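
/*
 * Illustrative sketch: a long-running loop in process context bounds
 * its scheduling latency by offering a reschedule point on every
 * iteration (process_one() is a hypothetical unit of work):
 *
 *	while (!done) {
 *		done = process_one();
 *		cond_resched();
 *	}
 */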

extern int __cond_resched_lock(spinlock_t *lock);

#define cond_resched_lock(lock) ({				\
	___might_sleep(__FILE__, __LINE__, PREEMPT_LOCK_OFFSET);\
	__cond_resched_lock(lock);				\
})

static inline void cond_resched_rcu(void)
{
#if defined(CONFIG_DEBUG_ATOMIC_SLEEP) || !defined(CONFIG_PREEMPT_RCU)
	rcu_read_unlock();
	cond_resched();
	rcu_read_lock();
#endif
}

/*
 * Does a critical section need to be broken due to another
 * task waiting? (technically it does not depend on CONFIG_PREEMPTION,
 * but reflects a general need for low latency)
 */
static inline int spin_needbreak(spinlock_t *lock)
{
#ifdef CONFIG_PREEMPTION
	return spin_is_contended(lock);
#else
	return 0;
#endif
}
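
/*
 * Illustrative sketch of the classic lock-break pattern built on
 * spin_needbreak() (walk_one() is a hypothetical per-entry step):
 *
 *	spin_lock(&lock);
 *	while (walk_one()) {
 *		if (spin_needbreak(&lock)) {
 *			spin_unlock(&lock);
 *			cond_resched();
 *			spin_lock(&lock);
 *		}
 *	}
 *	spin_unlock(&lock);
 */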

static __always_inline bool need_resched(void)
{
	return unlikely(tif_need_resched());
}

/*
 * Wrappers for p->thread_info->cpu access. No-op on UP.
 */
#ifdef CONFIG_SMP

static inline unsigned int task_cpu(const struct task_struct *p)
{
#ifdef CONFIG_THREAD_INFO_IN_TASK
	return READ_ONCE(p->cpu);
#else
	return READ_ONCE(task_thread_info(p)->cpu);
#endif
}

extern void set_task_cpu(struct task_struct *p, unsigned int cpu);

#else

static inline unsigned int task_cpu(const struct task_struct *p)
{
	return 0;
}

static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
{
}

#endif /* CONFIG_SMP */

/*
 * In order to reduce various lock holder preemption latencies provide an
 * interface to see if a vCPU is currently running or not.
 *
 * This allows us to terminate optimistic spin loops and block, analogous to
 * the native optimistic spin heuristic of testing if the lock owner task is
 * running or not.
 */
#ifndef vcpu_is_preempted
static inline bool vcpu_is_preempted(int cpu)
{
	return false;
}
#endif
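
/*
 * Illustrative sketch: an optimistic spin loop stops burning cycles
 * once the lock owner's vCPU has been preempted by the hypervisor
 * (trylock() and owner_cpu() are hypothetical lock accessors):
 *
 *	while (!trylock(lock)) {
 *		if (vcpu_is_preempted(owner_cpu(lock)))
 *			break;		(fall back to blocking)
 *		cpu_relax();
 *	}
 */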

extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask);
extern long sched_getaffinity(pid_t pid, struct cpumask *mask);

#ifndef TASK_SIZE_OF
#define TASK_SIZE_OF(tsk)	TASK_SIZE
#endif

#ifdef CONFIG_RSEQ

/*
 * Map the event mask on the user-space ABI enum rseq_cs_flags
 * for direct mask checks.
 */
enum rseq_event_mask_bits {
	RSEQ_EVENT_PREEMPT_BIT	= RSEQ_CS_FLAG_NO_RESTART_ON_PREEMPT_BIT,
	RSEQ_EVENT_SIGNAL_BIT	= RSEQ_CS_FLAG_NO_RESTART_ON_SIGNAL_BIT,
	RSEQ_EVENT_MIGRATE_BIT	= RSEQ_CS_FLAG_NO_RESTART_ON_MIGRATE_BIT,
};

enum rseq_event_mask {
	RSEQ_EVENT_PREEMPT	= (1U << RSEQ_EVENT_PREEMPT_BIT),
	RSEQ_EVENT_SIGNAL	= (1U << RSEQ_EVENT_SIGNAL_BIT),
	RSEQ_EVENT_MIGRATE	= (1U << RSEQ_EVENT_MIGRATE_BIT),
};
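
/*
 * Illustrative sketch: because each RSEQ_EVENT_* bit is defined from
 * the matching RSEQ_CS_FLAG_NO_RESTART_ON_* bit, deciding whether a
 * critical section must restart is roughly a single mask test:
 *
 *	return !!(t->rseq_event_mask & ~cs_flags);
 *
 * i.e. any recorded event whose restart is not suppressed by the
 * critical section's flags forces a restart.
 */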

static inline void rseq_set_notify_resume(struct task_struct *t)
{
	if (t->rseq)
		set_tsk_thread_flag(t, TIF_NOTIFY_RESUME);
}

void __rseq_handle_notify_resume(struct ksignal *sig, struct pt_regs *regs);

static inline void rseq_handle_notify_resume(struct ksignal *ksig,
					     struct pt_regs *regs)
{
	if (current->rseq)
		__rseq_handle_notify_resume(ksig, regs);
}

static inline void rseq_signal_deliver(struct ksignal *ksig,
				       struct pt_regs *regs)
{
	preempt_disable();
	__set_bit(RSEQ_EVENT_SIGNAL_BIT, &current->rseq_event_mask);
	preempt_enable();
	rseq_handle_notify_resume(ksig, regs);
}

/* rseq_preempt() requires preemption to be disabled. */
static inline void rseq_preempt(struct task_struct *t)
{
	__set_bit(RSEQ_EVENT_PREEMPT_BIT, &t->rseq_event_mask);
	rseq_set_notify_resume(t);
}

/* rseq_migrate() requires preemption to be disabled. */
static inline void rseq_migrate(struct task_struct *t)
{
	__set_bit(RSEQ_EVENT_MIGRATE_BIT, &t->rseq_event_mask);
	rseq_set_notify_resume(t);
}

/*
 * If parent process has a registered restartable sequences area, the
 * child inherits. Unregister rseq for a clone with CLONE_VM set.
 */
static inline void rseq_fork(struct task_struct *t, unsigned long clone_flags)
{
	if (clone_flags & CLONE_VM) {
		t->rseq = NULL;
		t->rseq_sig = 0;
		t->rseq_event_mask = 0;
	} else {
		t->rseq = current->rseq;
		t->rseq_sig = current->rseq_sig;
		t->rseq_event_mask = current->rseq_event_mask;
	}
}

static inline void rseq_execve(struct task_struct *t)
{
	t->rseq = NULL;
	t->rseq_sig = 0;
	t->rseq_event_mask = 0;
}

#else

static inline void rseq_set_notify_resume(struct task_struct *t)
{
}
static inline void rseq_handle_notify_resume(struct ksignal *ksig,
					     struct pt_regs *regs)
{
}
static inline void rseq_signal_deliver(struct ksignal *ksig,
				       struct pt_regs *regs)
{
}
static inline void rseq_preempt(struct task_struct *t)
{
}
static inline void rseq_migrate(struct task_struct *t)
{
}
static inline void rseq_fork(struct task_struct *t, unsigned long clone_flags)
{
}
static inline void rseq_execve(struct task_struct *t)
{
}

#endif

void __exit_umh(struct task_struct *tsk);

static inline void exit_umh(struct task_struct *tsk)
{
	if (unlikely(tsk->flags & PF_UMH))
		__exit_umh(tsk);
}

#ifdef CONFIG_DEBUG_RSEQ

void rseq_syscall(struct pt_regs *regs);

#else

static inline void rseq_syscall(struct pt_regs *regs)
{
}

#endif

const struct sched_avg *sched_trace_cfs_rq_avg(struct cfs_rq *cfs_rq);
char *sched_trace_cfs_rq_path(struct cfs_rq *cfs_rq, char *str, int len);
int sched_trace_cfs_rq_cpu(struct cfs_rq *cfs_rq);

const struct sched_avg *sched_trace_rq_avg_rt(struct rq *rq);
const struct sched_avg *sched_trace_rq_avg_dl(struct rq *rq);
const struct sched_avg *sched_trace_rq_avg_irq(struct rq *rq);

int sched_trace_rq_cpu(struct rq *rq);

const struct cpumask *sched_trace_rd_span(struct root_domain *rd);

#endif