sched.h 43.8 KB
Newer Older
L
Linus Torvalds 已提交
1 2 3
#ifndef _LINUX_SCHED_H
#define _LINUX_SCHED_H

4 5 6 7
/*
 * Define 'struct task_struct' and provide the main scheduler
 * APIs (schedule(), wakeup variants, etc.)
 */
8

9
#include <uapi/linux/sched.h>
10

11
#include <asm/current.h>
L
Linus Torvalds 已提交
12

13
#include <linux/pid.h>
L
Linus Torvalds 已提交
14
#include <linux/sem.h>
15
#include <linux/shm.h>
16 17 18 19
#include <linux/kcov.h>
#include <linux/mutex.h>
#include <linux/plist.h>
#include <linux/hrtimer.h>
L
Linus Torvalds 已提交
20
#include <linux/seccomp.h>
21
#include <linux/nodemask.h>
22
#include <linux/rcupdate.h>
23
#include <linux/resource.h>
A
Arjan van de Ven 已提交
24
#include <linux/latencytop.h>
25 26 27 28
#include <linux/sched/prio.h>
#include <linux/signal_types.h>
#include <linux/mm_types_task.h>
#include <linux/task_io_accounting.h>
29

30
/* task_struct member predeclarations (sorted alphabetically): */
31 32
struct audit_context;
struct backing_dev_info;
33
struct bio_list;
34
struct blk_plug;
35 36 37 38 39
struct cfs_rq;
struct fs_struct;
struct futex_pi_state;
struct io_context;
struct mempolicy;
40
struct nameidata;
41 42 43 44 45 46 47 48 49
struct nsproxy;
struct perf_event_context;
struct pid_namespace;
struct pipe_inode_info;
struct rcu_node;
struct reclaim_state;
struct robust_list_head;
struct sched_attr;
struct sched_param;
I
Ingo Molnar 已提交
50
struct seq_file;
51 52 53
struct sighand_struct;
struct signal_struct;
struct task_delay_info;
54
struct task_group;
L
Linus Torvalds 已提交
55

56 57 58 59 60 61 62 63 64 65
/*
 * Task state bitmask. NOTE! These bits are also
 * encoded in fs/proc/array.c: get_task_state().
 *
 * We have two separate sets of flags: task->state
 * is about runnability, while task->exit_state are
 * about the task exiting. Confusing, but this way
 * modifying one set can't modify the other one by
 * mistake.
 */
66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112

/* Used in tsk->state: */
#define TASK_RUNNING			0
#define TASK_INTERRUPTIBLE		1
#define TASK_UNINTERRUPTIBLE		2
#define __TASK_STOPPED			4
#define __TASK_TRACED			8
/* Used in tsk->exit_state: */
#define EXIT_DEAD			16
#define EXIT_ZOMBIE			32
#define EXIT_TRACE			(EXIT_ZOMBIE | EXIT_DEAD)
/* Used in tsk->state again: */
#define TASK_DEAD			64
#define TASK_WAKEKILL			128
#define TASK_WAKING			256
#define TASK_PARKED			512
#define TASK_NOLOAD			1024
#define TASK_NEW			2048
#define TASK_STATE_MAX			4096

#define TASK_STATE_TO_CHAR_STR		"RSDTtXZxKWPNn"

/* Convenience macros for the sake of set_current_state: */
#define TASK_KILLABLE			(TASK_WAKEKILL | TASK_UNINTERRUPTIBLE)
#define TASK_STOPPED			(TASK_WAKEKILL | __TASK_STOPPED)
#define TASK_TRACED			(TASK_WAKEKILL | __TASK_TRACED)

#define TASK_IDLE			(TASK_UNINTERRUPTIBLE | TASK_NOLOAD)

/* Convenience macros for the sake of wake_up(): */
#define TASK_NORMAL			(TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE)
#define TASK_ALL			(TASK_NORMAL | __TASK_STOPPED | __TASK_TRACED)

/* get_task_state(): */
#define TASK_REPORT			(TASK_RUNNING | TASK_INTERRUPTIBLE | \
					 TASK_UNINTERRUPTIBLE | __TASK_STOPPED | \
					 __TASK_TRACED | EXIT_ZOMBIE | EXIT_DEAD)

#define task_is_traced(task)		((task->state & __TASK_TRACED) != 0)

#define task_is_stopped(task)		((task->state & __TASK_STOPPED) != 0)

#define task_is_stopped_or_traced(task)	((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0)

#define task_contributes_to_load(task)	((task->state & TASK_UNINTERRUPTIBLE) != 0 && \
					 (task->flags & PF_FROZEN) == 0 && \
					 (task->state & TASK_NOLOAD) == 0)
L
Linus Torvalds 已提交
113

P
Peter Zijlstra 已提交
114 115 116 117 118 119 120 121 122 123
#ifdef CONFIG_DEBUG_ATOMIC_SLEEP

#define __set_current_state(state_value)			\
	do {							\
		current->task_state_change = _THIS_IP_;		\
		current->state = (state_value);			\
	} while (0)
#define set_current_state(state_value)				\
	do {							\
		current->task_state_change = _THIS_IP_;		\
124
		smp_store_mb(current->state, (state_value));	\
P
Peter Zijlstra 已提交
125 126 127
	} while (0)

#else
128 129 130 131 132
/*
 * set_current_state() includes a barrier so that the write of current->state
 * is correctly serialised wrt the caller's subsequent test of whether to
 * actually sleep:
 *
133
 *   for (;;) {
134
 *	set_current_state(TASK_UNINTERRUPTIBLE);
135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156
 *	if (!need_sleep)
 *		break;
 *
 *	schedule();
 *   }
 *   __set_current_state(TASK_RUNNING);
 *
 * If the caller does not need such serialisation (because, for instance, the
 * condition test and condition change and wakeup are under the same lock) then
 * use __set_current_state().
 *
 * The above is typically ordered against the wakeup, which does:
 *
 *	need_sleep = false;
 *	wake_up_state(p, TASK_UNINTERRUPTIBLE);
 *
 * Where wake_up_state() (and all other wakeup primitives) imply enough
 * barriers to order the store of the variable against wakeup.
 *
 * Wakeup will do: if (@state & p->state) p->state = TASK_RUNNING, that is,
 * once it observes the TASK_UNINTERRUPTIBLE store the waking CPU can issue a
 * TASK_RUNNING store which can collide with __set_current_state(TASK_RUNNING).
157
 *
158
 * This is obviously fine, since they both store the exact same value.
159
 *
160
 * Also see the comments of try_to_wake_up().
161
 */
162 163
#define __set_current_state(state_value) do { current->state = (state_value); } while (0)
#define set_current_state(state_value)	 smp_store_mb(current->state, (state_value))
P
Peter Zijlstra 已提交
164 165
#endif

166 167
/* Task command name length: */
#define TASK_COMM_LEN			16
L
Linus Torvalds 已提交
168

169
extern cpumask_var_t			cpu_isolated_map;
170

L
Linus Torvalds 已提交
171 172
extern void scheduler_tick(void);

173 174 175 176 177 178 179
#define	MAX_SCHEDULE_TIMEOUT		LONG_MAX

extern long schedule_timeout(long timeout);
extern long schedule_timeout_interruptible(long timeout);
extern long schedule_timeout_killable(long timeout);
extern long schedule_timeout_uninterruptible(long timeout);
extern long schedule_timeout_idle(long timeout);
L
Linus Torvalds 已提交
180
asmlinkage void schedule(void);
181
extern void schedule_preempt_disabled(void);
L
Linus Torvalds 已提交
182

183 184
extern int __must_check io_schedule_prepare(void);
extern void io_schedule_finish(int token);
185
extern long io_schedule_timeout(long timeout);
186
extern void io_schedule(void);
187

188
/**
189
 * struct prev_cputime - snapshot of system and user cputime
190 191
 * @utime: time spent in user mode
 * @stime: time spent in system mode
192
 * @lock: protects the above two fields
193
 *
194 195
 * Stores previous user/system time values such that we can guarantee
 * monotonicity.
196
 */
197 198
struct prev_cputime {
#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
199 200 201
	u64				utime;
	u64				stime;
	raw_spinlock_t			lock;
202
#endif
203 204
};

205 206
/**
 * struct task_cputime - collected CPU time counts
207 208
 * @utime:		time spent in user mode, in nanoseconds
 * @stime:		time spent in kernel mode, in nanoseconds
209
 * @sum_exec_runtime:	total time spent on the CPU, in nanoseconds
210
 *
211 212 213
 * This structure groups together three kinds of CPU time that are tracked for
 * threads and thread groups.  Most things considering CPU time want to group
 * these counts together and treat all three of them in parallel.
214 215
 */
struct task_cputime {
216 217 218
	u64				utime;
	u64				stime;
	unsigned long long		sum_exec_runtime;
219
};
220

221 222 223 224
/* Alternate field names when used on cache expirations: */
#define virt_exp			utime
#define prof_exp			stime
#define sched_exp			sum_exec_runtime
225

226 227 228 229 230 231 232 233 234 235 236 237 238
enum vtime_state {
	/* Task is sleeping or running in a CPU with VTIME inactive: */
	VTIME_INACTIVE = 0,
	/* Task runs in userspace in a CPU with VTIME active: */
	VTIME_USER,
	/* Task runs in kernelspace in a CPU with VTIME active: */
	VTIME_SYS,
};

struct vtime {
	seqcount_t		seqcount;
	unsigned long long	starttime;
	enum vtime_state	state;
239 240 241
	u64			utime;
	u64			stime;
	u64			gtime;
242 243
};

L
Linus Torvalds 已提交
244
struct sched_info {
245
#ifdef CONFIG_SCHED_INFO
246 247 248 249 250 251 252 253 254 255 256 257 258 259 260
	/* Cumulative counters: */

	/* # of times we have run on this CPU: */
	unsigned long			pcount;

	/* Time spent waiting on a runqueue: */
	unsigned long long		run_delay;

	/* Timestamps: */

	/* When did we last run on a CPU? */
	unsigned long long		last_arrival;

	/* When were we last queued to run? */
	unsigned long long		last_queued;
L
Linus Torvalds 已提交
261

262
#endif /* CONFIG_SCHED_INFO */
263
};
L
Linus Torvalds 已提交
264

265 266 267 268 269 270 271
/*
 * Integer metrics need fixed point arithmetic, e.g., sched/fair
 * has a few: load, load_avg, util_avg, freq, and capacity.
 *
 * We define a basic fixed point arithmetic range, and then formalize
 * all these metrics based on that basic range.
 */
272 273
# define SCHED_FIXEDPOINT_SHIFT		10
# define SCHED_FIXEDPOINT_SCALE		(1L << SCHED_FIXEDPOINT_SHIFT)
274

I
Ingo Molnar 已提交
275
struct load_weight {
276 277
	unsigned long			weight;
	u32				inv_weight;
I
Ingo Molnar 已提交
278 279
};

280
/*
281 282 283 284 285 286 287 288 289
 * The load_avg/util_avg accumulates an infinite geometric series
 * (see __update_load_avg() in kernel/sched/fair.c).
 *
 * [load_avg definition]
 *
 *   load_avg = runnable% * scale_load_down(load)
 *
 * where runnable% is the time ratio that a sched_entity is runnable.
 * For cfs_rq, it is the aggregated load_avg of all runnable and
290
 * blocked sched_entities.
291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330
 *
 * load_avg may also take frequency scaling into account:
 *
 *   load_avg = runnable% * scale_load_down(load) * freq%
 *
 * where freq% is the CPU frequency normalized to the highest frequency.
 *
 * [util_avg definition]
 *
 *   util_avg = running% * SCHED_CAPACITY_SCALE
 *
 * where running% is the time ratio that a sched_entity is running on
 * a CPU. For cfs_rq, it is the aggregated util_avg of all runnable
 * and blocked sched_entities.
 *
 * util_avg may also factor frequency scaling and CPU capacity scaling:
 *
 *   util_avg = running% * SCHED_CAPACITY_SCALE * freq% * capacity%
 *
 * where freq% is the same as above, and capacity% is the CPU capacity
 * normalized to the greatest capacity (due to uarch differences, etc).
 *
 * N.B., the above ratios (runnable%, running%, freq%, and capacity%)
 * themselves are in the range of [0, 1]. To do fixed point arithmetics,
 * we therefore scale them to as large a range as necessary. This is for
 * example reflected by util_avg's SCHED_CAPACITY_SCALE.
 *
 * [Overflow issue]
 *
 * The 64-bit load_sum can have 4353082796 (=2^64/47742/88761) entities
 * with the highest load (=88761), always runnable on a single cfs_rq,
 * and should not overflow as the number already hits PID_MAX_LIMIT.
 *
 * For all other cases (including 32-bit kernels), struct load_weight's
 * weight will overflow first before we do, because:
 *
 *    Max(load_avg) <= Max(load.weight)
 *
 * Then it is the load_weight's responsibility to consider overflow
 * issues.
331
 */
332
struct sched_avg {
333 334 335 336 337 338
	u64				last_update_time;
	u64				load_sum;
	u32				util_sum;
	u32				period_contrib;
	unsigned long			load_avg;
	unsigned long			util_avg;
339 340
};

341
struct sched_statistics {
342
#ifdef CONFIG_SCHEDSTATS
343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373
	u64				wait_start;
	u64				wait_max;
	u64				wait_count;
	u64				wait_sum;
	u64				iowait_count;
	u64				iowait_sum;

	u64				sleep_start;
	u64				sleep_max;
	s64				sum_sleep_runtime;

	u64				block_start;
	u64				block_max;
	u64				exec_max;
	u64				slice_max;

	u64				nr_migrations_cold;
	u64				nr_failed_migrations_affine;
	u64				nr_failed_migrations_running;
	u64				nr_failed_migrations_hot;
	u64				nr_forced_migrations;

	u64				nr_wakeups;
	u64				nr_wakeups_sync;
	u64				nr_wakeups_migrate;
	u64				nr_wakeups_local;
	u64				nr_wakeups_remote;
	u64				nr_wakeups_affine;
	u64				nr_wakeups_affine_attempts;
	u64				nr_wakeups_passive;
	u64				nr_wakeups_idle;
374
#endif
375
};
376 377

struct sched_entity {
378 379 380 381 382
	/* For load-balancing: */
	struct load_weight		load;
	struct rb_node			run_node;
	struct list_head		group_node;
	unsigned int			on_rq;
383

384 385 386 387
	u64				exec_start;
	u64				sum_exec_runtime;
	u64				vruntime;
	u64				prev_sum_exec_runtime;
388

389
	u64				nr_migrations;
390

391
	struct sched_statistics		statistics;
392

I
Ingo Molnar 已提交
393
#ifdef CONFIG_FAIR_GROUP_SCHED
394 395
	int				depth;
	struct sched_entity		*parent;
I
Ingo Molnar 已提交
396
	/* rq on which this entity is (to be) queued: */
397
	struct cfs_rq			*cfs_rq;
I
Ingo Molnar 已提交
398
	/* rq "owned" by this entity/group: */
399
	struct cfs_rq			*my_q;
I
Ingo Molnar 已提交
400
#endif
401

402
#ifdef CONFIG_SMP
403 404 405 406 407 408
	/*
	 * Per entity load average tracking.
	 *
	 * Put into separate cache line so it does not
	 * collide with read-mostly values above.
	 */
409
	struct sched_avg		avg ____cacheline_aligned_in_smp;
410
#endif
I
Ingo Molnar 已提交
411
};
412

P
Peter Zijlstra 已提交
413
struct sched_rt_entity {
414 415 416 417 418 419 420 421
	struct list_head		run_list;
	unsigned long			timeout;
	unsigned long			watchdog_stamp;
	unsigned int			time_slice;
	unsigned short			on_rq;
	unsigned short			on_list;

	struct sched_rt_entity		*back;
422
#ifdef CONFIG_RT_GROUP_SCHED
423
	struct sched_rt_entity		*parent;
P
Peter Zijlstra 已提交
424
	/* rq on which this entity is (to be) queued: */
425
	struct rt_rq			*rt_rq;
P
Peter Zijlstra 已提交
426
	/* rq "owned" by this entity/group: */
427
	struct rt_rq			*my_q;
P
Peter Zijlstra 已提交
428
#endif
P
Peter Zijlstra 已提交
429 430
};

431
struct sched_dl_entity {
432
	struct rb_node			rb_node;
433 434 435

	/*
	 * Original scheduling parameters. Copied here from sched_attr
436 437
	 * during sched_setattr(), they will remain the same until
	 * the next sched_setattr().
438
	 */
439 440 441
	u64				dl_runtime;	/* Maximum runtime for each instance	*/
	u64				dl_deadline;	/* Relative deadline of each instance	*/
	u64				dl_period;	/* Separation of two instances (period) */
442
	u64				dl_bw;		/* dl_runtime / dl_period		*/
443
	u64				dl_density;	/* dl_runtime / dl_deadline		*/
444 445 446 447 448 449

	/*
	 * Actual scheduling parameters. Initialized with the values above,
	 * they are continously updated during task execution. Note that
	 * the remaining runtime could be < 0 in case we are in overrun.
	 */
450 451 452
	s64				runtime;	/* Remaining runtime for this instance	*/
	u64				deadline;	/* Absolute deadline for this instance	*/
	unsigned int			flags;		/* Specifying the scheduler behaviour	*/
453 454 455 456 457 458 459 460

	/*
	 * Some bool flags:
	 *
	 * @dl_throttled tells if we exhausted the runtime. If so, the
	 * task has to wait for a replenishment to be performed at the
	 * next firing of dl_timer.
	 *
461 462
	 * @dl_boosted tells if we are boosted due to DI. If so we are
	 * outside bandwidth enforcement mechanism (but only until we
463 464
	 * exit the critical section);
	 *
465
	 * @dl_yielded tells if task gave up the CPU before consuming
466
	 * all its available runtime during the last job.
467 468 469 470 471 472 473
	 *
	 * @dl_non_contending tells if the task is inactive while still
	 * contributing to the active utilization. In other words, it
	 * indicates if the inactive timer has been armed and its handler
	 * has not been executed yet. This flag is useful to avoid race
	 * conditions between the inactive timer handler and the wakeup
	 * code.
474
	 */
475 476 477
	int				dl_throttled;
	int				dl_boosted;
	int				dl_yielded;
478
	int				dl_non_contending;
479 480 481 482 483

	/*
	 * Bandwidth enforcement timer. Each -deadline task has its
	 * own bandwidth to be enforced, thus we need one timer per task.
	 */
484
	struct hrtimer			dl_timer;
485 486 487 488 489 490 491 492 493

	/*
	 * Inactive timer, responsible for decreasing the active utilization
	 * at the "0-lag time". When a -deadline task blocks, it contributes
	 * to GRUB's active utilization until the "0-lag time", hence a
	 * timer is needed to decrease the active utilization at the correct
	 * time.
	 */
	struct hrtimer inactive_timer;
494
};
495

496 497
union rcu_special {
	struct {
498 499 500 501 502 503
		u8			blocked;
		u8			need_qs;
		u8			exp_need_qs;

		/* Otherwise the compiler can store garbage here: */
		u8			pad;
504 505
	} b; /* Bits. */
	u32 s; /* Set of bits. */
506
};
507

P
Peter Zijlstra 已提交
508 509 510
enum perf_event_task_context {
	perf_invalid_context = -1,
	perf_hw_context = 0,
511
	perf_sw_context,
P
Peter Zijlstra 已提交
512 513 514
	perf_nr_task_contexts,
};

515 516 517 518
struct wake_q_node {
	struct wake_q_node *next;
};

L
Linus Torvalds 已提交
519
struct task_struct {
520 521 522 523 524
#ifdef CONFIG_THREAD_INFO_IN_TASK
	/*
	 * For reasons of header soup (see current_thread_info()), this
	 * must be the first element of task_struct.
	 */
525
	struct thread_info		thread_info;
526
#endif
527 528 529 530 531 532 533
	/* -1 unrunnable, 0 runnable, >0 stopped: */
	volatile long			state;
	void				*stack;
	atomic_t			usage;
	/* Per task flags (PF_*), defined further below: */
	unsigned int			flags;
	unsigned int			ptrace;
L
Linus Torvalds 已提交
534

535
#ifdef CONFIG_SMP
536 537
	struct llist_node		wake_entry;
	int				on_cpu;
538
#ifdef CONFIG_THREAD_INFO_IN_TASK
539 540
	/* Current CPU: */
	unsigned int			cpu;
541
#endif
542 543 544
	unsigned int			wakee_flips;
	unsigned long			wakee_flip_decay_ts;
	struct task_struct		*last_wakee;
545

546
	int				wake_cpu;
547
#endif
548 549 550 551 552 553
	int				on_rq;

	int				prio;
	int				static_prio;
	int				normal_prio;
	unsigned int			rt_priority;
554

555 556 557
	const struct sched_class	*sched_class;
	struct sched_entity		se;
	struct sched_rt_entity		rt;
P
Peter Zijlstra 已提交
558
#ifdef CONFIG_CGROUP_SCHED
559
	struct task_group		*sched_task_group;
P
Peter Zijlstra 已提交
560
#endif
561
	struct sched_dl_entity		dl;
L
Linus Torvalds 已提交
562

563
#ifdef CONFIG_PREEMPT_NOTIFIERS
564 565
	/* List of struct preempt_notifier: */
	struct hlist_head		preempt_notifiers;
566 567
#endif

568
#ifdef CONFIG_BLK_DEV_IO_TRACE
569
	unsigned int			btrace_seq;
570
#endif
L
Linus Torvalds 已提交
571

572 573 574
	unsigned int			policy;
	int				nr_cpus_allowed;
	cpumask_t			cpus_allowed;
L
Linus Torvalds 已提交
575

P
Paul E. McKenney 已提交
576
#ifdef CONFIG_PREEMPT_RCU
577 578 579 580
	int				rcu_read_lock_nesting;
	union rcu_special		rcu_read_unlock_special;
	struct list_head		rcu_node_entry;
	struct rcu_node			*rcu_blocked_node;
581
#endif /* #ifdef CONFIG_PREEMPT_RCU */
582

P
Paul E. McKenney 已提交
583
#ifdef CONFIG_TASKS_RCU
584 585 586 587
	unsigned long			rcu_tasks_nvcsw;
	bool				rcu_tasks_holdout;
	struct list_head		rcu_tasks_holdout_list;
	int				rcu_tasks_idle_cpu;
P
Paul E. McKenney 已提交
588
#endif /* #ifdef CONFIG_TASKS_RCU */
P
Paul E. McKenney 已提交
589

590
	struct sched_info		sched_info;
L
Linus Torvalds 已提交
591

592
	struct list_head		tasks;
593
#ifdef CONFIG_SMP
594 595
	struct plist_node		pushable_tasks;
	struct rb_node			pushable_dl_tasks;
596
#endif
L
Linus Torvalds 已提交
597

598 599
	struct mm_struct		*mm;
	struct mm_struct		*active_mm;
600 601

	/* Per-thread vma caching: */
602
	struct vmacache			vmacache;
603

604 605
#ifdef SPLIT_RSS_COUNTING
	struct task_rss_stat		rss_stat;
606
#endif
607 608 609 610 611 612 613 614 615 616 617 618 619 620 621 622 623 624 625 626 627 628 629 630 631 632
	int				exit_state;
	int				exit_code;
	int				exit_signal;
	/* The signal sent when the parent dies: */
	int				pdeath_signal;
	/* JOBCTL_*, siglock protected: */
	unsigned long			jobctl;

	/* Used for emulating ABI behavior of previous Linux versions: */
	unsigned int			personality;

	/* Scheduler bits, serialized by scheduler locks: */
	unsigned			sched_reset_on_fork:1;
	unsigned			sched_contributes_to_load:1;
	unsigned			sched_migrated:1;
	unsigned			sched_remote_wakeup:1;
	/* Force alignment to the next boundary: */
	unsigned			:0;

	/* Unserialized, strictly 'current' */

	/* Bit to tell LSMs we're in execve(): */
	unsigned			in_execve:1;
	unsigned			in_iowait:1;
#ifndef TIF_RESTORE_SIGMASK
	unsigned			restore_sigmask:1;
633
#endif
T
Tejun Heo 已提交
634
#ifdef CONFIG_MEMCG
635
	unsigned			memcg_may_oom:1;
636
#ifndef CONFIG_SLOB
637
	unsigned			memcg_kmem_skip_account:1;
638
#endif
639
#endif
640
#ifdef CONFIG_COMPAT_BRK
641
	unsigned			brk_randomized:1;
642
#endif
643 644 645 646
#ifdef CONFIG_CGROUPS
	/* disallow userland-initiated cgroup migration */
	unsigned			no_cgroup_migration:1;
#endif
647

648
	unsigned long			atomic_flags; /* Flags requiring atomic access. */
649

650
	struct restart_block		restart_block;
651

652 653
	pid_t				pid;
	pid_t				tgid;
654

655
#ifdef CONFIG_CC_STACKPROTECTOR
656 657
	/* Canary value for the -fstack-protector GCC feature: */
	unsigned long			stack_canary;
658
#endif
659
	/*
660
	 * Pointers to the (original) parent process, youngest child, younger sibling,
661
	 * older sibling, respectively.  (p->father can be replaced with
R
Roland McGrath 已提交
662
	 * p->real_parent->pid)
L
Linus Torvalds 已提交
663
	 */
664 665 666 667 668 669 670

	/* Real parent process: */
	struct task_struct __rcu	*real_parent;

	/* Recipient of SIGCHLD, wait4() reports: */
	struct task_struct __rcu	*parent;

L
Linus Torvalds 已提交
671
	/*
672
	 * Children/sibling form the list of natural children:
L
Linus Torvalds 已提交
673
	 */
674 675 676
	struct list_head		children;
	struct list_head		sibling;
	struct task_struct		*group_leader;
L
Linus Torvalds 已提交
677

R
Roland McGrath 已提交
678
	/*
679 680
	 * 'ptraced' is the list of tasks this task is using ptrace() on.
	 *
R
Roland McGrath 已提交
681
	 * This includes both natural children and PTRACE_ATTACH targets.
682
	 * 'ptrace_entry' is this task's link on the p->parent->ptraced list.
R
Roland McGrath 已提交
683
	 */
684 685
	struct list_head		ptraced;
	struct list_head		ptrace_entry;
R
Roland McGrath 已提交
686

L
Linus Torvalds 已提交
687
	/* PID/PID hash table linkage. */
688 689 690 691 692
	struct pid_link			pids[PIDTYPE_MAX];
	struct list_head		thread_group;
	struct list_head		thread_node;

	struct completion		*vfork_done;
L
Linus Torvalds 已提交
693

694 695
	/* CLONE_CHILD_SETTID: */
	int __user			*set_child_tid;
L
Linus Torvalds 已提交
696

697 698 699 700 701
	/* CLONE_CHILD_CLEARTID: */
	int __user			*clear_child_tid;

	u64				utime;
	u64				stime;
702
#ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
703 704
	u64				utimescaled;
	u64				stimescaled;
705
#endif
706 707
	u64				gtime;
	struct prev_cputime		prev_cputime;
708
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
709
	struct vtime			vtime;
710
#endif
711 712

#ifdef CONFIG_NO_HZ_FULL
713
	atomic_t			tick_dep_mask;
714
#endif
715 716 717 718 719 720 721 722 723 724 725 726 727
	/* Context switch counts: */
	unsigned long			nvcsw;
	unsigned long			nivcsw;

	/* Monotonic time in nsecs: */
	u64				start_time;

	/* Boot based time in nsecs: */
	u64				real_start_time;

	/* MM fault and swap info: this can arguably be seen as either mm-specific or thread-specific: */
	unsigned long			min_flt;
	unsigned long			maj_flt;
L
Linus Torvalds 已提交
728

729
#ifdef CONFIG_POSIX_TIMERS
730 731
	struct task_cputime		cputime_expires;
	struct list_head		cpu_timers[3];
732
#endif
L
Linus Torvalds 已提交
733

734 735 736 737 738 739 740 741 742 743 744 745 746 747 748 749 750 751 752 753 754 755
	/* Process credentials: */

	/* Tracer's credentials at attach: */
	const struct cred __rcu		*ptracer_cred;

	/* Objective and real subjective task credentials (COW): */
	const struct cred __rcu		*real_cred;

	/* Effective (overridable) subjective task credentials (COW): */
	const struct cred __rcu		*cred;

	/*
	 * executable name, excluding path.
	 *
	 * - normally initialized setup_new_exec()
	 * - access it with [gs]et_task_comm()
	 * - lock it with task_lock()
	 */
	char				comm[TASK_COMM_LEN];

	struct nameidata		*nameidata;

756
#ifdef CONFIG_SYSVIPC
757 758
	struct sysv_sem			sysvsem;
	struct sysv_shm			sysvshm;
759
#endif
760
#ifdef CONFIG_DETECT_HUNG_TASK
761
	unsigned long			last_switch_count;
762
#endif
763 764 765 766 767 768 769 770 771 772 773 774 775 776 777 778 779 780 781 782 783 784 785 786
	/* Filesystem information: */
	struct fs_struct		*fs;

	/* Open file information: */
	struct files_struct		*files;

	/* Namespaces: */
	struct nsproxy			*nsproxy;

	/* Signal handlers: */
	struct signal_struct		*signal;
	struct sighand_struct		*sighand;
	sigset_t			blocked;
	sigset_t			real_blocked;
	/* Restored if set_restore_sigmask() was used: */
	sigset_t			saved_sigmask;
	struct sigpending		pending;
	unsigned long			sas_ss_sp;
	size_t				sas_ss_size;
	unsigned int			sas_ss_flags;

	struct callback_head		*task_works;

	struct audit_context		*audit_context;
A
Al Viro 已提交
787
#ifdef CONFIG_AUDITSYSCALL
788 789
	kuid_t				loginuid;
	unsigned int			sessionid;
A
Al Viro 已提交
790
#endif
791 792 793 794 795
	struct seccomp			seccomp;

	/* Thread group tracking: */
	u32				parent_exec_id;
	u32				self_exec_id;
L
Linus Torvalds 已提交
796

797 798
	/* Protection against (de-)allocation: mm, files, fs, tty, keyrings, mems_allowed, mempolicy: */
	spinlock_t			alloc_lock;
L
Linus Torvalds 已提交
799

800
	/* Protection of the PI data structures: */
801
	raw_spinlock_t			pi_lock;
802

803
	struct wake_q_node		wake_q;
804

I
Ingo Molnar 已提交
805
#ifdef CONFIG_RT_MUTEXES
806 807 808
	/* PI waiters blocked on a rt_mutex held by this task: */
	struct rb_root			pi_waiters;
	struct rb_node			*pi_waiters_leftmost;
809 810
	/* Updated under owner's pi_lock and rq lock */
	struct task_struct		*pi_top_task;
811 812
	/* Deadlock detection and priority inheritance handling: */
	struct rt_mutex_waiter		*pi_blocked_on;
I
Ingo Molnar 已提交
813 814
#endif

815
#ifdef CONFIG_DEBUG_MUTEXES
816 817
	/* Mutex deadlock detection: */
	struct mutex_waiter		*blocked_on;
818
#endif
819

820
#ifdef CONFIG_TRACE_IRQFLAGS
821 822 823 824 825 826 827 828 829 830 831 832 833
	unsigned int			irq_events;
	unsigned long			hardirq_enable_ip;
	unsigned long			hardirq_disable_ip;
	unsigned int			hardirq_enable_event;
	unsigned int			hardirq_disable_event;
	int				hardirqs_enabled;
	int				hardirq_context;
	unsigned long			softirq_disable_ip;
	unsigned long			softirq_enable_ip;
	unsigned int			softirq_disable_event;
	unsigned int			softirq_enable_event;
	int				softirqs_enabled;
	int				softirq_context;
834
#endif
835

I
Ingo Molnar 已提交
836
#ifdef CONFIG_LOCKDEP
837 838 839 840 841 842
# define MAX_LOCK_DEPTH			48UL
	u64				curr_chain_key;
	int				lockdep_depth;
	unsigned int			lockdep_recursion;
	struct held_lock		held_locks[MAX_LOCK_DEPTH];
	gfp_t				lockdep_reclaim_gfp;
I
Ingo Molnar 已提交
843
#endif
844

845
#ifdef CONFIG_UBSAN
846
	unsigned int			in_ubsan;
847
#endif
848

849 850
	/* Journalling filesystem info: */
	void				*journal_info;
L
Linus Torvalds 已提交
851

852 853
	/* Stacked block device info: */
	struct bio_list			*bio_list;
854

855
#ifdef CONFIG_BLOCK
856 857
	/* Stack plugging: */
	struct blk_plug			*plug;
858 859
#endif

860 861 862 863
	/* VM state: */
	struct reclaim_state		*reclaim_state;

	struct backing_dev_info		*backing_dev_info;
L
Linus Torvalds 已提交
864

865
	struct io_context		*io_context;
L
Linus Torvalds 已提交
866

867 868 869
	/* Ptrace state: */
	unsigned long			ptrace_message;
	siginfo_t			*last_siginfo;
L
Linus Torvalds 已提交
870

871 872 873 874 875 876 877 878
	struct task_io_accounting	ioac;
#ifdef CONFIG_TASK_XACCT
	/* Accumulated RSS usage: */
	u64				acct_rss_mem1;
	/* Accumulated virtual memory usage: */
	u64				acct_vm_mem1;
	/* stime + utime since last update: */
	u64				acct_timexpd;
L
Linus Torvalds 已提交
879 880
#endif
#ifdef CONFIG_CPUSETS
881 882 883 884 885 886
	/* Protected by ->alloc_lock: */
	nodemask_t			mems_allowed;
	/* Seqence number to catch updates: */
	seqcount_t			mems_allowed_seq;
	int				cpuset_mem_spread_rotor;
	int				cpuset_slab_spread_rotor;
L
Linus Torvalds 已提交
887
#endif
888
#ifdef CONFIG_CGROUPS
889 890 891 892
	/* Control Group info protected by css_set_lock: */
	struct css_set __rcu		*cgroups;
	/* cg_list protected by css_set_lock and tsk->alloc_lock: */
	struct list_head		cg_list;
893
#endif
F
Fenghua Yu 已提交
894
#ifdef CONFIG_INTEL_RDT_A
895
	int				closid;
F
Fenghua Yu 已提交
896
#endif
897
#ifdef CONFIG_FUTEX
898
	struct robust_list_head __user	*robust_list;
899 900 901
#ifdef CONFIG_COMPAT
	struct compat_robust_list_head __user *compat_robust_list;
#endif
902 903
	struct list_head		pi_state_list;
	struct futex_pi_state		*pi_state_cache;
904
#endif
905
#ifdef CONFIG_PERF_EVENTS
906 907 908
	struct perf_event_context	*perf_event_ctxp[perf_nr_task_contexts];
	struct mutex			perf_event_mutex;
	struct list_head		perf_event_list;
909
#endif
910
#ifdef CONFIG_DEBUG_PREEMPT
911
	unsigned long			preempt_disable_ip;
912
#endif
913
#ifdef CONFIG_NUMA
914 915
	/* Protected by alloc_lock: */
	struct mempolicy		*mempolicy;
916
	short				il_prev;
917
	short				pref_node_fork;
918
#endif
919
#ifdef CONFIG_NUMA_BALANCING
920 921 922 923 924 925 926 927 928 929 930 931 932
	int				numa_scan_seq;
	unsigned int			numa_scan_period;
	unsigned int			numa_scan_period_max;
	int				numa_preferred_nid;
	unsigned long			numa_migrate_retry;
	/* Migration stamp: */
	u64				node_stamp;
	u64				last_task_numa_placement;
	u64				last_sum_exec_runtime;
	struct callback_head		numa_work;

	struct list_head		numa_entry;
	struct numa_group		*numa_group;
933

934
	/*
935 936 937 938 939 940 941 942 943 944 945 946
	 * numa_faults is an array split into four regions:
	 * faults_memory, faults_cpu, faults_memory_buffer, faults_cpu_buffer
	 * in this precise order.
	 *
	 * faults_memory: Exponential decaying average of faults on a per-node
	 * basis. Scheduling placement decisions are made based on these
	 * counts. The values remain static for the duration of a PTE scan.
	 * faults_cpu: Track the nodes the process was running on when a NUMA
	 * hinting fault was incurred.
	 * faults_memory_buffer and faults_cpu_buffer: Record faults per node
	 * during the current scan window. When the scan completes, the counts
	 * in faults_memory and faults_cpu decay and these values are copied.
947
	 */
948 949
	unsigned long			*numa_faults;
	unsigned long			total_numa_faults;
950

951 952
	/*
	 * numa_faults_locality tracks if faults recorded during the last
953 954 955
	 * scan window were remote/local or failed to migrate. The task scan
	 * period is adapted based on the locality of the faults with different
	 * weights depending on whether they were shared or private faults
956
	 */
957
	unsigned long			numa_faults_locality[3];
958

959
	unsigned long			numa_pages_migrated;
960 961
#endif /* CONFIG_NUMA_BALANCING */

962
	struct tlbflush_unmap_batch	tlb_ubc;
963

964
	struct rcu_head			rcu;
965

966 967
	/* Cache last used pipe for splice(): */
	struct pipe_inode_info		*splice_pipe;
968

969
	struct page_frag		task_frag;
970

971 972
#ifdef CONFIG_TASK_DELAY_ACCT
	struct task_delay_info		*delays;
973
#endif
974

975
#ifdef CONFIG_FAULT_INJECTION
976
	int				make_it_fail;
977
	int fail_nth;
978
#endif
979
	/*
980 981
	 * When (nr_dirtied >= nr_dirtied_pause), it's time to call
	 * balance_dirty_pages() for a dirty throttling pause:
982
	 */
983 984 985 986
	int				nr_dirtied;
	int				nr_dirtied_pause;
	/* Start of a write-and-pause period: */
	unsigned long			dirty_paused_when;
987

A
Arjan van de Ven 已提交
988
#ifdef CONFIG_LATENCYTOP
989 990
	int				latency_record_count;
	struct latency_record		latency_record[LT_SAVECOUNT];
A
Arjan van de Ven 已提交
991
#endif
992
	/*
993
	 * Time slack values; these are used to round up poll() and
994 995
	 * select() etc timeout values. These are in nanoseconds.
	 */
996 997
	u64				timer_slack_ns;
	u64				default_timer_slack_ns;
998

999
#ifdef CONFIG_KASAN
1000
	unsigned int			kasan_depth;
1001
#endif
1002

1003
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
1004 1005 1006 1007 1008 1009 1010 1011 1012
	/* Index of current stored address in ret_stack: */
	int				curr_ret_stack;

	/* Stack of return addresses for return function tracing: */
	struct ftrace_ret_stack		*ret_stack;

	/* Timestamp for last schedule: */
	unsigned long long		ftrace_timestamp;

1013 1014
	/*
	 * Number of functions that haven't been traced
1015
	 * because of depth overrun:
1016
	 */
1017 1018 1019 1020
	atomic_t			trace_overrun;

	/* Pause tracing: */
	atomic_t			tracing_graph_pause;
1021
#endif
1022

1023
#ifdef CONFIG_TRACING
1024 1025 1026 1027 1028
	/* State flags for use by tracers: */
	unsigned long			trace;

	/* Bitmask and counter of trace recursion: */
	unsigned long			trace_recursion;
1029
#endif /* CONFIG_TRACING */
1030

D
Dmitry Vyukov 已提交
1031
#ifdef CONFIG_KCOV
1032 1033 1034 1035 1036 1037 1038 1039 1040 1041 1042
	/* Coverage collection mode enabled for this task (0 if disabled): */
	enum kcov_mode			kcov_mode;

	/* Size of the kcov_area: */
	unsigned int			kcov_size;

	/* Buffer for coverage collection: */
	void				*kcov_area;

	/* KCOV descriptor wired with this task or NULL: */
	struct kcov			*kcov;
D
Dmitry Vyukov 已提交
1043
#endif
1044

1045
#ifdef CONFIG_MEMCG
1046 1047 1048
	struct mem_cgroup		*memcg_in_oom;
	gfp_t				memcg_oom_gfp_mask;
	int				memcg_oom_order;
1049

1050 1051
	/* Number of pages to reclaim on returning to userland: */
	unsigned int			memcg_nr_pages_over_high;
1052
#endif
1053

1054
#ifdef CONFIG_UPROBES
1055
	struct uprobe_task		*utask;
1056
#endif
K
Kent Overstreet 已提交
1057
#if defined(CONFIG_BCACHE) || defined(CONFIG_BCACHE_MODULE)
1058 1059
	unsigned int			sequential_io;
	unsigned int			sequential_io_avg;
K
Kent Overstreet 已提交
1060
#endif
P
Peter Zijlstra 已提交
1061
#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
1062
	unsigned long			task_state_change;
P
Peter Zijlstra 已提交
1063
#endif
1064
	int				pagefault_disabled;
1065
#ifdef CONFIG_MMU
1066
	struct task_struct		*oom_reaper_list;
1067
#endif
1068
#ifdef CONFIG_VMAP_STACK
1069
	struct vm_struct		*stack_vm_area;
1070
#endif
1071
#ifdef CONFIG_THREAD_INFO_IN_TASK
1072 1073
	/* A live task holds one reference: */
	atomic_t			stack_refcount;
1074 1075 1076
#endif
#ifdef CONFIG_LIVEPATCH
	int patch_state;
1077
#endif
1078 1079 1080
#ifdef CONFIG_SECURITY
	/* Used by LSM modules for access restriction: */
	void				*security;
1081
#endif
1082 1083 1084 1085 1086 1087 1088 1089 1090
	/* CPU-specific state of this task: */
	struct thread_struct		thread;

	/*
	 * WARNING: on x86, 'thread_struct' contains a variable-sized
	 * structure.  It *MUST* be at the end of 'task_struct'.
	 *
	 * Do not put anything below here!
	 */
L
Linus Torvalds 已提交
1091 1092
};

A
Alexey Dobriyan 已提交
1093
static inline struct pid *task_pid(struct task_struct *task)
1094 1095 1096 1097
{
	return task->pids[PIDTYPE_PID].pid;
}

A
Alexey Dobriyan 已提交
1098
static inline struct pid *task_tgid(struct task_struct *task)
1099 1100 1101 1102
{
	return task->group_leader->pids[PIDTYPE_PID].pid;
}

1103
/*
1104
 * Without tasklist or RCU lock it is not safe to dereference
1105 1106 1107
 * the result of task_pgrp/task_session even if task == current,
 * we can race with another thread doing sys_setsid/sys_setpgid.
 */
A
Alexey Dobriyan 已提交
1108
static inline struct pid *task_pgrp(struct task_struct *task)
1109 1110 1111 1112
{
	return task->group_leader->pids[PIDTYPE_PGID].pid;
}

A
Alexey Dobriyan 已提交
1113
static inline struct pid *task_session(struct task_struct *task)
1114 1115 1116 1117
{
	return task->group_leader->pids[PIDTYPE_SID].pid;
}

1118 1119 1120 1121 1122
/*
 * the helpers to get the task's different pids as they are seen
 * from various namespaces
 *
 * task_xid_nr()     : global id, i.e. the id seen from the init namespace;
E
Eric W. Biederman 已提交
1123 1124
 * task_xid_vnr()    : virtual id, i.e. the id seen from the pid namespace of
 *                     current.
1125 1126 1127 1128
 * task_xid_nr_ns()  : id seen from the ns specified;
 *
 * see also pid_nr() etc in include/linux/pid.h
 */
1129
pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type, struct pid_namespace *ns);
1130

A
Alexey Dobriyan 已提交
1131
static inline pid_t task_pid_nr(struct task_struct *tsk)
1132 1133 1134 1135
{
	return tsk->pid;
}

1136
static inline pid_t task_pid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
1137 1138 1139
{
	return __task_pid_nr_ns(tsk, PIDTYPE_PID, ns);
}
1140 1141 1142

static inline pid_t task_pid_vnr(struct task_struct *tsk)
{
1143
	return __task_pid_nr_ns(tsk, PIDTYPE_PID, NULL);
1144 1145 1146
}


A
Alexey Dobriyan 已提交
1147
static inline pid_t task_tgid_nr(struct task_struct *tsk)
1148 1149 1150 1151
{
	return tsk->tgid;
}

1152
extern pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns);
1153 1154 1155 1156 1157 1158

static inline pid_t task_tgid_vnr(struct task_struct *tsk)
{
	return pid_vnr(task_tgid(tsk));
}

1159 1160 1161 1162 1163 1164 1165 1166 1167 1168 1169 1170 1171 1172
/**
 * pid_alive - check that a task structure is not stale
 * @p: Task structure to be checked.
 *
 * Test if a process is not yet dead (at most zombie state)
 * If pid_alive fails, then pointers within the task structure
 * can be stale and must not be dereferenced.
 *
 * Return: 1 if the process is alive. 0 otherwise.
 */
static inline int pid_alive(const struct task_struct *p)
{
	return p->pids[PIDTYPE_PID].pid != NULL;
}
1173

1174 1175 1176 1177 1178 1179 1180 1181 1182 1183 1184 1185 1186 1187 1188 1189 1190
static inline pid_t task_ppid_nr_ns(const struct task_struct *tsk, struct pid_namespace *ns)
{
	pid_t pid = 0;

	rcu_read_lock();
	if (pid_alive(tsk))
		pid = task_tgid_nr_ns(rcu_dereference(tsk->real_parent), ns);
	rcu_read_unlock();

	return pid;
}

static inline pid_t task_ppid_nr(const struct task_struct *tsk)
{
	return task_ppid_nr_ns(tsk, &init_pid_ns);
}

1191
static inline pid_t task_pgrp_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
1192
{
1193
	return __task_pid_nr_ns(tsk, PIDTYPE_PGID, ns);
1194 1195 1196 1197
}

static inline pid_t task_pgrp_vnr(struct task_struct *tsk)
{
1198
	return __task_pid_nr_ns(tsk, PIDTYPE_PGID, NULL);
1199 1200 1201
}


1202
static inline pid_t task_session_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
1203
{
1204
	return __task_pid_nr_ns(tsk, PIDTYPE_SID, ns);
1205 1206 1207 1208
}

static inline pid_t task_session_vnr(struct task_struct *tsk)
{
1209
	return __task_pid_nr_ns(tsk, PIDTYPE_SID, NULL);
1210 1211
}

1212
/* Obsolete, do not use: */
1213 1214 1215 1216
static inline pid_t task_pgrp_nr(struct task_struct *tsk)
{
	return task_pgrp_nr_ns(tsk, &init_pid_ns);
}
1217

1218
/**
1219 1220
 * is_global_init - check if a task structure is init. Since init
 * is free to have sub-threads we need to check tgid.
1221 1222 1223
 * @tsk: Task structure to be checked.
 *
 * Check if a task structure is the first user space task the kernel created.
1224 1225
 *
 * Return: 1 if the task structure is init. 0 otherwise.
1226
 */
A
Alexey Dobriyan 已提交
1227
static inline int is_global_init(struct task_struct *tsk)
1228
{
1229
	return task_tgid_nr(tsk) == 1;
1230
}
1231

1232 1233
extern struct pid *cad_pid;

L
Linus Torvalds 已提交
1234 1235 1236
/*
 * Per process flags
 */
1237 1238 1239 1240 1241 1242 1243 1244 1245 1246 1247 1248 1249 1250 1251 1252
#define PF_IDLE			0x00000002	/* I am an IDLE thread */
#define PF_EXITING		0x00000004	/* Getting shut down */
#define PF_EXITPIDONE		0x00000008	/* PI exit done on shut down */
#define PF_VCPU			0x00000010	/* I'm a virtual CPU */
#define PF_WQ_WORKER		0x00000020	/* I'm a workqueue worker */
#define PF_FORKNOEXEC		0x00000040	/* Forked but didn't exec */
#define PF_MCE_PROCESS		0x00000080      /* Process policy on mce errors */
#define PF_SUPERPRIV		0x00000100	/* Used super-user privileges */
#define PF_DUMPCORE		0x00000200	/* Dumped core */
#define PF_SIGNALED		0x00000400	/* Killed by a signal */
#define PF_MEMALLOC		0x00000800	/* Allocating memory */
#define PF_NPROC_EXCEEDED	0x00001000	/* set_user() noticed that RLIMIT_NPROC was exceeded */
#define PF_USED_MATH		0x00002000	/* If unset the fpu must be initialized before use */
#define PF_USED_ASYNC		0x00004000	/* Used async_schedule*(), used by module init */
#define PF_NOFREEZE		0x00008000	/* This thread should not be frozen */
#define PF_FROZEN		0x00010000	/* Frozen for system suspend */
1253 1254 1255
#define PF_KSWAPD		0x00020000	/* I am kswapd */
#define PF_MEMALLOC_NOFS	0x00040000	/* All allocation requests will inherit GFP_NOFS */
#define PF_MEMALLOC_NOIO	0x00080000	/* All allocation requests will inherit GFP_NOIO */
1256 1257 1258 1259 1260 1261 1262 1263 1264
#define PF_LESS_THROTTLE	0x00100000	/* Throttle me less: I clean memory */
#define PF_KTHREAD		0x00200000	/* I am a kernel thread */
#define PF_RANDOMIZE		0x00400000	/* Randomize virtual address space */
#define PF_SWAPWRITE		0x00800000	/* Allowed to write to swap */
#define PF_NO_SETAFFINITY	0x04000000	/* Userland is not allowed to meddle with cpus_allowed */
#define PF_MCE_EARLY		0x08000000      /* Early kill for mce process policy */
#define PF_MUTEX_TESTER		0x20000000	/* Thread belongs to the rt mutex tester */
#define PF_FREEZER_SKIP		0x40000000	/* Freezer should not count it as freezable */
#define PF_SUSPEND_TASK		0x80000000      /* This thread called freeze_processes() and should not be frozen */
L
Linus Torvalds 已提交
1265 1266 1267 1268 1269 1270 1271 1272 1273 1274 1275 1276

/*
 * Only the _current_ task can read/write to tsk->flags, but other
 * tasks can access tsk->flags in readonly mode for example
 * with tsk_used_math (like during threaded core dumping).
 * There is however an exception to this rule during ptrace
 * or during fork: the ptracer task is allowed to write to the
 * child->flags of its traced child (same goes for fork, the parent
 * can write to the child->flags), because we're guaranteed the
 * child is not running and in turn not changing child->flags
 * at the same time the parent does it.
 */
1277 1278 1279 1280 1281
#define clear_stopped_child_used_math(child)	do { (child)->flags &= ~PF_USED_MATH; } while (0)
#define set_stopped_child_used_math(child)	do { (child)->flags |= PF_USED_MATH; } while (0)
#define clear_used_math()			clear_stopped_child_used_math(current)
#define set_used_math()				set_stopped_child_used_math(current)

L
Linus Torvalds 已提交
1282 1283
#define conditional_stopped_child_used_math(condition, child) \
	do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= (condition) ? PF_USED_MATH : 0; } while (0)
1284 1285 1286

#define conditional_used_math(condition)	conditional_stopped_child_used_math(condition, current)

L
Linus Torvalds 已提交
1287 1288
#define copy_to_stopped_child_used_math(child) \
	do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= current->flags & PF_USED_MATH; } while (0)
1289

L
Linus Torvalds 已提交
1290
/* NOTE: this will return 0 or PF_USED_MATH, it will never return 1 */
1291 1292
#define tsk_used_math(p)			((p)->flags & PF_USED_MATH)
#define used_math()				tsk_used_math(current)
L
Linus Torvalds 已提交
1293

1294 1295 1296 1297 1298 1299 1300 1301 1302 1303
static inline bool is_percpu_thread(void)
{
#ifdef CONFIG_SMP
	return (current->flags & PF_NO_SETAFFINITY) &&
		(current->nr_cpus_allowed  == 1);
#else
	return true;
#endif
}

1304
/* Per-process atomic flags. */
1305 1306 1307
#define PFA_NO_NEW_PRIVS		0	/* May not gain new privileges. */
#define PFA_SPREAD_PAGE			1	/* Spread page cache over cpuset */
#define PFA_SPREAD_SLAB			2	/* Spread some slab caches over cpuset */
1308

1309

1310 1311 1312
#define TASK_PFA_TEST(name, func)					\
	static inline bool task_##func(struct task_struct *p)		\
	{ return test_bit(PFA_##name, &p->atomic_flags); }
1313

1314 1315 1316
#define TASK_PFA_SET(name, func)					\
	static inline void task_set_##func(struct task_struct *p)	\
	{ set_bit(PFA_##name, &p->atomic_flags); }
1317

1318 1319 1320 1321 1322 1323
#define TASK_PFA_CLEAR(name, func)					\
	static inline void task_clear_##func(struct task_struct *p)	\
	{ clear_bit(PFA_##name, &p->atomic_flags); }

TASK_PFA_TEST(NO_NEW_PRIVS, no_new_privs)
TASK_PFA_SET(NO_NEW_PRIVS, no_new_privs)
1324

1325 1326 1327 1328 1329 1330 1331
TASK_PFA_TEST(SPREAD_PAGE, spread_page)
TASK_PFA_SET(SPREAD_PAGE, spread_page)
TASK_PFA_CLEAR(SPREAD_PAGE, spread_page)

TASK_PFA_TEST(SPREAD_SLAB, spread_slab)
TASK_PFA_SET(SPREAD_SLAB, spread_slab)
TASK_PFA_CLEAR(SPREAD_SLAB, spread_slab)
1332

1333
static inline void
1334
current_restore_flags(unsigned long orig_flags, unsigned long flags)
1335
{
1336 1337
	current->flags &= ~flags;
	current->flags |= orig_flags & flags;
1338 1339
}

1340 1341
extern int cpuset_cpumask_can_shrink(const struct cpumask *cur, const struct cpumask *trial);
extern int task_can_attach(struct task_struct *p, const struct cpumask *cs_cpus_allowed);
L
Linus Torvalds 已提交
1342
#ifdef CONFIG_SMP
1343 1344
extern void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask);
extern int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask);
L
Linus Torvalds 已提交
1345
#else
1346
static inline void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
1347 1348
{
}
1349
static inline int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
L
Linus Torvalds 已提交
1350
{
1351
	if (!cpumask_test_cpu(0, new_mask))
L
Linus Torvalds 已提交
1352 1353 1354 1355
		return -EINVAL;
	return 0;
}
#endif
1356

1357 1358 1359 1360
#ifndef cpu_relax_yield
#define cpu_relax_yield() cpu_relax()
#endif

1361
extern int yield_to(struct task_struct *p, bool preempt);
1362 1363
extern void set_user_nice(struct task_struct *p, long nice);
extern int task_prio(const struct task_struct *p);
1364

1365 1366 1367 1368 1369 1370 1371 1372 1373 1374
/**
 * task_nice - return the nice value of a given task.
 * @p: the task in question.
 *
 * Return: The nice value [ -20 ... 0 ... 19 ].
 */
static inline int task_nice(const struct task_struct *p)
{
	return PRIO_TO_NICE((p)->static_prio);
}
1375

1376 1377
extern int can_nice(const struct task_struct *p, const int nice);
extern int task_curr(const struct task_struct *p);
L
Linus Torvalds 已提交
1378
extern int idle_cpu(int cpu);
1379 1380 1381
extern int sched_setscheduler(struct task_struct *, int, const struct sched_param *);
extern int sched_setscheduler_nocheck(struct task_struct *, int, const struct sched_param *);
extern int sched_setattr(struct task_struct *, const struct sched_attr *);
1382
extern struct task_struct *idle_task(int cpu);
1383

1384 1385
/**
 * is_idle_task - is the specified task an idle task?
1386
 * @p: the task in question.
1387 1388
 *
 * Return: 1 if @p is an idle task. 0 otherwise.
1389
 */
1390
static inline bool is_idle_task(const struct task_struct *p)
1391
{
1392
	return !!(p->flags & PF_IDLE);
1393
}
1394

1395
extern struct task_struct *curr_task(int cpu);
1396
extern void ia64_set_curr_task(int cpu, struct task_struct *p);
L
Linus Torvalds 已提交
1397 1398 1399 1400

void yield(void);

union thread_union {
1401
#ifndef CONFIG_THREAD_INFO_IN_TASK
L
Linus Torvalds 已提交
1402
	struct thread_info thread_info;
1403
#endif
L
Linus Torvalds 已提交
1404 1405 1406
	unsigned long stack[THREAD_SIZE/sizeof(long)];
};

1407 1408 1409 1410 1411 1412 1413 1414 1415
#ifdef CONFIG_THREAD_INFO_IN_TASK
static inline struct thread_info *task_thread_info(struct task_struct *task)
{
	return &task->thread_info;
}
#elif !defined(__HAVE_THREAD_FUNCTIONS)
# define task_thread_info(task)	((struct thread_info *)(task)->stack)
#endif

1416 1417 1418 1419 1420
/*
 * find a task by one of its numerical ids
 *
 * find_task_by_pid_ns():
 *      finds a task by its pid in the specified namespace
1421 1422
 * find_task_by_vpid():
 *      finds a task by its virtual pid
1423
 *
1424
 * see also find_vpid() etc in include/linux/pid.h
1425 1426
 */

1427
extern struct task_struct *find_task_by_vpid(pid_t nr);
1428
extern struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns);
1429

1430 1431
extern int wake_up_state(struct task_struct *tsk, unsigned int state);
extern int wake_up_process(struct task_struct *tsk);
1432
extern void wake_up_new_task(struct task_struct *tsk);
1433

L
Linus Torvalds 已提交
1434
#ifdef CONFIG_SMP
1435
extern void kick_process(struct task_struct *tsk);
L
Linus Torvalds 已提交
1436
#else
1437
static inline void kick_process(struct task_struct *tsk) { }
L
Linus Torvalds 已提交
1438 1439
#endif

1440
extern void __set_task_comm(struct task_struct *tsk, const char *from, bool exec);
1441

1442 1443 1444 1445
static inline void set_task_comm(struct task_struct *tsk, const char *from)
{
	__set_task_comm(tsk, from, false);
}
1446

1447
extern char *get_task_comm(char *to, struct task_struct *tsk);
L
Linus Torvalds 已提交
1448 1449

#ifdef CONFIG_SMP
1450
void scheduler_ipi(void);
R
Roland McGrath 已提交
1451
extern unsigned long wait_task_inactive(struct task_struct *, long match_state);
L
Linus Torvalds 已提交
1452
#else
1453
static inline void scheduler_ipi(void) { }
1454
static inline unsigned long wait_task_inactive(struct task_struct *p, long match_state)
R
Roland McGrath 已提交
1455 1456 1457
{
	return 1;
}
L
Linus Torvalds 已提交
1458 1459
#endif

1460 1461 1462
/*
 * Set thread flags in other task's structures.
 * See asm/thread_info.h for TIF_xxxx flags available:
L
Linus Torvalds 已提交
1463 1464 1465
 */
static inline void set_tsk_thread_flag(struct task_struct *tsk, int flag)
{
A
Al Viro 已提交
1466
	set_ti_thread_flag(task_thread_info(tsk), flag);
L
Linus Torvalds 已提交
1467 1468 1469 1470
}

static inline void clear_tsk_thread_flag(struct task_struct *tsk, int flag)
{
A
Al Viro 已提交
1471
	clear_ti_thread_flag(task_thread_info(tsk), flag);
L
Linus Torvalds 已提交
1472 1473 1474 1475
}

static inline int test_and_set_tsk_thread_flag(struct task_struct *tsk, int flag)
{
A
Al Viro 已提交
1476
	return test_and_set_ti_thread_flag(task_thread_info(tsk), flag);
L
Linus Torvalds 已提交
1477 1478 1479 1480
}

static inline int test_and_clear_tsk_thread_flag(struct task_struct *tsk, int flag)
{
A
Al Viro 已提交
1481
	return test_and_clear_ti_thread_flag(task_thread_info(tsk), flag);
L
Linus Torvalds 已提交
1482 1483 1484 1485
}

static inline int test_tsk_thread_flag(struct task_struct *tsk, int flag)
{
A
Al Viro 已提交
1486
	return test_ti_thread_flag(task_thread_info(tsk), flag);
L
Linus Torvalds 已提交
1487 1488 1489 1490 1491 1492 1493 1494 1495 1496 1497 1498
}

static inline void set_tsk_need_resched(struct task_struct *tsk)
{
	set_tsk_thread_flag(tsk,TIF_NEED_RESCHED);
}

static inline void clear_tsk_need_resched(struct task_struct *tsk)
{
	clear_tsk_thread_flag(tsk,TIF_NEED_RESCHED);
}

1499 1500 1501 1502 1503
static inline int test_tsk_need_resched(struct task_struct *tsk)
{
	return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED));
}

L
Linus Torvalds 已提交
1504 1505 1506 1507 1508 1509 1510
/*
 * cond_resched() and cond_resched_lock(): latency reduction via
 * explicit rescheduling in places that are safe. The return
 * value indicates whether a reschedule was done in fact.
 * cond_resched_lock() will drop the spinlock before scheduling,
 * cond_resched_softirq() will enable bhs before scheduling.
 */
1511
#ifndef CONFIG_PREEMPT
1512
extern int _cond_resched(void);
1513 1514 1515
#else
static inline int _cond_resched(void) { return 0; }
#endif
1516

1517
#define cond_resched() ({			\
1518
	___might_sleep(__FILE__, __LINE__, 0);	\
1519 1520
	_cond_resched();			\
})
1521

1522 1523 1524
extern int __cond_resched_lock(spinlock_t *lock);

#define cond_resched_lock(lock) ({				\
1525
	___might_sleep(__FILE__, __LINE__, PREEMPT_LOCK_OFFSET);\
1526 1527 1528 1529 1530
	__cond_resched_lock(lock);				\
})

extern int __cond_resched_softirq(void);

1531
#define cond_resched_softirq() ({					\
1532
	___might_sleep(__FILE__, __LINE__, SOFTIRQ_DISABLE_OFFSET);	\
1533
	__cond_resched_softirq();					\
1534
})
L
Linus Torvalds 已提交
1535

1536 1537 1538 1539 1540 1541 1542 1543 1544
static inline void cond_resched_rcu(void)
{
#if defined(CONFIG_DEBUG_ATOMIC_SLEEP) || !defined(CONFIG_PREEMPT_RCU)
	rcu_read_unlock();
	cond_resched();
	rcu_read_lock();
#endif
}

L
Linus Torvalds 已提交
1545 1546
/*
 * Does a critical section need to be broken due to another
N
Nick Piggin 已提交
1547 1548
 * task waiting?: (technically does not depend on CONFIG_PREEMPT,
 * but a general need for low latency)
L
Linus Torvalds 已提交
1549
 */
N
Nick Piggin 已提交
1550
static inline int spin_needbreak(spinlock_t *lock)
L
Linus Torvalds 已提交
1551
{
N
Nick Piggin 已提交
1552 1553 1554
#ifdef CONFIG_PREEMPT
	return spin_is_contended(lock);
#else
L
Linus Torvalds 已提交
1555
	return 0;
N
Nick Piggin 已提交
1556
#endif
L
Linus Torvalds 已提交
1557 1558
}

1559 1560 1561 1562 1563
static __always_inline bool need_resched(void)
{
	return unlikely(tif_need_resched());
}

L
Linus Torvalds 已提交
1564 1565 1566 1567 1568 1569 1570
/*
 * Wrappers for p->thread_info->cpu access. No-op on UP.
 */
#ifdef CONFIG_SMP

static inline unsigned int task_cpu(const struct task_struct *p)
{
1571 1572 1573
#ifdef CONFIG_THREAD_INFO_IN_TASK
	return p->cpu;
#else
A
Al Viro 已提交
1574
	return task_thread_info(p)->cpu;
1575
#endif
L
Linus Torvalds 已提交
1576 1577
}

I
Ingo Molnar 已提交
1578
extern void set_task_cpu(struct task_struct *p, unsigned int cpu);
L
Linus Torvalds 已提交
1579 1580 1581 1582 1583 1584 1585 1586 1587 1588 1589 1590 1591 1592

#else

static inline unsigned int task_cpu(const struct task_struct *p)
{
	return 0;
}

static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
{
}

#endif /* CONFIG_SMP */

1593 1594 1595 1596 1597 1598 1599 1600 1601 1602 1603 1604
/*
 * In order to reduce various lock holder preemption latencies provide an
 * interface to see if a vCPU is currently running or not.
 *
 * This allows us to terminate optimistic spin loops and block, analogous to
 * the native optimistic spin heuristic of testing if the lock owner task is
 * running or not.
 */
#ifndef vcpu_is_preempted
# define vcpu_is_preempted(cpu)	false
#endif

1605 1606
extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask);
extern long sched_getaffinity(pid_t pid, struct cpumask *mask);
1607

D
Dave Hansen 已提交
1608 1609 1610 1611
#ifndef TASK_SIZE_OF
#define TASK_SIZE_OF(tsk)	TASK_SIZE
#endif

L
Linus Torvalds 已提交
1612
#endif