#ifndef _LINUX_SCHED_H
#define _LINUX_SCHED_H

#include <uapi/linux/sched.h>

#include <linux/sched/prio.h>


struct sched_param {
	int sched_priority;
};

#include <asm/param.h>	/* for HZ */

#include <linux/capability.h>
#include <linux/threads.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/timex.h>
#include <linux/jiffies.h>
#include <linux/plist.h>
#include <linux/rbtree.h>
#include <linux/thread_info.h>
#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/nodemask.h>
#include <linux/mm_types.h>
#include <linux/preempt.h>

#include <asm/page.h>
#include <asm/ptrace.h>

#include <linux/smp.h>
#include <linux/sem.h>
#include <linux/shm.h>
#include <linux/signal.h>
#include <linux/compiler.h>
#include <linux/completion.h>
#include <linux/pid.h>
#include <linux/percpu.h>
#include <linux/topology.h>
#include <linux/seccomp.h>
#include <linux/rcupdate.h>
#include <linux/rculist.h>
#include <linux/rtmutex.h>

#include <linux/time.h>
#include <linux/param.h>
#include <linux/resource.h>
#include <linux/timer.h>
#include <linux/hrtimer.h>
#include <linux/kcov.h>
#include <linux/task_io_accounting.h>
#include <linux/latencytop.h>
#include <linux/cred.h>
#include <linux/llist.h>
#include <linux/uidgid.h>
#include <linux/gfp.h>
#include <linux/topology.h>
#include <linux/magic.h>
#include <linux/cgroup-defs.h>

#include <asm/processor.h>

#define SCHED_ATTR_SIZE_VER0	48	/* sizeof first published struct */

/*
 * Extended scheduling parameters data structure.
 *
 * This is needed because the original struct sched_param can not be
 * altered without introducing ABI issues with legacy applications
 * (e.g., in sched_getparam()).
 *
 * However, the possibility of specifying more than just a priority for
 * the tasks may be useful for a wide variety of application fields, e.g.,
 * multimedia, streaming, automation and control, and many others.
 *
 * This variant (sched_attr) is meant to describe a so-called
 * sporadic time-constrained task. In such a model, a task is specified by:
 *  - the activation period or minimum instance inter-arrival time;
 *  - the maximum (or average, depending on the actual scheduling
 *    discipline) computation time of all instances, a.k.a. runtime;
 *  - the deadline (relative to the actual activation time) of each
 *    instance.
 * Very briefly, a periodic (sporadic) task asks for the execution of
 * some specific computation --which is typically called an instance--
 * (at most) every period. Moreover, each instance typically lasts no more
 * than the runtime and must be completed by time instant t equal to
 * the instance activation time + the deadline.
 *
 * This is reflected by the actual fields of the sched_attr structure:
 *
 *  @size		size of the structure, for fwd/bwd compat.
 *
 *  @sched_policy	task's scheduling policy
 *  @sched_flags	for customizing the scheduler behaviour
 *  @sched_nice		task's nice value      (SCHED_NORMAL/BATCH)
 *  @sched_priority	task's static priority (SCHED_FIFO/RR)
 *  @sched_deadline	representative of the task's deadline
 *  @sched_runtime	representative of the task's runtime
 *  @sched_period	representative of the task's period
 *
 * Given this task model, there is a multiplicity of scheduling algorithms
 * and policies that can be used to ensure all the tasks will meet their
 * timing constraints.
 *
 * As of now, the SCHED_DEADLINE policy (sched_dl scheduling class) is the
 * only user of this new interface. More information about the algorithm is
 * available in the scheduling class file or in Documentation/.
 */
struct sched_attr {
	u32 size;

	u32 sched_policy;
	u64 sched_flags;

	/* SCHED_NORMAL, SCHED_BATCH */
	s32 sched_nice;

	/* SCHED_FIFO, SCHED_RR */
	u32 sched_priority;

	/* SCHED_DEADLINE */
	u64 sched_runtime;
	u64 sched_deadline;
	u64 sched_period;
};
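
/*
 * Usage sketch (illustrative only, not part of this header): a userspace
 * task would typically fill in a sched_attr and pass it to the
 * sched_setattr() system call to request SCHED_DEADLINE scheduling.  The
 * values below (10ms runtime, 30ms deadline and period, in nanoseconds)
 * are made-up example numbers, not recommendations:
 *
 *	struct sched_attr attr = {
 *		.size		= sizeof(attr),
 *		.sched_policy	= SCHED_DEADLINE,
 *		.sched_runtime	= 10 * 1000 * 1000,
 *		.sched_deadline	= 30 * 1000 * 1000,
 *		.sched_period	= 30 * 1000 * 1000,
 *	};
 *
 *	if (sched_setattr(pid, &attr, 0))
 *		perror("sched_setattr");
 */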

struct futex_pi_state;
struct robust_list_head;
struct bio_list;
struct fs_struct;
struct perf_event_context;
struct blk_plug;
struct filename;
struct nameidata;

/*
 * These are the constants used to fake the fixed-point load-average
 * counting. Some notes:
 *  - 11 bit fractions expand to 22 bits by the multiplies: this gives
 *    a load-average precision of 10 bits integer + 11 bits fractional
 *  - if you want to count load-averages more often, you need more
 *    precision, or rounding will get you. With 2-second counting freq,
 *    the EXP_n values would be 1981, 2034 and 2043 if still using only
 *    11 bit fractions.
 */
extern unsigned long avenrun[];		/* Load averages */
extern void get_avenrun(unsigned long *loads, unsigned long offset, int shift);

#define FSHIFT		11		/* nr of bits of precision */
#define FIXED_1		(1<<FSHIFT)	/* 1.0 as fixed-point */
#define LOAD_FREQ	(5*HZ+1)	/* 5 sec intervals */
#define EXP_1		1884		/* 1/exp(5sec/1min) as fixed-point */
#define EXP_5		2014		/* 1/exp(5sec/5min) */
#define EXP_15		2037		/* 1/exp(5sec/15min) */

#define CALC_LOAD(load,exp,n) \
	load *= exp; \
	load += n*(FIXED_1-exp); \
	load >>= FSHIFT;
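
/*
 * Worked example (illustrative only): the load averages are updated as
 *
 *	load = (load * EXP_n + active * (FIXED_1 - EXP_n)) >> FSHIFT
 *
 * with both load and active expressed in FIXED_1 fixed point.  With
 * FIXED_1 = 2048 and EXP_1 = 1884, a previous 1-minute load of 1.00
 * (2048) and 3 runnable tasks (active = 3 * 2048 = 6144), one LOAD_FREQ
 * step gives
 *
 *	(2048 * 1884 + 6144 * (2048 - 1884)) >> 11 = 2376  (~1.16)
 *
 * i.e. the average decays exponentially toward the instantaneous task
 * count with a one-minute time constant.
 */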

extern unsigned long total_forks;
extern int nr_threads;
DECLARE_PER_CPU(unsigned long, process_counts);
extern int nr_processes(void);
extern unsigned long nr_running(void);
extern bool single_task_running(void);
extern unsigned long nr_iowait(void);
extern unsigned long nr_iowait_cpu(int cpu);
extern void get_iowait_load(unsigned long *nr_waiters, unsigned long *load);

extern void calc_global_load(unsigned long ticks);

#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
extern void cpu_load_update_nohz_start(void);
extern void cpu_load_update_nohz_stop(void);
#else
static inline void cpu_load_update_nohz_start(void) { }
static inline void cpu_load_update_nohz_stop(void) { }
#endif
extern void dump_cpu_task(int cpu);

struct seq_file;
struct cfs_rq;
struct task_group;
#ifdef CONFIG_SCHED_DEBUG
extern void proc_sched_show_task(struct task_struct *p, struct seq_file *m);
extern void proc_sched_set_task(struct task_struct *p);
#endif
/*
 * Task state bitmask. NOTE! These bits are also
 * encoded in fs/proc/array.c: get_task_state().
 *
 * We have two separate sets of flags: task->state
 * is about runnability, while task->exit_state are
 * about the task exiting. Confusing, but this way
 * modifying one set can't modify the other one by
 * mistake.
 */
#define TASK_RUNNING		0
#define TASK_INTERRUPTIBLE	1
#define TASK_UNINTERRUPTIBLE	2
#define __TASK_STOPPED		4
#define __TASK_TRACED		8
/* in tsk->exit_state */
#define EXIT_DEAD		16
#define EXIT_ZOMBIE		32
#define EXIT_TRACE		(EXIT_ZOMBIE | EXIT_DEAD)
/* in tsk->state again */
#define TASK_DEAD		64
#define TASK_WAKEKILL		128
#define TASK_WAKING		256
#define TASK_PARKED		512
#define TASK_NOLOAD		1024
#define TASK_NEW		2048
#define TASK_STATE_MAX		4096

#define TASK_STATE_TO_CHAR_STR "RSDTtXZxKWPNn"

/* Convenience macros for the sake of set_current_state */
#define TASK_KILLABLE		(TASK_WAKEKILL | TASK_UNINTERRUPTIBLE)
#define TASK_STOPPED		(TASK_WAKEKILL | __TASK_STOPPED)
#define TASK_TRACED		(TASK_WAKEKILL | __TASK_TRACED)

#define TASK_IDLE		(TASK_UNINTERRUPTIBLE | TASK_NOLOAD)

/* Convenience macros for the sake of wake_up */
#define TASK_NORMAL		(TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE)
#define TASK_ALL		(TASK_NORMAL | __TASK_STOPPED | __TASK_TRACED)

/* get_task_state() */
#define TASK_REPORT		(TASK_RUNNING | TASK_INTERRUPTIBLE | \
				 TASK_UNINTERRUPTIBLE | __TASK_STOPPED | \
				 __TASK_TRACED | EXIT_ZOMBIE | EXIT_DEAD)

#define task_is_traced(task)	((task->state & __TASK_TRACED) != 0)
#define task_is_stopped(task)	((task->state & __TASK_STOPPED) != 0)
#define task_is_stopped_or_traced(task)	\
			((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0)
#define task_contributes_to_load(task)	\
				((task->state & TASK_UNINTERRUPTIBLE) != 0 && \
				 (task->flags & PF_FROZEN) == 0 && \
				 (task->state & TASK_NOLOAD) == 0)

#ifdef CONFIG_DEBUG_ATOMIC_SLEEP

#define __set_current_state(state_value)			\
	do {							\
		current->task_state_change = _THIS_IP_;		\
		current->state = (state_value);			\
	} while (0)
#define set_current_state(state_value)				\
	do {							\
		current->task_state_change = _THIS_IP_;		\
		smp_store_mb(current->state, (state_value));	\
	} while (0)

#else
/*
 * set_current_state() includes a barrier so that the write of current->state
 * is correctly serialised wrt the caller's subsequent test of whether to
 * actually sleep:
 *
 *   for (;;) {
 *	set_current_state(TASK_UNINTERRUPTIBLE);
 *	if (!need_sleep)
 *		break;
 *
 *	schedule();
 *   }
 *   __set_current_state(TASK_RUNNING);
 *
 * If the caller does not need such serialisation (because, for instance, the
 * condition test and condition change and wakeup are under the same lock) then
 * use __set_current_state().
 *
 * The above is typically ordered against the wakeup, which does:
 *
 *	need_sleep = false;
 *	wake_up_state(p, TASK_UNINTERRUPTIBLE);
 *
 * Where wake_up_state() (and all other wakeup primitives) imply enough
 * barriers to order the store of the variable against wakeup.
 *
 * Wakeup will do: if (@state & p->state) p->state = TASK_RUNNING, that is,
 * once it observes the TASK_UNINTERRUPTIBLE store the waking CPU can issue a
 * TASK_RUNNING store which can collide with __set_current_state(TASK_RUNNING).
 *
 * This is obviously fine, since they both store the exact same value.
 *
 * Also see the comments of try_to_wake_up().
 */
#define __set_current_state(state_value)		\
	do { current->state = (state_value); } while (0)
#define set_current_state(state_value)			\
	smp_store_mb(current->state, (state_value))
#endif
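
/*
 * For completeness (illustrative sketch; 'need_sleep' and 'p' are
 * placeholders): the waker side of the pattern documented above is
 * typically just
 *
 *	need_sleep = false;
 *	wake_up_process(p);
 *
 * wake_up_process() wakes TASK_NORMAL (interruptible and uninterruptible)
 * sleepers and implies the ordering described above.
 */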

/* Task command name length */
#define TASK_COMM_LEN 16

#include <linux/spinlock.h>

/*
 * This serializes "schedule()" and also protects
 * the run-queue from deletions/modifications (but
 * _adding_ to the beginning of the run-queue has
 * a separate lock).
 */
extern rwlock_t tasklist_lock;
extern spinlock_t mmlist_lock;

struct task_struct;

#ifdef CONFIG_PROVE_RCU
extern int lockdep_tasklist_lock_is_held(void);
#endif /* #ifdef CONFIG_PROVE_RCU */

extern void sched_init(void);
extern void sched_init_smp(void);
extern asmlinkage void schedule_tail(struct task_struct *prev);
extern void init_idle(struct task_struct *idle, int cpu);
extern void init_idle_bootup_task(struct task_struct *idle);

extern cpumask_var_t cpu_isolated_map;

extern int runqueue_is_locked(int cpu);

#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
extern void nohz_balance_enter_idle(int cpu);
extern void set_cpu_sd_state_idle(void);
extern int get_nohz_timer_target(void);
#else
static inline void nohz_balance_enter_idle(int cpu) { }
static inline void set_cpu_sd_state_idle(void) { }
#endif

/*
 * Only dump TASK_* tasks. (0 for all tasks)
 */
extern void show_state_filter(unsigned long state_filter);

static inline void show_state(void)
{
	show_state_filter(0);
}

extern void show_regs(struct pt_regs *);

/*
 * TASK is a pointer to the task whose backtrace we want to see (or NULL for current
 * task), SP is the stack pointer of the first frame that should be shown in the back
 * trace (or NULL if the entire call-chain of the task should be shown).
 */
extern void show_stack(struct task_struct *task, unsigned long *sp);

extern void cpu_init (void);
extern void trap_init(void);
extern void update_process_times(int user);
extern void scheduler_tick(void);
extern int sched_cpu_starting(unsigned int cpu);
extern int sched_cpu_activate(unsigned int cpu);
extern int sched_cpu_deactivate(unsigned int cpu);
#ifdef CONFIG_HOTPLUG_CPU
extern int sched_cpu_dying(unsigned int cpu);
#else
# define sched_cpu_dying	NULL
#endif
extern void sched_show_task(struct task_struct *p);

#ifdef CONFIG_LOCKUP_DETECTOR
extern void touch_softlockup_watchdog_sched(void);
extern void touch_softlockup_watchdog(void);
extern void touch_softlockup_watchdog_sync(void);
extern void touch_all_softlockup_watchdogs(void);
extern int proc_dowatchdog_thresh(struct ctl_table *table, int write,
				  void __user *buffer,
				  size_t *lenp, loff_t *ppos);
extern unsigned int  softlockup_panic;
extern unsigned int  hardlockup_panic;
void lockup_detector_init(void);
#else
static inline void touch_softlockup_watchdog_sched(void)
{
}
static inline void touch_softlockup_watchdog(void)
{
}
static inline void touch_softlockup_watchdog_sync(void)
{
}
static inline void touch_all_softlockup_watchdogs(void)
{
}
static inline void lockup_detector_init(void)
{
}
#endif

#ifdef CONFIG_DETECT_HUNG_TASK
void reset_hung_task_detector(void);
#else
static inline void reset_hung_task_detector(void)
{
}
#endif

/* Attach to any functions which should be ignored in wchan output. */
#define __sched		__attribute__((__section__(".sched.text")))

/* Linker adds these: start and end of __sched functions */
extern char __sched_text_start[], __sched_text_end[];

/* Is this address in the __sched functions? */
extern int in_sched_functions(unsigned long addr);

#define	MAX_SCHEDULE_TIMEOUT	LONG_MAX
extern signed long schedule_timeout(signed long timeout);
extern signed long schedule_timeout_interruptible(signed long timeout);
extern signed long schedule_timeout_killable(signed long timeout);
extern signed long schedule_timeout_uninterruptible(signed long timeout);
extern signed long schedule_timeout_idle(signed long timeout);
asmlinkage void schedule(void);
extern void schedule_preempt_disabled(void);

extern int __must_check io_schedule_prepare(void);
extern void io_schedule_finish(int token);
extern long io_schedule_timeout(long timeout);
extern void io_schedule(void);

void __noreturn do_task_dead(void);

struct nsproxy;
struct user_namespace;

#ifdef CONFIG_MMU
extern void arch_pick_mmap_layout(struct mm_struct *mm);
extern unsigned long
arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
		       unsigned long, unsigned long);
extern unsigned long
arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
			  unsigned long len, unsigned long pgoff,
			  unsigned long flags);
#else
static inline void arch_pick_mmap_layout(struct mm_struct *mm) {}
#endif

#define SUID_DUMP_DISABLE	0	/* No setuid dumping */
#define SUID_DUMP_USER		1	/* Dump as user of process */
#define SUID_DUMP_ROOT		2	/* Dump as root */

/* mm flags */
/* for SUID_DUMP_* above */
#define MMF_DUMPABLE_BITS 2
#define MMF_DUMPABLE_MASK ((1 << MMF_DUMPABLE_BITS) - 1)
extern void set_dumpable(struct mm_struct *mm, int value);
/*
 * This returns the actual value of the suid_dumpable flag. For things
 * that are using this for checking for privilege transitions, it must
 * test against SUID_DUMP_USER rather than treating it as a boolean
 * value.
 */
static inline int __get_dumpable(unsigned long mm_flags)
{
	return mm_flags & MMF_DUMPABLE_MASK;
}

static inline int get_dumpable(struct mm_struct *mm)
{
	return __get_dumpable(mm->flags);
}
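
/*
 * Example (illustrative sketch): code checking for a privilege transition
 * should compare against SUID_DUMP_USER explicitly rather than treating
 * the value as a boolean, since SUID_DUMP_ROOT is also non-zero:
 *
 *	if (get_dumpable(mm) != SUID_DUMP_USER)
 *		limit_further_access();		// hypothetical helper
 */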

/* coredump filter bits */
#define MMF_DUMP_ANON_PRIVATE	2
#define MMF_DUMP_ANON_SHARED	3
#define MMF_DUMP_MAPPED_PRIVATE	4
#define MMF_DUMP_MAPPED_SHARED	5
#define MMF_DUMP_ELF_HEADERS	6
#define MMF_DUMP_HUGETLB_PRIVATE 7
#define MMF_DUMP_HUGETLB_SHARED  8
#define MMF_DUMP_DAX_PRIVATE	9
#define MMF_DUMP_DAX_SHARED	10

#define MMF_DUMP_FILTER_SHIFT	MMF_DUMPABLE_BITS
#define MMF_DUMP_FILTER_BITS	9
#define MMF_DUMP_FILTER_MASK \
	(((1 << MMF_DUMP_FILTER_BITS) - 1) << MMF_DUMP_FILTER_SHIFT)
#define MMF_DUMP_FILTER_DEFAULT \
498
	((1 << MMF_DUMP_ANON_PRIVATE) |	(1 << MMF_DUMP_ANON_SHARED) |\
499 500 501 502 503 504 505
	 (1 << MMF_DUMP_HUGETLB_PRIVATE) | MMF_DUMP_MASK_DEFAULT_ELF)

#ifdef CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS
# define MMF_DUMP_MASK_DEFAULT_ELF	(1 << MMF_DUMP_ELF_HEADERS)
#else
# define MMF_DUMP_MASK_DEFAULT_ELF	0
#endif
					/* leave room for more dump flags */
#define MMF_VM_MERGEABLE	16	/* KSM may merge identical pages */
#define MMF_VM_HUGEPAGE		17	/* set when VM_HUGEPAGE is set on vma */
/*
 * This one-shot flag is dropped due to necessity of changing exe once again
 * on NFS restore
 */
//#define MMF_EXE_FILE_CHANGED	18	/* see prctl_set_mm_exe_file() */

#define MMF_HAS_UPROBES		19	/* has uprobes */
#define MMF_RECALC_UPROBES	20	/* MMF_HAS_UPROBES can be wrong */
#define MMF_OOM_SKIP		21	/* mm is of no interest for the OOM killer */
#define MMF_UNSTABLE		22	/* mm is unstable for copy_from_user */
#define MMF_HUGE_ZERO_PAGE	23      /* mm has ever used the global huge zero page */

#define MMF_INIT_MASK		(MMF_DUMPABLE_MASK | MMF_DUMP_FILTER_MASK)

struct sighand_struct {
	atomic_t		count;
	struct k_sigaction	action[_NSIG];
	spinlock_t		siglock;
D
Davide Libenzi 已提交
527
	wait_queue_head_t	signalfd_wqh;
L
Linus Torvalds 已提交
528 529
};

530
struct pacct_struct {
531 532
	int			ac_flag;
	long			ac_exitcode;
533
	unsigned long		ac_mem;
534
	u64			ac_utime, ac_stime;
535
	unsigned long		ac_minflt, ac_majflt;
536 537
};

538
struct cpu_itimer {
539 540
	u64 expires;
	u64 incr;
541 542
};

543
/**
544
 * struct prev_cputime - snapshot of system and user cputime
545 546
 * @utime: time spent in user mode
 * @stime: time spent in system mode
547
 * @lock: protects the above two fields
548
 *
549 550
 * Stores previous user/system time values such that we can guarantee
 * monotonicity.
551
 */
552 553
struct prev_cputime {
#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
554 555
	u64 utime;
	u64 stime;
556 557
	raw_spinlock_t lock;
#endif
558 559
};

560 561 562 563 564 565 566 567
static inline void prev_cputime_init(struct prev_cputime *prev)
{
#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
	prev->utime = prev->stime = 0;
	raw_spin_lock_init(&prev->lock);
#endif
}

568 569
/**
 * struct task_cputime - collected CPU time counts
570 571
 * @utime:		time spent in user mode, in nanoseconds
 * @stime:		time spent in kernel mode, in nanoseconds
572
 * @sum_exec_runtime:	total time spent on the CPU, in nanoseconds
573
 *
574 575 576
 * This structure groups together three kinds of CPU time that are tracked for
 * threads and thread groups.  Most things considering CPU time want to group
 * these counts together and treat all three of them in parallel.
577 578
 */
struct task_cputime {
579 580
	u64 utime;
	u64 stime;
581 582
	unsigned long long sum_exec_runtime;
};
583

584 585
/* Alternate field names when used to cache expirations. */
#define virt_exp	utime
586
#define prof_exp	stime
587 588
#define sched_exp	sum_exec_runtime

589 590 591 592 593 594 595 596 597 598 599 600 601 602 603 604 605
/*
 * This is the atomic variant of task_cputime, which can be used for
 * storing and updating task_cputime statistics without locking.
 */
struct task_cputime_atomic {
	atomic64_t utime;
	atomic64_t stime;
	atomic64_t sum_exec_runtime;
};

#define INIT_CPUTIME_ATOMIC \
	(struct task_cputime_atomic) {				\
		.utime = ATOMIC64_INIT(0),			\
		.stime = ATOMIC64_INIT(0),			\
		.sum_exec_runtime = ATOMIC64_INIT(0),		\
	}

606
#define PREEMPT_DISABLED	(PREEMPT_DISABLE_OFFSET + PREEMPT_ENABLED)
607

P
Peter Zijlstra 已提交
608
/*
609 610
 * Disable preemption until the scheduler is running -- use an unconditional
 * value so that it also works on !PREEMPT_COUNT kernels.
P
Peter Zijlstra 已提交
611
 *
612
 * Reset by start_kernel()->sched_init()->init_idle()->init_idle_preempt_count().
P
Peter Zijlstra 已提交
613
 */
614
#define INIT_PREEMPT_COUNT	PREEMPT_OFFSET
615

P
Peter Zijlstra 已提交
616
/*
617 618
 * Initial preempt_count value; reflects the preempt_count schedule invariant
 * which states that during context switches:
P
Peter Zijlstra 已提交
619
 *
620 621 622 623
 *    preempt_count() == 2*PREEMPT_DISABLE_OFFSET
 *
 * Note: PREEMPT_DISABLE_OFFSET is 0 for !PREEMPT_COUNT kernels.
 * Note: See finish_task_switch().
P
Peter Zijlstra 已提交
624
 */
625
#define FORK_PREEMPT_COUNT	(2*PREEMPT_DISABLE_OFFSET + PREEMPT_ENABLED)
P
Peter Zijlstra 已提交
626

627
/**
628
 * struct thread_group_cputimer - thread group interval timer counts
629
 * @cputime_atomic:	atomic thread group interval timers.
630 631
 * @running:		true when there are timers running and
 *			@cputime_atomic receives updates.
632 633
 * @checking_timer:	true when a thread in the group is in the
 *			process of checking for thread group timers.
634 635
 *
 * This structure contains the version of task_cputime, above, that is
636
 * used for thread group CPU timer calculations.
637
 */
638
struct thread_group_cputimer {
639
	struct task_cputime_atomic cputime_atomic;
640
	bool running;
641
	bool checking_timer;
642 643
};

644
#include <linux/rwsem.h>
645 646
struct autogroup;

L
Linus Torvalds 已提交
647
/*
648
 * NOTE! "signal_struct" does not have its own
L
Linus Torvalds 已提交
649 650 651 652 653 654
 * locking, because a shared signal_struct always
 * implies a shared sighand_struct, so locking
 * sighand_struct is always a proper superset of
 * the locking of signal_struct.
 */
struct signal_struct {
655
	atomic_t		sigcnt;
L
Linus Torvalds 已提交
656
	atomic_t		live;
657
	int			nr_threads;
658
	struct list_head	thread_head;
L
Linus Torvalds 已提交
659 660 661 662

	wait_queue_head_t	wait_chldexit;	/* for wait4() */

	/* current thread group signal load-balancing target: */
663
	struct task_struct	*curr_target;
L
Linus Torvalds 已提交
664 665 666 667 668 669 670 671 672 673 674 675

	/* shared signal handling: */
	struct sigpending	shared_pending;

	/* thread group exit support */
	int			group_exit_code;
	/* overloaded:
	 * - notify group_exit_task when ->count is equal to notify_count
	 * - everyone except group_exit_task is stopped during signal delivery
	 *   of fatal signals, group_exit_task processes the signal.
	 */
	int			notify_count;
676
	struct task_struct	*group_exit_task;
L
Linus Torvalds 已提交
677 678 679 680 681

	/* thread group stop support, overloads group_exit_code too */
	int			group_stop_count;
	unsigned int		flags; /* see SIGNAL_* flags below */

682 683 684 685 686 687 688 689 690 691 692 693
	/*
	 * PR_SET_CHILD_SUBREAPER marks a process, like a service
	 * manager, to re-parent orphan (double-forking) child processes
	 * to this process instead of 'init'. The service manager is
	 * able to receive SIGCHLD signals and is able to investigate
	 * the process until it calls wait(). All children of this
	 * process will inherit a flag if they should look for a
	 * child_subreaper process at exit.
	 */
	unsigned int		is_child_subreaper:1;
	unsigned int		has_child_subreaper:1;

694 695
#ifdef CONFIG_POSIX_TIMERS

L
Linus Torvalds 已提交
696
	/* POSIX.1b Interval Timers */
697 698
	int			posix_timer_id;
	struct list_head	posix_timers;
L
Linus Torvalds 已提交
699 700

	/* ITIMER_REAL timer for the process */
701 702
	struct hrtimer real_timer;
	ktime_t it_real_incr;
L
Linus Torvalds 已提交
703

704 705 706 707 708 709
	/*
	 * ITIMER_PROF and ITIMER_VIRTUAL timers for the process, we use
	 * CPUCLOCK_PROF and CPUCLOCK_VIRT for indexing array as these
	 * values are defined to 0 and 1 respectively
	 */
	struct cpu_itimer it[2];
L
Linus Torvalds 已提交
710

711
	/*
712 713
	 * Thread group totals for process CPU timers.
	 * See thread_group_cputimer(), et al, for details.
714
	 */
715
	struct thread_group_cputimer cputimer;
716 717 718 719

	/* Earliest-expiration cache. */
	struct task_cputime cputime_expires;

720 721 722 723 724 725
	struct list_head cpu_timers[3];

#endif

	struct pid *leader_pid;

726
#ifdef CONFIG_NO_HZ_FULL
727
	atomic_t tick_dep_mask;
728 729
#endif

730
	struct pid *tty_old_pgrp;
731

L
Linus Torvalds 已提交
732 733 734 735 736
	/* boolean value for session group leader */
	int leader;

	struct tty_struct *tty; /* NULL if no tty */

737 738 739
#ifdef CONFIG_SCHED_AUTOGROUP
	struct autogroup *autogroup;
#endif
L
Linus Torvalds 已提交
740 741 742 743 744 745
	/*
	 * Cumulative resource counters for dead threads in the group,
	 * and for reaped dead child processes forked by this group.
	 * Live threads maintain their own counters and add to these
	 * in __exit_signal, except for the group leader.
	 */
746
	seqlock_t stats_lock;
747
	u64 utime, stime, cutime, cstime;
748 749
	u64 gtime;
	u64 cgtime;
750
	struct prev_cputime prev_cputime;
L
Linus Torvalds 已提交
751 752
	unsigned long nvcsw, nivcsw, cnvcsw, cnivcsw;
	unsigned long min_flt, maj_flt, cmin_flt, cmaj_flt;
753
	unsigned long inblock, oublock, cinblock, coublock;
J
Jiri Pirko 已提交
754
	unsigned long maxrss, cmaxrss;
755
	struct task_io_accounting ioac;
L
Linus Torvalds 已提交
756

757 758 759 760 761 762 763 764
	/*
	 * Cumulative ns of schedule CPU time fo dead threads in the
	 * group, not including a zombie group leader, (This only differs
	 * from jiffies_to_ns(utime + stime) if sched_clock uses something
	 * other than jiffies.)
	 */
	unsigned long long sum_sched_runtime;

L
Linus Torvalds 已提交
765 766 767 768 769 770 771 772 773 774 775
	/*
	 * We don't bother to synchronize most readers of this at all,
	 * because there is no reader checking a limit that actually needs
	 * to get both rlim_cur and rlim_max atomically, and either one
	 * alone is a single word that can safely be read normally.
	 * getrlimit/setrlimit use task_lock(current->group_leader) to
	 * protect this instead of the siglock, because they really
	 * have no need to disable irqs.
	 */
	struct rlimit rlim[RLIM_NLIMITS];

776 777 778
#ifdef CONFIG_BSD_PROCESS_ACCT
	struct pacct_struct pacct;	/* per-process accounting information */
#endif
779 780 781
#ifdef CONFIG_TASKSTATS
	struct taskstats *stats;
#endif
M
Miloslav Trmac 已提交
782 783 784 785
#ifdef CONFIG_AUDIT
	unsigned audit_tty;
	struct tty_audit_buf *tty_audit_buf;
#endif
786

T
Tetsuo Handa 已提交
787 788 789 790 791
	/*
	 * Thread is the potential origin of an oom condition; kill first on
	 * oom
	 */
	bool oom_flag_origin;
792 793 794
	short oom_score_adj;		/* OOM kill score adjustment */
	short oom_score_adj_min;	/* OOM kill score adjustment min value.
					 * Only settable by CAP_SYS_RESOURCE. */
795 796
	struct mm_struct *oom_mm;	/* recorded mm when the thread group got
					 * killed by the oom killer */
797 798 799 800

	struct mutex cred_guard_mutex;	/* guard against foreign influences on
					 * credential calculations
					 * (notably. ptrace) */
L
Linus Torvalds 已提交
801 802 803 804 805 806
};

/*
 * Bits in flags field of signal_struct.
 */
#define SIGNAL_STOP_STOPPED	0x00000001 /* job control stop in effect */
807 808
#define SIGNAL_STOP_CONTINUED	0x00000002 /* SIGCONT since WCONTINUED reap */
#define SIGNAL_GROUP_EXIT	0x00000004 /* group exit in progress */
809
#define SIGNAL_GROUP_COREDUMP	0x00000008 /* coredump in progress */
810 811 812 813 814 815
/*
 * Pending notifications to parent.
 */
#define SIGNAL_CLD_STOPPED	0x00000010
#define SIGNAL_CLD_CONTINUED	0x00000020
#define SIGNAL_CLD_MASK		(SIGNAL_CLD_STOPPED|SIGNAL_CLD_CONTINUED)
L
Linus Torvalds 已提交
816

817 818
#define SIGNAL_UNKILLABLE	0x00000040 /* for init: ignore fatal signals */

819 820 821 822 823 824 825 826 827 828
#define SIGNAL_STOP_MASK (SIGNAL_CLD_MASK | SIGNAL_STOP_STOPPED | \
			  SIGNAL_STOP_CONTINUED)

static inline void signal_set_stop_flags(struct signal_struct *sig,
					 unsigned int flags)
{
	WARN_ON(sig->flags & (SIGNAL_GROUP_EXIT|SIGNAL_GROUP_COREDUMP));
	sig->flags = (sig->flags & ~SIGNAL_STOP_MASK) | flags;
}

829 830 831 832 833 834 835
/* If true, all threads except ->group_exit_task have pending SIGKILL */
static inline int signal_group_exit(const struct signal_struct *sig)
{
	return	(sig->flags & SIGNAL_GROUP_EXIT) ||
		(sig->group_exit_task != NULL);
}

L
Linus Torvalds 已提交
836 837 838 839 840 841 842
/*
 * Some day this will be a full-fledged user tracking system..
 */
struct user_struct {
	atomic_t __count;	/* reference count */
	atomic_t processes;	/* How many processes does this user have? */
	atomic_t sigpending;	/* How many pending signals does this user have? */
843 844 845
#ifdef CONFIG_FANOTIFY
	atomic_t fanotify_listeners;
#endif
846
#ifdef CONFIG_EPOLL
847
	atomic_long_t epoll_watches; /* The number of file descriptors currently watched */
848
#endif
A
Alexey Dobriyan 已提交
849
#ifdef CONFIG_POSIX_MQUEUE
L
Linus Torvalds 已提交
850 851
	/* protected by mq_lock	*/
	unsigned long mq_bytes;	/* How many bytes can be allocated to mqueue? */
A
Alexey Dobriyan 已提交
852
#endif
L
Linus Torvalds 已提交
853
	unsigned long locked_shm; /* How many pages of mlocked shm ? */
854
	unsigned long unix_inflight;	/* How many files in flight in unix sockets */
855
	atomic_long_t pipe_bufs;  /* how many pages are allocated in pipe buffers */
L
Linus Torvalds 已提交
856 857 858 859 860 861 862

#ifdef CONFIG_KEYS
	struct key *uid_keyring;	/* UID specific keyring */
	struct key *session_keyring;	/* UID's default session keyring */
#endif

	/* Hash table maintenance information */
P
Pavel Emelyanov 已提交
863
	struct hlist_node uidhash_node;
864
	kuid_t uid;
865

866
#if defined(CONFIG_PERF_EVENTS) || defined(CONFIG_BPF_SYSCALL)
867 868
	atomic_long_t locked_vm;
#endif
L
Linus Torvalds 已提交
869 870
};

871
extern int uids_sysfs_init(void);
872

873
extern struct user_struct *find_user(kuid_t);
L
Linus Torvalds 已提交
874 875 876 877

extern struct user_struct root_user;
#define INIT_USER (&root_user)

878

L
Linus Torvalds 已提交
879 880 881
struct backing_dev_info;
struct reclaim_state;

882
#ifdef CONFIG_SCHED_INFO
L
Linus Torvalds 已提交
883 884
struct sched_info {
	/* cumulative counters */
885
	unsigned long pcount;	      /* # of times run on this cpu */
886
	unsigned long long run_delay; /* time spent waiting on a runqueue */
L
Linus Torvalds 已提交
887 888

	/* timestamps */
889 890
	unsigned long long last_arrival,/* when we last ran on a cpu */
			   last_queued;	/* when we were last queued to run */
L
Linus Torvalds 已提交
891
};
892
#endif /* CONFIG_SCHED_INFO */
L
Linus Torvalds 已提交
893

894 895 896 897 898 899 900 901 902 903 904 905 906 907
#ifdef CONFIG_TASK_DELAY_ACCT
struct task_delay_info {
	spinlock_t	lock;
	unsigned int	flags;	/* Private per-task flags */

	/* For each stat XXX, add following, aligned appropriately
	 *
	 * struct timespec XXX_start, XXX_end;
	 * u64 XXX_delay;
	 * u32 XXX_count;
	 *
	 * Atomicity of updates to XXX_delay, XXX_count protected by
	 * single lock above (split into XXX_lock if contention is an issue).
	 */
908 909 910 911 912 913

	/*
	 * XXX_count is incremented on every XXX operation, the delay
	 * associated with the operation is added to XXX_delay.
	 * XXX_delay contains the accumulated delay time in nanoseconds.
	 */
914
	u64 blkio_start;	/* Shared by blkio, swapin */
915 916 917 918 919 920
	u64 blkio_delay;	/* wait for sync block io completion */
	u64 swapin_delay;	/* wait for swapin block io completion */
	u32 blkio_count;	/* total count of the number of sync block */
				/* io operations performed */
	u32 swapin_count;	/* total count of the number of swapin block */
				/* io operations performed */
921

922
	u64 freepages_start;
923 924
	u64 freepages_delay;	/* wait for memory reclaim */
	u32 freepages_count;	/* total count of memory reclaim */
925
};
926 927 928 929 930 931 932 933 934 935 936
#endif	/* CONFIG_TASK_DELAY_ACCT */

static inline int sched_info_on(void)
{
#ifdef CONFIG_SCHEDSTATS
	return 1;
#elif defined(CONFIG_TASK_DELAY_ACCT)
	extern int delayacct_on;
	return delayacct_on;
#else
	return 0;
937
#endif
938
}
939

940 941 942 943
#ifdef CONFIG_SCHEDSTATS
void force_schedstat_enabled(void);
#endif

I
Ingo Molnar 已提交
944 945 946 947 948
enum cpu_idle_type {
	CPU_IDLE,
	CPU_NOT_IDLE,
	CPU_NEWLY_IDLE,
	CPU_MAX_IDLE_TYPES
L
Linus Torvalds 已提交
949 950
};

951 952 953 954 955 956 957 958 959 960
/*
 * Integer metrics need fixed point arithmetic, e.g., sched/fair
 * has a few: load, load_avg, util_avg, freq, and capacity.
 *
 * We define a basic fixed point arithmetic range, and then formalize
 * all these metrics based on that basic range.
 */
# define SCHED_FIXEDPOINT_SHIFT	10
# define SCHED_FIXEDPOINT_SCALE	(1L << SCHED_FIXEDPOINT_SHIFT)

961
/*
962
 * Increase resolution of cpu_capacity calculations
963
 */
964
#define SCHED_CAPACITY_SHIFT	SCHED_FIXEDPOINT_SHIFT
965
#define SCHED_CAPACITY_SCALE	(1L << SCHED_CAPACITY_SHIFT)
L
Linus Torvalds 已提交
966

/*
 * Wake-queues are lists of tasks with a pending wakeup, whose
 * callers have already marked the task as woken internally,
 * and can thus carry on. A common use case is being able to
 * do the wakeups once the corresponding user lock as been
 * released.
 *
 * We hold reference to each task in the list across the wakeup,
 * thus guaranteeing that the memory is still valid by the time
 * the actual wakeups are performed in wake_up_q().
 *
 * One per task suffices, because there's never a need for a task to be
 * in two wake queues simultaneously; it is forbidden to abandon a task
 * in a wake queue (a call to wake_up_q() _must_ follow), so if a task is
 * already in a wake queue, the wakeup will happen soon and the second
 * waker can just skip it.
 *
 * The DEFINE_WAKE_Q macro declares and initializes the list head.
 * wake_up_q() does NOT reinitialize the list; it's expected to be
 * called near the end of a function. Otherwise, the list can be
 * re-initialized for later re-use by wake_q_init().
 *
 * Note that this can cause spurious wakeups. schedule() callers
 * must ensure the call is done inside a loop, confirming that the
 * wakeup condition has in fact occurred.
 */
struct wake_q_node {
	struct wake_q_node *next;
};

struct wake_q_head {
	struct wake_q_node *first;
	struct wake_q_node **lastp;
};

#define WAKE_Q_TAIL ((struct wake_q_node *) 0x01)

#define DEFINE_WAKE_Q(name)				\
	struct wake_q_head name = { WAKE_Q_TAIL, &name.first }

static inline void wake_q_init(struct wake_q_head *head)
{
	head->first = WAKE_Q_TAIL;
	head->lastp = &head->first;
}

extern void wake_q_add(struct wake_q_head *head,
		       struct task_struct *task);
extern void wake_up_q(struct wake_q_head *head);
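
/*
 * Usage sketch (illustrative only; 'some_lock' and 'task' are placeholders):
 * a typical caller queues wakeups while holding a lock and issues them
 * after dropping it:
 *
 *	DEFINE_WAKE_Q(wake_q);
 *
 *	spin_lock(&some_lock);
 *	wake_q_add(&wake_q, task);
 *	spin_unlock(&some_lock);
 *
 *	wake_up_q(&wake_q);
 */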

/*
 * sched-domains (multiprocessor balancing) declarations:
 */
1020
#ifdef CONFIG_SMP
P
Peter Zijlstra 已提交
1021 1022 1023 1024
#define SD_LOAD_BALANCE		0x0001	/* Do load balancing on this domain. */
#define SD_BALANCE_NEWIDLE	0x0002	/* Balance when about to become idle */
#define SD_BALANCE_EXEC		0x0004	/* Balance on exec */
#define SD_BALANCE_FORK		0x0008	/* Balance on fork, clone */
1025
#define SD_BALANCE_WAKE		0x0010  /* Balance on wakeup */
P
Peter Zijlstra 已提交
1026
#define SD_WAKE_AFFINE		0x0020	/* Wake task to waking CPU */
1027
#define SD_ASYM_CPUCAPACITY	0x0040  /* Groups have different max cpu capacities */
1028
#define SD_SHARE_CPUCAPACITY	0x0080	/* Domain members share cpu capacity */
1029
#define SD_SHARE_POWERDOMAIN	0x0100	/* Domain members share power domain */
P
Peter Zijlstra 已提交
1030 1031
#define SD_SHARE_PKG_RESOURCES	0x0200	/* Domain members share cpu pkg resources */
#define SD_SERIALIZE		0x0400	/* Only a single load balancing instance */
1032
#define SD_ASYM_PACKING		0x0800  /* Place busy groups earlier in the domain */
P
Peter Zijlstra 已提交
1033
#define SD_PREFER_SIBLING	0x1000	/* Prefer to place tasks in a sibling domain */
1034
#define SD_OVERLAP		0x2000	/* sched_domains of this level overlap */
1035
#define SD_NUMA			0x4000	/* cross-node balancing */
1036

1037
#ifdef CONFIG_SCHED_SMT
G
Guenter Roeck 已提交
1038
static inline int cpu_smt_flags(void)
1039
{
1040
	return SD_SHARE_CPUCAPACITY | SD_SHARE_PKG_RESOURCES;
1041 1042 1043 1044
}
#endif

#ifdef CONFIG_SCHED_MC
G
Guenter Roeck 已提交
1045
static inline int cpu_core_flags(void)
1046 1047 1048 1049 1050 1051
{
	return SD_SHARE_PKG_RESOURCES;
}
#endif

#ifdef CONFIG_NUMA
G
Guenter Roeck 已提交
1052
static inline int cpu_numa_flags(void)
1053 1054 1055 1056
{
	return SD_NUMA;
}
#endif
1057

T
Tim Chen 已提交
1058 1059
extern int arch_asym_cpu_priority(int cpu);

1060 1061 1062 1063 1064 1065 1066 1067
struct sched_domain_attr {
	int relax_domain_level;
};

#define SD_ATTR_INIT	(struct sched_domain_attr) {	\
	.relax_domain_level = -1,			\
}

1068 1069
extern int sched_domain_level_max;

1070 1071
struct sched_group;

1072 1073
struct sched_domain_shared {
	atomic_t	ref;
1074
	atomic_t	nr_busy_cpus;
1075
	int		has_idle_cores;
1076 1077
};

L
Linus Torvalds 已提交
1078 1079 1080
struct sched_domain {
	/* These fields must be setup */
	struct sched_domain *parent;	/* top domain must be null terminated */
1081
	struct sched_domain *child;	/* bottom domain must be null terminated */
L
Linus Torvalds 已提交
1082 1083 1084 1085 1086 1087
	struct sched_group *groups;	/* the balancing groups of the domain */
	unsigned long min_interval;	/* Minimum balance interval ms */
	unsigned long max_interval;	/* Maximum balance interval ms */
	unsigned int busy_factor;	/* less balancing by factor if busy */
	unsigned int imbalance_pct;	/* No balance until over watermark */
	unsigned int cache_nice_tries;	/* Leave cache hot tasks for # tries */
N
Nick Piggin 已提交
1088 1089 1090 1091
	unsigned int busy_idx;
	unsigned int idle_idx;
	unsigned int newidle_idx;
	unsigned int wake_idx;
N
Nick Piggin 已提交
1092
	unsigned int forkexec_idx;
P
Peter Zijlstra 已提交
1093
	unsigned int smt_gain;
V
Vincent Guittot 已提交
1094 1095

	int nohz_idle;			/* NOHZ IDLE status */
L
Linus Torvalds 已提交
1096
	int flags;			/* See SD_* */
1097
	int level;
L
Linus Torvalds 已提交
1098 1099 1100 1101 1102 1103

	/* Runtime fields. */
	unsigned long last_balance;	/* init to jiffies. units in jiffies */
	unsigned int balance_interval;	/* initialise to 1. units in ms. */
	unsigned int nr_balance_failed; /* initialise to 0 */

1104
	/* idle_balance() stats */
1105
	u64 max_newidle_lb_cost;
1106
	unsigned long next_decay_max_lb_cost;
P
Peter Zijlstra 已提交
1107

1108 1109
	u64 avg_scan_cost;		/* select_idle_sibling */

L
Linus Torvalds 已提交
1110 1111
#ifdef CONFIG_SCHEDSTATS
	/* load_balance() stats */
1112 1113 1114 1115 1116 1117 1118 1119
	unsigned int lb_count[CPU_MAX_IDLE_TYPES];
	unsigned int lb_failed[CPU_MAX_IDLE_TYPES];
	unsigned int lb_balanced[CPU_MAX_IDLE_TYPES];
	unsigned int lb_imbalance[CPU_MAX_IDLE_TYPES];
	unsigned int lb_gained[CPU_MAX_IDLE_TYPES];
	unsigned int lb_hot_gained[CPU_MAX_IDLE_TYPES];
	unsigned int lb_nobusyg[CPU_MAX_IDLE_TYPES];
	unsigned int lb_nobusyq[CPU_MAX_IDLE_TYPES];
L
Linus Torvalds 已提交
1120 1121

	/* Active load balancing */
1122 1123 1124
	unsigned int alb_count;
	unsigned int alb_failed;
	unsigned int alb_pushed;
L
Linus Torvalds 已提交
1125

1126
	/* SD_BALANCE_EXEC stats */
1127 1128 1129
	unsigned int sbe_count;
	unsigned int sbe_balanced;
	unsigned int sbe_pushed;
L
Linus Torvalds 已提交
1130

1131
	/* SD_BALANCE_FORK stats */
1132 1133 1134
	unsigned int sbf_count;
	unsigned int sbf_balanced;
	unsigned int sbf_pushed;
1135

L
Linus Torvalds 已提交
1136
	/* try_to_wake_up() stats */
1137 1138 1139
	unsigned int ttwu_wake_remote;
	unsigned int ttwu_move_affine;
	unsigned int ttwu_move_balance;
L
Linus Torvalds 已提交
1140
#endif
1141 1142 1143
#ifdef CONFIG_SCHED_DEBUG
	char *name;
#endif
1144 1145 1146 1147
	union {
		void *private;		/* used during construction */
		struct rcu_head rcu;	/* used during destruction */
	};
1148
	struct sched_domain_shared *shared;
1149

1150
	unsigned int span_weight;
1151 1152 1153 1154 1155 1156 1157 1158
	/*
	 * Span of all CPUs in this domain.
	 *
	 * NOTE: this field is variable length. (Allocated dynamically
	 * by attaching extra space to the end of the structure,
	 * depending on how many CPUs the kernel has booted up with)
	 */
	unsigned long span[0];
L
Linus Torvalds 已提交
1159 1160
};

1161 1162
static inline struct cpumask *sched_domain_span(struct sched_domain *sd)
{
1163
	return to_cpumask(sd->span);
1164 1165
}

1166
extern void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
1167
				    struct sched_domain_attr *dattr_new);
P
Paul Jackson 已提交
1168

1169 1170 1171 1172
/* Allocate an array of sched domains, for partition_sched_domains(). */
cpumask_var_t *alloc_sched_domains(unsigned int ndoms);
void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms);

1173 1174
bool cpus_share_cache(int this_cpu, int that_cpu);

1175
typedef const struct cpumask *(*sched_domain_mask_f)(int cpu);
G
Guenter Roeck 已提交
1176
typedef int (*sched_domain_flags_f)(void);
1177 1178 1179 1180 1181

#define SDTL_OVERLAP	0x01

struct sd_data {
	struct sched_domain **__percpu sd;
1182
	struct sched_domain_shared **__percpu sds;
1183
	struct sched_group **__percpu sg;
1184
	struct sched_group_capacity **__percpu sgc;
1185 1186 1187 1188 1189 1190 1191 1192 1193 1194 1195 1196 1197 1198
};

struct sched_domain_topology_level {
	sched_domain_mask_f mask;
	sched_domain_flags_f sd_flags;
	int		    flags;
	int		    numa_level;
	struct sd_data      data;
#ifdef CONFIG_SCHED_DEBUG
	char                *name;
#endif
};

extern void set_sched_topology(struct sched_domain_topology_level *tl);
1199
extern void wake_up_if_idle(int cpu);
1200 1201 1202 1203 1204 1205 1206

#ifdef CONFIG_SCHED_DEBUG
# define SD_INIT_NAME(type)		.name = #type
#else
# define SD_INIT_NAME(type)
#endif

1207
#else /* CONFIG_SMP */
L
Linus Torvalds 已提交
1208

1209
struct sched_domain_attr;
1210

1211
static inline void
1212
partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
1213 1214
			struct sched_domain_attr *dattr_new)
{
1215
}
1216 1217 1218 1219 1220 1221

static inline bool cpus_share_cache(int this_cpu, int that_cpu)
{
	return true;
}

1222
#endif	/* !CONFIG_SMP */
L
Linus Torvalds 已提交
1223

1224

L
Linus Torvalds 已提交
1225 1226 1227
struct io_context;			/* See blkdev.h */


1228
#ifdef ARCH_HAS_PREFETCH_SWITCH_STACK
1229
extern void prefetch_stack(struct task_struct *t);
1230 1231 1232
#else
static inline void prefetch_stack(struct task_struct *t) { }
#endif
L
Linus Torvalds 已提交
1233 1234 1235

struct audit_context;		/* See audit.c */
struct mempolicy;
1236
struct pipe_inode_info;
1237
struct uts_namespace;
L
Linus Torvalds 已提交
1238

I
Ingo Molnar 已提交
1239
struct load_weight {
1240 1241
	unsigned long weight;
	u32 inv_weight;
I
Ingo Molnar 已提交
1242 1243
};

/*
 * The load_avg/util_avg accumulates an infinite geometric series
 * (see __update_load_avg() in kernel/sched/fair.c).
 *
 * [load_avg definition]
 *
 *   load_avg = runnable% * scale_load_down(load)
 *
 * where runnable% is the time ratio that a sched_entity is runnable.
 * For cfs_rq, it is the aggregated load_avg of all runnable and
 * blocked sched_entities.
 *
 * load_avg may also take frequency scaling into account:
 *
 *   load_avg = runnable% * scale_load_down(load) * freq%
 *
 * where freq% is the CPU frequency normalized to the highest frequency.
 *
 * [util_avg definition]
 *
 *   util_avg = running% * SCHED_CAPACITY_SCALE
 *
 * where running% is the time ratio that a sched_entity is running on
 * a CPU. For cfs_rq, it is the aggregated util_avg of all runnable
 * and blocked sched_entities.
 *
 * util_avg may also factor frequency scaling and CPU capacity scaling:
 *
 *   util_avg = running% * SCHED_CAPACITY_SCALE * freq% * capacity%
 *
 * where freq% is the same as above, and capacity% is the CPU capacity
 * normalized to the greatest capacity (due to uarch differences, etc).
 *
 * N.B., the above ratios (runnable%, running%, freq%, and capacity%)
 * themselves are in the range of [0, 1]. To do fixed point arithmetics,
 * we therefore scale them to as large a range as necessary. This is for
 * example reflected by util_avg's SCHED_CAPACITY_SCALE.
 *
 * [Overflow issue]
 *
 * The 64-bit load_sum can have 4353082796 (=2^64/47742/88761) entities
 * with the highest load (=88761), always runnable on a single cfs_rq,
 * and should not overflow as the number already hits PID_MAX_LIMIT.
 *
 * For all other cases (including 32-bit kernels), struct load_weight's
 * weight will overflow first before we do, because:
 *
 *    Max(load_avg) <= Max(load.weight)
 *
 * Then it is the load_weight's responsibility to consider overflow
 * issues.
 */
struct sched_avg {
	u64 last_update_time, load_sum;
	u32 util_sum, period_contrib;
	unsigned long load_avg, util_avg;
};
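
/*
 * Worked example (illustrative only): a task that has recently been
 * runnable about half of the time, with the nice-0 weight of 1024 after
 * scale_load_down(), converges on
 *
 *	load_avg ~= 0.5 * 1024 = 512
 *
 * and if it actually runs about a quarter of the time on a full-capacity
 * CPU,
 *
 *	util_avg ~= 0.25 * SCHED_CAPACITY_SCALE = 256.
 *
 * The exact values depend on the geometric-series decay performed by
 * __update_load_avg(); the figures above are only the steady-state limits.
 */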

#ifdef CONFIG_SCHEDSTATS
1303
struct sched_statistics {
I
Ingo Molnar 已提交
1304
	u64			wait_start;
1305
	u64			wait_max;
1306 1307
	u64			wait_count;
	u64			wait_sum;
1308 1309
	u64			iowait_count;
	u64			iowait_sum;
1310

I
Ingo Molnar 已提交
1311 1312
	u64			sleep_start;
	u64			sleep_max;
1313 1314 1315
	s64			sum_sleep_runtime;

	u64			block_start;
I
Ingo Molnar 已提交
1316 1317
	u64			block_max;
	u64			exec_max;
I
Ingo Molnar 已提交
1318
	u64			slice_max;
1319 1320 1321 1322 1323 1324 1325 1326 1327 1328 1329 1330 1331 1332 1333 1334

	u64			nr_migrations_cold;
	u64			nr_failed_migrations_affine;
	u64			nr_failed_migrations_running;
	u64			nr_failed_migrations_hot;
	u64			nr_forced_migrations;

	u64			nr_wakeups;
	u64			nr_wakeups_sync;
	u64			nr_wakeups_migrate;
	u64			nr_wakeups_local;
	u64			nr_wakeups_remote;
	u64			nr_wakeups_affine;
	u64			nr_wakeups_affine_attempts;
	u64			nr_wakeups_passive;
	u64			nr_wakeups_idle;
1335 1336 1337 1338 1339 1340 1341 1342 1343 1344 1345 1346 1347 1348 1349 1350 1351 1352
};
#endif

struct sched_entity {
	struct load_weight	load;		/* for load-balancing */
	struct rb_node		run_node;
	struct list_head	group_node;
	unsigned int		on_rq;

	u64			exec_start;
	u64			sum_exec_runtime;
	u64			vruntime;
	u64			prev_sum_exec_runtime;

	u64			nr_migrations;

#ifdef CONFIG_SCHEDSTATS
	struct sched_statistics statistics;
1353 1354
#endif

I
Ingo Molnar 已提交
1355
#ifdef CONFIG_FAIR_GROUP_SCHED
P
Peter Zijlstra 已提交
1356
	int			depth;
I
Ingo Molnar 已提交
1357 1358 1359 1360 1361 1362
	struct sched_entity	*parent;
	/* rq on which this entity is (to be) queued: */
	struct cfs_rq		*cfs_rq;
	/* rq "owned" by this entity/group: */
	struct cfs_rq		*my_q;
#endif
1363

1364
#ifdef CONFIG_SMP
1365 1366 1367 1368 1369 1370 1371
	/*
	 * Per entity load average tracking.
	 *
	 * Put into separate cache line so it does not
	 * collide with read-mostly values above.
	 */
	struct sched_avg	avg ____cacheline_aligned_in_smp;
1372
#endif
I
Ingo Molnar 已提交
1373
};
1374

P
Peter Zijlstra 已提交
1375 1376
struct sched_rt_entity {
	struct list_head run_list;
1377
	unsigned long timeout;
1378
	unsigned long watchdog_stamp;
1379
	unsigned int time_slice;
1380 1381
	unsigned short on_rq;
	unsigned short on_list;
P
Peter Zijlstra 已提交
1382

1383
	struct sched_rt_entity *back;
1384
#ifdef CONFIG_RT_GROUP_SCHED
P
Peter Zijlstra 已提交
1385 1386 1387 1388 1389 1390
	struct sched_rt_entity	*parent;
	/* rq on which this entity is (to be) queued: */
	struct rt_rq		*rt_rq;
	/* rq "owned" by this entity/group: */
	struct rt_rq		*my_q;
#endif
P
Peter Zijlstra 已提交
1391 1392
};

1393 1394 1395 1396 1397
struct sched_dl_entity {
	struct rb_node	rb_node;

	/*
	 * Original scheduling parameters. Copied here from sched_attr
1398 1399
	 * during sched_setattr(), they will remain the same until
	 * the next sched_setattr().
1400 1401 1402
	 */
	u64 dl_runtime;		/* maximum runtime for each instance	*/
	u64 dl_deadline;	/* relative deadline of each instance	*/
1403
	u64 dl_period;		/* separation of two instances (period) */
1404
	u64 dl_bw;		/* dl_runtime / dl_deadline		*/
1405 1406 1407 1408 1409 1410 1411 1412 1413 1414 1415 1416 1417 1418 1419 1420 1421

	/*
	 * Actual scheduling parameters. Initialized with the values above,
	 * they are continously updated during task execution. Note that
	 * the remaining runtime could be < 0 in case we are in overrun.
	 */
	s64 runtime;		/* remaining runtime for this instance	*/
	u64 deadline;		/* absolute deadline for this instance	*/
	unsigned int flags;	/* specifying the scheduler behaviour	*/

	/*
	 * Some bool flags:
	 *
	 * @dl_throttled tells if we exhausted the runtime. If so, the
	 * task has to wait for a replenishment to be performed at the
	 * next firing of dl_timer.
	 *
1422 1423
	 * @dl_boosted tells if we are boosted due to DI. If so we are
	 * outside bandwidth enforcement mechanism (but only until we
1424 1425 1426 1427
	 * exit the critical section);
	 *
	 * @dl_yielded tells if task gave up the cpu before consuming
	 * all its available runtime during the last job.
1428
	 */
1429
	int dl_throttled, dl_boosted, dl_yielded;
1430 1431 1432 1433 1434 1435 1436

	/*
	 * Bandwidth enforcement timer. Each -deadline task has its
	 * own bandwidth to be enforced, thus we need one timer per task.
	 */
	struct hrtimer dl_timer;
};
1437

1438 1439
union rcu_special {
	struct {
1440 1441 1442 1443 1444 1445
		u8 blocked;
		u8 need_qs;
		u8 exp_need_qs;
		u8 pad;	/* Otherwise the compiler can store garbage here. */
	} b; /* Bits. */
	u32 s; /* Set of bits. */
1446
};
1447 1448
struct rcu_node;

P
Peter Zijlstra 已提交
1449 1450 1451
enum perf_event_task_context {
	perf_invalid_context = -1,
	perf_hw_context = 0,
1452
	perf_sw_context,
P
Peter Zijlstra 已提交
1453 1454 1455
	perf_nr_task_contexts,
};

1456 1457 1458 1459 1460 1461 1462 1463 1464 1465
/* Track pages that require TLB flushes */
struct tlbflush_unmap_batch {
	/*
	 * Each bit set is a CPU that potentially has a TLB entry for one of
	 * the PFNs being flushed. See set_tlb_ubc_flush_pending().
	 */
	struct cpumask cpumask;

	/* True if any bit in cpumask is set */
	bool flush_required;
1466 1467 1468 1469 1470 1471 1472

	/*
	 * If true then the PTE was dirty when unmapped. The entry must be
	 * flushed before IO is initiated or a stale TLB entry potentially
	 * allows an update without redirtying the page.
	 */
	bool writable;
1473 1474
};

L
Linus Torvalds 已提交
1475
struct task_struct {
1476 1477 1478 1479 1480 1481 1482
#ifdef CONFIG_THREAD_INFO_IN_TASK
	/*
	 * For reasons of header soup (see current_thread_info()), this
	 * must be the first element of task_struct.
	 */
	struct thread_info thread_info;
#endif
L
Linus Torvalds 已提交
1483
	volatile long state;	/* -1 unrunnable, 0 runnable, >0 stopped */
R
Roman Zippel 已提交
1484
	void *stack;
L
Linus Torvalds 已提交
1485
	atomic_t usage;
1486 1487
	unsigned int flags;	/* per process flags, defined below */
	unsigned int ptrace;
L
Linus Torvalds 已提交
1488

1489
#ifdef CONFIG_SMP
P
Peter Zijlstra 已提交
1490
	struct llist_node wake_entry;
P
Peter Zijlstra 已提交
1491
	int on_cpu;
1492 1493 1494
#ifdef CONFIG_THREAD_INFO_IN_TASK
	unsigned int cpu;	/* current CPU */
#endif
M
Mike Galbraith 已提交
1495
	unsigned int wakee_flips;
1496
	unsigned long wakee_flip_decay_ts;
M
Mike Galbraith 已提交
1497
	struct task_struct *last_wakee;
1498 1499

	int wake_cpu;
1500
#endif
P
Peter Zijlstra 已提交
1501
	int on_rq;
1502

1503
	int prio, static_prio, normal_prio;
1504
	unsigned int rt_priority;
1505
	const struct sched_class *sched_class;
I
Ingo Molnar 已提交
1506
	struct sched_entity se;
P
Peter Zijlstra 已提交
1507
	struct sched_rt_entity rt;
P
Peter Zijlstra 已提交
1508 1509 1510
#ifdef CONFIG_CGROUP_SCHED
	struct task_group *sched_task_group;
#endif
1511
	struct sched_dl_entity dl;
L
Linus Torvalds 已提交
1512

1513 1514 1515 1516 1517
#ifdef CONFIG_PREEMPT_NOTIFIERS
	/* list of struct preempt_notifier: */
	struct hlist_head preempt_notifiers;
#endif

1518
#ifdef CONFIG_BLK_DEV_IO_TRACE
1519
	unsigned int btrace_seq;
1520
#endif
L
Linus Torvalds 已提交
1521

1522
	unsigned int policy;
1523
	int nr_cpus_allowed;
L
Linus Torvalds 已提交
1524 1525
	cpumask_t cpus_allowed;

P
Paul E. McKenney 已提交
1526
#ifdef CONFIG_PREEMPT_RCU
P
Paul E. McKenney 已提交
1527
	int rcu_read_lock_nesting;
1528
	union rcu_special rcu_read_unlock_special;
1529
	struct list_head rcu_node_entry;
P
Paul E. McKenney 已提交
1530
	struct rcu_node *rcu_blocked_node;
1531
#endif /* #ifdef CONFIG_PREEMPT_RCU */
P
Paul E. McKenney 已提交
1532 1533 1534 1535
#ifdef CONFIG_TASKS_RCU
	unsigned long rcu_tasks_nvcsw;
	bool rcu_tasks_holdout;
	struct list_head rcu_tasks_holdout_list;
1536
	int rcu_tasks_idle_cpu;
P
Paul E. McKenney 已提交
1537
#endif /* #ifdef CONFIG_TASKS_RCU */
P
Paul E. McKenney 已提交
1538

1539
#ifdef CONFIG_SCHED_INFO
L
Linus Torvalds 已提交
1540 1541 1542 1543
	struct sched_info sched_info;
#endif

	struct list_head tasks;
1544
#ifdef CONFIG_SMP
1545
	struct plist_node pushable_tasks;
1546
	struct rb_node pushable_dl_tasks;
1547
#endif
L
Linus Torvalds 已提交
1548 1549

	struct mm_struct *mm, *active_mm;
1550 1551 1552 1553

	/* Per-thread vma caching: */
	struct vmacache vmacache;

1554 1555 1556
#if defined(SPLIT_RSS_COUNTING)
	struct task_rss_stat	rss_stat;
#endif
L
Linus Torvalds 已提交
1557
/* task state */
	int exit_state;
	int exit_code, exit_signal;
	int pdeath_signal;  /*  The signal sent when the parent dies  */
	unsigned long jobctl;	/* JOBCTL_*, siglock protected */

	/* Used for emulating ABI behavior of previous Linux versions */
	unsigned int personality;

	/* scheduler bits, serialized by scheduler locks */
	unsigned sched_reset_on_fork:1;
	unsigned sched_contributes_to_load:1;
	unsigned sched_migrated:1;
	unsigned sched_remote_wakeup:1;
	unsigned :0; /* force alignment to the next boundary */

	/* unserialized, strictly 'current' */
	unsigned in_execve:1; /* bit to tell LSMs we're in execve */
	unsigned in_iowait:1;
#if !defined(TIF_RESTORE_SIGMASK)
	unsigned restore_sigmask:1;
#endif
#ifdef CONFIG_MEMCG
	unsigned memcg_may_oom:1;
#ifndef CONFIG_SLOB
	unsigned memcg_kmem_skip_account:1;
#endif
#endif
#ifdef CONFIG_COMPAT_BRK
	unsigned brk_randomized:1;
#endif

	unsigned long atomic_flags; /* Flags needing atomic access. */

	struct restart_block restart_block;

	pid_t pid;
	pid_t tgid;

#ifdef CONFIG_CC_STACKPROTECTOR
	/* Canary value for the -fstack-protector gcc feature */
	unsigned long stack_canary;
#endif
	/*
	 * pointers to (original) parent process, youngest child, younger sibling,
	 * older sibling, respectively.  (p->father can be replaced with
	 * p->real_parent->pid)
	 */
	struct task_struct __rcu *real_parent; /* real parent process */
	struct task_struct __rcu *parent; /* recipient of SIGCHLD, wait4() reports */
	/*
	 * children/sibling forms the list of my natural children
	 */
	struct list_head children;	/* list of my children */
	struct list_head sibling;	/* linkage in my parent's children list */
	struct task_struct *group_leader;	/* threadgroup leader */

	/*
	 * ptraced is the list of tasks this task is using ptrace on.
	 * This includes both natural children and PTRACE_ATTACH targets.
	 * p->ptrace_entry is p's link on the p->parent->ptraced list.
	 */
	struct list_head ptraced;
	struct list_head ptrace_entry;

	/* PID/PID hash table linkage. */
	struct pid_link pids[PIDTYPE_MAX];
	struct list_head thread_group;
	struct list_head thread_node;

	struct completion *vfork_done;		/* for vfork() */
	int __user *set_child_tid;		/* CLONE_CHILD_SETTID */
	int __user *clear_child_tid;		/* CLONE_CHILD_CLEARTID */

	u64 utime, stime;
#ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
	u64 utimescaled, stimescaled;
#endif
	u64 gtime;
	struct prev_cputime prev_cputime;
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
	seqcount_t vtime_seqcount;
	unsigned long long vtime_snap;
	enum {
		/* Task is sleeping or running in a CPU with VTIME inactive */
		VTIME_INACTIVE = 0,
		/* Task runs in userspace in a CPU with VTIME active */
		VTIME_USER,
		/* Task runs in kernelspace in a CPU with VTIME active */
		VTIME_SYS,
	} vtime_snap_whence;
#endif

#ifdef CONFIG_NO_HZ_FULL
	atomic_t tick_dep_mask;
#endif
	unsigned long nvcsw, nivcsw; /* context switch counts */
	u64 start_time;		/* monotonic time in nsec */
	u64 real_start_time;	/* boot based time in nsec */
/* mm fault and swap info: this can arguably be seen as either mm-specific or thread-specific */
	unsigned long min_flt, maj_flt;

#ifdef CONFIG_POSIX_TIMERS
	struct task_cputime cputime_expires;
	struct list_head cpu_timers[3];
#endif

/* process credentials */
	const struct cred __rcu *ptracer_cred; /* Tracer's credentials at attach */
	const struct cred __rcu *real_cred; /* objective and real subjective task
					 * credentials (COW) */
	const struct cred __rcu *cred;	/* effective (overridable) subjective task
					 * credentials (COW) */
	char comm[TASK_COMM_LEN]; /* executable name excluding path
				     - access with [gs]et_task_comm (which lock
				       it with task_lock())
				     - initialized normally by setup_new_exec */
/* file system info */
	struct nameidata *nameidata;
#ifdef CONFIG_SYSVIPC
/* ipc stuff */
	struct sysv_sem sysvsem;
	struct sysv_shm sysvshm;
#endif
#ifdef CONFIG_DETECT_HUNG_TASK
/* hung task detection */
	unsigned long last_switch_count;
#endif
/* filesystem information */
	struct fs_struct *fs;
/* open file information */
	struct files_struct *files;
/* namespaces */
	struct nsproxy *nsproxy;
/* signal handlers */
	struct signal_struct *signal;
	struct sighand_struct *sighand;

	sigset_t blocked, real_blocked;
	sigset_t saved_sigmask;	/* restored if set_restore_sigmask() was used */
	struct sigpending pending;

	unsigned long sas_ss_sp;
	size_t sas_ss_size;
	unsigned sas_ss_flags;

	struct callback_head *task_works;

	struct audit_context *audit_context;
#ifdef CONFIG_AUDITSYSCALL
	kuid_t loginuid;
	unsigned int sessionid;
#endif
	struct seccomp seccomp;

/* Thread group tracking */
   	u32 parent_exec_id;
   	u32 self_exec_id;
/* Protection of (de-)allocation: mm, files, fs, tty, keyrings, mems_allowed,
 * mempolicy */
	spinlock_t alloc_lock;

	/* Protection of the PI data structures: */
	raw_spinlock_t pi_lock;

	struct wake_q_node wake_q;

#ifdef CONFIG_RT_MUTEXES
	/* PI waiters blocked on a rt_mutex held by this task */
	struct rb_root pi_waiters;
	struct rb_node *pi_waiters_leftmost;
	/* Deadlock detection and priority inheritance handling */
	struct rt_mutex_waiter *pi_blocked_on;
#endif

#ifdef CONFIG_DEBUG_MUTEXES
	/* mutex deadlock detection */
	struct mutex_waiter *blocked_on;
#endif
#ifdef CONFIG_TRACE_IRQFLAGS
	unsigned int irq_events;
	unsigned long hardirq_enable_ip;
	unsigned long hardirq_disable_ip;
	unsigned int hardirq_enable_event;
	unsigned int hardirq_disable_event;
	int hardirqs_enabled;
	int hardirq_context;
	unsigned long softirq_disable_ip;
	unsigned long softirq_enable_ip;
	unsigned int softirq_disable_event;
	unsigned int softirq_enable_event;
	int softirqs_enabled;
	int softirq_context;
#endif
#ifdef CONFIG_LOCKDEP
# define MAX_LOCK_DEPTH 48UL
	u64 curr_chain_key;
	int lockdep_depth;
	unsigned int lockdep_recursion;
	struct held_lock held_locks[MAX_LOCK_DEPTH];
	gfp_t lockdep_reclaim_gfp;
#endif
#ifdef CONFIG_UBSAN
	unsigned int in_ubsan;
#endif

/* journalling filesystem info */
	void *journal_info;

/* stacked block device info */
	struct bio_list *bio_list;

#ifdef CONFIG_BLOCK
/* stack plugging */
	struct blk_plug *plug;
#endif

/* VM state */
	struct reclaim_state *reclaim_state;

	struct backing_dev_info *backing_dev_info;

	struct io_context *io_context;

	unsigned long ptrace_message;
	siginfo_t *last_siginfo; /* For ptrace use.  */
	struct task_io_accounting ioac;
#if defined(CONFIG_TASK_XACCT)
	u64 acct_rss_mem1;	/* accumulated rss usage */
	u64 acct_vm_mem1;	/* accumulated virtual memory usage */
	u64 acct_timexpd;	/* stime + utime since last update */
#endif
#ifdef CONFIG_CPUSETS
	nodemask_t mems_allowed;	/* Protected by alloc_lock */
	seqcount_t mems_allowed_seq;	/* Sequence no to catch updates */
	int cpuset_mem_spread_rotor;
	int cpuset_slab_spread_rotor;
#endif
#ifdef CONFIG_CGROUPS
	/* Control Group info protected by css_set_lock */
	struct css_set __rcu *cgroups;
	/* cg_list protected by css_set_lock and tsk->alloc_lock */
	struct list_head cg_list;
#endif
#ifdef CONFIG_INTEL_RDT_A
	int closid;
#endif
#ifdef CONFIG_FUTEX
	struct robust_list_head __user *robust_list;
#ifdef CONFIG_COMPAT
	struct compat_robust_list_head __user *compat_robust_list;
#endif
	struct list_head pi_state_list;
	struct futex_pi_state *pi_state_cache;
#endif
#ifdef CONFIG_PERF_EVENTS
	struct perf_event_context *perf_event_ctxp[perf_nr_task_contexts];
	struct mutex perf_event_mutex;
	struct list_head perf_event_list;
#endif
#ifdef CONFIG_DEBUG_PREEMPT
	unsigned long preempt_disable_ip;
#endif
#ifdef CONFIG_NUMA
	struct mempolicy *mempolicy;	/* Protected by alloc_lock */
	short il_next;
	short pref_node_fork;
#endif
#ifdef CONFIG_NUMA_BALANCING
	int numa_scan_seq;
	unsigned int numa_scan_period;
	unsigned int numa_scan_period_max;
	int numa_preferred_nid;
	unsigned long numa_migrate_retry;
	u64 node_stamp;			/* migration stamp  */
	u64 last_task_numa_placement;
	u64 last_sum_exec_runtime;
	struct callback_head numa_work;

	struct list_head numa_entry;
	struct numa_group *numa_group;

	/*
	 * numa_faults is an array split into four regions:
	 * faults_memory, faults_cpu, faults_memory_buffer, faults_cpu_buffer
	 * in this precise order.
	 *
	 * faults_memory: Exponential decaying average of faults on a per-node
	 * basis. Scheduling placement decisions are made based on these
	 * counts. The values remain static for the duration of a PTE scan.
	 * faults_cpu: Track the nodes the process was running on when a NUMA
	 * hinting fault was incurred.
	 * faults_memory_buffer and faults_cpu_buffer: Record faults per node
	 * during the current scan window. When the scan completes, the counts
	 * in faults_memory and faults_cpu decay and these values are copied.
	 */
	unsigned long *numa_faults;
	unsigned long total_numa_faults;

	/*
	 * numa_faults_locality tracks if faults recorded during the last
	 * scan window were remote/local or failed to migrate. The task scan
	 * period is adapted based on the locality of the faults with different
	 * weights depending on whether they were shared or private faults
	 */
	unsigned long numa_faults_locality[3];

	unsigned long numa_pages_migrated;
#endif /* CONFIG_NUMA_BALANCING */

#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
	struct tlbflush_unmap_batch tlb_ubc;
#endif

	struct rcu_head rcu;

	/*
	 * cache last used pipe for splice
	 */
	struct pipe_inode_info *splice_pipe;

	struct page_frag task_frag;

#ifdef	CONFIG_TASK_DELAY_ACCT
	struct task_delay_info *delays;
#endif
#ifdef CONFIG_FAULT_INJECTION
	int make_it_fail;
#endif
	/*
	 * when (nr_dirtied >= nr_dirtied_pause), it's time to call
	 * balance_dirty_pages() for some dirty throttling pause
	 */
	int nr_dirtied;
	int nr_dirtied_pause;
	unsigned long dirty_paused_when; /* start of a write-and-pause period */

#ifdef CONFIG_LATENCYTOP
	int latency_record_count;
	struct latency_record latency_record[LT_SAVECOUNT];
#endif
	/*
	 * time slack values; these are used to round up poll() and
	 * select() etc timeout values. These are in nanoseconds.
	 */
	u64 timer_slack_ns;
	u64 default_timer_slack_ns;

#ifdef CONFIG_KASAN
	unsigned int kasan_depth;
#endif
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	/* Index of current stored address in ret_stack */
	int curr_ret_stack;
	/* Stack of return addresses for return function tracing */
	struct ftrace_ret_stack	*ret_stack;
	/* time stamp for last schedule */
	unsigned long long ftrace_timestamp;
	/*
	 * Number of functions that haven't been traced
	 * because of depth overrun.
	 */
	atomic_t trace_overrun;
	/* Pause for the tracing */
	atomic_t tracing_graph_pause;
#endif
#ifdef CONFIG_TRACING
	/* state flags for use by tracers */
	unsigned long trace;
	/* bitmask and counter of trace recursion */
	unsigned long trace_recursion;
#endif /* CONFIG_TRACING */
#ifdef CONFIG_KCOV
	/* Coverage collection mode enabled for this task (0 if disabled). */
	enum kcov_mode kcov_mode;
	/* Size of the kcov_area. */
	unsigned	kcov_size;
	/* Buffer for coverage collection. */
	void		*kcov_area;
	/* kcov descriptor wired with this task or NULL. */
	struct kcov	*kcov;
#endif
#ifdef CONFIG_MEMCG
	struct mem_cgroup *memcg_in_oom;
	gfp_t memcg_oom_gfp_mask;
	int memcg_oom_order;

	/* number of pages to reclaim on returning to userland */
	unsigned int memcg_nr_pages_over_high;
#endif
#ifdef CONFIG_UPROBES
	struct uprobe_task *utask;
#endif
#if defined(CONFIG_BCACHE) || defined(CONFIG_BCACHE_MODULE)
	unsigned int	sequential_io;
	unsigned int	sequential_io_avg;
#endif
#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
	unsigned long	task_state_change;
#endif
	int pagefault_disabled;
#ifdef CONFIG_MMU
	struct task_struct *oom_reaper_list;
#endif
#ifdef CONFIG_VMAP_STACK
	struct vm_struct *stack_vm_area;
#endif
#ifdef CONFIG_THREAD_INFO_IN_TASK
	/* A live task holds one reference. */
	atomic_t stack_refcount;
#endif
/* CPU-specific state of this task */
	struct thread_struct thread;
/*
 * WARNING: on x86, 'thread_struct' contains a variable-sized
 * structure.  It *MUST* be at the end of 'task_struct'.
 *
 * Do not put anything below here!
 */
};

#ifdef CONFIG_ARCH_WANTS_DYNAMIC_TASK_STRUCT
extern int arch_task_struct_size __read_mostly;
#else
# define arch_task_struct_size (sizeof(struct task_struct))
#endif

#ifdef CONFIG_VMAP_STACK
static inline struct vm_struct *task_stack_vm_area(const struct task_struct *t)
{
	return t->stack_vm_area;
}
#else
static inline struct vm_struct *task_stack_vm_area(const struct task_struct *t)
{
	return NULL;
}
#endif

#define TNF_MIGRATED	0x01
#define TNF_NO_GROUP	0x02
#define TNF_SHARED	0x04
#define TNF_FAULT_LOCAL	0x08
#define TNF_MIGRATE_FAIL 0x10

static inline bool in_vfork(struct task_struct *tsk)
{
	bool ret;

	/*
	 * need RCU to access ->real_parent if CLONE_VM was used along with
	 * CLONE_PARENT.
	 *
	 * We check real_parent->mm == tsk->mm because CLONE_VFORK does not
	 * imply CLONE_VM
	 *
	 * CLONE_VFORK can be used with CLONE_PARENT/CLONE_THREAD and thus
	 * ->real_parent is not necessarily the task doing vfork(), so in
	 * theory we can't rely on task_lock() if we want to dereference it.
	 *
	 * And in this case we can't trust the real_parent->mm == tsk->mm
	 * check, it can be false negative. But we do not care, if init or
	 * another oom-unkillable task does this it should blame itself.
	 */
	rcu_read_lock();
	ret = tsk->vfork_done && tsk->real_parent->mm == tsk->mm;
	rcu_read_unlock();

	return ret;
}

#ifdef CONFIG_NUMA_BALANCING
extern void task_numa_fault(int last_node, int node, int pages, int flags);
extern pid_t task_numa_group_id(struct task_struct *p);
extern void set_numabalancing_state(bool enabled);
extern void task_numa_free(struct task_struct *p);
extern bool should_numa_migrate_memory(struct task_struct *p, struct page *page,
					int src_nid, int dst_cpu);
#else
static inline void task_numa_fault(int last_node, int node, int pages,
				   int flags)
{
}
static inline pid_t task_numa_group_id(struct task_struct *p)
{
	return 0;
}
static inline void set_numabalancing_state(bool enabled)
{
}
static inline void task_numa_free(struct task_struct *p)
{
}
static inline bool should_numa_migrate_memory(struct task_struct *p,
				struct page *page, int src_nid, int dst_cpu)
{
	return true;
}
#endif

static inline struct pid *task_pid(struct task_struct *task)
{
	return task->pids[PIDTYPE_PID].pid;
}

static inline struct pid *task_tgid(struct task_struct *task)
{
	return task->group_leader->pids[PIDTYPE_PID].pid;
}

/*
 * Without tasklist or rcu lock it is not safe to dereference
 * the result of task_pgrp/task_session even if task == current,
 * we can race with another thread doing sys_setsid/sys_setpgid.
 */
static inline struct pid *task_pgrp(struct task_struct *task)
{
	return task->group_leader->pids[PIDTYPE_PGID].pid;
}

static inline struct pid *task_session(struct task_struct *task)
{
	return task->group_leader->pids[PIDTYPE_SID].pid;
}

struct pid_namespace;

/*
 * the helpers to get the task's different pids as they are seen
 * from various namespaces
 *
 * task_xid_nr()     : global id, i.e. the id seen from the init namespace;
 * task_xid_vnr()    : virtual id, i.e. the id seen from the pid namespace of
 *                     current.
 * task_xid_nr_ns()  : id seen from the ns specified;
 *
 * set_task_vxid()   : assigns a virtual id to a task;
 *
 * see also pid_nr() etc in include/linux/pid.h
 */
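/*
 * Illustrative sketch (not part of the original header): how the naming
 * scheme above maps onto concrete calls.  Both helpers are defined below;
 * the surrounding usage is hypothetical.
 *
 *	pid_t global = task_pid_nr(current);	// id as seen from init_pid_ns
 *	pid_t local  = task_pid_vnr(current);	// id as seen from current's pid ns
 *
 * For a task inside a nested pid namespace the two values usually differ;
 * in the root namespace they are the same.
 */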
pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type,
			struct pid_namespace *ns);

static inline pid_t task_pid_nr(struct task_struct *tsk)
{
	return tsk->pid;
}

static inline pid_t task_pid_nr_ns(struct task_struct *tsk,
					struct pid_namespace *ns)
{
	return __task_pid_nr_ns(tsk, PIDTYPE_PID, ns);
}

static inline pid_t task_pid_vnr(struct task_struct *tsk)
{
	return __task_pid_nr_ns(tsk, PIDTYPE_PID, NULL);
}


static inline pid_t task_tgid_nr(struct task_struct *tsk)
{
	return tsk->tgid;
}

pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns);

static inline pid_t task_tgid_vnr(struct task_struct *tsk)
{
	return pid_vnr(task_tgid(tsk));
}


static inline int pid_alive(const struct task_struct *p);
static inline pid_t task_ppid_nr_ns(const struct task_struct *tsk, struct pid_namespace *ns)
{
	pid_t pid = 0;

	rcu_read_lock();
	if (pid_alive(tsk))
		pid = task_tgid_nr_ns(rcu_dereference(tsk->real_parent), ns);
	rcu_read_unlock();

	return pid;
}

static inline pid_t task_ppid_nr(const struct task_struct *tsk)
{
	return task_ppid_nr_ns(tsk, &init_pid_ns);
}

static inline pid_t task_pgrp_nr_ns(struct task_struct *tsk,
					struct pid_namespace *ns)
{
	return __task_pid_nr_ns(tsk, PIDTYPE_PGID, ns);
}

static inline pid_t task_pgrp_vnr(struct task_struct *tsk)
{
	return __task_pid_nr_ns(tsk, PIDTYPE_PGID, NULL);
}


static inline pid_t task_session_nr_ns(struct task_struct *tsk,
					struct pid_namespace *ns)
{
	return __task_pid_nr_ns(tsk, PIDTYPE_SID, ns);
}

static inline pid_t task_session_vnr(struct task_struct *tsk)
{
	return __task_pid_nr_ns(tsk, PIDTYPE_SID, NULL);
}

/* obsolete, do not use */
static inline pid_t task_pgrp_nr(struct task_struct *tsk)
{
	return task_pgrp_nr_ns(tsk, &init_pid_ns);
}

/**
 * pid_alive - check that a task structure is not stale
 * @p: Task structure to be checked.
 *
 * Test if a process is not yet dead (at most zombie state)
 * If pid_alive fails, then pointers within the task structure
 * can be stale and must not be dereferenced.
 *
 * Return: 1 if the process is alive. 0 otherwise.
 */
static inline int pid_alive(const struct task_struct *p)
{
	return p->pids[PIDTYPE_PID].pid != NULL;
}

/**
 * is_global_init - check if a task structure is init. Since init
 * is free to have sub-threads we need to check tgid.
 * @tsk: Task structure to be checked.
 *
 * Check if a task structure is the first user space task the kernel created.
 *
 * Return: 1 if the task structure is init. 0 otherwise.
 */
static inline int is_global_init(struct task_struct *tsk)
{
	return task_tgid_nr(tsk) == 1;
}

extern struct pid *cad_pid;

extern void free_task(struct task_struct *tsk);
#define get_task_struct(tsk) do { atomic_inc(&(tsk)->usage); } while(0)

extern void __put_task_struct(struct task_struct *t);

static inline void put_task_struct(struct task_struct *t)
{
	if (atomic_dec_and_test(&t->usage))
		__put_task_struct(t);
}

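/*
 * Usage sketch (illustrative, not from the original source): the usual
 * reference pattern around get_task_struct()/put_task_struct().  The
 * helper name below is hypothetical.
 *
 *	get_task_struct(p);		// take a reference while p is used
 *	inspect_task(p);		// hypothetical work on the task
 *	put_task_struct(p);		// may free p via __put_task_struct()
 */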
struct task_struct *task_rcu_dereference(struct task_struct **ptask);
struct task_struct *try_get_task_struct(struct task_struct **ptask);

#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
extern void task_cputime(struct task_struct *t,
			 u64 *utime, u64 *stime);
extern u64 task_gtime(struct task_struct *t);
#else
static inline void task_cputime(struct task_struct *t,
				u64 *utime, u64 *stime)
{
	*utime = t->utime;
	*stime = t->stime;
}

static inline u64 task_gtime(struct task_struct *t)
{
	return t->gtime;
}
#endif

#ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
static inline void task_cputime_scaled(struct task_struct *t,
				       u64 *utimescaled,
				       u64 *stimescaled)
{
	*utimescaled = t->utimescaled;
	*stimescaled = t->stimescaled;
}
#else
static inline void task_cputime_scaled(struct task_struct *t,
				       u64 *utimescaled,
				       u64 *stimescaled)
{
	task_cputime(t, utimescaled, stimescaled);
}
#endif

extern void task_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st);
extern void thread_group_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st);

/*
 * Per process flags
 */
#define PF_IDLE		0x00000002	/* I am an IDLE thread */
#define PF_EXITING	0x00000004	/* getting shut down */
#define PF_EXITPIDONE	0x00000008	/* pi exit done on shut down */
#define PF_VCPU		0x00000010	/* I'm a virtual CPU */
#define PF_WQ_WORKER	0x00000020	/* I'm a workqueue worker */
#define PF_FORKNOEXEC	0x00000040	/* forked but didn't exec */
#define PF_MCE_PROCESS  0x00000080      /* process policy on mce errors */
#define PF_SUPERPRIV	0x00000100	/* used super-user privileges */
#define PF_DUMPCORE	0x00000200	/* dumped core */
#define PF_SIGNALED	0x00000400	/* killed by a signal */
#define PF_MEMALLOC	0x00000800	/* Allocating memory */
#define PF_NPROC_EXCEEDED 0x00001000	/* set_user noticed that RLIMIT_NPROC was exceeded */
#define PF_USED_MATH	0x00002000	/* if unset the fpu must be initialized before use */
#define PF_USED_ASYNC	0x00004000	/* used async_schedule*(), used by module init */
#define PF_NOFREEZE	0x00008000	/* this thread should not be frozen */
#define PF_FROZEN	0x00010000	/* frozen for system suspend */
#define PF_FSTRANS	0x00020000	/* inside a filesystem transaction */
#define PF_KSWAPD	0x00040000	/* I am kswapd */
#define PF_MEMALLOC_NOIO 0x00080000	/* Allocating memory without IO involved */
#define PF_LESS_THROTTLE 0x00100000	/* Throttle me less: I clean memory */
#define PF_KTHREAD	0x00200000	/* I am a kernel thread */
#define PF_RANDOMIZE	0x00400000	/* randomize virtual address space */
#define PF_SWAPWRITE	0x00800000	/* Allowed to write to swap */
#define PF_NO_SETAFFINITY 0x04000000	/* Userland is not allowed to meddle with cpus_allowed */
#define PF_MCE_EARLY    0x08000000      /* Early kill for mce process policy */
#define PF_MUTEX_TESTER	0x20000000	/* Thread belongs to the rt mutex tester */
#define PF_FREEZER_SKIP	0x40000000	/* Freezer should not count it as freezable */
#define PF_SUSPEND_TASK 0x80000000      /* this thread called freeze_processes and should not be frozen */

/*
 * Only the _current_ task can read/write to tsk->flags, but other
 * tasks can access tsk->flags in readonly mode for example
 * with tsk_used_math (like during threaded core dumping).
 * There is however an exception to this rule during ptrace
 * or during fork: the ptracer task is allowed to write to the
 * child->flags of its traced child (same goes for fork, the parent
 * can write to the child->flags), because we're guaranteed the
 * child is not running and in turn not changing child->flags
 * at the same time the parent does it.
 */
#define clear_stopped_child_used_math(child) do { (child)->flags &= ~PF_USED_MATH; } while (0)
#define set_stopped_child_used_math(child) do { (child)->flags |= PF_USED_MATH; } while (0)
#define clear_used_math() clear_stopped_child_used_math(current)
#define set_used_math() set_stopped_child_used_math(current)
#define conditional_stopped_child_used_math(condition, child) \
	do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= (condition) ? PF_USED_MATH : 0; } while (0)
#define conditional_used_math(condition) \
	conditional_stopped_child_used_math(condition, current)
#define copy_to_stopped_child_used_math(child) \
	do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= current->flags & PF_USED_MATH; } while (0)
/* NOTE: this will return 0 or PF_USED_MATH, it will never return 1 */
#define tsk_used_math(p) ((p)->flags & PF_USED_MATH)
#define used_math() tsk_used_math(current)

/* __GFP_IO isn't allowed if PF_MEMALLOC_NOIO is set in current->flags
 * __GFP_FS is also cleared as it implies __GFP_IO.
 */
static inline gfp_t memalloc_noio_flags(gfp_t flags)
{
	if (unlikely(current->flags & PF_MEMALLOC_NOIO))
		flags &= ~(__GFP_IO | __GFP_FS);
	return flags;
}

static inline unsigned int memalloc_noio_save(void)
{
	unsigned int flags = current->flags & PF_MEMALLOC_NOIO;
	current->flags |= PF_MEMALLOC_NOIO;
	return flags;
}

static inline void memalloc_noio_restore(unsigned int flags)
{
	current->flags = (current->flags & ~PF_MEMALLOC_NOIO) | flags;
}

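/*
 * Usage sketch (illustrative only): the save/restore pair brackets a
 * region that must not recurse into I/O, e.g. a reclaim path in a block
 * driver.  While PF_MEMALLOC_NOIO is set, allocations are masked through
 * memalloc_noio_flags() above, dropping __GFP_IO/__GFP_FS.
 *
 *	unsigned int noio_flags = memalloc_noio_save();
 *	buf = kmalloc(size, GFP_KERNEL);	// effectively GFP_NOIO here
 *	memalloc_noio_restore(noio_flags);
 */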
/* Per-process atomic flags. */
#define PFA_NO_NEW_PRIVS 0	/* May not gain new privileges. */
#define PFA_SPREAD_PAGE  1      /* Spread page cache over cpuset */
#define PFA_SPREAD_SLAB  2      /* Spread some slab caches over cpuset */
#define PFA_LMK_WAITING  3      /* Lowmemorykiller is waiting */


#define TASK_PFA_TEST(name, func)					\
	static inline bool task_##func(struct task_struct *p)		\
	{ return test_bit(PFA_##name, &p->atomic_flags); }
#define TASK_PFA_SET(name, func)					\
	static inline void task_set_##func(struct task_struct *p)	\
	{ set_bit(PFA_##name, &p->atomic_flags); }
#define TASK_PFA_CLEAR(name, func)					\
	static inline void task_clear_##func(struct task_struct *p)	\
	{ clear_bit(PFA_##name, &p->atomic_flags); }

TASK_PFA_TEST(NO_NEW_PRIVS, no_new_privs)
TASK_PFA_SET(NO_NEW_PRIVS, no_new_privs)

TASK_PFA_TEST(SPREAD_PAGE, spread_page)
TASK_PFA_SET(SPREAD_PAGE, spread_page)
TASK_PFA_CLEAR(SPREAD_PAGE, spread_page)

TASK_PFA_TEST(SPREAD_SLAB, spread_slab)
TASK_PFA_SET(SPREAD_SLAB, spread_slab)
TASK_PFA_CLEAR(SPREAD_SLAB, spread_slab)

TASK_PFA_TEST(LMK_WAITING, lmk_waiting)
TASK_PFA_SET(LMK_WAITING, lmk_waiting)

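/*
 * Illustrative note (not in the original header): each TASK_PFA_*()
 * invocation above expands to a small inline helper.  For example,
 * TASK_PFA_TEST(NO_NEW_PRIVS, no_new_privs) generates:
 *
 *	static inline bool task_no_new_privs(struct task_struct *p)
 *	{ return test_bit(PFA_NO_NEW_PRIVS, &p->atomic_flags); }
 *
 * so callers simply write task_no_new_privs(p) / task_set_no_new_privs(p).
 */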
/*
 * task->jobctl flags
 */
#define JOBCTL_STOP_SIGMASK	0xffff	/* signr of the last group stop */

#define JOBCTL_STOP_DEQUEUED_BIT 16	/* stop signal dequeued */
#define JOBCTL_STOP_PENDING_BIT	17	/* task should stop for group stop */
#define JOBCTL_STOP_CONSUME_BIT	18	/* consume group stop count */
#define JOBCTL_TRAP_STOP_BIT	19	/* trap for STOP */
#define JOBCTL_TRAP_NOTIFY_BIT	20	/* trap for NOTIFY */
#define JOBCTL_TRAPPING_BIT	21	/* switching to TRACED */
#define JOBCTL_LISTENING_BIT	22	/* ptracer is listening for events */

#define JOBCTL_STOP_DEQUEUED	(1UL << JOBCTL_STOP_DEQUEUED_BIT)
#define JOBCTL_STOP_PENDING	(1UL << JOBCTL_STOP_PENDING_BIT)
#define JOBCTL_STOP_CONSUME	(1UL << JOBCTL_STOP_CONSUME_BIT)
#define JOBCTL_TRAP_STOP	(1UL << JOBCTL_TRAP_STOP_BIT)
#define JOBCTL_TRAP_NOTIFY	(1UL << JOBCTL_TRAP_NOTIFY_BIT)
#define JOBCTL_TRAPPING		(1UL << JOBCTL_TRAPPING_BIT)
#define JOBCTL_LISTENING	(1UL << JOBCTL_LISTENING_BIT)

#define JOBCTL_TRAP_MASK	(JOBCTL_TRAP_STOP | JOBCTL_TRAP_NOTIFY)
#define JOBCTL_PENDING_MASK	(JOBCTL_STOP_PENDING | JOBCTL_TRAP_MASK)

extern bool task_set_jobctl_pending(struct task_struct *task,
				    unsigned long mask);
extern void task_clear_jobctl_trapping(struct task_struct *task);
extern void task_clear_jobctl_pending(struct task_struct *task,
				      unsigned long mask);

static inline void rcu_copy_process(struct task_struct *p)
{
#ifdef CONFIG_PREEMPT_RCU
	p->rcu_read_lock_nesting = 0;
	p->rcu_read_unlock_special.s = 0;
	p->rcu_blocked_node = NULL;
	INIT_LIST_HEAD(&p->rcu_node_entry);
#endif /* #ifdef CONFIG_PREEMPT_RCU */
#ifdef CONFIG_TASKS_RCU
	p->rcu_tasks_holdout = false;
	INIT_LIST_HEAD(&p->rcu_tasks_holdout_list);
	p->rcu_tasks_idle_cpu = -1;
#endif /* #ifdef CONFIG_TASKS_RCU */
}

static inline void tsk_restore_flags(struct task_struct *task,
				unsigned long orig_flags, unsigned long flags)
{
	task->flags &= ~flags;
	task->flags |= orig_flags & flags;
}

extern int cpuset_cpumask_can_shrink(const struct cpumask *cur,
				     const struct cpumask *trial);
extern int task_can_attach(struct task_struct *p,
			   const struct cpumask *cs_cpus_allowed);
#ifdef CONFIG_SMP
extern void do_set_cpus_allowed(struct task_struct *p,
			       const struct cpumask *new_mask);

extern int set_cpus_allowed_ptr(struct task_struct *p,
				const struct cpumask *new_mask);
#else
static inline void do_set_cpus_allowed(struct task_struct *p,
				      const struct cpumask *new_mask)
{
}
static inline int set_cpus_allowed_ptr(struct task_struct *p,
				       const struct cpumask *new_mask)
{
	if (!cpumask_test_cpu(0, new_mask))
		return -EINVAL;
	return 0;
}
#endif

#ifdef CONFIG_NO_HZ_COMMON
void calc_load_enter_idle(void);
void calc_load_exit_idle(void);
#else
static inline void calc_load_enter_idle(void) { }
static inline void calc_load_exit_idle(void) { }
#endif /* CONFIG_NO_HZ_COMMON */

#ifndef cpu_relax_yield
#define cpu_relax_yield() cpu_relax()
#endif

/*
 * Do not use outside of architecture code which knows its limitations.
 *
 * sched_clock() has no promise of monotonicity or bounded drift between
 * CPUs, use (which you should not) requires disabling IRQs.
 *
 * Please use one of the three interfaces below.
 */
extern unsigned long long notrace sched_clock(void);
/*
 * See the comment in kernel/sched/clock.c
 */
extern u64 running_clock(void);
extern u64 sched_clock_cpu(int cpu);


extern void sched_clock_init(void);

#ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
static inline void sched_clock_init_late(void)
{
}

static inline void sched_clock_tick(void)
{
}

static inline void clear_sched_clock_stable(void)
{
}

static inline void sched_clock_idle_sleep_event(void)
{
}

static inline void sched_clock_idle_wakeup_event(u64 delta_ns)
{
}

static inline u64 cpu_clock(int cpu)
{
	return sched_clock();
}

static inline u64 local_clock(void)
{
	return sched_clock();
}
#else
extern void sched_clock_init_late(void);
/*
 * Architectures can set this to 1 if they have specified
 * CONFIG_HAVE_UNSTABLE_SCHED_CLOCK in their arch Kconfig,
 * but then during bootup it turns out that sched_clock()
 * is reliable after all:
 */
extern int sched_clock_stable(void);
extern void clear_sched_clock_stable(void);

extern void sched_clock_tick(void);
extern void sched_clock_idle_sleep_event(void);
extern void sched_clock_idle_wakeup_event(u64 delta_ns);

/*
 * As outlined in clock.c, provides a fast, high resolution, nanosecond
 * time source that is monotonic per cpu argument and has bounded drift
 * between cpus.
 *
 * ######################### BIG FAT WARNING ##########################
 * # when comparing cpu_clock(i) to cpu_clock(j) for i != j, time can #
 * # go backwards !!                                                  #
 * ####################################################################
 */
static inline u64 cpu_clock(int cpu)
{
	return sched_clock_cpu(cpu);
}

static inline u64 local_clock(void)
{
	return sched_clock_cpu(raw_smp_processor_id());
}
#endif

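/*
 * Usage sketch (illustrative, not part of the original file): a cheap
 * duration measurement with local_clock().  Values are in nanoseconds;
 * per the warning above they are only safely comparable when both
 * samples come from the same CPU.
 *
 *	u64 t0 = local_clock();
 *	do_work();				// hypothetical
 *	pr_debug("took %llu ns\n", local_clock() - t0);
 */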
#ifdef CONFIG_IRQ_TIME_ACCOUNTING
/*
 * An i/f to runtime opt-in for irq time accounting based off of sched_clock.
 * The reason for this explicit opt-in is not to have perf penalty with
 * slow sched_clocks.
 */
extern void enable_sched_clock_irqtime(void);
extern void disable_sched_clock_irqtime(void);
#else
static inline void enable_sched_clock_irqtime(void) {}
static inline void disable_sched_clock_irqtime(void) {}
#endif

extern unsigned long long
task_sched_runtime(struct task_struct *task);

/* sched_exec is called by processes performing an exec */
#ifdef CONFIG_SMP
extern void sched_exec(void);
#else
#define sched_exec()   {}
#endif

extern void sched_clock_idle_sleep_event(void);
extern void sched_clock_idle_wakeup_event(u64 delta_ns);

#ifdef CONFIG_HOTPLUG_CPU
extern void idle_task_exit(void);
#else
static inline void idle_task_exit(void) {}
#endif

#if defined(CONFIG_NO_HZ_COMMON) && defined(CONFIG_SMP)
extern void wake_up_nohz_cpu(int cpu);
#else
static inline void wake_up_nohz_cpu(int cpu) { }
#endif

#ifdef CONFIG_NO_HZ_FULL
extern u64 scheduler_tick_max_deferment(void);
#endif

#ifdef CONFIG_SCHED_AUTOGROUP
extern void sched_autogroup_create_attach(struct task_struct *p);
extern void sched_autogroup_detach(struct task_struct *p);
extern void sched_autogroup_fork(struct signal_struct *sig);
extern void sched_autogroup_exit(struct signal_struct *sig);
extern void sched_autogroup_exit_task(struct task_struct *p);
#ifdef CONFIG_PROC_FS
extern void proc_sched_autogroup_show_task(struct task_struct *p, struct seq_file *m);
extern int proc_sched_autogroup_set_nice(struct task_struct *p, int nice);
#endif
#else
static inline void sched_autogroup_create_attach(struct task_struct *p) { }
static inline void sched_autogroup_detach(struct task_struct *p) { }
static inline void sched_autogroup_fork(struct signal_struct *sig) { }
static inline void sched_autogroup_exit(struct signal_struct *sig) { }
static inline void sched_autogroup_exit_task(struct task_struct *p) { }
#endif

extern int yield_to(struct task_struct *p, bool preempt);
extern void set_user_nice(struct task_struct *p, long nice);
extern int task_prio(const struct task_struct *p);
/**
 * task_nice - return the nice value of a given task.
 * @p: the task in question.
 *
 * Return: The nice value [ -20 ... 0 ... 19 ].
 */
static inline int task_nice(const struct task_struct *p)
{
	return PRIO_TO_NICE((p)->static_prio);
}
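/*
 * Worked example (illustrative, not in the original header): static_prio
 * encodes the nice value with a fixed offset, so PRIO_TO_NICE() maps the
 * default static priority of 120 to nice 0, 100 to -20 and 139 to +19.
 */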
2615 2616
extern int can_nice(const struct task_struct *p, const int nice);
extern int task_curr(const struct task_struct *p);
L
Linus Torvalds 已提交
2617
extern int idle_cpu(int cpu);
2618 2619
extern int sched_setscheduler(struct task_struct *, int,
			      const struct sched_param *);
2620
extern int sched_setscheduler_nocheck(struct task_struct *, int,
2621
				      const struct sched_param *);
2622 2623
extern int sched_setattr(struct task_struct *,
			 const struct sched_attr *);
2624
extern struct task_struct *idle_task(int cpu);
2625 2626
/**
 * is_idle_task - is the specified task an idle task?
2627
 * @p: the task in question.
2628 2629
 *
 * Return: 1 if @p is an idle task. 0 otherwise.
2630
 */
2631
static inline bool is_idle_task(const struct task_struct *p)
2632
{
2633
	return !!(p->flags & PF_IDLE);
2634
}
2635
extern struct task_struct *curr_task(int cpu);
2636
extern void ia64_set_curr_task(int cpu, struct task_struct *p);
L
Linus Torvalds 已提交
2637 2638 2639 2640

void yield(void);

union thread_union {
#ifndef CONFIG_THREAD_INFO_IN_TASK
	struct thread_info thread_info;
#endif
	unsigned long stack[THREAD_SIZE/sizeof(long)];
};

#ifndef __HAVE_ARCH_KSTACK_END
static inline int kstack_end(void *addr)
{
	/* Reliable end of stack detection:
	 * Some APM bios versions misalign the stack
	 */
	return !(((unsigned long)addr+sizeof(void*)-1) & (THREAD_SIZE-sizeof(void*)));
}
#endif

extern union thread_union init_thread_union;
extern struct task_struct init_task;

extern struct   mm_struct init_mm;

extern struct pid_namespace init_pid_ns;

/*
 * find a task by one of its numerical ids
 *
 * find_task_by_pid_ns():
 *      finds a task by its pid in the specified namespace
 * find_task_by_vpid():
 *      finds a task by its virtual pid
 *
 * see also find_vpid() etc in include/linux/pid.h
 */

extern struct task_struct *find_task_by_vpid(pid_t nr);
extern struct task_struct *find_task_by_pid_ns(pid_t nr,
		struct pid_namespace *ns);

/* per-UID process charging. */
extern struct user_struct * alloc_uid(kuid_t);
static inline struct user_struct *get_uid(struct user_struct *u)
{
	atomic_inc(&u->__count);
	return u;
}
extern void free_uid(struct user_struct *);

#include <asm/current.h>

extern void xtime_update(unsigned long ticks);

extern int wake_up_state(struct task_struct *tsk, unsigned int state);
extern int wake_up_process(struct task_struct *tsk);
extern void wake_up_new_task(struct task_struct *tsk);
#ifdef CONFIG_SMP
 extern void kick_process(struct task_struct *tsk);
#else
 static inline void kick_process(struct task_struct *tsk) { }
#endif
extern int sched_fork(unsigned long clone_flags, struct task_struct *p);
extern void sched_dead(struct task_struct *p);

extern void proc_caches_init(void);
extern void flush_signals(struct task_struct *);
extern void ignore_signals(struct task_struct *);
extern void flush_signal_handlers(struct task_struct *, int force_default);
extern int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info);

static inline int kernel_dequeue_signal(siginfo_t *info)
{
	struct task_struct *tsk = current;
	siginfo_t __info;
	int ret;

	spin_lock_irq(&tsk->sighand->siglock);
	ret = dequeue_signal(tsk, &tsk->blocked, info ?: &__info);
	spin_unlock_irq(&tsk->sighand->siglock);

	return ret;
}

static inline void kernel_signal_stop(void)
{
	spin_lock_irq(&current->sighand->siglock);
	if (current->jobctl & JOBCTL_STOP_DEQUEUED)
		__set_current_state(TASK_STOPPED);
	spin_unlock_irq(&current->sighand->siglock);

	schedule();
}

extern void release_task(struct task_struct * p);
extern int send_sig_info(int, struct siginfo *, struct task_struct *);
extern int force_sigsegv(int, struct task_struct *);
extern int force_sig_info(int, struct siginfo *, struct task_struct *);
extern int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp);
extern int kill_pid_info(int sig, struct siginfo *info, struct pid *pid);
extern int kill_pid_info_as_cred(int, struct siginfo *, struct pid *,
				const struct cred *, u32);
extern int kill_pgrp(struct pid *pid, int sig, int priv);
extern int kill_pid(struct pid *pid, int sig, int priv);
extern int kill_proc_info(int, struct siginfo *, pid_t);
extern __must_check bool do_notify_parent(struct task_struct *, int);
extern void __wake_up_parent(struct task_struct *p, struct task_struct *parent);
extern void force_sig(int, struct task_struct *);
extern int send_sig(int, struct task_struct *, int);
extern int zap_other_threads(struct task_struct *p);
extern struct sigqueue *sigqueue_alloc(void);
extern void sigqueue_free(struct sigqueue *);
extern int send_sigqueue(struct sigqueue *,  struct task_struct *, int group);
extern int do_sigaction(int, struct k_sigaction *, struct k_sigaction *);

#ifdef TIF_RESTORE_SIGMASK
/*
 * Legacy restore_sigmask accessors.  These are inefficient on
 * SMP architectures because they require atomic operations.
 */

/**
 * set_restore_sigmask() - make sure saved_sigmask processing gets done
 *
 * This sets TIF_RESTORE_SIGMASK and ensures that the arch signal code
 * will run before returning to user mode, to process the flag.  For
 * all callers, TIF_SIGPENDING is already set or it's no harm to set
 * it.  TIF_RESTORE_SIGMASK need not be in the set of bits that the
 * arch code will notice on return to user mode, in case those bits
 * are scarce.  We set TIF_SIGPENDING here to ensure that the arch
 * signal code always gets run when TIF_RESTORE_SIGMASK is set.
 */
static inline void set_restore_sigmask(void)
{
	set_thread_flag(TIF_RESTORE_SIGMASK);
	WARN_ON(!test_thread_flag(TIF_SIGPENDING));
}
static inline void clear_restore_sigmask(void)
{
	clear_thread_flag(TIF_RESTORE_SIGMASK);
}
static inline bool test_restore_sigmask(void)
{
	return test_thread_flag(TIF_RESTORE_SIGMASK);
}
static inline bool test_and_clear_restore_sigmask(void)
{
	return test_and_clear_thread_flag(TIF_RESTORE_SIGMASK);
}

#else	/* TIF_RESTORE_SIGMASK */

/* Higher-quality implementation, used if TIF_RESTORE_SIGMASK doesn't exist. */
static inline void set_restore_sigmask(void)
{
	current->restore_sigmask = true;
	WARN_ON(!test_thread_flag(TIF_SIGPENDING));
}
static inline void clear_restore_sigmask(void)
{
	current->restore_sigmask = false;
}
static inline bool test_restore_sigmask(void)
{
	return current->restore_sigmask;
}
static inline bool test_and_clear_restore_sigmask(void)
{
	if (!current->restore_sigmask)
		return false;
	current->restore_sigmask = false;
	return true;
}
#endif

static inline void restore_saved_sigmask(void)
{
	if (test_and_clear_restore_sigmask())
		__set_current_blocked(&current->saved_sigmask);
}

static inline sigset_t *sigmask_to_save(void)
{
	sigset_t *res = &current->blocked;
	if (unlikely(test_restore_sigmask()))
		res = &current->saved_sigmask;
	return res;
}

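/*
 * Usage sketch (illustrative, simplified): architecture signal code
 * typically fetches the set to record with sigmask_to_save() when setting
 * up a handler frame, and calls restore_saved_sigmask() on the return
 * path when no handler ends up running:
 *
 *	sigset_t *oldset = sigmask_to_save();
 *	...arch-specific code stores *oldset in the signal frame...
 *	// and, when no signal was delivered:
 *	restore_saved_sigmask();
 */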
static inline int kill_cad_pid(int sig, int priv)
{
	return kill_pid(cad_pid, sig, priv);
}

/* These can be the second arg to send_sig_info/send_group_sig_info.  */
#define SEND_SIG_NOINFO ((struct siginfo *) 0)
#define SEND_SIG_PRIV	((struct siginfo *) 1)
#define SEND_SIG_FORCED	((struct siginfo *) 2)

/*
 * True if we are on the alternate signal stack.
 */
static inline int on_sig_stack(unsigned long sp)
{
	/*
	 * If the signal stack is SS_AUTODISARM then, by construction, we
	 * can't be on the signal stack unless user code deliberately set
	 * SS_AUTODISARM when we were already on it.
	 *
	 * This improves reliability: if user state gets corrupted such that
	 * the stack pointer points very close to the end of the signal stack,
	 * then this check will enable the signal to be handled anyway.
	 */
	if (current->sas_ss_flags & SS_AUTODISARM)
		return 0;

#ifdef CONFIG_STACK_GROWSUP
	return sp >= current->sas_ss_sp &&
		sp - current->sas_ss_sp < current->sas_ss_size;
#else
	return sp > current->sas_ss_sp &&
		sp - current->sas_ss_sp <= current->sas_ss_size;
#endif
}

static inline int sas_ss_flags(unsigned long sp)
{
	if (!current->sas_ss_size)
		return SS_DISABLE;

	return on_sig_stack(sp) ? SS_ONSTACK : 0;
}

static inline void sas_ss_reset(struct task_struct *p)
{
	p->sas_ss_sp = 0;
	p->sas_ss_size = 0;
	p->sas_ss_flags = SS_DISABLE;
}

static inline unsigned long sigsp(unsigned long sp, struct ksignal *ksig)
{
	if (unlikely((ksig->ka.sa.sa_flags & SA_ONSTACK)) && ! sas_ss_flags(sp))
#ifdef CONFIG_STACK_GROWSUP
		return current->sas_ss_sp;
#else
		return current->sas_ss_sp + current->sas_ss_size;
#endif
	return sp;
}

/*
 * Routines for handling mm_structs
 */
extern struct mm_struct * mm_alloc(void);

/**
 * mmgrab() - Pin a &struct mm_struct.
 * @mm: The &struct mm_struct to pin.
 *
 * Make sure that @mm will not get freed even after the owning task
 * exits. This doesn't guarantee that the associated address space
 * will still exist later on and mmget_not_zero() has to be used before
 * accessing it.
 *
 * This is a preferred way to to pin @mm for a longer/unbounded amount
 * of time.
 *
 * Use mmdrop() to release the reference acquired by mmgrab().
 *
 * See also <Documentation/vm/active_mm.txt> for an in-depth explanation
 * of &mm_struct.mm_count vs &mm_struct.mm_users.
 */
static inline void mmgrab(struct mm_struct *mm)
{
	atomic_inc(&mm->mm_count);
}

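/*
 * Usage sketch (illustrative): pairing mmgrab() with mmdrop(), which is
 * defined just below.  This keeps the mm_struct itself alive without
 * pinning the address space contents.
 *
 *	mmgrab(mm);			// take an mm_count reference
 *	...use mm fields that do not require the page tables...
 *	mmdrop(mm);
 */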
/* mmdrop drops the mm and the page tables */
extern void __mmdrop(struct mm_struct *);
static inline void mmdrop(struct mm_struct *mm)
{
	if (unlikely(atomic_dec_and_test(&mm->mm_count)))
		__mmdrop(mm);
}

static inline void mmdrop_async_fn(struct work_struct *work)
{
	struct mm_struct *mm = container_of(work, struct mm_struct, async_put_work);
	__mmdrop(mm);
}

static inline void mmdrop_async(struct mm_struct *mm)
{
	if (unlikely(atomic_dec_and_test(&mm->mm_count))) {
		INIT_WORK(&mm->async_put_work, mmdrop_async_fn);
		schedule_work(&mm->async_put_work);
	}
}

/**
 * mmget() - Pin the address space associated with a &struct mm_struct.
 * @mm: The address space to pin.
 *
 * Make sure that the address space of the given &struct mm_struct doesn't
 * go away. This does not protect against parts of the address space being
 * modified or freed, however.
 *
 * Never use this function to pin this address space for an
 * unbounded/indefinite amount of time.
 *
 * Use mmput() to release the reference acquired by mmget().
 *
 * See also <Documentation/vm/active_mm.txt> for an in-depth explanation
 * of &mm_struct.mm_count vs &mm_struct.mm_users.
 */
static inline void mmget(struct mm_struct *mm)
{
	atomic_inc(&mm->mm_users);
}

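/*
 * Usage sketch (illustrative): a short-lived pin of the address space.
 * mmget_not_zero(), defined just below, is the variant to use when the
 * mm might already be on its way out.
 *
 *	if (mmget_not_zero(mm)) {
 *		...access the address space...
 *		mmput(mm);
 *	}
 */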
static inline bool mmget_not_zero(struct mm_struct *mm)
{
	return atomic_inc_not_zero(&mm->mm_users);
}

/* mmput gets rid of the mappings and all user-space */
extern void mmput(struct mm_struct *);
#ifdef CONFIG_MMU
/* same as above but performs the slow path from the async context. Can
 * be called from the atomic context as well
 */
extern void mmput_async(struct mm_struct *);
#endif

/* Grab a reference to a task's mm, if it is not already going away */
extern struct mm_struct *get_task_mm(struct task_struct *task);
/*
 * Grab a reference to a task's mm, if it is not already going away
 * and ptrace_may_access with the mode parameter passed to it
 * succeeds.
 */
extern struct mm_struct *mm_access(struct task_struct *task, unsigned int mode);
/* Remove the current tasks stale references to the old mm_struct */
extern void mm_release(struct task_struct *, struct mm_struct *);

#ifdef CONFIG_HAVE_COPY_THREAD_TLS
extern int copy_thread_tls(unsigned long, unsigned long, unsigned long,
			struct task_struct *, unsigned long);
#else
extern int copy_thread(unsigned long, unsigned long, unsigned long,
			struct task_struct *);

/* Architectures that haven't opted into copy_thread_tls get the tls argument
 * via pt_regs, so ignore the tls argument passed via C. */
static inline int copy_thread_tls(
		unsigned long clone_flags, unsigned long sp, unsigned long arg,
		struct task_struct *p, unsigned long tls)
{
	return copy_thread(clone_flags, sp, arg, p);
}
#endif
extern void flush_thread(void);

#ifdef CONFIG_HAVE_EXIT_THREAD
extern void exit_thread(struct task_struct *tsk);
#else
static inline void exit_thread(struct task_struct *tsk)
{
}
#endif

extern void exit_files(struct task_struct *);
extern void __cleanup_sighand(struct sighand_struct *);

extern void exit_itimers(struct signal_struct *);
extern void flush_itimer_signals(void);

extern void do_group_exit(int);

extern int do_execve(struct filename *,
		     const char __user * const __user *,
		     const char __user * const __user *);
extern int do_execveat(int, struct filename *,
		       const char __user * const __user *,
		       const char __user * const __user *,
		       int);
extern long _do_fork(unsigned long, unsigned long, unsigned long, int __user *, int __user *, unsigned long);
extern long do_fork(unsigned long, unsigned long, unsigned long, int __user *, int __user *);
struct task_struct *fork_idle(int);
extern pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);

extern void __set_task_comm(struct task_struct *tsk, const char *from, bool exec);
static inline void set_task_comm(struct task_struct *tsk, const char *from)
{
	__set_task_comm(tsk, from, false);
}
extern char *get_task_comm(char *to, struct task_struct *tsk);

#ifdef CONFIG_SMP
void scheduler_ipi(void);
extern unsigned long wait_task_inactive(struct task_struct *, long match_state);
#else
static inline void scheduler_ipi(void) { }
static inline unsigned long wait_task_inactive(struct task_struct *p,
					       long match_state)
{
	return 1;
}
#endif

#define tasklist_empty() \
	list_empty(&init_task.tasks)

#define next_task(p) \
	list_entry_rcu((p)->tasks.next, struct task_struct, tasks)

#define for_each_process(p) \
	for (p = &init_task ; (p = next_task(p)) != &init_task ; )

extern bool current_is_single_threaded(void);

/*
 * Careful: do_each_thread/while_each_thread is a double loop so
 *          'break' will not work as expected - use goto instead.
 */
#define do_each_thread(g, t) \
	for (g = t = &init_task ; (g = t = next_task(g)) != &init_task ; ) do

#define while_each_thread(g, t) \
	while ((t = next_thread(t)) != g)

#define __for_each_thread(signal, t)	\
	list_for_each_entry_rcu(t, &(signal)->thread_head, thread_node)

#define for_each_thread(p, t)		\
	__for_each_thread((p)->signal, t)

/* Careful: this is a double loop, 'break' won't work as expected. */
#define for_each_process_thread(p, t)	\
	for_each_process(p) for_each_thread(p, t)

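/*
 * Usage sketch (illustrative): walking every thread in the system with the
 * macro above.  The task lists are RCU-protected, so hold rcu_read_lock()
 * (or tasklist_lock) around the walk; p and t are struct task_struct
 * pointers and pr_info() is just a stand-in action.
 *
 *	rcu_read_lock();
 *	for_each_process_thread(p, t)
 *		pr_info("%s pid=%d\n", t->comm, task_pid_nr(t));
 *	rcu_read_unlock();
 */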
typedef int (*proc_visitor)(struct task_struct *p, void *data);
void walk_process_tree(struct task_struct *top, proc_visitor, void *);

static inline int get_nr_threads(struct task_struct *tsk)
{
	return tsk->signal->nr_threads;
}

static inline bool thread_group_leader(struct task_struct *p)
{
	return p->exit_signal >= 0;
}

/* Due to the insanities of de_thread it is possible for a process
 * to have the pid of the thread group leader without actually being
 * the thread group leader.  For iteration through the pids in proc
 * all we care about is that we have a task with the appropriate
 * pid, we don't actually care if we have the right task.
 */
static inline bool has_group_leader_pid(struct task_struct *p)
{
	return task_pid(p) == p->signal->leader_pid;
}

static inline
bool same_thread_group(struct task_struct *p1, struct task_struct *p2)
{
	return p1->signal == p2->signal;
}

3110
static inline struct task_struct *next_thread(const struct task_struct *p)
O
Oleg Nesterov 已提交
3111
{
3112 3113
	return list_entry_rcu(p->thread_group.next,
			      struct task_struct, thread_group);
O
Oleg Nesterov 已提交
3114 3115
}

A
Alexey Dobriyan 已提交
3116
static inline int thread_group_empty(struct task_struct *p)
L
Linus Torvalds 已提交
3117
{
O
Oleg Nesterov 已提交
3118
	return list_empty(&p->thread_group);
L
Linus Torvalds 已提交
3119 3120 3121 3122 3123 3124
}

#define delay_group_leader(p) \
		(thread_group_leader(p) && !thread_group_empty(p))

/*
3125
 * Protects ->fs, ->files, ->mm, ->group_info, ->comm, keyring
3126
 * subscriptions and synchronises with wait4().  Also used in procfs.  Also
3127
 * pins the final release of task.io_context.  Also protects ->cpuset and
O
Oleg Nesterov 已提交
3128
 * ->cgroup.subsys[]. And ->vfork_done.
L
Linus Torvalds 已提交
3129 3130 3131 3132 3133 3134 3135 3136 3137 3138 3139 3140 3141 3142 3143
 *
 * Nests both inside and outside of read_lock(&tasklist_lock).
 * It must not be nested with write_lock_irq(&tasklist_lock),
 * neither inside nor outside.
 */
static inline void task_lock(struct task_struct *p)
{
	spin_lock(&p->alloc_lock);
}

static inline void task_unlock(struct task_struct *p)
{
	spin_unlock(&p->alloc_lock);
}
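
/*
 * Sketch of the locking pattern described above (hypothetical helper):
 * task_lock() keeps ->mm, ->fs, ->files, ->comm etc. of another task
 * stable while they are inspected.
 *
 *	static void report_comm(struct task_struct *p)
 *	{
 *		task_lock(p);
 *		pr_info("%d: %s\n", task_pid_nr(p), p->comm);
 *		task_unlock(p);
 *	}
 */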

extern struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
							unsigned long *flags);

static inline struct sighand_struct *lock_task_sighand(struct task_struct *tsk,
						       unsigned long *flags)
{
	struct sighand_struct *ret;

	ret = __lock_task_sighand(tsk, flags);
	(void)__cond_lock(&tsk->sighand->siglock, ret);
	return ret;
}

static inline void unlock_task_sighand(struct task_struct *tsk,
						unsigned long *flags)
{
	spin_unlock_irqrestore(&tsk->sighand->siglock, *flags);
}
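
/*
 * Typical usage, as a sketch ('p' is a caller-supplied task):
 * lock_task_sighand() returns NULL if the task has already released its
 * sighand, so the result must be checked before touching siglock state.
 *
 *	unsigned long flags;
 *	struct sighand_struct *sighand;
 *
 *	sighand = lock_task_sighand(p, &flags);
 *	if (sighand) {
 *		// p->signal and pending signals are stable here
 *		unlock_task_sighand(p, &flags);
 *	}
 */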

#ifdef CONFIG_THREAD_INFO_IN_TASK

static inline struct thread_info *task_thread_info(struct task_struct *task)
{
	return &task->thread_info;
}

/*
 * When accessing the stack of a non-current task that might exit, use
 * try_get_task_stack() instead.  task_stack_page will return a pointer
 * that could get freed out from under you.
 */
static inline void *task_stack_page(const struct task_struct *task)
{
	return task->stack;
}

#define setup_thread_stack(new,old)	do { } while(0)

static inline unsigned long *end_of_stack(const struct task_struct *task)
{
	return task->stack;
}

#elif !defined(__HAVE_THREAD_FUNCTIONS)

#define task_thread_info(task)	((struct thread_info *)(task)->stack)
#define task_stack_page(task)	((void *)(task)->stack)

static inline void setup_thread_stack(struct task_struct *p, struct task_struct *org)
{
	*task_thread_info(p) = *task_thread_info(org);
	task_thread_info(p)->task = p;
}

/*
 * Return the address of the last usable long on the stack.
 *
 * When the stack grows down, this is just above the thread
 * info struct. Going any lower will corrupt the threadinfo.
 *
 * When the stack grows up, this is the highest address.
 * Beyond that position, we corrupt data on the next page.
 */
static inline unsigned long *end_of_stack(struct task_struct *p)
{
#ifdef CONFIG_STACK_GROWSUP
	return (unsigned long *)((unsigned long)task_thread_info(p) + THREAD_SIZE) - 1;
#else
	return (unsigned long *)(task_thread_info(p) + 1);
#endif
}

#endif

#ifdef CONFIG_THREAD_INFO_IN_TASK
static inline void *try_get_task_stack(struct task_struct *tsk)
{
	return atomic_inc_not_zero(&tsk->stack_refcount) ?
		task_stack_page(tsk) : NULL;
}

extern void put_task_stack(struct task_struct *tsk);
#else
static inline void *try_get_task_stack(struct task_struct *tsk)
{
	return task_stack_page(tsk);
}

static inline void put_task_stack(struct task_struct *tsk) {}
#endif
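
/*
 * Sketch of the pinning pattern described above ('p' is a possibly
 * exiting task supplied by the caller):
 *
 *	void *stack = try_get_task_stack(p);
 *
 *	if (stack) {
 *		// the stack pages cannot be freed until put_task_stack()
 *		put_task_stack(p);
 *	}
 */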

#define task_stack_end_corrupted(task) \
		(*(end_of_stack(task)) != STACK_END_MAGIC)

static inline int object_is_on_stack(void *obj)
{
	void *stack = task_stack_page(current);

	return (obj >= stack) && (obj < (stack + THREAD_SIZE));
}

extern void thread_stack_cache_init(void);

#ifdef CONFIG_DEBUG_STACK_USAGE
static inline unsigned long stack_not_used(struct task_struct *p)
{
	unsigned long *n = end_of_stack(p);

	do { 	/* Skip over canary */
# ifdef CONFIG_STACK_GROWSUP
		n--;
# else
		n++;
# endif
	} while (!*n);

# ifdef CONFIG_STACK_GROWSUP
	return (unsigned long)end_of_stack(p) - (unsigned long)n;
# else
	return (unsigned long)n - (unsigned long)end_of_stack(p);
# endif
}
#endif
extern void set_task_stack_end_magic(struct task_struct *tsk);
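
/*
 * Illustrative check built on the canary written by
 * set_task_stack_end_magic(), along the lines of what the scheduler's
 * debug path does ('prev' is whatever task is being inspected):
 *
 *	if (task_stack_end_corrupted(prev))
 *		panic("corrupted stack end detected inside scheduler\n");
 */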

/* set thread flags in other task's structures
 * - see asm/thread_info.h for TIF_xxxx flags available
 */
static inline void set_tsk_thread_flag(struct task_struct *tsk, int flag)
{
	set_ti_thread_flag(task_thread_info(tsk), flag);
}

static inline void clear_tsk_thread_flag(struct task_struct *tsk, int flag)
{
	clear_ti_thread_flag(task_thread_info(tsk), flag);
}

static inline int test_and_set_tsk_thread_flag(struct task_struct *tsk, int flag)
{
	return test_and_set_ti_thread_flag(task_thread_info(tsk), flag);
}

static inline int test_and_clear_tsk_thread_flag(struct task_struct *tsk, int flag)
{
	return test_and_clear_ti_thread_flag(task_thread_info(tsk), flag);
}

static inline int test_tsk_thread_flag(struct task_struct *tsk, int flag)
{
	return test_ti_thread_flag(task_thread_info(tsk), flag);
}

static inline void set_tsk_need_resched(struct task_struct *tsk)
{
	set_tsk_thread_flag(tsk,TIF_NEED_RESCHED);
}

static inline void clear_tsk_need_resched(struct task_struct *tsk)
{
	clear_tsk_thread_flag(tsk,TIF_NEED_RESCHED);
}

static inline int test_tsk_need_resched(struct task_struct *tsk)
{
	return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED));
}

static inline int restart_syscall(void)
{
	set_tsk_thread_flag(current, TIF_SIGPENDING);
	return -ERESTARTNOINTR;
}

static inline int signal_pending(struct task_struct *p)
{
	return unlikely(test_tsk_thread_flag(p,TIF_SIGPENDING));
}

static inline int __fatal_signal_pending(struct task_struct *p)
{
	return unlikely(sigismember(&p->pending.signal, SIGKILL));
}

static inline int fatal_signal_pending(struct task_struct *p)
{
	return signal_pending(p) && __fatal_signal_pending(p);
}

static inline int signal_pending_state(long state, struct task_struct *p)
{
	if (!(state & (TASK_INTERRUPTIBLE | TASK_WAKEKILL)))
		return 0;
	if (!signal_pending(p))
		return 0;

	return (state & TASK_INTERRUPTIBLE) || __fatal_signal_pending(p);
}
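
/*
 * Illustrative interruptible wait loop ('cond' and the wakeup path are
 * the caller's):
 *
 *	for (;;) {
 *		set_current_state(TASK_INTERRUPTIBLE);
 *		if (cond)
 *			break;
 *		if (signal_pending(current)) {
 *			__set_current_state(TASK_RUNNING);
 *			return -ERESTARTSYS;
 *		}
 *		schedule();
 *	}
 *	__set_current_state(TASK_RUNNING);
 */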

/*
 * cond_resched() and cond_resched_lock(): latency reduction via
 * explicit rescheduling in places that are safe. The return
 * value indicates whether a reschedule was done in fact.
 * cond_resched_lock() will drop the spinlock before scheduling,
 * cond_resched_softirq() will enable bhs before scheduling.
 */
#ifndef CONFIG_PREEMPT
extern int _cond_resched(void);
#else
static inline int _cond_resched(void) { return 0; }
#endif

#define cond_resched() ({			\
	___might_sleep(__FILE__, __LINE__, 0);	\
	_cond_resched();			\
})

extern int __cond_resched_lock(spinlock_t *lock);

#define cond_resched_lock(lock) ({				\
	___might_sleep(__FILE__, __LINE__, PREEMPT_LOCK_OFFSET);\
	__cond_resched_lock(lock);				\
})

extern int __cond_resched_softirq(void);

#define cond_resched_softirq() ({					\
	___might_sleep(__FILE__, __LINE__, SOFTIRQ_DISABLE_OFFSET);	\
	__cond_resched_softirq();					\
})

static inline void cond_resched_rcu(void)
{
#if defined(CONFIG_DEBUG_ATOMIC_SLEEP) || !defined(CONFIG_PREEMPT_RCU)
	rcu_read_unlock();
	cond_resched();
	rcu_read_lock();
#endif
}
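
/*
 * Sketch: breaking up a long-running kernel loop so other tasks can run
 * on non-preemptible kernels ('nr_items' and 'process_item' are
 * hypothetical):
 *
 *	for (i = 0; i < nr_items; i++) {
 *		process_item(i);
 *		cond_resched();
 *	}
 */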

/*
 * Does a critical section need to be broken due to another
 * task waiting? (Technically this does not depend on CONFIG_PREEMPT,
 * but it expresses a general need for low latency.)
 */
static inline int spin_needbreak(spinlock_t *lock)
{
#ifdef CONFIG_PREEMPT
	return spin_is_contended(lock);
#else
	return 0;
#endif
}
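
/*
 * Sketch of lock breaking while draining a queue ('q', its lock and
 * do_one_unit() are hypothetical):
 *
 *	spin_lock(&q->lock);
 *	while (q->todo) {
 *		do_one_unit(q);
 *		if (spin_needbreak(&q->lock))
 *			cond_resched_lock(&q->lock);
 *	}
 *	spin_unlock(&q->lock);
 */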

/*
 * Idle thread specific functions to determine the need_resched
 * polling state.
 */
#ifdef TIF_POLLING_NRFLAG
static inline int tsk_is_polling(struct task_struct *p)
{
	return test_tsk_thread_flag(p, TIF_POLLING_NRFLAG);
}

static inline void __current_set_polling(void)
{
	set_thread_flag(TIF_POLLING_NRFLAG);
}

static inline bool __must_check current_set_polling_and_test(void)
{
	__current_set_polling();

	/*
	 * Polling state must be visible before we test NEED_RESCHED,
	 * paired by resched_curr()
	 */
	smp_mb__after_atomic();

	return unlikely(tif_need_resched());
}

static inline void __current_clr_polling(void)
{
	clear_thread_flag(TIF_POLLING_NRFLAG);
}

static inline bool __must_check current_clr_polling_and_test(void)
{
	__current_clr_polling();

	/*
	 * Polling state must be visible before we test NEED_RESCHED,
	 * paired by resched_curr()
	 */
	smp_mb__after_atomic();

	return unlikely(tif_need_resched());
}

#else
static inline int tsk_is_polling(struct task_struct *p) { return 0; }
static inline void __current_set_polling(void) { }
static inline void __current_clr_polling(void) { }

static inline bool __must_check current_set_polling_and_test(void)
{
	return unlikely(tif_need_resched());
}
static inline bool __must_check current_clr_polling_and_test(void)
{
	return unlikely(tif_need_resched());
}
#endif

static inline void current_clr_polling(void)
{
	__current_clr_polling();

	/*
	 * Ensure we check TIF_NEED_RESCHED after we clear the polling bit.
	 * Once the bit is cleared, we'll get IPIs with every new
	 * TIF_NEED_RESCHED and the IPI handler, scheduler_ipi(), will also
	 * fold.
	 */
	smp_mb(); /* paired with resched_curr() */

	preempt_fold_need_resched();
}

static __always_inline bool need_resched(void)
{
	return unlikely(tif_need_resched());
}
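
/*
 * Sketch of a bounded busy-wait: poll until the event arrives or
 * preemption is requested ('condition' is hypothetical):
 *
 *	while (!condition && !need_resched())
 *		cpu_relax();
 */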

/*
 * Thread group CPU time accounting.
 */
void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times);
void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times);

/*
 * Reevaluate whether the task has signals pending delivery.
 * Wake the task if so.
 * This is required every time the blocked sigset_t changes.
 * callers must hold sighand->siglock.
 */
extern void recalc_sigpending_and_wake(struct task_struct *t);
extern void recalc_sigpending(void);

extern void signal_wake_up_state(struct task_struct *t, unsigned int state);

static inline void signal_wake_up(struct task_struct *t, bool resume)
{
	signal_wake_up_state(t, resume ? TASK_WAKEKILL : 0);
}
static inline void ptrace_signal_wake_up(struct task_struct *t, bool resume)
{
	signal_wake_up_state(t, resume ? __TASK_TRACED : 0);
}

/*
 * Wrappers for p->thread_info->cpu access. No-op on UP.
 */
#ifdef CONFIG_SMP

static inline unsigned int task_cpu(const struct task_struct *p)
{
#ifdef CONFIG_THREAD_INFO_IN_TASK
	return p->cpu;
#else
	return task_thread_info(p)->cpu;
#endif
}

static inline int task_node(const struct task_struct *p)
{
	return cpu_to_node(task_cpu(p));
}

extern void set_task_cpu(struct task_struct *p, unsigned int cpu);

#else

static inline unsigned int task_cpu(const struct task_struct *p)
{
	return 0;
}

static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
{
}

#endif /* CONFIG_SMP */

/*
 * In order to reduce various lock holder preemption latencies provide an
 * interface to see if a vCPU is currently running or not.
 *
 * This allows us to terminate optimistic spin loops and block, analogous to
 * the native optimistic spin heuristic of testing if the lock owner task is
 * running or not.
 */
#ifndef vcpu_is_preempted
# define vcpu_is_preempted(cpu)	false
#endif
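
/*
 * Sketch of the optimistic-spin heuristic described above ('lock' and its
 * 'owner' field are hypothetical):
 *
 *	struct task_struct *owner = READ_ONCE(lock->owner);
 *
 *	while (owner && owner == READ_ONCE(lock->owner)) {
 *		if (need_resched() || vcpu_is_preempted(task_cpu(owner)))
 *			break;	// stop spinning and block instead
 *		cpu_relax();
 *	}
 */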

extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask);
extern long sched_getaffinity(pid_t pid, struct cpumask *mask);

#ifdef CONFIG_CGROUP_SCHED
extern struct task_group root_task_group;
#endif /* CONFIG_CGROUP_SCHED */

extern int task_can_switch_user(struct user_struct *up,
					struct task_struct *tsk);

#ifdef CONFIG_TASK_XACCT
static inline void add_rchar(struct task_struct *tsk, ssize_t amt)
{
	tsk->ioac.rchar += amt;
}

static inline void add_wchar(struct task_struct *tsk, ssize_t amt)
{
	tsk->ioac.wchar += amt;
}

static inline void inc_syscr(struct task_struct *tsk)
{
	tsk->ioac.syscr++;
}

static inline void inc_syscw(struct task_struct *tsk)
{
	tsk->ioac.syscw++;
}
#else
static inline void add_rchar(struct task_struct *tsk, ssize_t amt)
{
}

static inline void add_wchar(struct task_struct *tsk, ssize_t amt)
{
}

static inline void inc_syscr(struct task_struct *tsk)
{
}

static inline void inc_syscw(struct task_struct *tsk)
{
}
#endif
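
/*
 * Sketch: a driver's read path crediting the caller's I/O accounting
 * ('bytes_copied' is hypothetical):
 *
 *	add_rchar(current, bytes_copied);
 *	inc_syscr(current);
 */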

#ifndef TASK_SIZE_OF
#define TASK_SIZE_OF(tsk)	TASK_SIZE
#endif

#ifdef CONFIG_MEMCG
extern void mm_update_next_owner(struct mm_struct *mm);
#else
static inline void mm_update_next_owner(struct mm_struct *mm)
{
}
#endif /* CONFIG_MEMCG */

static inline unsigned long task_rlimit(const struct task_struct *tsk,
		unsigned int limit)
{
	return READ_ONCE(tsk->signal->rlim[limit].rlim_cur);
}

static inline unsigned long task_rlimit_max(const struct task_struct *tsk,
		unsigned int limit)
{
	return READ_ONCE(tsk->signal->rlim[limit].rlim_max);
}

static inline unsigned long rlimit(unsigned int limit)
{
	return task_rlimit(current, limit);
}

static inline unsigned long rlimit_max(unsigned int limit)
{
	return task_rlimit_max(current, limit);
}
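
/*
 * Sketch: consulting the caller's resource limits ('nr_new_files' is
 * hypothetical):
 *
 *	if (nr_new_files > rlimit(RLIMIT_NOFILE))
 *		return -EMFILE;
 */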

#define SCHED_CPUFREQ_RT	(1U << 0)
#define SCHED_CPUFREQ_DL	(1U << 1)
#define SCHED_CPUFREQ_IOWAIT	(1U << 2)

#define SCHED_CPUFREQ_RT_DL	(SCHED_CPUFREQ_RT | SCHED_CPUFREQ_DL)

#ifdef CONFIG_CPU_FREQ
struct update_util_data {
	void (*func)(struct update_util_data *data, u64 time, unsigned int flags);
};

void cpufreq_add_update_util_hook(int cpu, struct update_util_data *data,
			void (*func)(struct update_util_data *data, u64 time,
				     unsigned int flags));
void cpufreq_remove_update_util_hook(int cpu);
#endif /* CONFIG_CPU_FREQ */
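
/*
 * Sketch of how a cpufreq governor might wire up the hook above
 * ('my_update' and 'my_data' are hypothetical):
 *
 *	static DEFINE_PER_CPU(struct update_util_data, my_data);
 *
 *	static void my_update(struct update_util_data *data, u64 time,
 *			      unsigned int flags)
 *	{
 *		if (flags & SCHED_CPUFREQ_IOWAIT)
 *			;	// e.g. boost frequency after iowait wakeups
 *	}
 *
 *	cpufreq_add_update_util_hook(cpu, &per_cpu(my_data, cpu), my_update);
 *	...
 *	cpufreq_remove_update_util_hook(cpu);
 */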

#endif