#ifndef _LINUX_CGROUP_H
#define _LINUX_CGROUP_H
/*
 *  cgroup interface
 *
 *  Copyright (C) 2003 BULL SA
 *  Copyright (C) 2004-2006 Silicon Graphics, Inc.
 *
 */

#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/nodemask.h>
#include <linux/rcupdate.h>
#include <linux/rculist.h>
#include <linux/cgroupstats.h>
#include <linux/prio_heap.h>
#include <linux/rwsem.h>
#include <linux/idr.h>
#include <linux/workqueue.h>
#include <linux/xattr.h>
#include <linux/fs.h>

#ifdef CONFIG_CGROUPS

struct cgroupfs_root;
struct cgroup_subsys;
struct inode;
struct cgroup;
struct css_id;
struct eventfd_ctx;

extern int cgroup_init_early(void);
extern int cgroup_init(void);
extern void cgroup_fork(struct task_struct *p);
extern void cgroup_post_fork(struct task_struct *p);
extern void cgroup_exit(struct task_struct *p, int run_callbacks);
extern int cgroupstats_build(struct cgroupstats *stats,
				struct dentry *dentry);
extern int cgroup_load_subsys(struct cgroup_subsys *ss);
extern void cgroup_unload_subsys(struct cgroup_subsys *ss);

extern int proc_cgroup_show(struct seq_file *, void *);

/*
 * Define the enumeration of all cgroup subsystems.
 *
 * We define ids for builtin subsystems and then modular ones.
 */
#define SUBSYS(_x) _x ## _subsys_id,
enum cgroup_subsys_id {
#define IS_SUBSYS_ENABLED(option) IS_BUILTIN(option)
#include <linux/cgroup_subsys.h>
#undef IS_SUBSYS_ENABLED
	CGROUP_BUILTIN_SUBSYS_COUNT,

	__CGROUP_SUBSYS_TEMP_PLACEHOLDER = CGROUP_BUILTIN_SUBSYS_COUNT - 1,

#define IS_SUBSYS_ENABLED(option) IS_MODULE(option)
#include <linux/cgroup_subsys.h>
#undef IS_SUBSYS_ENABLED
	CGROUP_SUBSYS_COUNT,
};
#undef SUBSYS

/* Per-subsystem/per-cgroup state maintained by the system. */
struct cgroup_subsys_state {
	/*
	 * The cgroup that this subsystem is attached to. Useful
	 * for subsystems that want to know about the cgroup
	 * hierarchy structure
	 */
	struct cgroup *cgroup;

	/*
	 * State maintained by the cgroup system to allow subsystems
	 * to be "busy". Should be accessed via css_get(),
	 * css_tryget() and css_put().
	 */

	atomic_t refcnt;

	unsigned long flags;

	/* ID for this css, if possible */
	struct css_id __rcu *id;

	/* Used to put @cgroup->dentry on the last css_put() */
	struct work_struct dput_work;
};

/* bits in struct cgroup_subsys_state flags field */
enum {
	CSS_ROOT	= (1 << 0), /* this CSS is the root of the subsystem */
	CSS_ONLINE	= (1 << 1), /* between ->css_online() and ->css_offline() */
};

/**
 * css_get - obtain a reference on the specified css
 * @css: target css
 *
 * The caller must already have a reference.
 */
static inline void css_get(struct cgroup_subsys_state *css)
{
	/* We don't need to reference count the root state */
	if (!(css->flags & CSS_ROOT))
		atomic_inc(&css->refcnt);
}

extern bool __css_tryget(struct cgroup_subsys_state *css);

/**
 * css_tryget - try to obtain a reference on the specified css
 * @css: target css
 *
 * Obtain a reference on @css if it's alive.  The caller naturally needs to
 * ensure that @css is accessible but doesn't have to be holding a
 * reference on it - IOW, RCU protected access is good enough for this
 * function.  Returns %true if a reference count was successfully obtained;
 * %false otherwise.
 */
static inline bool css_tryget(struct cgroup_subsys_state *css)
{
	if (css->flags & CSS_ROOT)
		return true;
	return __css_tryget(css);
}

extern void __css_put(struct cgroup_subsys_state *css);

/**
 * css_put - put a css reference
 * @css: target css
 *
 * Put a reference obtained via css_get() and css_tryget().
 */
static inline void css_put(struct cgroup_subsys_state *css)
{
	if (!(css->flags & CSS_ROOT))
		__css_put(css);
}
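
/*
 * Illustrative sketch (not part of the original header): a typical
 * refcounting pattern for a controller that wants to use a css beyond an
 * RCU read-side section.  @my_css and my_subsys_id are hypothetical; only
 * task_subsys_state() (declared later in this header), css_tryget() and
 * css_put() are real interfaces here.
 *
 *	rcu_read_lock();
 *	css = task_subsys_state(task, my_subsys_id);
 *	if (css_tryget(css))
 *		my_css = css;		// safe to use after rcu_read_unlock()
 *	rcu_read_unlock();
 *	...
 *	css_put(my_css);		// drop the reference when done
 */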

/* bits in struct cgroup flags field */
enum {
	/* Control Group is dead */
	CGRP_DEAD,
	/*
	 * Control Group has previously had a child cgroup or a task,
	 * but no longer (only if CGRP_NOTIFY_ON_RELEASE is set)
	 */
	CGRP_RELEASABLE,
	/* Control Group requires release notifications to userspace */
	CGRP_NOTIFY_ON_RELEASE,
	/*
	 * Clone the parent's configuration when creating a new child
	 * cpuset cgroup.  For historical reasons, this option can be
	 * specified at mount time and thus is implemented here.
	 */
	CGRP_CPUSET_CLONE_CHILDREN,
	/* see the comment above CGRP_ROOT_SANE_BEHAVIOR for details */
	CGRP_SANE_BEHAVIOR,
};

struct cgroup_name {
	struct rcu_head rcu_head;
	char name[];
};

struct cgroup {
	unsigned long flags;		/* "unsigned long" so bitops work */

	/*
	 * count users of this cgroup. >0 means busy, but doesn't
	 * necessarily indicate the number of tasks in the cgroup
	 */
	atomic_t count;

	int id;				/* ida allocated in-hierarchy ID */

	/*
	 * We link our 'sibling' struct into our parent's 'children'.
	 * Our children link their 'sibling' into our 'children'.
	 */
	struct list_head sibling;	/* my parent's children */
	struct list_head children;	/* my children */
	struct list_head files;		/* my files */

	struct cgroup *parent;		/* my parent */
	struct dentry *dentry;		/* cgroup fs entry, RCU protected */

	/*
	 * Monotonically increasing unique serial number which defines a
	 * uniform order among all cgroups.  It's guaranteed that all
	 * ->children lists are in the ascending order of ->serial_nr.
	 * It's used to allow interrupting and resuming iterations.
	 */
	u64 serial_nr;

	/*
	 * This is a copy of dentry->d_name, and it's needed because
	 * we can't use dentry->d_name in cgroup_path().
	 *
	 * You must acquire rcu_read_lock() to access cgrp->name, and
	 * the only place that can change it is rename(), which is
	 * protected by parent dir's i_mutex.
	 *
	 * Normally you should use cgroup_name() wrapper rather than
	 * access it directly.
	 */
	struct cgroup_name __rcu *name;

	/* Private pointers for each registered subsystem */
	struct cgroup_subsys_state *subsys[CGROUP_SUBSYS_COUNT];

	struct cgroupfs_root *root;

	/*
	 * List of cgrp_cset_links pointing at css_sets with tasks in this
	 * cgroup.  Protected by css_set_lock.
	 */
	struct list_head cset_links;

	struct list_head allcg_node;	/* cgroupfs_root->allcg_list */
	struct list_head cft_q_node;	/* used during cftype add/rm */

	/*
	 * Linked list running through all cgroups that can
	 * potentially be reaped by the release agent. Protected by
	 * release_list_lock
	 */
	struct list_head release_list;

	/*
	 * list of pidlists, up to two for each namespace (one for procs, one
	 * for tasks); created on demand.
	 */
	struct list_head pidlists;
	struct mutex pidlist_mutex;

	/* For RCU-protected deletion */
	struct rcu_head rcu_head;
	struct work_struct free_work;

	/* List of events which userspace wants to receive */
	struct list_head event_list;
	spinlock_t event_list_lock;

	/* directory xattrs */
	struct simple_xattrs xattrs;
};

#define MAX_CGROUP_ROOT_NAMELEN 64

/* cgroupfs_root->flags */
enum {
	/*
	 * Unfortunately, cgroup core and various controllers are riddled
	 * with idiosyncrasies and pointless options.  The following flag,
	 * when set, will force sane behavior - some options are forced on,
	 * others are disallowed, and some controllers will change their
	 * hierarchical or other behaviors.
	 *
	 * The set of behaviors affected by this flag are still being
	 * determined and developed and the mount option for this flag is
	 * prefixed with __DEVEL__.  The prefix will be dropped once we
	 * reach the point where all behaviors are compatible with the
	 * planned unified hierarchy, which will automatically turn on this
	 * flag.
	 *
	 * The following are the behaviors currently affected by this flag.
	 *
	 * - Mount options "noprefix" and "clone_children" are disallowed.
	 *   Also, cgroupfs file cgroup.clone_children is not created.
	 *
	 * - When mounting an existing superblock, mount options should
	 *   match.
	 *
	 * - Remount is disallowed.
	 *
	 * - memcg: use_hierarchy is on by default and the cgroup file for
	 *   the flag is not created.
	 *
	 * The following are planned changes.
	 *
	 * - release_agent will be disallowed once replacement notification
	 *   mechanism is implemented.
	 */
	CGRP_ROOT_SANE_BEHAVIOR	= (1 << 0),

	CGRP_ROOT_NOPREFIX	= (1 << 1), /* mounted subsystems have no named prefix */
	CGRP_ROOT_XATTR		= (1 << 2), /* supports extended attributes */
};

/*
 * A cgroupfs_root represents the root of a cgroup hierarchy, and may be
 * associated with a superblock to form an active hierarchy.  This is
 * internal to cgroup core.  Don't access directly from controllers.
 */
struct cgroupfs_root {
	struct super_block *sb;

	/*
	 * The bitmask of subsystems intended to be attached to this
	 * hierarchy
	 */
	unsigned long subsys_mask;

	/* Unique id for this hierarchy. */
	int hierarchy_id;

	/* The bitmask of subsystems currently attached to this hierarchy */
	unsigned long actual_subsys_mask;

	/* A list running through the attached subsystems */
	struct list_head subsys_list;

	/* The root cgroup for this hierarchy */
	struct cgroup top_cgroup;

	/* Tracks how many cgroups are currently defined in hierarchy.*/
	int number_of_cgroups;

	/* A list running through the active hierarchies */
	struct list_head root_list;

	/* All cgroups on this root, cgroup_mutex protected */
	struct list_head allcg_list;

	/* Hierarchy-specific flags */
	unsigned long flags;

	/* IDs for cgroups in this hierarchy */
	struct ida cgroup_ida;

	/* The path to use for release notifications. */
	char release_agent_path[PATH_MAX];

	/* The name for this hierarchy - may be empty */
	char name[MAX_CGROUP_ROOT_NAMELEN];
};

/*
 * A css_set is a structure holding pointers to a set of
 * cgroup_subsys_state objects. This saves space in the task struct
 * object and speeds up fork()/exit(), since a single inc/dec and a
 * list_add()/del() can bump the reference count on the entire cgroup
 * set for a task.
 */

struct css_set {

	/* Reference count */
	atomic_t refcount;

	/*
	 * List running through all cgroup groups in the same hash
	 * slot. Protected by css_set_lock
	 */
	struct hlist_node hlist;

	/*
	 * List running through all tasks using this cgroup
	 * group. Protected by css_set_lock
	 */
	struct list_head tasks;

	/*
	 * List of cgrp_cset_links pointing at cgroups referenced from this
	 * css_set.  Protected by css_set_lock.
	 */
	struct list_head cgrp_links;

	/*
	 * Set of subsystem states, one for each subsystem. This array
	 * is immutable after creation apart from the init_css_set
	 * during subsystem registration (at boot time) and modular subsystem
	 * loading/unloading.
	 */
	struct cgroup_subsys_state *subsys[CGROUP_SUBSYS_COUNT];

	/* For RCU-protected deletion */
	struct rcu_head rcu_head;
};

/*
 * cgroup_map_cb is an abstract callback API for reporting map-valued
 * control files
 */

struct cgroup_map_cb {
	int (*fill)(struct cgroup_map_cb *cb, const char *key, u64 value);
	void *state;
};
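
/*
 * Illustrative sketch (not part of the original header): a read_map()
 * style handler emitting two fixed keys through the fill callback.  The
 * foo_stat_* helpers are assumptions; cb->fill() is the real interface.
 *
 *	static int foo_read_map(struct cgroup *cont, struct cftype *cft,
 *				struct cgroup_map_cb *cb)
 *	{
 *		cb->fill(cb, "foo_hits", foo_stat_hits(cont));
 *		cb->fill(cb, "foo_misses", foo_stat_misses(cont));
 *		return 0;
 *	}
 */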

/*
 * struct cftype: handler definitions for cgroup control files
 *
 * When reading/writing to a file:
 *	- the cgroup to use is file->f_dentry->d_parent->d_fsdata
 *	- the 'cftype' of the file is file->f_dentry->d_fsdata
 */

/* cftype->flags */
#define CFTYPE_ONLY_ON_ROOT	(1U << 0)	/* only create on root cg */
#define CFTYPE_NOT_ON_ROOT	(1U << 1)	/* don't create on root cg */
#define CFTYPE_INSANE		(1U << 2)	/* don't create if sane_behavior */

#define MAX_CFTYPE_NAME		64

struct cftype {
	/*
	 * By convention, the name should begin with the name of the
	 * subsystem, followed by a period.  Zero length string indicates
	 * end of cftype array.
	 */
	char name[MAX_CFTYPE_NAME];
	int private;
	/*
	 * If not 0, file mode is set to this value, otherwise it will
	 * be figured out automatically
	 */
	umode_t mode;

	/*
	 * If non-zero, defines the maximum length of string that can
	 * be passed to write_string; defaults to 64
	 */
	size_t max_write_len;

	/* CFTYPE_* flags */
	unsigned int flags;

	int (*open)(struct inode *inode, struct file *file);
	ssize_t (*read)(struct cgroup *cgrp, struct cftype *cft,
			struct file *file,
			char __user *buf, size_t nbytes, loff_t *ppos);
	/*
	 * read_u64() is a shortcut for the common case of returning a
	 * single integer. Use it in place of read()
	 */
	u64 (*read_u64)(struct cgroup *cgrp, struct cftype *cft);
	/*
	 * read_s64() is a signed version of read_u64()
	 */
	s64 (*read_s64)(struct cgroup *cgrp, struct cftype *cft);
	/*
	 * read_map() is used for defining a map of key/value
	 * pairs. It should call cb->fill(cb, key, value) for each
	 * entry. The key/value pairs (and their ordering) should not
	 * change between reboots.
	 */
	int (*read_map)(struct cgroup *cont, struct cftype *cft,
			struct cgroup_map_cb *cb);
	/*
	 * read_seq_string() is used for outputting a simple sequence
	 * using seqfile.
	 */
	int (*read_seq_string)(struct cgroup *cont, struct cftype *cft,
			       struct seq_file *m);

	ssize_t (*write)(struct cgroup *cgrp, struct cftype *cft,
			 struct file *file,
			 const char __user *buf, size_t nbytes, loff_t *ppos);

	/*
	 * write_u64() is a shortcut for the common case of accepting
	 * a single integer (as parsed by simple_strtoull) from
	 * userspace. Use in place of write(); return 0 or error.
	 */
	int (*write_u64)(struct cgroup *cgrp, struct cftype *cft, u64 val);
	/*
	 * write_s64() is a signed version of write_u64()
	 */
	int (*write_s64)(struct cgroup *cgrp, struct cftype *cft, s64 val);

	/*
	 * write_string() is passed a nul-terminated kernelspace
	 * buffer of maximum length determined by max_write_len.
	 * Returns 0 or -ve error code.
	 */
	int (*write_string)(struct cgroup *cgrp, struct cftype *cft,
			    const char *buffer);
	/*
	 * trigger() callback can be used to get some kick from the
	 * userspace, when the actual string written is not important
	 * at all. The private field can be used to determine the
	 * kick type for multiplexing.
	 */
	int (*trigger)(struct cgroup *cgrp, unsigned int event);

	int (*release)(struct inode *inode, struct file *file);

	/*
	 * register_event() callback will be used to add new userspace
	 * waiter for changes related to the cftype. Implement it if
	 * you want to provide this functionality. Use eventfd_signal()
	 * on eventfd to send notification to userspace.
	 */
	int (*register_event)(struct cgroup *cgrp, struct cftype *cft,
			struct eventfd_ctx *eventfd, const char *args);
	/*
	 * unregister_event() callback will be called when userspace
	 * closes the eventfd or on cgroup removal.
	 * This callback must be implemented if you want to provide
	 * notification functionality.
	 */
	void (*unregister_event)(struct cgroup *cgrp, struct cftype *cft,
			struct eventfd_ctx *eventfd);
};
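
/*
 * Illustrative sketch (not part of the original header): a minimal cftype
 * array for a hypothetical "foo" subsystem exposing one u64 knob.  The
 * foo_* helpers are assumptions; the field names, the zero-length
 * terminating entry and the registration interfaces follow the
 * conventions documented above.  Such an array is typically registered
 * via cgroup_add_cftypes() or pointed to by cgroup_subsys->base_cftypes.
 *
 *	static u64 foo_limit_read(struct cgroup *cgrp, struct cftype *cft)
 *	{
 *		return foo_cgroup_from_cgrp(cgrp)->limit;
 *	}
 *
 *	static int foo_limit_write(struct cgroup *cgrp, struct cftype *cft,
 *				   u64 val)
 *	{
 *		foo_cgroup_from_cgrp(cgrp)->limit = val;
 *		return 0;
 *	}
 *
 *	static struct cftype foo_files[] = {
 *		{
 *			.name		= "foo.limit",
 *			.read_u64	= foo_limit_read,
 *			.write_u64	= foo_limit_write,
 *		},
 *		{ }	// zero length name terminates the array
 *	};
 */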

/*
 * cftype_sets describe cftypes belonging to a subsystem and are chained at
 * cgroup_subsys->cftsets.  Each cftset points to an array of cftypes
 * terminated by zero length name.
 */
struct cftype_set {
	struct list_head		node;	/* chained at subsys->cftsets */
	struct cftype			*cfts;
};

struct cgroup_scanner {
	struct cgroup *cg;
	int (*test_task)(struct task_struct *p, struct cgroup_scanner *scan);
	void (*process_task)(struct task_struct *p,
			struct cgroup_scanner *scan);
	struct ptr_heap *heap;
	void *data;
};

/*
 * See the comment above CGRP_ROOT_SANE_BEHAVIOR for details.  This
 * function can be called as long as @cgrp is accessible.
 */
static inline bool cgroup_sane_behavior(const struct cgroup *cgrp)
{
	return cgrp->root->flags & CGRP_ROOT_SANE_BEHAVIOR;
}

/* Caller should hold rcu_read_lock() */
static inline const char *cgroup_name(const struct cgroup *cgrp)
{
	return rcu_dereference(cgrp->name)->name;
}
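
/*
 * Illustrative sketch (not part of the original header): cgroup_name()
 * must only be called with rcu_read_lock() held, as noted above.
 *
 *	rcu_read_lock();
 *	pr_info("cgroup name: %s\n", cgroup_name(cgrp));
 *	rcu_read_unlock();
 */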

int cgroup_add_cftypes(struct cgroup_subsys *ss, struct cftype *cfts);
int cgroup_rm_cftypes(struct cgroup_subsys *ss, struct cftype *cfts);

bool cgroup_is_descendant(struct cgroup *cgrp, struct cgroup *ancestor);

int cgroup_path(const struct cgroup *cgrp, char *buf, int buflen);
int task_cgroup_path_from_hierarchy(struct task_struct *task, int hierarchy_id,
				    char *buf, size_t buflen);

int cgroup_task_count(const struct cgroup *cgrp);

/*
 * Control Group taskset, used to pass around set of tasks to cgroup_subsys
 * methods.
 */
struct cgroup_taskset;
struct task_struct *cgroup_taskset_first(struct cgroup_taskset *tset);
struct task_struct *cgroup_taskset_next(struct cgroup_taskset *tset);
struct cgroup *cgroup_taskset_cur_cgroup(struct cgroup_taskset *tset);
int cgroup_taskset_size(struct cgroup_taskset *tset);

/**
 * cgroup_taskset_for_each - iterate cgroup_taskset
 * @task: the loop cursor
 * @skip_cgrp: skip if task's cgroup matches this, %NULL to iterate through all
 * @tset: taskset to iterate
 */
#define cgroup_taskset_for_each(task, skip_cgrp, tset)			\
	for ((task) = cgroup_taskset_first((tset)); (task);		\
	     (task) = cgroup_taskset_next((tset)))			\
		if (!(skip_cgrp) ||					\
		    cgroup_taskset_cur_cgroup((tset)) != (skip_cgrp))
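
/*
 * Illustrative sketch (not part of the original header): a ->can_attach()
 * style callback walking the taskset.  foo_allow_task() is a hypothetical
 * policy helper; the iteration itself uses only the interfaces above,
 * passing %NULL as @skip_cgrp to visit every task.
 *
 *	static int foo_can_attach(struct cgroup *cgrp,
 *				  struct cgroup_taskset *tset)
 *	{
 *		struct task_struct *task;
 *
 *		cgroup_taskset_for_each(task, NULL, tset) {
 *			if (!foo_allow_task(task))
 *				return -EPERM;
 *		}
 *		return 0;
 *	}
 */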

/*
 * Control Group subsystem type.
 * See Documentation/cgroups/cgroups.txt for details
 */

struct cgroup_subsys {
	struct cgroup_subsys_state *(*css_alloc)(struct cgroup *cgrp);
	int (*css_online)(struct cgroup *cgrp);
	void (*css_offline)(struct cgroup *cgrp);
	void (*css_free)(struct cgroup *cgrp);

	int (*can_attach)(struct cgroup *cgrp, struct cgroup_taskset *tset);
	void (*cancel_attach)(struct cgroup *cgrp, struct cgroup_taskset *tset);
	void (*attach)(struct cgroup *cgrp, struct cgroup_taskset *tset);
	void (*fork)(struct task_struct *task);
	void (*exit)(struct cgroup *cgrp, struct cgroup *old_cgrp,
		     struct task_struct *task);
	void (*bind)(struct cgroup *root);

	int subsys_id;
	int disabled;
	int early_init;
	/*
	 * True if this subsys uses ID. ID is not available before cgroup_init()
	 * (not available in early_init time.)
	 */
	bool use_id;

	/*
	 * If %false, this subsystem is properly hierarchical -
	 * configuration, resource accounting and restriction on a parent
	 * cgroup cover those of its children.  If %true, hierarchy support
	 * is broken in some ways - some subsystems ignore hierarchy
	 * completely while others are only implemented half-way.
	 *
	 * It's now disallowed to create nested cgroups if the subsystem is
	 * broken and cgroup core will emit a warning message on such
	 * cases.  Eventually, all subsystems will be made properly
	 * hierarchical and this will go away.
	 */
	bool broken_hierarchy;
	bool warned_broken_hierarchy;

#define MAX_CGROUP_TYPE_NAMELEN 32
	const char *name;

	/*
	 * Link to parent, and list entry in parent's children.
	 * Protected by cgroup_lock()
	 */
	struct cgroupfs_root *root;
	struct list_head sibling;
	/* used when use_id == true */
	struct idr idr;
	spinlock_t id_lock;

	/* list of cftype_sets */
	struct list_head cftsets;

	/* base cftypes, automatically [de]registered with subsys itself */
	struct cftype *base_cftypes;
	struct cftype_set base_cftset;

	/* should be defined only by modular subsystems */
	struct module *module;
};
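
/*
 * Illustrative sketch (not part of the original header): the skeleton of a
 * hypothetical builtin "foo" subsystem.  foo_subsys_id would be generated
 * from linux/cgroup_subsys.h; the foo_css_alloc/foo_css_free callbacks and
 * foo_files cftype array are assumptions.
 *
 *	struct cgroup_subsys foo_subsys = {
 *		.name		= "foo",
 *		.subsys_id	= foo_subsys_id,
 *		.css_alloc	= foo_css_alloc,
 *		.css_free	= foo_css_free,
 *		.base_cftypes	= foo_files,
 *	};
 */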

#define SUBSYS(_x) extern struct cgroup_subsys _x ## _subsys;
#define IS_SUBSYS_ENABLED(option) IS_BUILTIN(option)
#include <linux/cgroup_subsys.h>
#undef IS_SUBSYS_ENABLED
#undef SUBSYS

static inline struct cgroup_subsys_state *cgroup_subsys_state(
	struct cgroup *cgrp, int subsys_id)
{
	return cgrp->subsys[subsys_id];
}

/*
 * function to get the cgroup_subsys_state which allows for extra
 * rcu_dereference_check() conditions, such as locks used during the
 * cgroup_subsys::attach() methods.
 */
#ifdef CONFIG_PROVE_RCU
extern struct mutex cgroup_mutex;
#define task_subsys_state_check(task, subsys_id, __c)			\
	rcu_dereference_check((task)->cgroups->subsys[(subsys_id)],	\
			      lockdep_is_held(&(task)->alloc_lock) ||	\
			      lockdep_is_held(&cgroup_mutex) || (__c))
#else
#define task_subsys_state_check(task, subsys_id, __c)			\
	rcu_dereference((task)->cgroups->subsys[(subsys_id)])
#endif

static inline struct cgroup_subsys_state *
task_subsys_state(struct task_struct *task, int subsys_id)
{
	return task_subsys_state_check(task, subsys_id, false);
}

static inline struct cgroup* task_cgroup(struct task_struct *task,
					       int subsys_id)
{
	return task_subsys_state(task, subsys_id)->cgroup;
}
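
/*
 * Illustrative sketch (not part of the original header): reading a task's
 * per-subsystem state.  foo_subsys_id and struct foo_cgroup (with an
 * embedded css member) are assumptions; container_of() over the embedded
 * css is the usual controller pattern.
 *
 *	rcu_read_lock();
 *	css = task_subsys_state(task, foo_subsys_id);
 *	foo = container_of(css, struct foo_cgroup, css);
 *	// use @foo only under rcu_read_lock(), or take a reference
 *	// with css_tryget() before dropping it
 *	rcu_read_unlock();
 */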

struct cgroup *cgroup_next_sibling(struct cgroup *pos);

/**
 * cgroup_for_each_child - iterate through children of a cgroup
 * @pos: the cgroup * to use as the loop cursor
 * @cgrp: cgroup whose children to walk
 *
 * Walk @cgrp's children.  Must be called under rcu_read_lock().  A child
 * cgroup which hasn't finished ->css_online() or already has finished
 * ->css_offline() may show up during traversal and it's each subsystem's
 * responsibility to verify that each @pos is alive.
 *
 * If a subsystem synchronizes against the parent in its ->css_online() and
 * before starting iterating, a cgroup which finished ->css_online() is
 * guaranteed to be visible in the future iterations.
 *
 * It is allowed to temporarily drop RCU read lock during iteration.  The
 * caller is responsible for ensuring that @pos remains accessible until
 * the start of the next iteration by, for example, bumping the css refcnt.
 */
#define cgroup_for_each_child(pos, cgrp)				\
	for ((pos) = list_first_or_null_rcu(&(cgrp)->children,		\
					    struct cgroup, sibling);	\
	     (pos); (pos) = cgroup_next_sibling((pos)))
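
/*
 * Illustrative sketch (not part of the original header): propagating a
 * hypothetical "limit" to the direct children of @cgrp.  The
 * foo_cgroup_from_cgrp() and foo_css_is_online() helpers and @parent_limit
 * are assumptions; the walk itself only relies on the macro above and the
 * rcu_read_lock() requirement it documents.
 *
 *	struct cgroup *pos;
 *
 *	rcu_read_lock();
 *	cgroup_for_each_child(pos, cgrp) {
 *		struct foo_cgroup *child = foo_cgroup_from_cgrp(pos);
 *
 *		if (!foo_css_is_online(child))	// hypothetical liveness check
 *			continue;
 *		child->limit = min(child->limit, parent_limit);
 *	}
 *	rcu_read_unlock();
 */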

struct cgroup *cgroup_next_descendant_pre(struct cgroup *pos,
					  struct cgroup *cgroup);
struct cgroup *cgroup_rightmost_descendant(struct cgroup *pos);

/**
 * cgroup_for_each_descendant_pre - pre-order walk of a cgroup's descendants
 * @pos: the cgroup * to use as the loop cursor
 * @cgroup: cgroup whose descendants to walk
 *
 * Walk @cgroup's descendants.  Must be called under rcu_read_lock().  A
 * descendant cgroup which hasn't finished ->css_online() or already has
 * finished ->css_offline() may show up during traversal and it's each
 * subsystem's responsibility to verify that each @pos is alive.
 *
 * If a subsystem synchronizes against the parent in its ->css_online() and
 * before starting iterating, and synchronizes against @pos on each
 * iteration, any descendant cgroup which finished ->css_online() is
 * guaranteed to be visible in the future iterations.
 *
 * In other words, the following guarantees that a descendant can't escape
 * state updates of its ancestors.
 *
 * my_online(@cgrp)
 * {
 *	Lock @cgrp->parent and @cgrp;
 *	Inherit state from @cgrp->parent;
 *	Unlock both.
 * }
 *
 * my_update_state(@cgrp)
 * {
 *	Lock @cgrp;
 *	Update @cgrp's state;
 *	Unlock @cgrp;
 *
 *	cgroup_for_each_descendant_pre(@pos, @cgrp) {
 *		Lock @pos;
 *		Verify @pos is alive and inherit state from @pos->parent;
 *		Unlock @pos;
 *	}
 * }
 *
 * As long as the inheriting step, including checking the parent state, is
 * enclosed inside @pos locking, double-locking the parent isn't necessary
 * while inheriting.  The state update to the parent is guaranteed to be
 * visible by walking order and, as long as inheriting operations to the
 * same @pos are atomic to each other, multiple updates racing each other
 * still result in the correct state.  It's guaranteed that at least one
 * inheritance happens for any cgroup after the latest update to its
 * parent.
 *
 * If checking parent's state requires locking the parent, each inheriting
 * iteration should lock and unlock both @pos->parent and @pos.
 *
 * Alternatively, a subsystem may choose to use a single global lock to
 * synchronize ->css_online() and ->css_offline() against tree-walking
 * operations.
 *
 * It is allowed to temporarily drop RCU read lock during iteration.  The
 * caller is responsible for ensuring that @pos remains accessible until
 * the start of the next iteration by, for example, bumping the css refcnt.
 */
#define cgroup_for_each_descendant_pre(pos, cgroup)			\
	for (pos = cgroup_next_descendant_pre(NULL, (cgroup)); (pos);	\
	     pos = cgroup_next_descendant_pre((pos), (cgroup)))

struct cgroup *cgroup_next_descendant_post(struct cgroup *pos,
					   struct cgroup *cgroup);

/**
 * cgroup_for_each_descendant_post - post-order walk of a cgroup's descendants
 * @pos: the cgroup * to use as the loop cursor
 * @cgroup: cgroup whose descendants to walk
 *
 * Similar to cgroup_for_each_descendant_pre() but performs post-order
 * traversal instead.  Note that the walk visibility guarantee described in
 * pre-order walk doesn't apply the same to post-order walks.
 */
#define cgroup_for_each_descendant_post(pos, cgroup)			\
	for (pos = cgroup_next_descendant_post(NULL, (cgroup)); (pos);	\
	     pos = cgroup_next_descendant_post((pos), (cgroup)))

/* A cgroup_iter should be treated as an opaque object */
struct cgroup_iter {
	struct list_head *cset_link;
	struct list_head *task;
};

/*
 * To iterate across the tasks in a cgroup:
 *
 * 1) call cgroup_iter_start to initialize an iterator
 *
 * 2) call cgroup_iter_next() to retrieve member tasks until it
 *    returns NULL or until you want to end the iteration
 *
 * 3) call cgroup_iter_end() to destroy the iterator.
 *
 * Or, call cgroup_scan_tasks() to iterate through every task in a
 * cgroup - cgroup_scan_tasks() holds the css_set_lock when calling
 * the test_task() callback, but not while calling the process_task()
 * callback.
 */
void cgroup_iter_start(struct cgroup *cgrp, struct cgroup_iter *it);
struct task_struct *cgroup_iter_next(struct cgroup *cgrp,
					struct cgroup_iter *it);
void cgroup_iter_end(struct cgroup *cgrp, struct cgroup_iter *it);
int cgroup_scan_tasks(struct cgroup_scanner *scan);
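
/*
 * Illustrative sketch (not part of the original header): the three-step
 * cgroup_iter pattern described above.  foo_account_task() is a
 * hypothetical per-task hook; the iterator calls are the real interface.
 *
 *	struct cgroup_iter it;
 *	struct task_struct *task;
 *
 *	cgroup_iter_start(cgrp, &it);
 *	while ((task = cgroup_iter_next(cgrp, &it)))
 *		foo_account_task(task);
 *	cgroup_iter_end(cgrp, &it);
 */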
int cgroup_attach_task_all(struct task_struct *from, struct task_struct *);
int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from);

/*
 * CSS ID is ID for cgroup_subsys_state structs under subsys. This only works
 * if cgroup_subsys.use_id == true. It can be used for looking up and scanning.
 * CSS ID is assigned at cgroup allocation (create) automatically
 * and removed when subsys calls free_css_id() function. This is because
 * the lifetime of cgroup_subsys_state is subsys's matter.
 *
 * Looking up and scanning function should be called under rcu_read_lock().
 * Taking cgroup_mutex is not necessary for following calls.
 * But the css returned by this routine can be "not populated yet" or "being
 * destroyed". The caller should check css and cgroup's status.
 */

/*
 * Typically called at ->destroy(), or somewhere the subsys frees
 * cgroup_subsys_state.
 */
void free_css_id(struct cgroup_subsys *ss, struct cgroup_subsys_state *css);

/* Find a cgroup_subsys_state which has given ID */

struct cgroup_subsys_state *css_lookup(struct cgroup_subsys *ss, int id);

/* Returns true if root is ancestor of cg */
bool css_is_ancestor(struct cgroup_subsys_state *cg,
		     const struct cgroup_subsys_state *root);

/* Get id and depth of css */
unsigned short css_id(struct cgroup_subsys_state *css);
struct cgroup_subsys_state *cgroup_css_from_dir(struct file *f, int id);

#else /* !CONFIG_CGROUPS */

static inline int cgroup_init_early(void) { return 0; }
static inline int cgroup_init(void) { return 0; }
static inline void cgroup_fork(struct task_struct *p) {}
static inline void cgroup_post_fork(struct task_struct *p) {}
static inline void cgroup_exit(struct task_struct *p, int callbacks) {}

static inline int cgroupstats_build(struct cgroupstats *stats,
					struct dentry *dentry)
{
	return -EINVAL;
}

/* No cgroups - nothing to do */
static inline int cgroup_attach_task_all(struct task_struct *from,
					 struct task_struct *t)
{
	return 0;
}

#endif /* !CONFIG_CGROUPS */

#endif /* _LINUX_CGROUP_H */