#ifndef _LINUX_CGROUP_H
#define _LINUX_CGROUP_H
/*
 *  cgroup interface
 *
 *  Copyright (C) 2003 BULL SA
 *  Copyright (C) 2004-2006 Silicon Graphics, Inc.
 *
 */

#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/nodemask.h>
#include <linux/rcupdate.h>
#include <linux/rculist.h>
#include <linux/cgroupstats.h>
#include <linux/prio_heap.h>
#include <linux/rwsem.h>
#include <linux/idr.h>
#include <linux/workqueue.h>
#include <linux/xattr.h>
#include <linux/fs.h>
#include <linux/percpu-refcount.h>

#ifdef CONFIG_CGROUPS

struct cgroupfs_root;
struct cgroup_subsys;
struct inode;
struct cgroup;
struct css_id;
struct eventfd_ctx;

extern int cgroup_init_early(void);
extern int cgroup_init(void);
extern void cgroup_fork(struct task_struct *p);
extern void cgroup_post_fork(struct task_struct *p);
extern void cgroup_exit(struct task_struct *p, int run_callbacks);
extern int cgroupstats_build(struct cgroupstats *stats,
				struct dentry *dentry);
extern int cgroup_load_subsys(struct cgroup_subsys *ss);
extern void cgroup_unload_subsys(struct cgroup_subsys *ss);

extern int proc_cgroup_show(struct seq_file *, void *);

/*
 * Define the enumeration of all cgroup subsystems.
 *
 * We define ids for builtin subsystems and then modular ones.
 */
#define SUBSYS(_x) _x ## _subsys_id,
enum cgroup_subsys_id {
#define IS_SUBSYS_ENABLED(option) IS_BUILTIN(option)
#include <linux/cgroup_subsys.h>
#undef IS_SUBSYS_ENABLED
	CGROUP_BUILTIN_SUBSYS_COUNT,

	__CGROUP_SUBSYS_TEMP_PLACEHOLDER = CGROUP_BUILTIN_SUBSYS_COUNT - 1,

#define IS_SUBSYS_ENABLED(option) IS_MODULE(option)
#include <linux/cgroup_subsys.h>
#undef IS_SUBSYS_ENABLED
	CGROUP_SUBSYS_COUNT,
};
#undef SUBSYS
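
/*
 * For illustration only (a sketch, not part of this header): given a
 * hypothetical configuration where cgroup_subsys.h contains SUBSYS(cpuset)
 * under a builtin option and SUBSYS(net_cls) under a modular one, the enum
 * above expands roughly to:
 *
 *	enum cgroup_subsys_id {
 *		cpuset_subsys_id,
 *		CGROUP_BUILTIN_SUBSYS_COUNT,
 *		__CGROUP_SUBSYS_TEMP_PLACEHOLDER = CGROUP_BUILTIN_SUBSYS_COUNT - 1,
 *		net_cls_subsys_id,
 *		CGROUP_SUBSYS_COUNT,
 *	};
 *
 * The placeholder rewinds the implicit counter so that the first modular
 * id equals CGROUP_BUILTIN_SUBSYS_COUNT and all ids stay contiguous.
 */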

/* Per-subsystem/per-cgroup state maintained by the system. */
struct cgroup_subsys_state {
	/*
	 * The cgroup that this subsystem is attached to. Useful
	 * for subsystems that want to know about the cgroup
	 * hierarchy structure
	 */
	struct cgroup *cgroup;

	/* reference count - access via css_[try]get() and css_put() */
	struct percpu_ref refcnt;

	unsigned long flags;
	/* ID for this css, if possible */
	struct css_id __rcu *id;

	/* Used to put @cgroup->dentry on the last css_put() */
	struct work_struct dput_work;
};

/* bits in struct cgroup_subsys_state flags field */
enum {
	CSS_ROOT	= (1 << 0), /* this CSS is the root of the subsystem */
	CSS_ONLINE	= (1 << 1), /* between ->css_online() and ->css_offline() */
};

/**
 * css_get - obtain a reference on the specified css
 * @css: target css
 *
 * The caller must already have a reference.
 */
static inline void css_get(struct cgroup_subsys_state *css)
{
	/* We don't need to reference count the root state */
	if (!(css->flags & CSS_ROOT))
		percpu_ref_get(&css->refcnt);
}

/**
 * css_tryget - try to obtain a reference on the specified css
 * @css: target css
 *
 * Obtain a reference on @css if it's alive.  The caller naturally needs to
 * ensure that @css is accessible but doesn't have to be holding a
 * reference on it - IOW, RCU protected access is good enough for this
 * function.  Returns %true if a reference count was successfully obtained;
 * %false otherwise.
 */
static inline bool css_tryget(struct cgroup_subsys_state *css)
{
	if (css->flags & CSS_ROOT)
		return true;
	return percpu_ref_tryget(&css->refcnt);
}

/**
 * css_put - put a css reference
 * @css: target css
 *
 * Put a reference obtained via css_get() and css_tryget().
 */
static inline void css_put(struct cgroup_subsys_state *css)
{
	if (!(css->flags & CSS_ROOT))
		percpu_ref_put(&css->refcnt);
}
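
/*
 * Example usage (an illustrative sketch, not part of the interface proper):
 * pin a css looked up under RCU so it can still be used after
 * rcu_read_unlock().  foo_subsys_id is a hypothetical subsystem id and
 * do_something() stands in for the caller's work.
 *
 *	struct cgroup_subsys_state *css;
 *
 *	rcu_read_lock();
 *	css = task_subsys_state(current, foo_subsys_id);
 *	if (css && !css_tryget(css))
 *		css = NULL;
 *	rcu_read_unlock();
 *
 *	if (css) {
 *		do_something(css);
 *		css_put(css);
 *	}
 *
 * A css_tryget() failure means the css is already on its way to
 * destruction and must not be used.
 */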

/* bits in struct cgroup flags field */
enum {
	/* Control Group is dead */
	CGRP_DEAD,
	/*
	 * Control Group has previously had a child cgroup or a task,
	 * but no longer (only if CGRP_NOTIFY_ON_RELEASE is set)
	 */
	CGRP_RELEASABLE,
	/* Control Group requires release notifications to userspace */
	CGRP_NOTIFY_ON_RELEASE,
	/*
	 * Clone the parent's configuration when creating a new child
	 * cpuset cgroup.  For historical reasons, this option can be
	 * specified at mount time and thus is implemented here.
	 */
	CGRP_CPUSET_CLONE_CHILDREN,
	/* see the comment above CGRP_ROOT_SANE_BEHAVIOR for details */
	CGRP_SANE_BEHAVIOR,
};

struct cgroup_name {
	struct rcu_head rcu_head;
	char name[];
};

struct cgroup {
	unsigned long flags;		/* "unsigned long" so bitops work */

	int id;				/* ida allocated in-hierarchy ID */

	/*
	 * We link our 'sibling' struct into our parent's 'children'.
	 * Our children link their 'sibling' into our 'children'.
	 */
	struct list_head sibling;	/* my parent's children */
	struct list_head children;	/* my children */
	struct list_head files;		/* my files */

	struct cgroup *parent;		/* my parent */
	struct dentry *dentry;		/* cgroup fs entry, RCU protected */

	/*
	 * Monotonically increasing unique serial number which defines a
	 * uniform order among all cgroups.  It's guaranteed that all
	 * ->children lists are in the ascending order of ->serial_nr.
	 * It's used to allow interrupting and resuming iterations.
	 */
	u64 serial_nr;

	/*
	 * This is a copy of dentry->d_name, and it's needed because
	 * we can't use dentry->d_name in cgroup_path().
	 *
	 * You must acquire rcu_read_lock() to access cgrp->name, and
	 * the only place that can change it is rename(), which is
	 * protected by parent dir's i_mutex.
	 *
	 * Normally you should use cgroup_name() wrapper rather than
	 * access it directly.
	 */
	struct cgroup_name __rcu *name;

	/* Private pointers for each registered subsystem */
	struct cgroup_subsys_state *subsys[CGROUP_SUBSYS_COUNT];

	struct cgroupfs_root *root;

	/*
	 * List of cgrp_cset_links pointing at css_sets with tasks in this
	 * cgroup.  Protected by css_set_lock.
	 */
	struct list_head cset_links;

	/*
	 * Linked list running through all cgroups that can
	 * potentially be reaped by the release agent. Protected by
	 * release_list_lock
	 */
	struct list_head release_list;

	/*
	 * list of pidlists, up to two for each namespace (one for procs, one
	 * for tasks); created on demand.
	 */
	struct list_head pidlists;
	struct mutex pidlist_mutex;

	/* For css percpu_ref killing and RCU-protected deletion */
	struct rcu_head rcu_head;
	struct work_struct destroy_work;
	atomic_t css_kill_cnt;

	/* List of events which userspace want to receive */
	struct list_head event_list;
	spinlock_t event_list_lock;

	/* directory xattrs */
	struct simple_xattrs xattrs;
};

#define MAX_CGROUP_ROOT_NAMELEN 64

/* cgroupfs_root->flags */
enum {
	/*
	 * Unfortunately, cgroup core and various controllers are riddled
	 * with idiosyncrasies and pointless options.  The following flag,
	 * when set, will force sane behavior - some options are forced on,
	 * others are disallowed, and some controllers will change their
	 * hierarchical or other behaviors.
	 *
	 * The set of behaviors affected by this flag are still being
	 * determined and developed and the mount option for this flag is
	 * prefixed with __DEVEL__.  The prefix will be dropped once we
	 * reach the point where all behaviors are compatible with the
	 * planned unified hierarchy, which will automatically turn on this
	 * flag.
	 *
	 * The following are the behaviors currently affected by this flag.
	 *
	 * - Mount options "noprefix" and "clone_children" are disallowed.
	 *   Also, cgroupfs file cgroup.clone_children is not created.
	 *
	 * - When mounting an existing superblock, mount options should
	 *   match.
	 *
	 * - Remount is disallowed.
	 *
	 * - "tasks" is removed.  Everything should be at process
	 *   granularity.  Use "cgroup.procs" instead.
	 *
	 * - "release_agent" and "notify_on_release" are removed.
	 *   Replacement notification mechanism will be implemented.
	 *
	 * - rename(2) is disallowed.
	 *
	 * - memcg: use_hierarchy is on by default and the cgroup file for
	 *   the flag is not created.
	 */
	CGRP_ROOT_SANE_BEHAVIOR	= (1 << 0),

	CGRP_ROOT_NOPREFIX	= (1 << 1), /* mounted subsystems have no named prefix */
	CGRP_ROOT_XATTR		= (1 << 2), /* supports extended attributes */
};

/*
 * A cgroupfs_root represents the root of a cgroup hierarchy, and may be
 * associated with a superblock to form an active hierarchy.  This is
 * internal to cgroup core.  Don't access directly from controllers.
 */
struct cgroupfs_root {
	struct super_block *sb;

	/* The bitmask of subsystems attached to this hierarchy */
	unsigned long subsys_mask;

	/* Unique id for this hierarchy. */
	int hierarchy_id;

	/* A list running through the attached subsystems */
	struct list_head subsys_list;

	/* The root cgroup for this hierarchy */
	struct cgroup top_cgroup;

	/* Tracks how many cgroups are currently defined in hierarchy. */
	int number_of_cgroups;

	/* A list running through the active hierarchies */
	struct list_head root_list;

	/* Hierarchy-specific flags */
	unsigned long flags;

	/* IDs for cgroups in this hierarchy */
	struct ida cgroup_ida;

	/* The path to use for release notifications. */
	char release_agent_path[PATH_MAX];

	/* The name for this hierarchy - may be empty */
	char name[MAX_CGROUP_ROOT_NAMELEN];
};

/*
 * A css_set is a structure holding pointers to a set of
 * cgroup_subsys_state objects. This saves space in the task struct
 * object and speeds up fork()/exit(), since a single inc/dec and a
 * list_add()/del() can bump the reference count on the entire cgroup
 * set for a task.
 */

struct css_set {

	/* Reference count */
	atomic_t refcount;

	/*
	 * List running through all css_sets in the same hash
	 * slot.  Protected by css_set_lock.
	 */
	struct hlist_node hlist;

	/*
	 * List running through all tasks using this css_set.
	 * Protected by css_set_lock.
	 */
	struct list_head tasks;

	/*
	 * List of cgrp_cset_links pointing at cgroups referenced from this
	 * css_set.  Protected by css_set_lock.
	 */
	struct list_head cgrp_links;

	/*
	 * Set of subsystem states, one for each subsystem. This array
	 * is immutable after creation apart from the init_css_set
	 * during subsystem registration (at boot time) and modular subsystem
	 * loading/unloading.
	 */
	struct cgroup_subsys_state *subsys[CGROUP_SUBSYS_COUNT];

	/* For RCU-protected deletion */
	struct rcu_head rcu_head;
};

/*
 * cgroup_map_cb is an abstract callback API for reporting map-valued
 * control files
 */

struct cgroup_map_cb {
	int (*fill)(struct cgroup_map_cb *cb, const char *key, u64 value);
	void *state;
};
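
/*
 * Example (an illustrative sketch): a read_map() method for a hypothetical
 * "foo" subsystem reporting two counters through the fill callback; error
 * returns from cb->fill() are ignored for brevity.
 *
 *	static int foo_read_stats(struct cgroup *cgrp, struct cftype *cft,
 *				  struct cgroup_map_cb *cb)
 *	{
 *		cb->fill(cb, "hits", 1234);
 *		cb->fill(cb, "misses", 56);
 *		return 0;
 *	}
 */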

/*
 * struct cftype: handler definitions for cgroup control files
 *
 * When reading/writing to a file:
 *	- the cgroup to use is file->f_dentry->d_parent->d_fsdata
 *	- the 'cftype' of the file is file->f_dentry->d_fsdata
 */

/* cftype->flags */
enum {
	CFTYPE_ONLY_ON_ROOT	= (1 << 0),	/* only create on root cg */
	CFTYPE_NOT_ON_ROOT	= (1 << 1),	/* don't create on root cg */
	CFTYPE_INSANE		= (1 << 2),	/* don't create if sane_behavior */
};

#define MAX_CFTYPE_NAME		64

struct cftype {
	/*
	 * By convention, the name should begin with the name of the
	 * subsystem, followed by a period.  Zero length string indicates
	 * end of cftype array.
	 */
	char name[MAX_CFTYPE_NAME];
	int private;
	/*
	 * If not 0, file mode is set to this value, otherwise it will
	 * be figured out automatically
	 */
	umode_t mode;

	/*
	 * If non-zero, defines the maximum length of string that can
	 * be passed to write_string; defaults to 64
	 */
	size_t max_write_len;

	/* CFTYPE_* flags */
	unsigned int flags;

	int (*open)(struct inode *inode, struct file *file);
	ssize_t (*read)(struct cgroup *cgrp, struct cftype *cft,
			struct file *file,
			char __user *buf, size_t nbytes, loff_t *ppos);
	/*
	 * read_u64() is a shortcut for the common case of returning a
	 * single integer. Use it in place of read()
	 */
	u64 (*read_u64)(struct cgroup *cgrp, struct cftype *cft);
	/*
	 * read_s64() is a signed version of read_u64()
	 */
	s64 (*read_s64)(struct cgroup *cgrp, struct cftype *cft);
	/*
	 * read_map() is used for defining a map of key/value
	 * pairs. It should call cb->fill(cb, key, value) for each
	 * entry. The key/value pairs (and their ordering) should not
	 * change between reboots.
	 */
	int (*read_map)(struct cgroup *cgrp, struct cftype *cft,
			struct cgroup_map_cb *cb);
	/*
	 * read_seq_string() is used for outputting a simple sequence
	 * using seqfile.
	 */
	int (*read_seq_string)(struct cgroup *cgrp, struct cftype *cft,
			       struct seq_file *m);

	ssize_t (*write)(struct cgroup *cgrp, struct cftype *cft,
			 struct file *file,
			 const char __user *buf, size_t nbytes, loff_t *ppos);

	/*
	 * write_u64() is a shortcut for the common case of accepting
	 * a single integer (as parsed by simple_strtoull) from
	 * userspace. Use in place of write(); return 0 or error.
	 */
	int (*write_u64)(struct cgroup *cgrp, struct cftype *cft, u64 val);
	/*
	 * write_s64() is a signed version of write_u64()
	 */
	int (*write_s64)(struct cgroup *cgrp, struct cftype *cft, s64 val);

	/*
	 * write_string() is passed a nul-terminated kernelspace
	 * buffer of maximum length determined by max_write_len.
	 * Returns 0 or -ve error code.
	 */
	int (*write_string)(struct cgroup *cgrp, struct cftype *cft,
			    const char *buffer);
	/*
	 * trigger() callback can be used to get some kick from the
	 * userspace, when the actual string written is not important
	 * at all. The private field can be used to determine the
	 * kick type for multiplexing.
	 */
	int (*trigger)(struct cgroup *cgrp, unsigned int event);

	int (*release)(struct inode *inode, struct file *file);

	/*
	 * register_event() callback will be used to add a new userspace
	 * waiter for changes related to the cftype.  Implement it if
	 * you want to provide this functionality. Use eventfd_signal()
	 * on eventfd to send notification to userspace.
	 */
	int (*register_event)(struct cgroup *cgrp, struct cftype *cft,
			struct eventfd_ctx *eventfd, const char *args);
	/*
	 * unregister_event() callback will be called when userspace
	 * closes the eventfd or when the cgroup is removed.
	 * This callback must be implemented if you want to provide
	 * notification functionality.
	 */
	void (*unregister_event)(struct cgroup *cgrp, struct cftype *cft,
			struct eventfd_ctx *eventfd);
};
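
/*
 * Example (an illustrative sketch): a minimal cftype array for a
 * hypothetical "foo" subsystem.  foo_cgrp() is an assumed helper mapping a
 * cgroup to the subsystem's private state; the empty entry at the end is
 * the zero-length-name terminator.
 *
 *	static u64 foo_weight_read(struct cgroup *cgrp, struct cftype *cft)
 *	{
 *		return foo_cgrp(cgrp)->weight;
 *	}
 *
 *	static int foo_weight_write(struct cgroup *cgrp, struct cftype *cft,
 *				    u64 val)
 *	{
 *		foo_cgrp(cgrp)->weight = val;
 *		return 0;
 *	}
 *
 *	static struct cftype foo_files[] = {
 *		{
 *			.name = "foo.weight",
 *			.read_u64 = foo_weight_read,
 *			.write_u64 = foo_weight_write,
 *		},
 *		{ }
 *	};
 */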

/*
 * cftype_sets describe cftypes belonging to a subsystem and are chained at
 * cgroup_subsys->cftsets.  Each cftset points to an array of cftypes
 * terminated by zero length name.
 */
struct cftype_set {
	struct list_head		node;	/* chained at subsys->cftsets */
	struct cftype			*cfts;
};

struct cgroup_scanner {
	struct cgroup *cg;
	int (*test_task)(struct task_struct *p, struct cgroup_scanner *scan);
	void (*process_task)(struct task_struct *p,
			struct cgroup_scanner *scan);
	struct ptr_heap *heap;
	void *data;
};

/*
 * See the comment above CGRP_ROOT_SANE_BEHAVIOR for details.  This
 * function can be called as long as @cgrp is accessible.
 */
static inline bool cgroup_sane_behavior(const struct cgroup *cgrp)
{
	return cgrp->root->flags & CGRP_ROOT_SANE_BEHAVIOR;
}

/* Caller should hold rcu_read_lock() */
static inline const char *cgroup_name(const struct cgroup *cgrp)
{
	return rcu_dereference(cgrp->name)->name;
}
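
/*
 * Example (illustrative): the name must only be dereferenced under RCU.
 *
 *	rcu_read_lock();
 *	pr_info("cgroup name: %s\n", cgroup_name(cgrp));
 *	rcu_read_unlock();
 */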

int cgroup_add_cftypes(struct cgroup_subsys *ss, struct cftype *cfts);
int cgroup_rm_cftypes(struct cgroup_subsys *ss, struct cftype *cfts);

bool cgroup_is_descendant(struct cgroup *cgrp, struct cgroup *ancestor);

int cgroup_path(const struct cgroup *cgrp, char *buf, int buflen);
int task_cgroup_path_from_hierarchy(struct task_struct *task, int hierarchy_id,
				    char *buf, size_t buflen);
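
/*
 * Example (an illustrative sketch): formatting a cgroup's hierarchy path
 * into a temporary buffer.  cgroup_path() returns 0 on success or a
 * negative errno such as -ENAMETOOLONG when the buffer is too small.
 *
 *	char *buf = kmalloc(PATH_MAX, GFP_KERNEL);
 *
 *	if (buf && !cgroup_path(cgrp, buf, PATH_MAX))
 *		pr_info("cgroup path: %s\n", buf);
 *	kfree(buf);
 */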

int cgroup_task_count(const struct cgroup *cgrp);

/*
 * Control Group taskset, used to pass around set of tasks to cgroup_subsys
 * methods.
 */
struct cgroup_taskset;
struct task_struct *cgroup_taskset_first(struct cgroup_taskset *tset);
struct task_struct *cgroup_taskset_next(struct cgroup_taskset *tset);
struct cgroup *cgroup_taskset_cur_cgroup(struct cgroup_taskset *tset);
int cgroup_taskset_size(struct cgroup_taskset *tset);

/**
 * cgroup_taskset_for_each - iterate cgroup_taskset
 * @task: the loop cursor
 * @skip_cgrp: skip if task's cgroup matches this, %NULL to iterate through all
 * @tset: taskset to iterate
 */
#define cgroup_taskset_for_each(task, skip_cgrp, tset)			\
	for ((task) = cgroup_taskset_first((tset)); (task);		\
	     (task) = cgroup_taskset_next((tset)))			\
		if (!(skip_cgrp) ||					\
		    cgroup_taskset_cur_cgroup((tset)) != (skip_cgrp))
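
/*
 * Example (an illustrative sketch): a can_attach() method that vetoes the
 * migration if any task in the set fails a hypothetical policy check
 * foo_allow().  Passing @cgrp as @skip_cgrp skips tasks that are already
 * in the destination cgroup.
 *
 *	static int foo_can_attach(struct cgroup *cgrp,
 *				  struct cgroup_taskset *tset)
 *	{
 *		struct task_struct *task;
 *
 *		cgroup_taskset_for_each(task, cgrp, tset)
 *			if (!foo_allow(task))
 *				return -EINVAL;
 *		return 0;
 *	}
 */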

/*
 * Control Group subsystem type.
 * See Documentation/cgroups/cgroups.txt for details
 */

struct cgroup_subsys {
	struct cgroup_subsys_state *(*css_alloc)(struct cgroup *cgrp);
	int (*css_online)(struct cgroup *cgrp);
	void (*css_offline)(struct cgroup *cgrp);
	void (*css_free)(struct cgroup *cgrp);

	int (*can_attach)(struct cgroup *cgrp, struct cgroup_taskset *tset);
	void (*cancel_attach)(struct cgroup *cgrp, struct cgroup_taskset *tset);
	void (*attach)(struct cgroup *cgrp, struct cgroup_taskset *tset);
	void (*fork)(struct task_struct *task);
	void (*exit)(struct cgroup *cgrp, struct cgroup *old_cgrp,
		     struct task_struct *task);
	void (*bind)(struct cgroup *root);

	int subsys_id;
	int disabled;
	int early_init;
	/*
	 * True if this subsys uses IDs.  IDs are not available before
	 * cgroup_init() (i.e. not during early init).
	 */
	bool use_id;

	/*
	 * If %false, this subsystem is properly hierarchical -
	 * configuration, resource accounting and restriction on a parent
	 * cgroup cover those of its children.  If %true, hierarchy support
	 * is broken in some ways - some subsystems ignore hierarchy
	 * completely while others are only implemented half-way.
	 *
	 * It's now disallowed to create nested cgroups if the subsystem is
	 * broken and cgroup core will emit a warning message on such
	 * cases.  Eventually, all subsystems will be made properly
	 * hierarchical and this will go away.
	 */
	bool broken_hierarchy;
	bool warned_broken_hierarchy;

#define MAX_CGROUP_TYPE_NAMELEN 32
	const char *name;

	/*
	 * Link to parent, and list entry in parent's children.
	 * Protected by cgroup_lock()
	 */
	struct cgroupfs_root *root;
	struct list_head sibling;
	/* used when use_id == true */
	struct idr idr;
	spinlock_t id_lock;

	/* list of cftype_sets */
	struct list_head cftsets;

	/* base cftypes, automatically [de]registered with subsys itself */
	struct cftype *base_cftypes;
	struct cftype_set base_cftset;

	/* should be defined only by modular subsystems */
	struct module *module;
};
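
/*
 * Example (an illustrative sketch): the skeleton of a minimal subsystem
 * definition.  All "foo" names are hypothetical; a real subsystem also
 * needs a SUBSYS(foo) entry in cgroup_subsys.h, and foo_files would be a
 * cftype array as sketched earlier in this file.
 *
 *	struct foo_cgroup {
 *		struct cgroup_subsys_state css;
 *		u64 weight;
 *	};
 *
 *	static struct cgroup_subsys_state *foo_css_alloc(struct cgroup *cgrp)
 *	{
 *		struct foo_cgroup *fc = kzalloc(sizeof(*fc), GFP_KERNEL);
 *
 *		return fc ? &fc->css : ERR_PTR(-ENOMEM);
 *	}
 *
 *	static void foo_css_free(struct cgroup *cgrp)
 *	{
 *		kfree(container_of(cgrp->subsys[foo_subsys_id],
 *				   struct foo_cgroup, css));
 *	}
 *
 *	struct cgroup_subsys foo_subsys = {
 *		.name		= "foo",
 *		.subsys_id	= foo_subsys_id,
 *		.css_alloc	= foo_css_alloc,
 *		.css_free	= foo_css_free,
 *		.base_cftypes	= foo_files,
 *	};
 */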

#define SUBSYS(_x) extern struct cgroup_subsys _x ## _subsys;
#define IS_SUBSYS_ENABLED(option) IS_BUILTIN(option)
#include <linux/cgroup_subsys.h>
#undef IS_SUBSYS_ENABLED
#undef SUBSYS

static inline struct cgroup_subsys_state *cgroup_subsys_state(
	struct cgroup *cgrp, int subsys_id)
{
	return cgrp->subsys[subsys_id];
}

/*
 * function to get the cgroup_subsys_state which allows for extra
 * rcu_dereference_check() conditions, such as locks used during the
 * cgroup_subsys::attach() methods.
 */
#ifdef CONFIG_PROVE_RCU
extern struct mutex cgroup_mutex;
#define task_subsys_state_check(task, subsys_id, __c)			\
	rcu_dereference_check((task)->cgroups->subsys[(subsys_id)],	\
			      lockdep_is_held(&(task)->alloc_lock) ||	\
			      lockdep_is_held(&cgroup_mutex) || (__c))
#else
#define task_subsys_state_check(task, subsys_id, __c)			\
	rcu_dereference((task)->cgroups->subsys[(subsys_id)])
#endif

static inline struct cgroup_subsys_state *
task_subsys_state(struct task_struct *task, int subsys_id)
{
	return task_subsys_state_check(task, subsys_id, false);
}

static inline struct cgroup* task_cgroup(struct task_struct *task,
					       int subsys_id)
{
	return task_subsys_state(task, subsys_id)->cgroup;
}

struct cgroup *cgroup_next_sibling(struct cgroup *pos);

/**
 * cgroup_for_each_child - iterate through children of a cgroup
 * @pos: the cgroup * to use as the loop cursor
 * @cgrp: cgroup whose children to walk
 *
 * Walk @cgrp's children.  Must be called under rcu_read_lock().  A child
 * cgroup which hasn't finished ->css_online() or already has finished
 * ->css_offline() may show up during traversal and it's each subsystem's
 * responsibility to verify that each @pos is alive.
 *
 * If a subsystem synchronizes against the parent in its ->css_online() and
 * before starting iterating, a cgroup which finished ->css_online() is
 * guaranteed to be visible in the future iterations.
 *
 * It is allowed to temporarily drop RCU read lock during iteration.  The
 * caller is responsible for ensuring that @pos remains accessible until
 * the start of the next iteration by, for example, bumping the css refcnt.
 */
#define cgroup_for_each_child(pos, cgrp)				\
	for ((pos) = list_first_or_null_rcu(&(cgrp)->children,		\
					    struct cgroup, sibling);	\
	     (pos); (pos) = cgroup_next_sibling((pos)))
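
/*
 * Example (an illustrative sketch): pushing a configuration change down to
 * all live children.  foo_update() is a hypothetical per-cgroup operation
 * that is expected to verify @child is still alive.
 *
 *	struct cgroup *child;
 *
 *	rcu_read_lock();
 *	cgroup_for_each_child(child, cgrp)
 *		foo_update(child);
 *	rcu_read_unlock();
 */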

struct cgroup *cgroup_next_descendant_pre(struct cgroup *pos,
					  struct cgroup *cgroup);
struct cgroup *cgroup_rightmost_descendant(struct cgroup *pos);

/**
 * cgroup_for_each_descendant_pre - pre-order walk of a cgroup's descendants
 * @pos: the cgroup * to use as the loop cursor
 * @cgroup: cgroup whose descendants to walk
 *
 * Walk @cgroup's descendants.  Must be called under rcu_read_lock().  A
 * descendant cgroup which hasn't finished ->css_online() or already has
 * finished ->css_offline() may show up during traversal and it's each
 * subsystem's responsibility to verify that each @pos is alive.
 *
 * If a subsystem synchronizes against the parent in its ->css_online() and
 * before starting iterating, and synchronizes against @pos on each
 * iteration, any descendant cgroup which finished ->css_online() is
 * guaranteed to be visible in the future iterations.
 *
 * In other words, the following guarantees that a descendant can't escape
 * state updates of its ancestors.
 *
 * my_online(@cgrp)
 * {
 *	Lock @cgrp->parent and @cgrp;
 *	Inherit state from @cgrp->parent;
 *	Unlock both.
 * }
 *
 * my_update_state(@cgrp)
 * {
 *	Lock @cgrp;
 *	Update @cgrp's state;
 *	Unlock @cgrp;
 *
 *	cgroup_for_each_descendant_pre(@pos, @cgrp) {
 *		Lock @pos;
 *		Verify @pos is alive and inherit state from @pos->parent;
 *		Unlock @pos;
 *	}
 * }
 *
 * As long as the inheriting step, including checking the parent state, is
 * enclosed inside @pos locking, double-locking the parent isn't necessary
 * while inheriting.  The state update to the parent is guaranteed to be
 * visible by walking order and, as long as inheriting operations to the
 * same @pos are atomic to each other, multiple updates racing each other
 * still result in the correct state.  It's guaranteed that at least one
 * inheritance happens for any cgroup after the latest update to its
 * parent.
 *
 * If checking parent's state requires locking the parent, each inheriting
 * iteration should lock and unlock both @pos->parent and @pos.
 *
 * Alternatively, a subsystem may choose to use a single global lock to
 * synchronize ->css_online() and ->css_offline() against tree-walking
 * operations.
 *
 * It is allowed to temporarily drop RCU read lock during iteration.  The
 * caller is responsible for ensuring that @pos remains accessible until
 * the start of the next iteration by, for example, bumping the css refcnt.
 */
#define cgroup_for_each_descendant_pre(pos, cgroup)			\
	for (pos = cgroup_next_descendant_pre(NULL, (cgroup)); (pos);	\
	     pos = cgroup_next_descendant_pre((pos), (cgroup)))

struct cgroup *cgroup_next_descendant_post(struct cgroup *pos,
					   struct cgroup *cgroup);

/**
 * cgroup_for_each_descendant_post - post-order walk of a cgroup's descendants
 * @pos: the cgroup * to use as the loop cursor
 * @cgroup: cgroup whose descendants to walk
 *
 * Similar to cgroup_for_each_descendant_pre() but performs post-order
 * traversal instead.  Note that the visibility guarantee described for the
 * pre-order walk does not apply to post-order walks.
 */
#define cgroup_for_each_descendant_post(pos, cgroup)			\
	for (pos = cgroup_next_descendant_post(NULL, (cgroup)); (pos);	\
	     pos = cgroup_next_descendant_post((pos), (cgroup)))

/* A cgroup_iter should be treated as an opaque object */
struct cgroup_iter {
	struct list_head *cset_link;
	struct list_head *task;
};

/*
 * To iterate across the tasks in a cgroup:
 *
 * 1) call cgroup_iter_start to initialize an iterator
 *
 * 2) call cgroup_iter_next() to retrieve member tasks until it
 *    returns NULL or until you want to end the iteration
 *
 * 3) call cgroup_iter_end() to destroy the iterator.
 *
 * Or, call cgroup_scan_tasks() to iterate through every task in a
 * cgroup - cgroup_scan_tasks() holds the css_set_lock when calling
 * the test_task() callback, but not while calling the process_task()
 * callback.
 */
void cgroup_iter_start(struct cgroup *cgrp, struct cgroup_iter *it);
struct task_struct *cgroup_iter_next(struct cgroup *cgrp,
					struct cgroup_iter *it);
void cgroup_iter_end(struct cgroup *cgrp, struct cgroup_iter *it);
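
/*
 * Example (an illustrative sketch): counting the tasks in @cgrp with the
 * protocol above.  css_set_lock is held between cgroup_iter_start() and
 * cgroup_iter_end(), so the loop body must not sleep.
 *
 *	struct cgroup_iter it;
 *	struct task_struct *task;
 *	int n = 0;
 *
 *	cgroup_iter_start(cgrp, &it);
 *	while ((task = cgroup_iter_next(cgrp, &it)))
 *		n++;
 *	cgroup_iter_end(cgrp, &it);
 */
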
int cgroup_scan_tasks(struct cgroup_scanner *scan);
int cgroup_attach_task_all(struct task_struct *from, struct task_struct *);
int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from);

/*
 * A CSS ID identifies a cgroup_subsys_state struct under a subsystem and is
 * only available when cgroup_subsys.use_id == true.  It can be used for
 * lookup and scanning.  A CSS ID is assigned automatically when the cgroup
 * is created and removed when the subsystem calls free_css_id(), because
 * the lifetime of the cgroup_subsys_state is the subsystem's business.
 *
 * Lookup and scanning functions should be called under rcu_read_lock();
 * taking cgroup_mutex is not necessary for these calls.  However, the css
 * returned by these routines may be "not populated yet" or "being
 * destroyed", so the caller should check the css's and the cgroup's status.
 */

/*
 * Typically called from ->destroy(), or wherever the subsystem frees the
 * cgroup_subsys_state.
 */
void free_css_id(struct cgroup_subsys *ss, struct cgroup_subsys_state *css);

/* Find a cgroup_subsys_state which has given ID */

struct cgroup_subsys_state *css_lookup(struct cgroup_subsys *ss, int id);

/* Returns true if root is ancestor of cg */
bool css_is_ancestor(struct cgroup_subsys_state *cg,
		     const struct cgroup_subsys_state *root);

/* Get id and depth of css */
unsigned short css_id(struct cgroup_subsys_state *css);
struct cgroup_subsys_state *cgroup_css_from_dir(struct file *f, int id);

#else /* !CONFIG_CGROUPS */

static inline int cgroup_init_early(void) { return 0; }
static inline int cgroup_init(void) { return 0; }
static inline void cgroup_fork(struct task_struct *p) {}
static inline void cgroup_post_fork(struct task_struct *p) {}
static inline void cgroup_exit(struct task_struct *p, int callbacks) {}

static inline int cgroupstats_build(struct cgroupstats *stats,
					struct dentry *dentry)
{
	return -EINVAL;
}

/* No cgroups - nothing to do */
static inline int cgroup_attach_task_all(struct task_struct *from,
					 struct task_struct *t)
{
	return 0;
}

#endif /* !CONFIG_CGROUPS */

#endif /* _LINUX_CGROUP_H */