/*
 *  Generic process-grouping system.
 *
 *  Based originally on the cpuset system, extracted by Paul Menage
 *  Copyright (C) 2006 Google, Inc
 *
 *  Notifications support
 *  Copyright (C) 2009 Nokia Corporation
 *  Author: Kirill A. Shutemov
 *
 *  Copyright notices from the original cpuset code:
 *  --------------------------------------------------
 *  Copyright (C) 2003 BULL SA.
 *  Copyright (C) 2004-2006 Silicon Graphics, Inc.
 *
 *  Portions derived from Patrick Mochel's sysfs code.
 *  sysfs is Copyright (c) 2001-3 Patrick Mochel
 *
 *  2003-10-10 Written by Simon Derr.
 *  2003-10-22 Updates by Stephen Hemminger.
 *  2004 May-July Rework by Paul Jackson.
 *  ---------------------------------------------------
 *
 *  This file is subject to the terms and conditions of the GNU General Public
 *  License.  See the file COPYING in the main directory of the Linux
 *  distribution for more details.
 */

#include <linux/cgroup.h>
#include <linux/cred.h>
#include <linux/ctype.h>
#include <linux/errno.h>
#include <linux/init_task.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/mount.h>
#include <linux/pagemap.h>
#include <linux/proc_fs.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/rwsem.h>
#include <linux/string.h>
#include <linux/sort.h>
#include <linux/kmod.h>
#include <linux/delayacct.h>
#include <linux/cgroupstats.h>
#include <linux/hashtable.h>
#include <linux/pid_namespace.h>
#include <linux/idr.h>
#include <linux/vmalloc.h> /* TODO: replace with more sophisticated array */
#include <linux/kthread.h>
#include <linux/delay.h>

#include <linux/atomic.h>

/*
 * pidlists linger the following amount before being destroyed.  The goal
 * is avoiding frequent destruction in the middle of consecutive read calls.
 * Expiring in the middle is a performance problem not a correctness one.
 * 1 sec should be enough.
 */
#define CGROUP_PIDLIST_DESTROY_DELAY	HZ

#define CGROUP_FILE_NAME_MAX		(MAX_CGROUP_TYPE_NAMELEN +	\
					 MAX_CFTYPE_NAME + 2)

/*
 * cgroup_tree_mutex nests above cgroup_mutex and protects cftypes, file
 * creation/removal and hierarchy changing operations including cgroup
 * creation, removal, css association and controller rebinding.  This outer
 * lock is needed mainly to resolve the circular dependency between kernfs
 * active ref and cgroup_mutex.  cgroup_tree_mutex nests above both.
 */
static DEFINE_MUTEX(cgroup_tree_mutex);

/*
 * cgroup_mutex is the master lock.  Any modification to cgroup or its
 * hierarchy must be performed while holding it.
 *
 * css_set_rwsem protects task->cgroups pointer, the list of css_set
 * objects, and the chain of tasks off each css_set.
 *
 * These locks are exported if CONFIG_PROVE_RCU so that accessors in
 * cgroup.h can use them for lockdep annotations.
 */
#ifdef CONFIG_PROVE_RCU
DEFINE_MUTEX(cgroup_mutex);
DECLARE_RWSEM(css_set_rwsem);
EXPORT_SYMBOL_GPL(cgroup_mutex);
EXPORT_SYMBOL_GPL(css_set_rwsem);
#else
static DEFINE_MUTEX(cgroup_mutex);
static DECLARE_RWSEM(css_set_rwsem);
#endif

/*
 * Protects cgroup_root->release_agent_path.  Modifying it also requires
 * cgroup_mutex.  Reading requires either cgroup_mutex or this spinlock.
 */
static DEFINE_SPINLOCK(release_agent_path_lock);

#define cgroup_assert_mutexes_or_rcu_locked()				\
	rcu_lockdep_assert(rcu_read_lock_held() ||			\
			   lockdep_is_held(&cgroup_tree_mutex) ||	\
			   lockdep_is_held(&cgroup_mutex),		\
			   "cgroup_[tree_]mutex or RCU read lock required");

/*
 * cgroup destruction makes heavy use of work items and there can be a lot
 * of concurrent destructions.  Use a separate workqueue so that cgroup
 * destruction work items don't end up filling up max_active of system_wq
 * which may lead to deadlock.
 */
static struct workqueue_struct *cgroup_destroy_wq;

/*
 * pidlist destructions need to be flushed on cgroup destruction.  Use a
 * separate workqueue as flush domain.
 */
static struct workqueue_struct *cgroup_pidlist_destroy_wq;

/* generate an array of cgroup subsystem pointers */
#define SUBSYS(_x) [_x ## _cgrp_id] = &_x ## _cgrp_subsys,
static struct cgroup_subsys *cgroup_subsys[] = {
#include <linux/cgroup_subsys.h>
};
#undef SUBSYS

/* array of cgroup subsystem names */
#define SUBSYS(_x) [_x ## _cgrp_id] = #_x,
static const char *cgroup_subsys_name[] = {
#include <linux/cgroup_subsys.h>
};
#undef SUBSYS

/*
 * The default hierarchy, reserved for the subsystems that are otherwise
 * unattached - it never has more than a single cgroup, and all tasks are
 * part of that cgroup.
 */
struct cgroup_root cgrp_dfl_root;

/*
 * The default hierarchy always exists but is hidden until mounted for the
 * first time.  This is for backward compatibility.
 */
static bool cgrp_dfl_root_visible;

/* The list of hierarchy roots */

static LIST_HEAD(cgroup_roots);
static int cgroup_root_count;

/* hierarchy ID allocation and mapping, protected by cgroup_mutex */
static DEFINE_IDR(cgroup_hierarchy_idr);

/*
 * Assign a monotonically increasing serial number to cgroups.  It
 * guarantees cgroups with bigger numbers are newer than those with smaller
 * numbers.  Also, as cgroups are always appended to the parent's
 * ->children list, it guarantees that sibling cgroups are always sorted in
 * the ascending serial number order on the list.  Protected by
 * cgroup_mutex.
 */
static u64 cgroup_serial_nr_next = 1;

/* This flag indicates whether tasks in the fork and exit paths should
 * check for fork/exit handlers to call. This avoids us having to do
 * extra work in the fork/exit path if none of the subsystems need to
 * be called.
 */
static int need_forkexit_callback __read_mostly;

static struct cftype cgroup_base_files[];

static void cgroup_put(struct cgroup *cgrp);
static int rebind_subsystems(struct cgroup_root *dst_root,
			     unsigned long ss_mask);
static void cgroup_destroy_css_killed(struct cgroup *cgrp);
static int cgroup_destroy_locked(struct cgroup *cgrp);
static int cgroup_addrm_files(struct cgroup *cgrp, struct cftype cfts[],
			      bool is_add);
static void cgroup_pidlist_destroy_all(struct cgroup *cgrp);

/**
 * cgroup_css - obtain a cgroup's css for the specified subsystem
 * @cgrp: the cgroup of interest
 * @ss: the subsystem of interest (%NULL returns the dummy_css)
 *
 * Return @cgrp's css (cgroup_subsys_state) associated with @ss.  This
 * function must be called either under cgroup_mutex or rcu_read_lock() and
 * the caller is responsible for pinning the returned css if it wants to
 * keep accessing it outside the said locks.  This function may return
 * %NULL if @cgrp doesn't have @ss enabled.
 */
static struct cgroup_subsys_state *cgroup_css(struct cgroup *cgrp,
					      struct cgroup_subsys *ss)
{
	if (ss)
		return rcu_dereference_check(cgrp->subsys[ss->id],
					lockdep_is_held(&cgroup_tree_mutex) ||
					lockdep_is_held(&cgroup_mutex));
	else
		return &cgrp->dummy_css;
}
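
/*
 * Usage sketch (editor's illustration, not part of the original file):
 * a caller that wants to keep the css past the RCU section pins it
 * first.  "my_use()" is a hypothetical controller helper.
 *
 *	rcu_read_lock();
 *	css = cgroup_css(cgrp, ss);
 *	if (css && !css_tryget(css))
 *		css = NULL;
 *	rcu_read_unlock();
 *	if (css) {
 *		my_use(css);
 *		css_put(css);
 *	}
 */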

/* convenient tests for these bits */
static inline bool cgroup_is_dead(const struct cgroup *cgrp)
{
	return test_bit(CGRP_DEAD, &cgrp->flags);
}

struct cgroup_subsys_state *seq_css(struct seq_file *seq)
{
	struct kernfs_open_file *of = seq->private;
	struct cgroup *cgrp = of->kn->parent->priv;
	struct cftype *cft = seq_cft(seq);

	/*
	 * This is an open and unprotected implementation of cgroup_css().
	 * seq_css() is only called from a kernfs file operation which has
	 * an active reference on the file.  Because all the subsystem
	 * files are drained before a css is disassociated from a cgroup,
	 * the matching css from the cgroup's subsys table is guaranteed to
	 * be and stay valid until the enclosing operation is complete.
	 */
	if (cft->ss)
		return rcu_dereference_raw(cgrp->subsys[cft->ss->id]);
	else
		return &cgrp->dummy_css;
}
EXPORT_SYMBOL_GPL(seq_css);

/**
 * cgroup_is_descendant - test ancestry
 * @cgrp: the cgroup to be tested
 * @ancestor: possible ancestor of @cgrp
 *
 * Test whether @cgrp is a descendant of @ancestor.  It also returns %true
 * if @cgrp == @ancestor.  This function is safe to call as long as @cgrp
 * and @ancestor are accessible.
 */
bool cgroup_is_descendant(struct cgroup *cgrp, struct cgroup *ancestor)
{
	while (cgrp) {
		if (cgrp == ancestor)
			return true;
		cgrp = cgrp->parent;
	}
	return false;
}
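
/*
 * Worked example (editor's note): for a hierarchy a/b/c,
 * cgroup_is_descendant(c, a) and cgroup_is_descendant(a, a) are both
 * true while cgroup_is_descendant(a, c) is false - the loop only walks
 * ->parent pointers upward from @cgrp.
 */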

static int cgroup_is_releasable(const struct cgroup *cgrp)
{
	const int bits =
		(1 << CGRP_RELEASABLE) |
		(1 << CGRP_NOTIFY_ON_RELEASE);
	return (cgrp->flags & bits) == bits;
}

static int notify_on_release(const struct cgroup *cgrp)
{
	return test_bit(CGRP_NOTIFY_ON_RELEASE, &cgrp->flags);
}

/**
 * for_each_css - iterate all css's of a cgroup
 * @css: the iteration cursor
 * @ssid: the index of the subsystem, CGROUP_SUBSYS_COUNT after reaching the end
 * @cgrp: the target cgroup to iterate css's of
 *
 * Should be called under cgroup_mutex.
 */
#define for_each_css(css, ssid, cgrp)					\
	for ((ssid) = 0; (ssid) < CGROUP_SUBSYS_COUNT; (ssid)++)	\
		if (!((css) = rcu_dereference_check(			\
				(cgrp)->subsys[(ssid)],			\
				lockdep_is_held(&cgroup_tree_mutex) ||	\
				lockdep_is_held(&cgroup_mutex)))) { }	\
		else
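
/*
 * Usage sketch (added for illustration, not in the original source):
 * visit every populated css slot of @cgrp; "process_one()" is a
 * hypothetical helper.
 *
 *	struct cgroup_subsys_state *css;
 *	int ssid;
 *
 *	lockdep_assert_held(&cgroup_mutex);
 *	for_each_css(css, ssid, cgrp)
 *		process_one(css);
 */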

/**
 * for_each_subsys - iterate all enabled cgroup subsystems
 * @ss: the iteration cursor
 * @ssid: the index of @ss, CGROUP_SUBSYS_COUNT after reaching the end
 */
#define for_each_subsys(ss, ssid)					\
	for ((ssid) = 0; (ssid) < CGROUP_SUBSYS_COUNT &&		\
	     (((ss) = cgroup_subsys[ssid]) || true); (ssid)++)

/* iterate across the hierarchies */
#define for_each_root(root)						\
	list_for_each_entry((root), &cgroup_roots, root_list)

/**
 * cgroup_lock_live_group - take cgroup_mutex and check that cgrp is alive.
 * @cgrp: the cgroup to be checked for liveness
 *
 * On success, returns true; the mutex should be later unlocked.  On
 * failure returns false with no lock held.
 */
static bool cgroup_lock_live_group(struct cgroup *cgrp)
{
	mutex_lock(&cgroup_mutex);
	if (cgroup_is_dead(cgrp)) {
		mutex_unlock(&cgroup_mutex);
		return false;
	}
	return true;
}
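
/*
 * Typical calling pattern (editor's sketch, not from the original
 * file):
 *
 *	if (!cgroup_lock_live_group(cgrp))
 *		return -ENODEV;
 *	... operate on @cgrp, which cannot be destroyed meanwhile ...
 *	mutex_unlock(&cgroup_mutex);
 */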

/* the list of cgroups eligible for automatic release. Protected by
 * release_list_lock */
static LIST_HEAD(release_list);
static DEFINE_RAW_SPINLOCK(release_list_lock);
static void cgroup_release_agent(struct work_struct *work);
static DECLARE_WORK(release_agent_work, cgroup_release_agent);
static void check_for_release(struct cgroup *cgrp);

/*
 * A cgroup can be associated with multiple css_sets as different tasks may
 * belong to different cgroups on different hierarchies.  In the other
 * direction, a css_set is naturally associated with multiple cgroups.
 * This M:N relationship is represented by the following link structure
 * which exists for each association and allows traversing the associations
 * from both sides.
 */
struct cgrp_cset_link {
	/* the cgroup and css_set this link associates */
	struct cgroup		*cgrp;
	struct css_set		*cset;

	/* list of cgrp_cset_links anchored at cgrp->cset_links */
	struct list_head	cset_link;

	/* list of cgrp_cset_links anchored at css_set->cgrp_links */
	struct list_head	cgrp_link;
};
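
/*
 * Concrete illustration (editor's addition): if task T is in cgroup A
 * on the cpu hierarchy and cgroup B on the memory hierarchy, T's
 * css_set owns two cgrp_cset_links - one hanging off A->cset_links,
 * one off B->cset_links - and both links also sit on the css_set's
 * ->cgrp_links list, so the M:N association can be walked from either
 * side.
 */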

/*
 * The default css_set - used by init and its children prior to any
 * hierarchies being mounted. It contains a pointer to the root state
 * for each subsystem. Also used to anchor the list of css_sets. Not
 * reference-counted, to improve performance when child cgroups
 * haven't been created.
 */
static struct css_set init_css_set = {
	.refcount		= ATOMIC_INIT(1),
	.cgrp_links		= LIST_HEAD_INIT(init_css_set.cgrp_links),
	.tasks			= LIST_HEAD_INIT(init_css_set.tasks),
	.mg_tasks		= LIST_HEAD_INIT(init_css_set.mg_tasks),
	.mg_preload_node	= LIST_HEAD_INIT(init_css_set.mg_preload_node),
	.mg_node		= LIST_HEAD_INIT(init_css_set.mg_node),
};

static int css_set_count	= 1;	/* 1 for init_css_set */

/*
 * hash table for css_sets. This improves the performance of finding
 * an existing css_set. This hash doesn't (currently) take into
 * account cgroups in empty hierarchies.
 */
#define CSS_SET_HASH_BITS	7
static DEFINE_HASHTABLE(css_set_table, CSS_SET_HASH_BITS);

static unsigned long css_set_hash(struct cgroup_subsys_state *css[])
{
	unsigned long key = 0UL;
	struct cgroup_subsys *ss;
	int i;

	for_each_subsys(ss, i)
		key += (unsigned long)css[i];
	key = (key >> 16) ^ key;

	return key;
}

static void put_css_set_locked(struct css_set *cset, bool taskexit)
{
	struct cgrp_cset_link *link, *tmp_link;

	lockdep_assert_held(&css_set_rwsem);

	if (!atomic_dec_and_test(&cset->refcount))
		return;

	/* This css_set is dead. Unlink it and release cgroup refcounts */
	hash_del(&cset->hlist);
	css_set_count--;

	list_for_each_entry_safe(link, tmp_link, &cset->cgrp_links, cgrp_link) {
		struct cgroup *cgrp = link->cgrp;

		list_del(&link->cset_link);
		list_del(&link->cgrp_link);

		/* @cgrp can't go away while we're holding css_set_rwsem */
		if (list_empty(&cgrp->cset_links) && notify_on_release(cgrp)) {
			if (taskexit)
				set_bit(CGRP_RELEASABLE, &cgrp->flags);
			check_for_release(cgrp);
		}

		kfree(link);
	}

	kfree_rcu(cset, rcu_head);
}

static void put_css_set(struct css_set *cset, bool taskexit)
{
	/*
	 * Ensure that the refcount doesn't hit zero while any readers
	 * can see it. Similar to atomic_dec_and_lock(), but for an
	 * rwsem.
	 */
	if (atomic_add_unless(&cset->refcount, -1, 1))
		return;

	down_write(&css_set_rwsem);
	put_css_set_locked(cset, taskexit);
	up_write(&css_set_rwsem);
}

/*
 * refcounted get/put for css_set objects
 */
static inline void get_css_set(struct css_set *cset)
{
	atomic_inc(&cset->refcount);
}

/**
 * compare_css_sets - helper function for find_existing_css_set().
 * @cset: candidate css_set being tested
 * @old_cset: existing css_set for a task
 * @new_cgrp: cgroup that's being entered by the task
 * @template: desired set of css pointers in css_set (pre-calculated)
 *
 * Returns true if "cset" matches "old_cset" except for the hierarchy
 * which "new_cgrp" belongs to, for which it should match "new_cgrp".
 */
static bool compare_css_sets(struct css_set *cset,
			     struct css_set *old_cset,
			     struct cgroup *new_cgrp,
			     struct cgroup_subsys_state *template[])
{
	struct list_head *l1, *l2;

	if (memcmp(template, cset->subsys, sizeof(cset->subsys))) {
		/* Not all subsystems matched */
		return false;
	}

	/*
	 * Compare cgroup pointers in order to distinguish between
	 * different cgroups in hierarchies with no subsystems. We
	 * could get by with just this check alone (and skip the
	 * memcmp above) but on most setups the memcmp check will
	 * avoid the need for this more expensive check on almost all
	 * candidates.
	 */

	l1 = &cset->cgrp_links;
	l2 = &old_cset->cgrp_links;
	while (1) {
		struct cgrp_cset_link *link1, *link2;
		struct cgroup *cgrp1, *cgrp2;

		l1 = l1->next;
		l2 = l2->next;
		/* See if we reached the end - both lists are equal length. */
		if (l1 == &cset->cgrp_links) {
			BUG_ON(l2 != &old_cset->cgrp_links);
			break;
		} else {
			BUG_ON(l2 == &old_cset->cgrp_links);
		}
		/* Locate the cgroups associated with these links. */
		link1 = list_entry(l1, struct cgrp_cset_link, cgrp_link);
		link2 = list_entry(l2, struct cgrp_cset_link, cgrp_link);
		cgrp1 = link1->cgrp;
		cgrp2 = link2->cgrp;
		/* Hierarchies should be linked in the same order. */
		BUG_ON(cgrp1->root != cgrp2->root);

		/*
		 * If this hierarchy is the hierarchy of the cgroup
		 * that's changing, then we need to check that this
		 * css_set points to the new cgroup; if it's any other
		 * hierarchy, then this css_set should point to the
		 * same cgroup as the old css_set.
		 */
		if (cgrp1->root == new_cgrp->root) {
			if (cgrp1 != new_cgrp)
				return false;
		} else {
			if (cgrp1 != cgrp2)
				return false;
		}
	}
	return true;
}

/**
 * find_existing_css_set - init css array and find the matching css_set
 * @old_cset: the css_set that we're using before the cgroup transition
 * @cgrp: the cgroup that we're moving into
 * @template: out param for the new set of csses, should be clear on entry
 */
static struct css_set *find_existing_css_set(struct css_set *old_cset,
					struct cgroup *cgrp,
					struct cgroup_subsys_state *template[])
{
	struct cgroup_root *root = cgrp->root;
	struct cgroup_subsys *ss;
	struct css_set *cset;
	unsigned long key;
	int i;

	/*
	 * Build the set of subsystem state objects that we want to see in the
	 * new css_set. While subsystems can change globally, the entries here
	 * won't change, so no need for locking.
	 */
	for_each_subsys(ss, i) {
		if (root->cgrp.subsys_mask & (1UL << i)) {
			/* Subsystem is in this hierarchy. So we want
			 * the subsystem state from the new
			 * cgroup */
			template[i] = cgroup_css(cgrp, ss);
		} else {
			/* Subsystem is not in this hierarchy, so we
			 * don't want to change the subsystem state */
			template[i] = old_cset->subsys[i];
		}
	}

	key = css_set_hash(template);
	hash_for_each_possible(css_set_table, cset, hlist, key) {
		if (!compare_css_sets(cset, old_cset, cgrp, template))
			continue;

		/* This css_set matches what we need */
		return cset;
	}

	/* No existing css_set matched */
	return NULL;
}

static void free_cgrp_cset_links(struct list_head *links_to_free)
{
	struct cgrp_cset_link *link, *tmp_link;

	list_for_each_entry_safe(link, tmp_link, links_to_free, cset_link) {
		list_del(&link->cset_link);
		kfree(link);
	}
}

/**
 * allocate_cgrp_cset_links - allocate cgrp_cset_links
 * @count: the number of links to allocate
 * @tmp_links: list_head the allocated links are put on
 *
 * Allocate @count cgrp_cset_link structures and chain them on @tmp_links
 * through ->cset_link.  Returns 0 on success or -errno.
 */
static int allocate_cgrp_cset_links(int count, struct list_head *tmp_links)
{
	struct cgrp_cset_link *link;
	int i;

	INIT_LIST_HEAD(tmp_links);

	for (i = 0; i < count; i++) {
		link = kzalloc(sizeof(*link), GFP_KERNEL);
		if (!link) {
			free_cgrp_cset_links(tmp_links);
			return -ENOMEM;
		}
		list_add(&link->cset_link, tmp_links);
	}
	return 0;
}

/**
 * link_css_set - a helper function to link a css_set to a cgroup
 * @tmp_links: cgrp_cset_link objects allocated by allocate_cgrp_cset_links()
 * @cset: the css_set to be linked
 * @cgrp: the destination cgroup
 */
static void link_css_set(struct list_head *tmp_links, struct css_set *cset,
			 struct cgroup *cgrp)
{
	struct cgrp_cset_link *link;

	BUG_ON(list_empty(tmp_links));
	link = list_first_entry(tmp_links, struct cgrp_cset_link, cset_link);
	link->cset = cset;
	link->cgrp = cgrp;
	list_move(&link->cset_link, &cgrp->cset_links);
	/*
	 * Always add links to the tail of the list so that the list
	 * is sorted by order of hierarchy creation
	 */
	list_add_tail(&link->cgrp_link, &cset->cgrp_links);
}
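
/*
 * The two helpers above form an allocate-then-commit pair: allocation
 * may sleep and fail up front, while the later linking step cannot
 * fail.  A hedged sketch of the pattern (not from the original source,
 * "n" is illustrative):
 *
 *	LIST_HEAD(tmp_links);
 *
 *	if (allocate_cgrp_cset_links(n, &tmp_links))
 *		return -ENOMEM;
 *	down_write(&css_set_rwsem);
 *	link_css_set(&tmp_links, cset, cgrp);
 *	up_write(&css_set_rwsem);
 */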

/**
 * find_css_set - return a new css_set with one cgroup updated
 * @old_cset: the baseline css_set
 * @cgrp: the cgroup to be updated
 *
 * Return a new css_set that's equivalent to @old_cset, but with @cgrp
 * substituted into the appropriate hierarchy.
 */
static struct css_set *find_css_set(struct css_set *old_cset,
				    struct cgroup *cgrp)
{
	struct cgroup_subsys_state *template[CGROUP_SUBSYS_COUNT] = { };
	struct css_set *cset;
	struct list_head tmp_links;
	struct cgrp_cset_link *link;
	unsigned long key;

	lockdep_assert_held(&cgroup_mutex);

	/* First see if we already have a css_set that matches
	 * the desired set */
	down_read(&css_set_rwsem);
	cset = find_existing_css_set(old_cset, cgrp, template);
	if (cset)
		get_css_set(cset);
	up_read(&css_set_rwsem);

	if (cset)
		return cset;

	cset = kzalloc(sizeof(*cset), GFP_KERNEL);
	if (!cset)
		return NULL;

	/* Allocate all the cgrp_cset_link objects that we'll need */
	if (allocate_cgrp_cset_links(cgroup_root_count, &tmp_links) < 0) {
		kfree(cset);
		return NULL;
	}

	atomic_set(&cset->refcount, 1);
	INIT_LIST_HEAD(&cset->cgrp_links);
	INIT_LIST_HEAD(&cset->tasks);
	INIT_LIST_HEAD(&cset->mg_tasks);
	INIT_LIST_HEAD(&cset->mg_preload_node);
	INIT_LIST_HEAD(&cset->mg_node);
	INIT_HLIST_NODE(&cset->hlist);

	/* Copy the set of subsystem state objects generated in
	 * find_existing_css_set() */
	memcpy(cset->subsys, template, sizeof(cset->subsys));

	down_write(&css_set_rwsem);
	/* Add reference counts and links from the new css_set. */
	list_for_each_entry(link, &old_cset->cgrp_links, cgrp_link) {
		struct cgroup *c = link->cgrp;

		if (c->root == cgrp->root)
			c = cgrp;
		link_css_set(&tmp_links, cset, c);
	}

	BUG_ON(!list_empty(&tmp_links));

	css_set_count++;

	/* Add this css_set to the hash table */
	key = css_set_hash(cset->subsys);
	hash_add(css_set_table, &cset->hlist, key);

	up_write(&css_set_rwsem);

	return cset;
}

static struct cgroup_root *cgroup_root_from_kf(struct kernfs_root *kf_root)
{
	struct cgroup *root_cgrp = kf_root->kn->priv;

	return root_cgrp->root;
}

static int cgroup_init_root_id(struct cgroup_root *root)
{
	int id;

	lockdep_assert_held(&cgroup_mutex);

	id = idr_alloc_cyclic(&cgroup_hierarchy_idr, root, 0, 0, GFP_KERNEL);
	if (id < 0)
		return id;

	root->hierarchy_id = id;
	return 0;
}

static void cgroup_exit_root_id(struct cgroup_root *root)
{
	lockdep_assert_held(&cgroup_mutex);

	if (root->hierarchy_id) {
		idr_remove(&cgroup_hierarchy_idr, root->hierarchy_id);
		root->hierarchy_id = 0;
	}
}

static void cgroup_free_root(struct cgroup_root *root)
{
	if (root) {
		/* hierarchy ID should already have been released */
		WARN_ON_ONCE(root->hierarchy_id);

		idr_destroy(&root->cgroup_idr);
		kfree(root);
	}
}

static void cgroup_destroy_root(struct cgroup_root *root)
{
	struct cgroup *cgrp = &root->cgrp;
	struct cgrp_cset_link *link, *tmp_link;

	mutex_lock(&cgroup_tree_mutex);
	mutex_lock(&cgroup_mutex);

	BUG_ON(atomic_read(&root->nr_cgrps));
	BUG_ON(!list_empty(&cgrp->children));

	/* Rebind all subsystems back to the default hierarchy */
	rebind_subsystems(&cgrp_dfl_root, cgrp->subsys_mask);

	/*
	 * Release all the links from cset_links to this hierarchy's
	 * root cgroup
	 */
	down_write(&css_set_rwsem);

	list_for_each_entry_safe(link, tmp_link, &cgrp->cset_links, cset_link) {
		list_del(&link->cset_link);
		list_del(&link->cgrp_link);
		kfree(link);
	}
	up_write(&css_set_rwsem);

	if (!list_empty(&root->root_list)) {
		list_del(&root->root_list);
		cgroup_root_count--;
	}

	cgroup_exit_root_id(root);

	mutex_unlock(&cgroup_mutex);
	mutex_unlock(&cgroup_tree_mutex);

	kernfs_destroy_root(root->kf_root);
	cgroup_free_root(root);
}

/* look up cgroup associated with given css_set on the specified hierarchy */
static struct cgroup *cset_cgroup_from_root(struct css_set *cset,
					    struct cgroup_root *root)
{
	struct cgroup *res = NULL;

	lockdep_assert_held(&cgroup_mutex);
	lockdep_assert_held(&css_set_rwsem);

	if (cset == &init_css_set) {
		res = &root->cgrp;
	} else {
		struct cgrp_cset_link *link;

		list_for_each_entry(link, &cset->cgrp_links, cgrp_link) {
			struct cgroup *c = link->cgrp;

			if (c->root == root) {
				res = c;
				break;
			}
		}
	}

	BUG_ON(!res);
	return res;
}

/*
 * Return the cgroup for "task" from the given hierarchy. Must be
 * called with cgroup_mutex and css_set_rwsem held.
 */
static struct cgroup *task_cgroup_from_root(struct task_struct *task,
					    struct cgroup_root *root)
{
	/*
	 * No need to lock the task - since we hold cgroup_mutex the
	 * task can't change groups, so the only thing that can happen
	 * is that it exits and its css_set is set back to init_css_set.
	 */
	return cset_cgroup_from_root(task_css_set(task), root);
}

/*
 * A task must hold cgroup_mutex to modify cgroups.
 *
 * Any task can increment and decrement the count field without lock.
 * So in general, code holding cgroup_mutex can't rely on the count
 * field not changing.  However, if the count goes to zero, then only
 * cgroup_attach_task() can increment it again.  Because a count of zero
 * means that no tasks are currently attached, therefore there is no
 * way a task attached to that cgroup can fork (the other way to
 * increment the count).  So code holding cgroup_mutex can safely
 * assume that if the count is zero, it will stay zero. Similarly, if
 * a task holds cgroup_mutex on a cgroup with zero count, it
 * knows that the cgroup won't be removed, as cgroup_rmdir()
 * needs that mutex.
 *
 * The fork and exit callbacks cgroup_fork() and cgroup_exit(), don't
 * (usually) take cgroup_mutex.  These are the two most performance
 * critical pieces of code here.  The exception occurs on cgroup_exit(),
 * when a task in a notify_on_release cgroup exits.  Then cgroup_mutex
 * is taken, and if the cgroup count is zero, a usermode call is made
 * to the release agent with the name of the cgroup (path relative to
 * the root of cgroup file system) as the argument.
 *
 * A cgroup can only be deleted if both its 'count' of using tasks
 * is zero, and its list of 'children' cgroups is empty.  Since all
 * tasks in the system use _some_ cgroup, and since there is always at
 * least one task in the system (init, pid == 1), therefore, root cgroup
 * always has either children cgroups and/or using tasks.  So we don't
 * need a special hack to ensure that root cgroup cannot be deleted.
 *
 * P.S.  One more locking exception.  RCU is used to guard the
 * update of a task's cgroup pointer by cgroup_attach_task()
 */

static int cgroup_populate_dir(struct cgroup *cgrp, unsigned long subsys_mask);
static struct kernfs_syscall_ops cgroup_kf_syscall_ops;
static const struct file_operations proc_cgroupstats_operations;

static char *cgroup_file_name(struct cgroup *cgrp, const struct cftype *cft,
			      char *buf)
{
	if (cft->ss && !(cft->flags & CFTYPE_NO_PREFIX) &&
	    !(cgrp->root->flags & CGRP_ROOT_NOPREFIX))
		snprintf(buf, CGROUP_FILE_NAME_MAX, "%s.%s",
			 cft->ss->name, cft->name);
	else
		strncpy(buf, cft->name, CGROUP_FILE_NAME_MAX);
	return buf;
}
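
/*
 * Example outcomes (editor's note, not in the original file): the
 * "memory" subsystem's "limit_in_bytes" file is created as
 * "memory.limit_in_bytes", while a subsystem-less base file such as
 * "tasks", a CFTYPE_NO_PREFIX file, or any file on a
 * CGRP_ROOT_NOPREFIX mount keeps its bare cft->name.
 */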

/**
 * cgroup_file_mode - deduce file mode of a control file
 * @cft: the control file in question
 *
 * returns cft->mode if ->mode is not 0
 * returns S_IRUGO|S_IWUSR if it has both a read and a write handler
 * returns S_IRUGO if it has only a read handler
 * returns S_IWUSR if it has only a write handler
 */
static umode_t cgroup_file_mode(const struct cftype *cft)
{
	umode_t mode = 0;

	if (cft->mode)
		return cft->mode;

	if (cft->read_u64 || cft->read_s64 || cft->seq_show)
		mode |= S_IRUGO;

	if (cft->write_u64 || cft->write_s64 || cft->write_string ||
	    cft->trigger)
		mode |= S_IWUSR;

	return mode;
}
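
/*
 * Worked example (editor's addition): with ->mode == 0, a cftype with
 * both read_u64 and write_u64 gets S_IRUGO | S_IWUSR (0644), a
 * seq_show-only file gets S_IRUGO (0444), and a trigger-only file gets
 * S_IWUSR (0200).
 */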

static void cgroup_free_fn(struct work_struct *work)
{
	struct cgroup *cgrp = container_of(work, struct cgroup, destroy_work);

	atomic_dec(&cgrp->root->nr_cgrps);
	cgroup_pidlist_destroy_all(cgrp);

	if (cgrp->parent) {
		/*
		 * We get a ref to the parent, and put the ref when this
		 * cgroup is being freed, so it's guaranteed that the
		 * parent won't be destroyed before its children.
		 */
		cgroup_put(cgrp->parent);
		kernfs_put(cgrp->kn);
		kfree(cgrp);
	} else {
		/*
		 * This is root cgroup's refcnt reaching zero, which
		 * indicates that the root should be released.
		 */
		cgroup_destroy_root(cgrp->root);
	}
}

static void cgroup_free_rcu(struct rcu_head *head)
{
	struct cgroup *cgrp = container_of(head, struct cgroup, rcu_head);

	INIT_WORK(&cgrp->destroy_work, cgroup_free_fn);
	queue_work(cgroup_destroy_wq, &cgrp->destroy_work);
}

static void cgroup_get(struct cgroup *cgrp)
{
	WARN_ON_ONCE(cgroup_is_dead(cgrp));
	WARN_ON_ONCE(atomic_read(&cgrp->refcnt) <= 0);
	atomic_inc(&cgrp->refcnt);
}

static void cgroup_put(struct cgroup *cgrp)
{
	if (!atomic_dec_and_test(&cgrp->refcnt))
		return;
	if (WARN_ON_ONCE(cgrp->parent && !cgroup_is_dead(cgrp)))
		return;

	/*
	 * XXX: cgrp->id is only used to look up css's.  As cgroup and
	 * css's lifetimes will be decoupled, it should be made
	 * per-subsystem and moved to css->id so that lookups are
	 * successful until the target css is released.
	 */
	mutex_lock(&cgroup_mutex);
	idr_remove(&cgrp->root->cgroup_idr, cgrp->id);
	mutex_unlock(&cgroup_mutex);
	cgrp->id = -1;

	call_rcu(&cgrp->rcu_head, cgroup_free_rcu);
}

static void cgroup_rm_file(struct cgroup *cgrp, const struct cftype *cft)
{
	char name[CGROUP_FILE_NAME_MAX];

	lockdep_assert_held(&cgroup_tree_mutex);
	kernfs_remove_by_name(cgrp->kn, cgroup_file_name(cgrp, cft, name));
}

/**
 * cgroup_clear_dir - remove subsys files in a cgroup directory
 * @cgrp: target cgroup
 * @subsys_mask: mask of the subsystem ids whose files should be removed
 */
static void cgroup_clear_dir(struct cgroup *cgrp, unsigned long subsys_mask)
{
	struct cgroup_subsys *ss;
	int i;

	for_each_subsys(ss, i) {
		struct cftype *cfts;

		if (!test_bit(i, &subsys_mask))
			continue;
		list_for_each_entry(cfts, &ss->cfts, node)
			cgroup_addrm_files(cgrp, cfts, false);
	}
}

static int rebind_subsystems(struct cgroup_root *dst_root,
			     unsigned long ss_mask)
{
	struct cgroup_subsys *ss;
	int ssid, ret;

	lockdep_assert_held(&cgroup_tree_mutex);
	lockdep_assert_held(&cgroup_mutex);

	for_each_subsys(ss, ssid) {
		if (!(ss_mask & (1 << ssid)))
			continue;

		/* if @ss is on the dummy_root, we can always move it */
		if (ss->root == &cgrp_dfl_root)
			continue;

		/* if @ss has non-root cgroups attached to it, can't move */
		if (!list_empty(&ss->root->cgrp.children))
			return -EBUSY;

		/* can't move between two non-dummy roots either */
		if (dst_root != &cgrp_dfl_root)
			return -EBUSY;
	}

	ret = cgroup_populate_dir(&dst_root->cgrp, ss_mask);
	if (ret) {
		if (dst_root != &cgrp_dfl_root)
			return ret;

		/*
		 * Rebinding back to the default root is not allowed to
		 * fail.  Using both default and non-default roots should
		 * be rare.  Moving subsystems back and forth even more so.
		 * Just warn about it and continue.
		 */
		if (cgrp_dfl_root_visible) {
			pr_warning("cgroup: failed to create files (%d) while rebinding 0x%lx to default root\n",
				   ret, ss_mask);
			pr_warning("cgroup: you may retry by moving them to a different hierarchy and unbinding\n");
		}
	}

	/*
	 * Nothing can fail from this point on.  Remove files for the
	 * removed subsystems and rebind each subsystem.
	 */
	mutex_unlock(&cgroup_mutex);
	for_each_subsys(ss, ssid)
		if (ss_mask & (1 << ssid))
			cgroup_clear_dir(&ss->root->cgrp, 1 << ssid);
	mutex_lock(&cgroup_mutex);

	for_each_subsys(ss, ssid) {
		struct cgroup_root *src_root;
		struct cgroup_subsys_state *css;

		if (!(ss_mask & (1 << ssid)))
			continue;

		src_root = ss->root;
		css = cgroup_css(&src_root->cgrp, ss);

		WARN_ON(!css || cgroup_css(&dst_root->cgrp, ss));

		RCU_INIT_POINTER(src_root->cgrp.subsys[ssid], NULL);
		rcu_assign_pointer(dst_root->cgrp.subsys[ssid], css);
		ss->root = dst_root;
		css->cgroup = &dst_root->cgrp;

		src_root->cgrp.subsys_mask &= ~(1 << ssid);
		dst_root->cgrp.subsys_mask |= 1 << ssid;

		if (ss->bind)
			ss->bind(css);
	}

	kernfs_activate(dst_root->cgrp.kn);
	return 0;
}

static int cgroup_show_options(struct seq_file *seq,
			       struct kernfs_root *kf_root)
{
	struct cgroup_root *root = cgroup_root_from_kf(kf_root);
	struct cgroup_subsys *ss;
	int ssid;

	for_each_subsys(ss, ssid)
		if (root->cgrp.subsys_mask & (1 << ssid))
			seq_printf(seq, ",%s", ss->name);
	if (root->flags & CGRP_ROOT_SANE_BEHAVIOR)
		seq_puts(seq, ",sane_behavior");
	if (root->flags & CGRP_ROOT_NOPREFIX)
		seq_puts(seq, ",noprefix");
	if (root->flags & CGRP_ROOT_XATTR)
		seq_puts(seq, ",xattr");

	spin_lock(&release_agent_path_lock);
	if (strlen(root->release_agent_path))
		seq_printf(seq, ",release_agent=%s", root->release_agent_path);
	spin_unlock(&release_agent_path_lock);

	if (test_bit(CGRP_CPUSET_CLONE_CHILDREN, &root->cgrp.flags))
		seq_puts(seq, ",clone_children");
	if (strlen(root->name))
		seq_printf(seq, ",name=%s", root->name);
	return 0;
}

struct cgroup_sb_opts {
	unsigned long subsys_mask;
	unsigned long flags;
	char *release_agent;
	bool cpuset_clone_children;
	char *name;
	/* User explicitly requested empty subsystem */
	bool none;
};

/*
 * Convert a hierarchy specifier into a bitmask of subsystems and
 * flags. Call with cgroup_mutex held to protect the cgroup_subsys[]
 * array. This function takes refcounts on subsystems to be used, unless it
 * returns an error, in which case no refcounts are taken.
 */
static int parse_cgroupfs_options(char *data, struct cgroup_sb_opts *opts)
{
	char *token, *o = data;
	bool all_ss = false, one_ss = false;
	unsigned long mask = (unsigned long)-1;
	struct cgroup_subsys *ss;
	int i;

	BUG_ON(!mutex_is_locked(&cgroup_mutex));

#ifdef CONFIG_CPUSETS
	mask = ~(1UL << cpuset_cgrp_id);
#endif

	memset(opts, 0, sizeof(*opts));

	while ((token = strsep(&o, ",")) != NULL) {
		if (!*token)
			return -EINVAL;
		if (!strcmp(token, "none")) {
			/* Explicitly have no subsystems */
			opts->none = true;
			continue;
		}
		if (!strcmp(token, "all")) {
			/* Mutually exclusive option 'all' + subsystem name */
			if (one_ss)
				return -EINVAL;
			all_ss = true;
			continue;
		}
		if (!strcmp(token, "__DEVEL__sane_behavior")) {
			opts->flags |= CGRP_ROOT_SANE_BEHAVIOR;
			continue;
		}
		if (!strcmp(token, "noprefix")) {
			opts->flags |= CGRP_ROOT_NOPREFIX;
			continue;
		}
		if (!strcmp(token, "clone_children")) {
			opts->cpuset_clone_children = true;
			continue;
		}
		if (!strcmp(token, "xattr")) {
			opts->flags |= CGRP_ROOT_XATTR;
			continue;
		}
		if (!strncmp(token, "release_agent=", 14)) {
			/* Specifying two release agents is forbidden */
			if (opts->release_agent)
				return -EINVAL;
			opts->release_agent =
				kstrndup(token + 14, PATH_MAX - 1, GFP_KERNEL);
			if (!opts->release_agent)
				return -ENOMEM;
			continue;
		}
		if (!strncmp(token, "name=", 5)) {
			const char *name = token + 5;
			/* Can't specify an empty name */
			if (!strlen(name))
				return -EINVAL;
			/* Must match [\w.-]+ */
			for (i = 0; i < strlen(name); i++) {
				char c = name[i];
				if (isalnum(c))
					continue;
				if ((c == '.') || (c == '-') || (c == '_'))
					continue;
				return -EINVAL;
			}
			/* Specifying two names is forbidden */
			if (opts->name)
				return -EINVAL;
			opts->name = kstrndup(name,
					      MAX_CGROUP_ROOT_NAMELEN - 1,
					      GFP_KERNEL);
			if (!opts->name)
				return -ENOMEM;

			continue;
		}

		for_each_subsys(ss, i) {
			if (strcmp(token, ss->name))
				continue;
			if (ss->disabled)
				continue;

			/* Mutually exclusive option 'all' + subsystem name */
			if (all_ss)
				return -EINVAL;
			set_bit(i, &opts->subsys_mask);
			one_ss = true;

			break;
		}
		if (i == CGROUP_SUBSYS_COUNT)
			return -ENOENT;
	}

	/* Consistency checks */

	if (opts->flags & CGRP_ROOT_SANE_BEHAVIOR) {
		pr_warning("cgroup: sane_behavior: this is still under development and its behaviors will change, proceed at your own risk\n");

		if ((opts->flags & (CGRP_ROOT_NOPREFIX | CGRP_ROOT_XATTR)) ||
		    opts->cpuset_clone_children || opts->release_agent ||
		    opts->name) {
			pr_err("cgroup: sane_behavior: noprefix, xattr, clone_children, release_agent and name are not allowed\n");
			return -EINVAL;
		}
	} else {
		/*
		 * If the 'all' option was specified select all the
		 * subsystems, otherwise if 'none', 'name=' and a subsystem
		 * name options were not specified, let's default to 'all'
		 */
		if (all_ss || (!one_ss && !opts->none && !opts->name))
			for_each_subsys(ss, i)
				if (!ss->disabled)
					set_bit(i, &opts->subsys_mask);

		/*
		 * We either have to specify by name or by subsystems. (So
		 * all empty hierarchies must have a name).
		 */
		if (!opts->subsys_mask && !opts->name)
			return -EINVAL;
	}

	/*
	 * Option noprefix was introduced just for backward compatibility
	 * with the old cpuset, so we allow noprefix only if mounting just
	 * the cpuset subsystem.
	 */
	if ((opts->flags & CGRP_ROOT_NOPREFIX) && (opts->subsys_mask & mask))
		return -EINVAL;

	/* Can't specify "none" and some subsystems */
	if (opts->subsys_mask && opts->none)
		return -EINVAL;

	return 0;
}
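
/*
 * Example inputs (editor's sketch, not from the original source):
 * mount data "cpu,cpuacct,name=grp" sets the cpu and cpuacct bits in
 * opts->subsys_mask and opts->name to "grp"; "none,name=mygrp" selects
 * no subsystems and is accepted because the hierarchy is named;
 * "noprefix,cpu" fails with -EINVAL since noprefix is reserved for
 * cpuset-only mounts.
 */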

static int cgroup_remount(struct kernfs_root *kf_root, int *flags, char *data)
{
	int ret = 0;
	struct cgroup_root *root = cgroup_root_from_kf(kf_root);
	struct cgroup_sb_opts opts;
	unsigned long added_mask, removed_mask;

	if (root->flags & CGRP_ROOT_SANE_BEHAVIOR) {
		pr_err("cgroup: sane_behavior: remount is not allowed\n");
		return -EINVAL;
	}

	mutex_lock(&cgroup_tree_mutex);
	mutex_lock(&cgroup_mutex);

	/* See what subsystems are wanted */
	ret = parse_cgroupfs_options(data, &opts);
	if (ret)
		goto out_unlock;

	if (opts.subsys_mask != root->cgrp.subsys_mask || opts.release_agent)
		pr_warning("cgroup: option changes via remount are deprecated (pid=%d comm=%s)\n",
			   task_tgid_nr(current), current->comm);

	added_mask = opts.subsys_mask & ~root->cgrp.subsys_mask;
	removed_mask = root->cgrp.subsys_mask & ~opts.subsys_mask;

	/* Don't allow flags or name to change at remount */
	if (((opts.flags ^ root->flags) & CGRP_ROOT_OPTION_MASK) ||
	    (opts.name && strcmp(opts.name, root->name))) {
		pr_err("cgroup: option or name mismatch, new: 0x%lx \"%s\", old: 0x%lx \"%s\"\n",
		       opts.flags & CGRP_ROOT_OPTION_MASK, opts.name ?: "",
		       root->flags & CGRP_ROOT_OPTION_MASK, root->name);
		ret = -EINVAL;
		goto out_unlock;
	}

	/* remounting is not allowed for populated hierarchies */
	if (!list_empty(&root->cgrp.children)) {
		ret = -EBUSY;
		goto out_unlock;
	}

	ret = rebind_subsystems(root, added_mask);
	if (ret)
		goto out_unlock;

	rebind_subsystems(&cgrp_dfl_root, removed_mask);

	if (opts.release_agent) {
		spin_lock(&release_agent_path_lock);
		strcpy(root->release_agent_path, opts.release_agent);
		spin_unlock(&release_agent_path_lock);
	}
 out_unlock:
	kfree(opts.release_agent);
	kfree(opts.name);
	mutex_unlock(&cgroup_mutex);
	mutex_unlock(&cgroup_tree_mutex);
	return ret;
}

/*
 * To reduce the fork() overhead for systems that are not actually using
 * their cgroups capability, we don't maintain the lists running through
 * each css_set to its tasks until we see the list actually used - in other
 * words after the first mount.
 */
static bool use_task_css_set_links __read_mostly;

static void cgroup_enable_task_cg_lists(void)
{
	struct task_struct *p, *g;

	down_write(&css_set_rwsem);

	if (use_task_css_set_links)
		goto out_unlock;

	use_task_css_set_links = true;

	/*
	 * We need tasklist_lock because RCU is not safe against
	 * while_each_thread(). Besides, a forking task that has passed
	 * cgroup_post_fork() without seeing use_task_css_set_links = 1
	 * is not guaranteed to have its child immediately visible in the
	 * tasklist if we walk through it with RCU.
	 */
	read_lock(&tasklist_lock);
	do_each_thread(g, p) {
		WARN_ON_ONCE(!list_empty(&p->cg_list) ||
			     task_css_set(p) != &init_css_set);

		/*
		 * We should check if the process is exiting, otherwise
		 * it will race with cgroup_exit() in that the list
		 * entry won't be deleted though the process has exited.
		 * Do it while holding siglock so that we don't end up
		 * racing against cgroup_exit().
		 */
		spin_lock_irq(&p->sighand->siglock);
		if (!(p->flags & PF_EXITING)) {
			struct css_set *cset = task_css_set(p);

			list_add(&p->cg_list, &cset->tasks);
			get_css_set(cset);
		}
		spin_unlock_irq(&p->sighand->siglock);
	} while_each_thread(g, p);
	read_unlock(&tasklist_lock);
out_unlock:
	up_write(&css_set_rwsem);
}

static void init_cgroup_housekeeping(struct cgroup *cgrp)
{
	atomic_set(&cgrp->refcnt, 1);
	INIT_LIST_HEAD(&cgrp->sibling);
	INIT_LIST_HEAD(&cgrp->children);
	INIT_LIST_HEAD(&cgrp->cset_links);
	INIT_LIST_HEAD(&cgrp->release_list);
	INIT_LIST_HEAD(&cgrp->pidlists);
	mutex_init(&cgrp->pidlist_mutex);
	cgrp->dummy_css.cgroup = cgrp;
}

static void init_cgroup_root(struct cgroup_root *root,
			     struct cgroup_sb_opts *opts)
{
	struct cgroup *cgrp = &root->cgrp;

	INIT_LIST_HEAD(&root->root_list);
	atomic_set(&root->nr_cgrps, 1);
	cgrp->root = root;
	init_cgroup_housekeeping(cgrp);
	idr_init(&root->cgroup_idr);

	root->flags = opts->flags;
	if (opts->release_agent)
		strcpy(root->release_agent_path, opts->release_agent);
	if (opts->name)
		strcpy(root->name, opts->name);
	if (opts->cpuset_clone_children)
		set_bit(CGRP_CPUSET_CLONE_CHILDREN, &root->cgrp.flags);
}

static int cgroup_setup_root(struct cgroup_root *root, unsigned long ss_mask)
{
	LIST_HEAD(tmp_links);
	struct cgroup *root_cgrp = &root->cgrp;
	struct css_set *cset;
	int i, ret;

	lockdep_assert_held(&cgroup_tree_mutex);
	lockdep_assert_held(&cgroup_mutex);

	ret = idr_alloc(&root->cgroup_idr, root_cgrp, 0, 1, GFP_KERNEL);
	if (ret < 0)
		goto out;
	root_cgrp->id = ret;

	/*
	 * We're accessing css_set_count without locking css_set_rwsem here,
	 * but that's OK - it can only be increased by someone holding
	 * cgroup_lock, and that's us. The worst that can happen is that we
	 * have some link structures left over.
	 */
	ret = allocate_cgrp_cset_links(css_set_count, &tmp_links);
	if (ret)
		goto out;

	ret = cgroup_init_root_id(root);
	if (ret)
		goto out;

	root->kf_root = kernfs_create_root(&cgroup_kf_syscall_ops,
					   KERNFS_ROOT_CREATE_DEACTIVATED,
					   root_cgrp);
	if (IS_ERR(root->kf_root)) {
		ret = PTR_ERR(root->kf_root);
		goto exit_root_id;
	}
	root_cgrp->kn = root->kf_root->kn;

	ret = cgroup_addrm_files(root_cgrp, cgroup_base_files, true);
	if (ret)
		goto destroy_root;

	ret = rebind_subsystems(root, ss_mask);
	if (ret)
		goto destroy_root;

	/*
	 * There must be no failure case after here, since rebinding takes
	 * care of subsystems' refcounts, which are explicitly dropped in
	 * the failure exit path.
	 */
	list_add(&root->root_list, &cgroup_roots);
	cgroup_root_count++;

	/*
	 * Link the root cgroup in this hierarchy into all the css_set
	 * objects.
	 */
	down_write(&css_set_rwsem);
	hash_for_each(css_set_table, i, cset, hlist)
		link_css_set(&tmp_links, cset, root_cgrp);
	up_write(&css_set_rwsem);

	BUG_ON(!list_empty(&root_cgrp->children));
	BUG_ON(atomic_read(&root->nr_cgrps) != 1);

	kernfs_activate(root_cgrp->kn);
	ret = 0;
	goto out;

destroy_root:
	kernfs_destroy_root(root->kf_root);
	root->kf_root = NULL;
exit_root_id:
	cgroup_exit_root_id(root);
out:
	free_cgrp_cset_links(&tmp_links);
	return ret;
}

static struct dentry *cgroup_mount(struct file_system_type *fs_type,
			 int flags, const char *unused_dev_name,
			 void *data)
{
	struct cgroup_root *root;
	struct cgroup_sb_opts opts;
	struct dentry *dentry;
	int ret;
	bool new_sb;

	/*
	 * The first time anyone tries to mount a cgroup, enable the list
	 * linking each css_set to its tasks and fix up all existing tasks.
	 */
	if (!use_task_css_set_links)
		cgroup_enable_task_cg_lists();
retry:
	mutex_lock(&cgroup_tree_mutex);
	mutex_lock(&cgroup_mutex);

	/* First find the desired set of subsystems */
	ret = parse_cgroupfs_options(data, &opts);
	if (ret)
		goto out_unlock;

	/* look for a matching existing root */
	if (!opts.subsys_mask && !opts.none && !opts.name) {
		cgrp_dfl_root_visible = true;
		root = &cgrp_dfl_root;
		cgroup_get(&root->cgrp);
		ret = 0;
		goto out_unlock;
	}

	for_each_root(root) {
		bool name_match = false;

		if (root == &cgrp_dfl_root)
			continue;

		/*
		 * If we asked for a name then it must match.  Also, if
		 * name matches but subsys_mask doesn't, we should fail.
		 * Remember whether name matched.
		 */
		if (opts.name) {
			if (strcmp(opts.name, root->name))
				continue;
			name_match = true;
		}

		/*
		 * If we asked for subsystems (or explicitly for no
		 * subsystems) then they must match.
		 */
		if ((opts.subsys_mask || opts.none) &&
		    (opts.subsys_mask != root->cgrp.subsys_mask)) {
			if (!name_match)
				continue;
			ret = -EBUSY;
			goto out_unlock;
		}

		if ((root->flags ^ opts.flags) & CGRP_ROOT_OPTION_MASK) {
			if ((root->flags | opts.flags) & CGRP_ROOT_SANE_BEHAVIOR) {
				pr_err("cgroup: sane_behavior: new mount options should match the existing superblock\n");
				ret = -EINVAL;
				goto out_unlock;
			} else {
				pr_warning("cgroup: new mount options do not match the existing superblock, will be ignored\n");
			}
		}

		/*
		 * A root's lifetime is governed by its root cgroup.  Zero
		 * ref indicates that the root is being destroyed.  Wait for
		 * destruction to complete so that the subsystems are free.
		 * We can use wait_queue for the wait but this path is
		 * super cold.  Let's just sleep for a bit and retry.
		 */
		if (!atomic_inc_not_zero(&root->cgrp.refcnt)) {
			mutex_unlock(&cgroup_mutex);
			mutex_unlock(&cgroup_tree_mutex);
			kfree(opts.release_agent);
			kfree(opts.name);
			msleep(10);
			goto retry;
		}

		ret = 0;
		goto out_unlock;
	}

	/*
	 * No such thing, create a new one.  name= matching without subsys
	 * specification is allowed for already existing hierarchies but we
	 * can't create a new one without subsys specification.
	 */
	if (!opts.subsys_mask && !opts.none) {
		ret = -EINVAL;
		goto out_unlock;
	}

	root = kzalloc(sizeof(*root), GFP_KERNEL);
	if (!root) {
		ret = -ENOMEM;
		goto out_unlock;
	}

	init_cgroup_root(root, &opts);

	ret = cgroup_setup_root(root, opts.subsys_mask);
	if (ret)
		cgroup_free_root(root);

out_unlock:
	mutex_unlock(&cgroup_mutex);
	mutex_unlock(&cgroup_tree_mutex);

	kfree(opts.release_agent);
	kfree(opts.name);

	if (ret)
		return ERR_PTR(ret);

	dentry = kernfs_mount(fs_type, flags, root->kf_root, &new_sb);
	if (IS_ERR(dentry) || !new_sb)
		cgroup_put(&root->cgrp);
	return dentry;
}

static void cgroup_kill_sb(struct super_block *sb)
{
	struct kernfs_root *kf_root = kernfs_root_from_sb(sb);
	struct cgroup_root *root = cgroup_root_from_kf(kf_root);

	cgroup_put(&root->cgrp);
	kernfs_kill_sb(sb);
}

static struct file_system_type cgroup_fs_type = {
	.name = "cgroup",
	.mount = cgroup_mount,
	.kill_sb = cgroup_kill_sb,
};

static struct kobject *cgroup_kobj;

/**
 * task_cgroup_path - cgroup path of a task in the first cgroup hierarchy
 * @task: target task
 * @buf: the buffer to write the path into
 * @buflen: the length of the buffer
 *
 * Determine @task's cgroup on the first (the one with the lowest non-zero
 * hierarchy_id) cgroup hierarchy and copy its path into @buf.  This
 * function grabs cgroup_mutex and shouldn't be used inside locks used by
 * cgroup controller callbacks.
 *
 * Return value is the same as kernfs_path().
 */
char *task_cgroup_path(struct task_struct *task, char *buf, size_t buflen)
{
	struct cgroup_root *root;
	struct cgroup *cgrp;
	int hierarchy_id = 1;
	char *path = NULL;

	mutex_lock(&cgroup_mutex);
	down_read(&css_set_rwsem);

	root = idr_get_next(&cgroup_hierarchy_idr, &hierarchy_id);

	if (root) {
		cgrp = task_cgroup_from_root(task, root);
		path = cgroup_path(cgrp, buf, buflen);
	} else {
		/* if no hierarchy exists, everyone is in "/" */
		if (strlcpy(buf, "/", buflen) < buflen)
			path = buf;
	}

	up_read(&css_set_rwsem);
	mutex_unlock(&cgroup_mutex);
	return path;
}
EXPORT_SYMBOL_GPL(task_cgroup_path);
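
/*
 * Usage sketch (editor's illustration): the caller owns the buffer;
 * PATH_MAX is a conservative size choice here, not mandated by the
 * API.
 *
 *	char *buf = kmalloc(PATH_MAX, GFP_KERNEL);
 *	char *p;
 *
 *	if (buf) {
 *		p = task_cgroup_path(tsk, buf, PATH_MAX);
 *		if (p)
 *			pr_info("task is in %s\n", p);
 *		kfree(buf);
 *	}
 */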

/* used to track tasks and other necessary states during migration */
struct cgroup_taskset {
	/* the src and dst cset list running through cset->mg_node */
	struct list_head	src_csets;
	struct list_head	dst_csets;

	/*
	 * Fields for cgroup_taskset_*() iteration.
	 *
	 * Before migration is committed, the target migration tasks are on
	 * ->mg_tasks of the csets on ->src_csets.  After, on ->mg_tasks of
	 * the csets on ->dst_csets.  ->csets point to either ->src_csets
	 * or ->dst_csets depending on whether migration is committed.
	 *
	 * ->cur_csets and ->cur_task point to the current task position
	 * during iteration.
	 */
	struct list_head	*csets;
	struct css_set		*cur_cset;
	struct task_struct	*cur_task;
};

/**
 * cgroup_taskset_first - reset taskset and return the first task
 * @tset: taskset of interest
 *
 * @tset iteration is initialized and the first task is returned.
 */
struct task_struct *cgroup_taskset_first(struct cgroup_taskset *tset)
{
	tset->cur_cset = list_first_entry(tset->csets, struct css_set, mg_node);
	tset->cur_task = NULL;

	return cgroup_taskset_next(tset);
}

/**
 * cgroup_taskset_next - iterate to the next task in taskset
 * @tset: taskset of interest
 *
 * Return the next task in @tset.  Iteration must have been initialized
 * with cgroup_taskset_first().
 */
struct task_struct *cgroup_taskset_next(struct cgroup_taskset *tset)
{
	struct css_set *cset = tset->cur_cset;
	struct task_struct *task = tset->cur_task;

	while (&cset->mg_node != tset->csets) {
		if (!task)
			task = list_first_entry(&cset->mg_tasks,
						struct task_struct, cg_list);
		else
			task = list_next_entry(task, cg_list);

		if (&task->cg_list != &cset->mg_tasks) {
			tset->cur_cset = cset;
			tset->cur_task = task;
			return task;
		}

		cset = list_next_entry(cset, mg_node);
		task = NULL;
	}

	return NULL;
}

1738
/**
B
Ben Blum 已提交
1739
 * cgroup_task_migrate - move a task from one cgroup to another.
1740 1741 1742
 * @old_cgrp; the cgroup @tsk is being migrated from
 * @tsk: the task being migrated
 * @new_cset: the new css_set @tsk is being attached to
B
Ben Blum 已提交
1743
 *
1744
 * Must be called with cgroup_mutex, threadgroup and css_set_rwsem locked.
B
Ben Blum 已提交
1745
 */
1746 1747 1748
static void cgroup_task_migrate(struct cgroup *old_cgrp,
				struct task_struct *tsk,
				struct css_set *new_cset)
B
Ben Blum 已提交
1749
{
1750
	struct css_set *old_cset;
B
Ben Blum 已提交
1751

1752 1753 1754
	lockdep_assert_held(&cgroup_mutex);
	lockdep_assert_held(&css_set_rwsem);

B
Ben Blum 已提交
1755
	/*
1756 1757 1758
	 * We are synchronized through threadgroup_lock() against PF_EXITING
	 * setting such that we can't race against cgroup_exit() changing the
	 * css_set to init_css_set and dropping the old one.
B
Ben Blum 已提交
1759
	 */
1760
	WARN_ON_ONCE(tsk->flags & PF_EXITING);
1761
	old_cset = task_css_set(tsk);
B
Ben Blum 已提交
1762

1763
	get_css_set(new_cset);
1764
	rcu_assign_pointer(tsk->cgroups, new_cset);
B
Ben Blum 已提交
1765

1766 1767 1768 1769 1770 1771 1772
	/*
	 * Use move_tail so that cgroup_taskset_first() still returns the
	 * leader after migration.  This works because cgroup_migrate()
	 * ensures that the dst_cset of the leader is the first on the
	 * tset's dst_csets list.
	 */
	list_move_tail(&tsk->cg_list, &new_cset->mg_tasks);
B
Ben Blum 已提交
1773 1774

	/*
1775 1776 1777
	 * We just gained a reference on old_cset by taking it from the
	 * task. As trading it for new_cset is protected by cgroup_mutex,
	 * we're safe to drop it here; it will be freed under RCU.
B
Ben Blum 已提交
1778
	 */
1779
	set_bit(CGRP_RELEASABLE, &old_cgrp->flags);
1780
	put_css_set_locked(old_cset, false);
B
Ben Blum 已提交
1781 1782
}

L
Li Zefan 已提交
1783
/**
1784 1785
 * cgroup_migrate_finish - cleanup after attach
 * @preloaded_csets: list of preloaded css_sets
B
Ben Blum 已提交
1786
 *
1787 1788
 * Undo cgroup_migrate_add_src() and cgroup_migrate_prepare_dst().  See
 * those functions for details.
B
Ben Blum 已提交
1789
 */
1790
static void cgroup_migrate_finish(struct list_head *preloaded_csets)
B
Ben Blum 已提交
1791
{
1792
	struct css_set *cset, *tmp_cset;
B
Ben Blum 已提交
1793

1794 1795 1796 1797 1798 1799 1800 1801 1802 1803 1804 1805 1806 1807 1808 1809 1810 1811 1812 1813 1814 1815 1816 1817 1818 1819 1820 1821 1822 1823 1824 1825 1826 1827 1828 1829 1830 1831 1832 1833 1834 1835 1836 1837 1838 1839 1840 1841 1842 1843 1844 1845 1846 1847 1848 1849 1850 1851 1852 1853 1854 1855 1856 1857 1858 1859 1860 1861 1862 1863 1864 1865 1866 1867 1868 1869 1870 1871 1872 1873 1874 1875 1876 1877 1878 1879 1880 1881 1882 1883 1884 1885 1886 1887 1888 1889 1890 1891 1892 1893 1894 1895 1896 1897 1898 1899 1900 1901 1902 1903 1904 1905 1906 1907 1908 1909 1910 1911 1912 1913 1914
	lockdep_assert_held(&cgroup_mutex);

	down_write(&css_set_rwsem);
	list_for_each_entry_safe(cset, tmp_cset, preloaded_csets, mg_preload_node) {
		cset->mg_src_cgrp = NULL;
		cset->mg_dst_cset = NULL;
		list_del_init(&cset->mg_preload_node);
		put_css_set_locked(cset, false);
	}
	up_write(&css_set_rwsem);
}

/**
 * cgroup_migrate_add_src - add a migration source css_set
 * @src_cset: the source css_set to add
 * @dst_cgrp: the destination cgroup
 * @preloaded_csets: list of preloaded css_sets
 *
 * Tasks belonging to @src_cset are about to be migrated to @dst_cgrp.  Pin
 * @src_cset and add it to @preloaded_csets, which should later be cleaned
 * up by cgroup_migrate_finish().
 *
 * This function may be called without holding threadgroup_lock even if the
 * target is a process.  Threads may be created and destroyed but as long
 * as cgroup_mutex is not dropped, no new css_set can be put into play and
 * the preloaded css_sets are guaranteed to cover all migrations.
 */
static void cgroup_migrate_add_src(struct css_set *src_cset,
				   struct cgroup *dst_cgrp,
				   struct list_head *preloaded_csets)
{
	struct cgroup *src_cgrp;

	lockdep_assert_held(&cgroup_mutex);
	lockdep_assert_held(&css_set_rwsem);

	src_cgrp = cset_cgroup_from_root(src_cset, dst_cgrp->root);

	/* nothing to do if this cset already belongs to the cgroup */
	if (src_cgrp == dst_cgrp)
		return;

	if (!list_empty(&src_cset->mg_preload_node))
		return;

	WARN_ON(src_cset->mg_src_cgrp);
	WARN_ON(!list_empty(&src_cset->mg_tasks));
	WARN_ON(!list_empty(&src_cset->mg_node));

	src_cset->mg_src_cgrp = src_cgrp;
	get_css_set(src_cset);
	list_add(&src_cset->mg_preload_node, preloaded_csets);
}

/**
 * cgroup_migrate_prepare_dst - prepare destination css_sets for migration
 * @dst_cgrp: the destination cgroup
 * @preloaded_csets: list of preloaded source css_sets
 *
 * Tasks are about to be moved to @dst_cgrp and all the source css_sets
 * have been preloaded to @preloaded_csets.  This function looks up and
 * pins all destination css_sets, links each to its source, and put them on
 * @preloaded_csets.
 *
 * This function must be called after cgroup_migrate_add_src() has been
 * called on each migration source css_set.  After migration is performed
 * using cgroup_migrate(), cgroup_migrate_finish() must be called on
 * @preloaded_csets.
 */
static int cgroup_migrate_prepare_dst(struct cgroup *dst_cgrp,
				      struct list_head *preloaded_csets)
{
	LIST_HEAD(csets);
	struct css_set *src_cset;

	lockdep_assert_held(&cgroup_mutex);

	/* look up the dst cset for each src cset and link it to src */
	list_for_each_entry(src_cset, preloaded_csets, mg_preload_node) {
		struct css_set *dst_cset;

		dst_cset = find_css_set(src_cset, dst_cgrp);
		if (!dst_cset)
			goto err;

		WARN_ON_ONCE(src_cset->mg_dst_cset || dst_cset->mg_dst_cset);
		src_cset->mg_dst_cset = dst_cset;

		if (list_empty(&dst_cset->mg_preload_node))
			list_add(&dst_cset->mg_preload_node, &csets);
		else
			put_css_set(dst_cset, false);
	}

	list_splice(&csets, preloaded_csets);
	return 0;
err:
	cgroup_migrate_finish(&csets);
	return -ENOMEM;
}

/**
 * cgroup_migrate - migrate a process or task to a cgroup
 * @cgrp: the destination cgroup
 * @leader: the leader of the process or the task to migrate
 * @threadgroup: whether @leader points to the whole process or a single task
 *
 * Migrate a process or task denoted by @leader to @cgrp.  If migrating a
 * process, the caller must be holding threadgroup_lock of @leader.  The
 * caller is also responsible for invoking cgroup_migrate_add_src() and
 * cgroup_migrate_prepare_dst() on the targets before invoking this
 * function and following up with cgroup_migrate_finish().
 *
 * As long as a controller's ->can_attach() doesn't fail, this function is
 * guaranteed to succeed.  This means that, excluding ->can_attach()
 * failure, when migrating multiple targets, the success or failure can be
 * decided for all targets by invoking group_migrate_prepare_dst() before
 * actually starting migrating.
 */
static int cgroup_migrate(struct cgroup *cgrp, struct task_struct *leader,
			  bool threadgroup)
B
Ben Blum 已提交
1915
{
1916 1917 1918 1919 1920
	struct cgroup_taskset tset = {
		.src_csets	= LIST_HEAD_INIT(tset.src_csets),
		.dst_csets	= LIST_HEAD_INIT(tset.dst_csets),
		.csets		= &tset.src_csets,
	};
T
Tejun Heo 已提交
1921
	struct cgroup_subsys_state *css, *failed_css = NULL;
1922 1923 1924
	struct css_set *cset, *tmp_cset;
	struct task_struct *task, *tmp_task;
	int i, ret;
B
Ben Blum 已提交
1925

1926 1927 1928 1929 1930
	/*
	 * Prevent freeing of tasks while we take a snapshot. Tasks that are
	 * already PF_EXITING could be freed from underneath us unless we
	 * take an rcu_read_lock.
	 */
1931
	down_write(&css_set_rwsem);
1932
	rcu_read_lock();
1933
	task = leader;
B
Ben Blum 已提交
1934
	do {
1935 1936
		/* @task either already exited or can't exit until the end */
		if (task->flags & PF_EXITING)
1937
			goto next;
1938

1939 1940
		/* leave @task alone if post_fork() hasn't linked it yet */
		if (list_empty(&task->cg_list))
1941
			goto next;
1942

1943
		cset = task_css_set(task);
1944
		if (!cset->mg_src_cgrp)
1945
			goto next;
1946

1947
		/*
1948 1949
		 * cgroup_taskset_first() must always return the leader.
		 * Take care to avoid disturbing the ordering.
1950
		 */
1951 1952 1953 1954 1955 1956
		list_move_tail(&task->cg_list, &cset->mg_tasks);
		if (list_empty(&cset->mg_node))
			list_add_tail(&cset->mg_node, &tset.src_csets);
		if (list_empty(&cset->mg_dst_cset->mg_node))
			list_move_tail(&cset->mg_dst_cset->mg_node,
				       &tset.dst_csets);
1957
	next:
1958 1959
		if (!threadgroup)
			break;
1960
	} while_each_thread(leader, task);
1961
	rcu_read_unlock();
1962
	up_write(&css_set_rwsem);
B
Ben Blum 已提交
1963

1964
	/* methods shouldn't be called if no task is actually migrating */
1965 1966
	if (list_empty(&tset.src_csets))
		return 0;
1967

1968
	/* check that we can legitimately attach to the cgroup */
T
Tejun Heo 已提交
1969 1970
	for_each_css(css, i, cgrp) {
		if (css->ss->can_attach) {
1971 1972
			ret = css->ss->can_attach(css, &tset);
			if (ret) {
T
Tejun Heo 已提交
1973
				failed_css = css;
B
Ben Blum 已提交
1974 1975 1976 1977 1978 1979
				goto out_cancel_attach;
			}
		}
	}

	/*
1980 1981 1982
	 * Now that we're guaranteed success, proceed to move all tasks to
	 * the new cgroup.  There are no failure cases after here, so this
	 * is the commit point.
B
Ben Blum 已提交
1983
	 */
1984
	down_write(&css_set_rwsem);
1985 1986 1987 1988
	list_for_each_entry(cset, &tset.src_csets, mg_node) {
		list_for_each_entry_safe(task, tmp_task, &cset->mg_tasks, cg_list)
			cgroup_task_migrate(cset->mg_src_cgrp, task,
					    cset->mg_dst_cset);
B
Ben Blum 已提交
1989
	}
1990
	up_write(&css_set_rwsem);
B
Ben Blum 已提交
1991 1992

	/*
1993 1994 1995
	 * Migration is committed, all target tasks are now on dst_csets.
	 * Nothing is sensitive to fork() after this point.  Notify
	 * controllers that migration is complete.
B
Ben Blum 已提交
1996
	 */
1997
	tset.csets = &tset.dst_csets;
B
Ben Blum 已提交
1998

T
Tejun Heo 已提交
1999 2000 2001
	for_each_css(css, i, cgrp)
		if (css->ss->attach)
			css->ss->attach(css, &tset);
B
Ben Blum 已提交
2002

2003
	ret = 0;
2004 2005
	goto out_release_tset;

B
Ben Blum 已提交
2006
out_cancel_attach:
2007 2008 2009 2010 2011
	for_each_css(css, i, cgrp) {
		if (css == failed_css)
			break;
		if (css->ss->cancel_attach)
			css->ss->cancel_attach(css, &tset);
B
Ben Blum 已提交
2012
	}
2013 2014 2015 2016
out_release_tset:
	down_write(&css_set_rwsem);
	list_splice_init(&tset.dst_csets, &tset.src_csets);
	list_for_each_entry_safe(cset, tmp_cset, &tset.src_csets, mg_node) {
2017
		list_splice_tail_init(&cset->mg_tasks, &cset->tasks);
2018 2019 2020
		list_del_init(&cset->mg_node);
	}
	up_write(&css_set_rwsem);
2021
	return ret;
B
Ben Blum 已提交
2022 2023
}

2024 2025 2026 2027 2028 2029
/**
 * cgroup_attach_task - attach a task or a whole threadgroup to a cgroup
 * @dst_cgrp: the cgroup to attach to
 * @leader: the task or the leader of the threadgroup to be attached
 * @threadgroup: attach the whole threadgroup?
 *
2030
 * Call holding cgroup_mutex and threadgroup_lock of @leader.
2031 2032 2033 2034 2035 2036 2037 2038 2039 2040 2041 2042 2043 2044 2045 2046 2047 2048 2049 2050 2051 2052 2053 2054 2055 2056 2057 2058
 */
static int cgroup_attach_task(struct cgroup *dst_cgrp,
			      struct task_struct *leader, bool threadgroup)
{
	LIST_HEAD(preloaded_csets);
	struct task_struct *task;
	int ret;

	/* look up all src csets */
	down_read(&css_set_rwsem);
	rcu_read_lock();
	task = leader;
	do {
		cgroup_migrate_add_src(task_css_set(task), dst_cgrp,
				       &preloaded_csets);
		if (!threadgroup)
			break;
	} while_each_thread(leader, task);
	rcu_read_unlock();
	up_read(&css_set_rwsem);

	/* prepare dst csets and commit */
	ret = cgroup_migrate_prepare_dst(dst_cgrp, &preloaded_csets);
	if (!ret)
		ret = cgroup_migrate(dst_cgrp, leader, threadgroup);

	cgroup_migrate_finish(&preloaded_csets);
	return ret;
B
Ben Blum 已提交
2059 2060 2061 2062
}

/*
 * Find the task_struct of the task to attach by vpid and pass it along to the
2063
 * function to attach either it or all tasks in its threadgroup. Will lock
2064
 * cgroup_mutex and threadgroup.
2065
 */
B
Ben Blum 已提交
2066
static int attach_task_by_pid(struct cgroup *cgrp, u64 pid, bool threadgroup)
2067 2068
{
	struct task_struct *tsk;
2069
	const struct cred *cred = current_cred(), *tcred;
2070 2071
	int ret;

B
Ben Blum 已提交
2072 2073 2074
	if (!cgroup_lock_live_group(cgrp))
		return -ENODEV;

2075 2076
retry_find_task:
	rcu_read_lock();
2077
	if (pid) {
2078
		tsk = find_task_by_vpid(pid);
B
Ben Blum 已提交
2079 2080
		if (!tsk) {
			rcu_read_unlock();
S
SeongJae Park 已提交
2081
			ret = -ESRCH;
2082
			goto out_unlock_cgroup;
2083
		}
B
Ben Blum 已提交
2084 2085 2086 2087
		/*
		 * even if we're attaching all tasks in the thread group, we
		 * only need to check permissions on one of them.
		 */
2088
		tcred = __task_cred(tsk);
2089 2090 2091
		if (!uid_eq(cred->euid, GLOBAL_ROOT_UID) &&
		    !uid_eq(cred->euid, tcred->uid) &&
		    !uid_eq(cred->euid, tcred->suid)) {
2092
			rcu_read_unlock();
2093 2094
			ret = -EACCES;
			goto out_unlock_cgroup;
2095
		}
2096 2097
	} else
		tsk = current;
2098 2099

	if (threadgroup)
2100
		tsk = tsk->group_leader;
2101 2102

	/*
2103
	 * Workqueue threads may acquire PF_NO_SETAFFINITY and become
2104 2105 2106
	 * trapped in a cpuset, or RT worker may be born in a cgroup
	 * with no rt_runtime allocated.  Just say no.
	 */
2107
	if (tsk == kthreadd_task || (tsk->flags & PF_NO_SETAFFINITY)) {
2108 2109 2110 2111 2112
		ret = -EINVAL;
		rcu_read_unlock();
		goto out_unlock_cgroup;
	}

2113 2114 2115 2116 2117 2118 2119 2120 2121 2122 2123 2124 2125 2126 2127 2128 2129
	get_task_struct(tsk);
	rcu_read_unlock();

	threadgroup_lock(tsk);
	if (threadgroup) {
		if (!thread_group_leader(tsk)) {
			/*
			 * a race with de_thread from another thread's exec()
			 * may strip us of our leadership, if this happens,
			 * there is no choice but to throw this task away and
			 * try again; this is
			 * "double-double-toil-and-trouble-check locking".
			 */
			threadgroup_unlock(tsk);
			put_task_struct(tsk);
			goto retry_find_task;
		}
2130 2131 2132 2133
	}

	ret = cgroup_attach_task(cgrp, tsk, threadgroup);

2134 2135
	threadgroup_unlock(tsk);

2136
	put_task_struct(tsk);
2137
out_unlock_cgroup:
T
Tejun Heo 已提交
2138
	mutex_unlock(&cgroup_mutex);
2139 2140 2141
	return ret;
}

2142 2143 2144 2145 2146 2147 2148
/**
 * cgroup_attach_task_all - attach task 'tsk' to all cgroups of task 'from'
 * @from: attach to all cgroups of a given task
 * @tsk: the task to be attached
 */
int cgroup_attach_task_all(struct task_struct *from, struct task_struct *tsk)
{
2149
	struct cgroup_root *root;
2150 2151
	int retval = 0;

T
Tejun Heo 已提交
2152
	mutex_lock(&cgroup_mutex);
2153
	for_each_root(root) {
2154 2155
		struct cgroup *from_cgrp;

2156
		if (root == &cgrp_dfl_root)
2157 2158
			continue;

2159 2160 2161
		down_read(&css_set_rwsem);
		from_cgrp = task_cgroup_from_root(from, root);
		up_read(&css_set_rwsem);
2162

L
Li Zefan 已提交
2163
		retval = cgroup_attach_task(from_cgrp, tsk, false);
2164 2165 2166
		if (retval)
			break;
	}
T
Tejun Heo 已提交
2167
	mutex_unlock(&cgroup_mutex);
2168 2169 2170 2171 2172

	return retval;
}
EXPORT_SYMBOL_GPL(cgroup_attach_task_all);

2173 2174
static int cgroup_tasks_write(struct cgroup_subsys_state *css,
			      struct cftype *cft, u64 pid)
B
Ben Blum 已提交
2175
{
2176
	return attach_task_by_pid(css->cgroup, pid, false);
B
Ben Blum 已提交
2177 2178
}

2179 2180
static int cgroup_procs_write(struct cgroup_subsys_state *css,
			      struct cftype *cft, u64 tgid)
2181
{
2182
	return attach_task_by_pid(css->cgroup, tgid, true);
2183 2184
}

2185
static int cgroup_release_agent_write(struct cgroup_subsys_state *css,
2186
				      struct cftype *cft, char *buffer)
2187
{
2188
	struct cgroup_root *root = css->cgroup->root;
2189 2190

	BUILD_BUG_ON(sizeof(root->release_agent_path) < PATH_MAX);
2191
	if (!cgroup_lock_live_group(css->cgroup))
2192
		return -ENODEV;
2193
	spin_lock(&release_agent_path_lock);
2194 2195
	strlcpy(root->release_agent_path, buffer,
		sizeof(root->release_agent_path));
2196
	spin_unlock(&release_agent_path_lock);
T
Tejun Heo 已提交
2197
	mutex_unlock(&cgroup_mutex);
2198 2199 2200
	return 0;
}

2201
static int cgroup_release_agent_show(struct seq_file *seq, void *v)
2202
{
2203
	struct cgroup *cgrp = seq_css(seq)->cgroup;
2204

2205 2206 2207 2208
	if (!cgroup_lock_live_group(cgrp))
		return -ENODEV;
	seq_puts(seq, cgrp->root->release_agent_path);
	seq_putc(seq, '\n');
T
Tejun Heo 已提交
2209
	mutex_unlock(&cgroup_mutex);
2210 2211 2212
	return 0;
}

2213
static int cgroup_sane_behavior_show(struct seq_file *seq, void *v)
2214
{
2215 2216 2217
	struct cgroup *cgrp = seq_css(seq)->cgroup;

	seq_printf(seq, "%d\n", cgroup_sane_behavior(cgrp));
2218 2219 2220
	return 0;
}

T
Tejun Heo 已提交
2221 2222
static ssize_t cgroup_file_write(struct kernfs_open_file *of, char *buf,
				 size_t nbytes, loff_t off)
2223
{
T
Tejun Heo 已提交
2224 2225 2226
	struct cgroup *cgrp = of->kn->parent->priv;
	struct cftype *cft = of->kn->priv;
	struct cgroup_subsys_state *css;
2227
	int ret;
2228

T
Tejun Heo 已提交
2229 2230 2231 2232 2233 2234 2235 2236 2237
	/*
	 * kernfs guarantees that a file isn't deleted with operations in
	 * flight, which means that the matching css is and stays alive and
	 * doesn't need to be pinned.  The RCU locking is not necessary
	 * either.  It's just for the convenience of using cgroup_css().
	 */
	rcu_read_lock();
	css = cgroup_css(cgrp, cft->ss);
	rcu_read_unlock();
2238 2239 2240 2241 2242 2243 2244 2245 2246 2247 2248 2249 2250 2251 2252

	if (cft->write_string) {
		ret = cft->write_string(css, cft, strstrip(buf));
	} else if (cft->write_u64) {
		unsigned long long v;
		ret = kstrtoull(buf, 0, &v);
		if (!ret)
			ret = cft->write_u64(css, cft, v);
	} else if (cft->write_s64) {
		long long v;
		ret = kstrtoll(buf, 0, &v);
		if (!ret)
			ret = cft->write_s64(css, cft, v);
	} else if (cft->trigger) {
		ret = cft->trigger(css, (unsigned int)cft->private);
2253
	} else {
2254
		ret = -EINVAL;
2255
	}
T
Tejun Heo 已提交
2256

2257
	return ret ?: nbytes;
2258 2259
}

2260
static void *cgroup_seqfile_start(struct seq_file *seq, loff_t *ppos)
2261
{
T
Tejun Heo 已提交
2262
	return seq_cft(seq)->seq_start(seq, ppos);
2263 2264
}

2265
static void *cgroup_seqfile_next(struct seq_file *seq, void *v, loff_t *ppos)
2266
{
T
Tejun Heo 已提交
2267
	return seq_cft(seq)->seq_next(seq, v, ppos);
2268 2269
}

2270
static void cgroup_seqfile_stop(struct seq_file *seq, void *v)
2271
{
T
Tejun Heo 已提交
2272
	seq_cft(seq)->seq_stop(seq, v);
2273 2274
}

2275
static int cgroup_seqfile_show(struct seq_file *m, void *arg)
2276
{
2277 2278
	struct cftype *cft = seq_cft(m);
	struct cgroup_subsys_state *css = seq_css(m);
2279

2280 2281
	if (cft->seq_show)
		return cft->seq_show(m, arg);
2282

2283
	if (cft->read_u64)
2284 2285 2286 2287 2288 2289
		seq_printf(m, "%llu\n", cft->read_u64(css, cft));
	else if (cft->read_s64)
		seq_printf(m, "%lld\n", cft->read_s64(css, cft));
	else
		return -EINVAL;
	return 0;
2290 2291
}

T
Tejun Heo 已提交
2292 2293 2294 2295
static struct kernfs_ops cgroup_kf_single_ops = {
	.atomic_write_len	= PAGE_SIZE,
	.write			= cgroup_file_write,
	.seq_show		= cgroup_seqfile_show,
2296 2297
};

T
Tejun Heo 已提交
2298 2299 2300 2301 2302 2303 2304 2305
static struct kernfs_ops cgroup_kf_ops = {
	.atomic_write_len	= PAGE_SIZE,
	.write			= cgroup_file_write,
	.seq_start		= cgroup_seqfile_start,
	.seq_next		= cgroup_seqfile_next,
	.seq_stop		= cgroup_seqfile_stop,
	.seq_show		= cgroup_seqfile_show,
};
2306 2307 2308 2309

/*
 * cgroup_rename - Only allow simple rename of directories in place.
 */
T
Tejun Heo 已提交
2310 2311
static int cgroup_rename(struct kernfs_node *kn, struct kernfs_node *new_parent,
			 const char *new_name_str)
2312
{
T
Tejun Heo 已提交
2313
	struct cgroup *cgrp = kn->priv;
2314 2315
	int ret;

T
Tejun Heo 已提交
2316
	if (kernfs_type(kn) != KERNFS_DIR)
2317
		return -ENOTDIR;
T
Tejun Heo 已提交
2318
	if (kn->parent != new_parent)
2319
		return -EIO;
2320

2321 2322 2323 2324 2325 2326
	/*
	 * This isn't a proper migration and its usefulness is very
	 * limited.  Disallow if sane_behavior.
	 */
	if (cgroup_sane_behavior(cgrp))
		return -EPERM;
L
Li Zefan 已提交
2327

2328 2329 2330 2331 2332 2333 2334
	/*
	 * We're gonna grab cgroup_tree_mutex which nests outside kernfs
	 * active_ref.  kernfs_rename() doesn't require active_ref
	 * protection.  Break them before grabbing cgroup_tree_mutex.
	 */
	kernfs_break_active_protection(new_parent);
	kernfs_break_active_protection(kn);
L
Li Zefan 已提交
2335

T
Tejun Heo 已提交
2336 2337
	mutex_lock(&cgroup_tree_mutex);
	mutex_lock(&cgroup_mutex);
L
Li Zefan 已提交
2338

T
Tejun Heo 已提交
2339
	ret = kernfs_rename(kn, new_parent, new_name_str);
L
Li Zefan 已提交
2340

T
Tejun Heo 已提交
2341 2342
	mutex_unlock(&cgroup_mutex);
	mutex_unlock(&cgroup_tree_mutex);
2343 2344 2345

	kernfs_unbreak_active_protection(kn);
	kernfs_unbreak_active_protection(new_parent);
T
Tejun Heo 已提交
2346
	return ret;
L
Li Zefan 已提交
2347 2348
}

2349
static int cgroup_add_file(struct cgroup *cgrp, struct cftype *cft)
2350
{
T
Tejun Heo 已提交
2351
	char name[CGROUP_FILE_NAME_MAX];
T
Tejun Heo 已提交
2352 2353
	struct kernfs_node *kn;
	struct lock_class_key *key = NULL;
T
Tejun Heo 已提交
2354

T
Tejun Heo 已提交
2355 2356 2357 2358 2359 2360
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	key = &cft->lockdep_key;
#endif
	kn = __kernfs_create_file(cgrp->kn, cgroup_file_name(cgrp, cft, name),
				  cgroup_file_mode(cft), 0, cft->kf_ops, cft,
				  NULL, false, key);
F
Fengguang Wu 已提交
2361
	return PTR_ERR_OR_ZERO(kn);
2362 2363
}

2364 2365 2366 2367 2368 2369 2370
/**
 * cgroup_addrm_files - add or remove files to a cgroup directory
 * @cgrp: the target cgroup
 * @cfts: array of cftypes to be added
 * @is_add: whether to add or remove
 *
 * Depending on @is_add, add or remove files defined by @cfts on @cgrp.
2371 2372 2373
 * For removals, this function never fails.  If addition fails, this
 * function doesn't remove files already added.  The caller is responsible
 * for cleaning up.
2374
 */
2375 2376
static int cgroup_addrm_files(struct cgroup *cgrp, struct cftype cfts[],
			      bool is_add)
2377
{
A
Aristeu Rozanski 已提交
2378
	struct cftype *cft;
2379 2380
	int ret;

T
Tejun Heo 已提交
2381
	lockdep_assert_held(&cgroup_tree_mutex);
T
Tejun Heo 已提交
2382 2383

	for (cft = cfts; cft->name[0] != '\0'; cft++) {
2384
		/* does cft->flags tell us to skip this file on @cgrp? */
T
Tejun Heo 已提交
2385 2386
		if ((cft->flags & CFTYPE_ONLY_ON_DFL) && !cgroup_on_dfl(cgrp))
			continue;
2387 2388
		if ((cft->flags & CFTYPE_INSANE) && cgroup_sane_behavior(cgrp))
			continue;
2389 2390 2391 2392 2393
		if ((cft->flags & CFTYPE_NOT_ON_ROOT) && !cgrp->parent)
			continue;
		if ((cft->flags & CFTYPE_ONLY_ON_ROOT) && cgrp->parent)
			continue;

2394
		if (is_add) {
2395
			ret = cgroup_add_file(cgrp, cft);
2396
			if (ret) {
2397
				pr_warn("cgroup_addrm_files: failed to add %s, err=%d\n",
2398 2399 2400
					cft->name, ret);
				return ret;
			}
2401 2402
		} else {
			cgroup_rm_file(cgrp, cft);
T
Tejun Heo 已提交
2403
		}
2404
	}
2405
	return 0;
2406 2407
}

2408
static int cgroup_apply_cftypes(struct cftype *cfts, bool is_add)
2409 2410
{
	LIST_HEAD(pending);
2411
	struct cgroup_subsys *ss = cfts[0].ss;
2412
	struct cgroup *root = &ss->root->cgrp;
2413
	struct cgroup_subsys_state *css;
2414
	int ret = 0;
2415

2416
	lockdep_assert_held(&cgroup_tree_mutex);
2417

2418
	/* don't bother if @ss isn't attached */
2419
	if (ss->root == &cgrp_dfl_root)
2420
		return 0;
2421 2422

	/* add/rm files for all cgroups created before */
2423
	css_for_each_descendant_pre(css, cgroup_css(root, ss)) {
2424 2425
		struct cgroup *cgrp = css->cgroup;

2426 2427 2428
		if (cgroup_is_dead(cgrp))
			continue;

2429
		ret = cgroup_addrm_files(cgrp, cfts, is_add);
2430 2431
		if (ret)
			break;
2432
	}
2433 2434 2435

	if (is_add && !ret)
		kernfs_activate(root->kn);
2436
	return ret;
2437 2438
}

2439
static void cgroup_exit_cftypes(struct cftype *cfts)
2440
{
2441
	struct cftype *cft;
2442

T
Tejun Heo 已提交
2443 2444 2445 2446 2447
	for (cft = cfts; cft->name[0] != '\0'; cft++) {
		/* free copy for custom atomic_write_len, see init_cftypes() */
		if (cft->max_write_len && cft->max_write_len != PAGE_SIZE)
			kfree(cft->kf_ops);
		cft->kf_ops = NULL;
2448
		cft->ss = NULL;
T
Tejun Heo 已提交
2449
	}
2450 2451
}

T
Tejun Heo 已提交
2452
static int cgroup_init_cftypes(struct cgroup_subsys *ss, struct cftype *cfts)
2453 2454 2455
{
	struct cftype *cft;

T
Tejun Heo 已提交
2456 2457 2458
	for (cft = cfts; cft->name[0] != '\0'; cft++) {
		struct kernfs_ops *kf_ops;

T
Tejun Heo 已提交
2459 2460
		WARN_ON(cft->ss || cft->kf_ops);

T
Tejun Heo 已提交
2461 2462 2463 2464 2465 2466 2467 2468 2469 2470 2471 2472 2473 2474 2475 2476 2477
		if (cft->seq_start)
			kf_ops = &cgroup_kf_ops;
		else
			kf_ops = &cgroup_kf_single_ops;

		/*
		 * Ugh... if @cft wants a custom max_write_len, we need to
		 * make a copy of kf_ops to set its atomic_write_len.
		 */
		if (cft->max_write_len && cft->max_write_len != PAGE_SIZE) {
			kf_ops = kmemdup(kf_ops, sizeof(*kf_ops), GFP_KERNEL);
			if (!kf_ops) {
				cgroup_exit_cftypes(cfts);
				return -ENOMEM;
			}
			kf_ops->atomic_write_len = cft->max_write_len;
		}
2478

T
Tejun Heo 已提交
2479
		cft->kf_ops = kf_ops;
2480
		cft->ss = ss;
T
Tejun Heo 已提交
2481
	}
2482

T
Tejun Heo 已提交
2483
	return 0;
2484 2485
}

2486 2487 2488 2489 2490 2491 2492 2493 2494 2495 2496
static int cgroup_rm_cftypes_locked(struct cftype *cfts)
{
	lockdep_assert_held(&cgroup_tree_mutex);

	if (!cfts || !cfts[0].ss)
		return -ENOENT;

	list_del(&cfts->node);
	cgroup_apply_cftypes(cfts, false);
	cgroup_exit_cftypes(cfts);
	return 0;
2497 2498
}

2499 2500 2501 2502
/**
 * cgroup_rm_cftypes - remove an array of cftypes from a subsystem
 * @cfts: zero-length name terminated array of cftypes
 *
2503 2504 2505
 * Unregister @cfts.  Files described by @cfts are removed from all
 * existing cgroups and all future cgroups won't have them either.  This
 * function can be called anytime whether @cfts' subsys is attached or not.
2506 2507
 *
 * Returns 0 on successful unregistration, -ENOENT if @cfts is not
2508
 * registered.
2509
 */
2510
int cgroup_rm_cftypes(struct cftype *cfts)
2511
{
2512
	int ret;
2513

2514 2515 2516 2517
	mutex_lock(&cgroup_tree_mutex);
	ret = cgroup_rm_cftypes_locked(cfts);
	mutex_unlock(&cgroup_tree_mutex);
	return ret;
T
Tejun Heo 已提交
2518 2519
}

2520 2521 2522 2523 2524 2525 2526 2527 2528 2529 2530 2531 2532 2533
/**
 * cgroup_add_cftypes - add an array of cftypes to a subsystem
 * @ss: target cgroup subsystem
 * @cfts: zero-length name terminated array of cftypes
 *
 * Register @cfts to @ss.  Files described by @cfts are created for all
 * existing cgroups to which @ss is attached and all future cgroups will
 * have them too.  This function can be called anytime whether @ss is
 * attached or not.
 *
 * Returns 0 on successful registration, -errno on failure.  Note that this
 * function currently returns 0 as long as @cfts registration is successful
 * even if some file creation attempts on existing cgroups fail.
 */
A
Aristeu Rozanski 已提交
2534
int cgroup_add_cftypes(struct cgroup_subsys *ss, struct cftype *cfts)
2535
{
2536
	int ret;
2537

2538 2539
	if (!cfts || cfts[0].name[0] == '\0')
		return 0;
2540

T
Tejun Heo 已提交
2541 2542 2543
	ret = cgroup_init_cftypes(ss, cfts);
	if (ret)
		return ret;
2544

2545 2546
	mutex_lock(&cgroup_tree_mutex);

T
Tejun Heo 已提交
2547
	list_add_tail(&cfts->node, &ss->cfts);
2548
	ret = cgroup_apply_cftypes(cfts, true);
2549
	if (ret)
2550
		cgroup_rm_cftypes_locked(cfts);
2551

2552
	mutex_unlock(&cgroup_tree_mutex);
2553
	return ret;
2554 2555
}

L
Li Zefan 已提交
2556 2557 2558 2559 2560 2561
/**
 * cgroup_task_count - count the number of tasks in a cgroup.
 * @cgrp: the cgroup in question
 *
 * Return the number of tasks in the cgroup.
 */
2562
static int cgroup_task_count(const struct cgroup *cgrp)
2563 2564
{
	int count = 0;
2565
	struct cgrp_cset_link *link;
2566

2567
	down_read(&css_set_rwsem);
2568 2569
	list_for_each_entry(link, &cgrp->cset_links, cset_link)
		count += atomic_read(&link->cset->refcount);
2570
	up_read(&css_set_rwsem);
2571 2572 2573
	return count;
}

2574
/**
2575 2576 2577
 * css_next_child - find the next child of a given css
 * @pos_css: the current position (%NULL to initiate traversal)
 * @parent_css: css whose children to walk
2578
 *
2579
 * This function returns the next child of @parent_css and should be called
2580 2581 2582
 * under either cgroup_mutex or RCU read lock.  The only requirement is
 * that @parent_css and @pos_css are accessible.  The next sibling is
 * guaranteed to be returned regardless of their states.
2583
 */
2584 2585 2586
struct cgroup_subsys_state *
css_next_child(struct cgroup_subsys_state *pos_css,
	       struct cgroup_subsys_state *parent_css)
2587
{
2588 2589
	struct cgroup *pos = pos_css ? pos_css->cgroup : NULL;
	struct cgroup *cgrp = parent_css->cgroup;
2590 2591
	struct cgroup *next;

T
Tejun Heo 已提交
2592
	cgroup_assert_mutexes_or_rcu_locked();
2593 2594 2595 2596

	/*
	 * @pos could already have been removed.  Once a cgroup is removed,
	 * its ->sibling.next is no longer updated when its next sibling
2597 2598 2599 2600 2601 2602 2603
	 * changes.  As CGRP_DEAD assertion is serialized and happens
	 * before the cgroup is taken off the ->sibling list, if we see it
	 * unasserted, it's guaranteed that the next sibling hasn't
	 * finished its grace period even if it's already removed, and thus
	 * safe to dereference from this RCU critical section.  If
	 * ->sibling.next is inaccessible, cgroup_is_dead() is guaranteed
	 * to be visible as %true here.
2604 2605 2606 2607 2608 2609 2610 2611
	 *
	 * If @pos is dead, its next pointer can't be dereferenced;
	 * however, as each cgroup is given a monotonically increasing
	 * unique serial number and always appended to the sibling list,
	 * the next one can be found by walking the parent's children until
	 * we see a cgroup with higher serial number than @pos's.  While
	 * this path can be slower, it's taken only when either the current
	 * cgroup is removed or iteration and removal race.
2612
	 */
2613 2614 2615
	if (!pos) {
		next = list_entry_rcu(cgrp->children.next, struct cgroup, sibling);
	} else if (likely(!cgroup_is_dead(pos))) {
2616
		next = list_entry_rcu(pos->sibling.next, struct cgroup, sibling);
2617 2618 2619 2620
	} else {
		list_for_each_entry_rcu(next, &cgrp->children, sibling)
			if (next->serial_nr > pos->serial_nr)
				break;
2621 2622
	}

2623 2624 2625
	if (&next->sibling == &cgrp->children)
		return NULL;

2626
	return cgroup_css(next, parent_css->ss);
2627 2628
}

2629
/**
2630
 * css_next_descendant_pre - find the next descendant for pre-order walk
2631
 * @pos: the current position (%NULL to initiate traversal)
2632
 * @root: css whose descendants to walk
2633
 *
2634
 * To be used by css_for_each_descendant_pre().  Find the next descendant
2635 2636
 * to visit for pre-order traversal of @root's descendants.  @root is
 * included in the iteration and the first node to be visited.
2637
 *
2638 2639 2640 2641
 * While this function requires cgroup_mutex or RCU read locking, it
 * doesn't require the whole traversal to be contained in a single critical
 * section.  This function will return the correct next descendant as long
 * as both @pos and @root are accessible and @pos is a descendant of @root.
2642
 */
2643 2644 2645
struct cgroup_subsys_state *
css_next_descendant_pre(struct cgroup_subsys_state *pos,
			struct cgroup_subsys_state *root)
2646
{
2647
	struct cgroup_subsys_state *next;
2648

T
Tejun Heo 已提交
2649
	cgroup_assert_mutexes_or_rcu_locked();
2650

2651
	/* if first iteration, visit @root */
2652
	if (!pos)
2653
		return root;
2654 2655

	/* visit the first child if exists */
2656
	next = css_next_child(NULL, pos);
2657 2658 2659 2660
	if (next)
		return next;

	/* no child, visit my or the closest ancestor's next sibling */
2661 2662
	while (pos != root) {
		next = css_next_child(pos, css_parent(pos));
2663
		if (next)
2664
			return next;
2665
		pos = css_parent(pos);
2666
	}
2667 2668 2669 2670

	return NULL;
}

2671
/**
2672 2673
 * css_rightmost_descendant - return the rightmost descendant of a css
 * @pos: css of interest
2674
 *
2675 2676
 * Return the rightmost descendant of @pos.  If there's no descendant, @pos
 * is returned.  This can be used during pre-order traversal to skip
2677
 * subtree of @pos.
2678
 *
2679 2680 2681 2682
 * While this function requires cgroup_mutex or RCU read locking, it
 * doesn't require the whole traversal to be contained in a single critical
 * section.  This function will return the correct rightmost descendant as
 * long as @pos is accessible.
2683
 */
2684 2685
struct cgroup_subsys_state *
css_rightmost_descendant(struct cgroup_subsys_state *pos)
2686
{
2687
	struct cgroup_subsys_state *last, *tmp;
2688

T
Tejun Heo 已提交
2689
	cgroup_assert_mutexes_or_rcu_locked();
2690 2691 2692 2693 2694

	do {
		last = pos;
		/* ->prev isn't RCU safe, walk ->next till the end */
		pos = NULL;
2695
		css_for_each_child(tmp, last)
2696 2697 2698 2699 2700 2701
			pos = tmp;
	} while (pos);

	return last;
}

2702 2703
static struct cgroup_subsys_state *
css_leftmost_descendant(struct cgroup_subsys_state *pos)
2704
{
2705
	struct cgroup_subsys_state *last;
2706 2707 2708

	do {
		last = pos;
2709
		pos = css_next_child(NULL, pos);
2710 2711 2712 2713 2714 2715
	} while (pos);

	return last;
}

/**
2716
 * css_next_descendant_post - find the next descendant for post-order walk
2717
 * @pos: the current position (%NULL to initiate traversal)
2718
 * @root: css whose descendants to walk
2719
 *
2720
 * To be used by css_for_each_descendant_post().  Find the next descendant
2721 2722
 * to visit for post-order traversal of @root's descendants.  @root is
 * included in the iteration and the last node to be visited.
2723
 *
2724 2725 2726 2727 2728
 * While this function requires cgroup_mutex or RCU read locking, it
 * doesn't require the whole traversal to be contained in a single critical
 * section.  This function will return the correct next descendant as long
 * as both @pos and @cgroup are accessible and @pos is a descendant of
 * @cgroup.
2729
 */
2730 2731 2732
struct cgroup_subsys_state *
css_next_descendant_post(struct cgroup_subsys_state *pos,
			 struct cgroup_subsys_state *root)
2733
{
2734
	struct cgroup_subsys_state *next;
2735

T
Tejun Heo 已提交
2736
	cgroup_assert_mutexes_or_rcu_locked();
2737

2738 2739 2740
	/* if first iteration, visit leftmost descendant which may be @root */
	if (!pos)
		return css_leftmost_descendant(root);
2741

2742 2743 2744 2745
	/* if we visited @root, we're done */
	if (pos == root)
		return NULL;

2746
	/* if there's an unvisited sibling, visit its leftmost descendant */
2747
	next = css_next_child(pos, css_parent(pos));
2748
	if (next)
2749
		return css_leftmost_descendant(next);
2750 2751

	/* no sibling left, visit parent */
2752
	return css_parent(pos);
2753 2754
}

2755
/**
2756
 * css_advance_task_iter - advance a task itererator to the next css_set
2757 2758 2759
 * @it: the iterator to advance
 *
 * Advance @it to the next css_set to walk.
2760
 */
2761
static void css_advance_task_iter(struct css_task_iter *it)
2762 2763 2764 2765 2766 2767 2768 2769
{
	struct list_head *l = it->cset_link;
	struct cgrp_cset_link *link;
	struct css_set *cset;

	/* Advance to the next non-empty css_set */
	do {
		l = l->next;
2770
		if (l == &it->origin_css->cgroup->cset_links) {
2771 2772 2773 2774 2775
			it->cset_link = NULL;
			return;
		}
		link = list_entry(l, struct cgrp_cset_link, cset_link);
		cset = link->cset;
T
Tejun Heo 已提交
2776 2777
	} while (list_empty(&cset->tasks) && list_empty(&cset->mg_tasks));

2778
	it->cset_link = l;
T
Tejun Heo 已提交
2779 2780 2781 2782 2783

	if (!list_empty(&cset->tasks))
		it->task = cset->tasks.next;
	else
		it->task = cset->mg_tasks.next;
2784 2785
}

2786
/**
2787 2788
 * css_task_iter_start - initiate task iteration
 * @css: the css to walk tasks of
2789 2790
 * @it: the task iterator to use
 *
2791 2792 2793 2794
 * Initiate iteration through the tasks of @css.  The caller can call
 * css_task_iter_next() to walk through the tasks until the function
 * returns NULL.  On completion of iteration, css_task_iter_end() must be
 * called.
2795 2796 2797 2798 2799
 *
 * Note that this function acquires a lock which is released when the
 * iteration finishes.  The caller can't sleep while iteration is in
 * progress.
 */
2800 2801
void css_task_iter_start(struct cgroup_subsys_state *css,
			 struct css_task_iter *it)
2802
	__acquires(css_set_rwsem)
2803
{
2804 2805
	/* no one should try to iterate before mounting cgroups */
	WARN_ON_ONCE(!use_task_css_set_links);
2806

2807
	down_read(&css_set_rwsem);
2808

2809 2810
	it->origin_css = css;
	it->cset_link = &css->cgroup->cset_links;
2811

2812
	css_advance_task_iter(it);
2813 2814
}

2815
/**
2816
 * css_task_iter_next - return the next task for the iterator
2817 2818 2819
 * @it: the task iterator being iterated
 *
 * The "next" function for task iteration.  @it should have been
2820 2821
 * initialized via css_task_iter_start().  Returns NULL when the iteration
 * reaches the end.
2822
 */
2823
struct task_struct *css_task_iter_next(struct css_task_iter *it)
2824 2825 2826
{
	struct task_struct *res;
	struct list_head *l = it->task;
T
Tejun Heo 已提交
2827 2828
	struct cgrp_cset_link *link = list_entry(it->cset_link,
					struct cgrp_cset_link, cset_link);
2829 2830

	/* If the iterator cg is NULL, we have no tasks */
2831
	if (!it->cset_link)
2832 2833
		return NULL;
	res = list_entry(l, struct task_struct, cg_list);
T
Tejun Heo 已提交
2834 2835 2836 2837 2838 2839

	/*
	 * Advance iterator to find next entry.  cset->tasks is consumed
	 * first and then ->mg_tasks.  After ->mg_tasks, we move onto the
	 * next cset.
	 */
2840
	l = l->next;
T
Tejun Heo 已提交
2841 2842 2843 2844 2845

	if (l == &link->cset->tasks)
		l = link->cset->mg_tasks.next;

	if (l == &link->cset->mg_tasks)
2846
		css_advance_task_iter(it);
T
Tejun Heo 已提交
2847
	else
2848
		it->task = l;
T
Tejun Heo 已提交
2849

2850 2851 2852
	return res;
}

2853
/**
2854
 * css_task_iter_end - finish task iteration
2855 2856
 * @it: the task iterator to finish
 *
2857
 * Finish task iteration started by css_task_iter_start().
2858
 */
2859
void css_task_iter_end(struct css_task_iter *it)
2860
	__releases(css_set_rwsem)
2861
{
2862
	up_read(&css_set_rwsem);
2863 2864 2865
}

/**
2866 2867 2868
 * cgroup_trasnsfer_tasks - move tasks from one cgroup to another
 * @to: cgroup to which the tasks will be moved
 * @from: cgroup in which the tasks currently reside
2869
 *
2870 2871 2872 2873 2874
 * Locking rules between cgroup_post_fork() and the migration path
 * guarantee that, if a task is forking while being migrated, the new child
 * is guaranteed to be either visible in the source cgroup after the
 * parent's migration is complete or put into the target cgroup.  No task
 * can slip out of migration through forking.
2875
 */
2876
int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from)
2877
{
2878 2879
	LIST_HEAD(preloaded_csets);
	struct cgrp_cset_link *link;
2880
	struct css_task_iter it;
2881
	struct task_struct *task;
2882
	int ret;
2883

2884
	mutex_lock(&cgroup_mutex);
2885

2886 2887 2888 2889 2890
	/* all tasks in @from are being moved, all csets are source */
	down_read(&css_set_rwsem);
	list_for_each_entry(link, &from->cset_links, cset_link)
		cgroup_migrate_add_src(link->cset, to, &preloaded_csets);
	up_read(&css_set_rwsem);
2891

2892 2893 2894
	ret = cgroup_migrate_prepare_dst(to, &preloaded_csets);
	if (ret)
		goto out_err;
2895

2896 2897 2898 2899
	/*
	 * Migrate tasks one-by-one until @form is empty.  This fails iff
	 * ->can_attach() fails.
	 */
2900 2901 2902 2903 2904 2905 2906 2907
	do {
		css_task_iter_start(&from->dummy_css, &it);
		task = css_task_iter_next(&it);
		if (task)
			get_task_struct(task);
		css_task_iter_end(&it);

		if (task) {
2908
			ret = cgroup_migrate(to, task, false);
2909 2910 2911
			put_task_struct(task);
		}
	} while (task && !ret);
2912 2913
out_err:
	cgroup_migrate_finish(&preloaded_csets);
T
Tejun Heo 已提交
2914
	mutex_unlock(&cgroup_mutex);
2915
	return ret;
2916 2917
}

2918
/*
2919
 * Stuff for reading the 'tasks'/'procs' files.
2920 2921 2922 2923 2924 2925 2926 2927
 *
 * Reading this file can return large amounts of data if a cgroup has
 * *lots* of attached tasks. So it may need several calls to read(),
 * but we cannot guarantee that the information we produce is correct
 * unless we produce it entirely atomically.
 *
 */

2928 2929 2930 2931 2932 2933 2934 2935 2936 2937 2938 2939 2940 2941 2942 2943 2944 2945 2946 2947 2948 2949 2950 2951 2952 2953
/* which pidlist file are we talking about? */
enum cgroup_filetype {
	CGROUP_FILE_PROCS,
	CGROUP_FILE_TASKS,
};

/*
 * A pidlist is a list of pids that virtually represents the contents of one
 * of the cgroup files ("procs" or "tasks"). We keep a list of such pidlists,
 * a pair (one each for procs, tasks) for each pid namespace that's relevant
 * to the cgroup.
 */
struct cgroup_pidlist {
	/*
	 * used to find which pidlist is wanted. doesn't change as long as
	 * this particular list stays in the list.
	*/
	struct { enum cgroup_filetype type; struct pid_namespace *ns; } key;
	/* array of xids */
	pid_t *list;
	/* how many elements the above list has */
	int length;
	/* each of these stored in a list by its cgroup */
	struct list_head links;
	/* pointer to the cgroup we belong to, for list removal purposes */
	struct cgroup *owner;
2954 2955
	/* for delayed destruction */
	struct delayed_work destroy_dwork;
2956 2957
};

2958 2959 2960 2961 2962 2963 2964 2965 2966 2967 2968 2969 2970
/*
 * The following two functions "fix" the issue where there are more pids
 * than kmalloc will give memory for; in such cases, we use vmalloc/vfree.
 * TODO: replace with a kernel-wide solution to this problem
 */
#define PIDLIST_TOO_LARGE(c) ((c) * sizeof(pid_t) > (PAGE_SIZE * 2))
static void *pidlist_allocate(int count)
{
	if (PIDLIST_TOO_LARGE(count))
		return vmalloc(count * sizeof(pid_t));
	else
		return kmalloc(count * sizeof(pid_t), GFP_KERNEL);
}
2971

2972 2973 2974 2975 2976 2977 2978 2979
static void pidlist_free(void *p)
{
	if (is_vmalloc_addr(p))
		vfree(p);
	else
		kfree(p);
}

2980 2981 2982 2983 2984 2985 2986 2987 2988 2989 2990 2991 2992 2993 2994 2995 2996 2997 2998 2999 3000 3001 3002 3003 3004 3005 3006
/*
 * Used to destroy all pidlists lingering waiting for destroy timer.  None
 * should be left afterwards.
 */
static void cgroup_pidlist_destroy_all(struct cgroup *cgrp)
{
	struct cgroup_pidlist *l, *tmp_l;

	mutex_lock(&cgrp->pidlist_mutex);
	list_for_each_entry_safe(l, tmp_l, &cgrp->pidlists, links)
		mod_delayed_work(cgroup_pidlist_destroy_wq, &l->destroy_dwork, 0);
	mutex_unlock(&cgrp->pidlist_mutex);

	flush_workqueue(cgroup_pidlist_destroy_wq);
	BUG_ON(!list_empty(&cgrp->pidlists));
}

static void cgroup_pidlist_destroy_work_fn(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct cgroup_pidlist *l = container_of(dwork, struct cgroup_pidlist,
						destroy_dwork);
	struct cgroup_pidlist *tofree = NULL;

	mutex_lock(&l->owner->pidlist_mutex);

	/*
3007 3008
	 * Destroy iff we didn't get queued again.  The state won't change
	 * as destroy_dwork can only be queued while locked.
3009
	 */
3010
	if (!delayed_work_pending(dwork)) {
3011 3012 3013 3014 3015 3016 3017 3018 3019 3020
		list_del(&l->links);
		pidlist_free(l->list);
		put_pid_ns(l->key.ns);
		tofree = l;
	}

	mutex_unlock(&l->owner->pidlist_mutex);
	kfree(tofree);
}

3021
/*
3022
 * pidlist_uniq - given a kmalloc()ed list, strip out all duplicate entries
3023
 * Returns the number of unique elements.
3024
 */
3025
static int pidlist_uniq(pid_t *list, int length)
3026
{
3027 3028 3029 3030 3031 3032 3033 3034 3035 3036 3037 3038 3039 3040 3041 3042 3043 3044 3045 3046 3047 3048 3049 3050
	int src, dest = 1;

	/*
	 * we presume the 0th element is unique, so i starts at 1. trivial
	 * edge cases first; no work needs to be done for either
	 */
	if (length == 0 || length == 1)
		return length;
	/* src and dest walk down the list; dest counts unique elements */
	for (src = 1; src < length; src++) {
		/* find next unique element */
		while (list[src] == list[src-1]) {
			src++;
			if (src == length)
				goto after;
		}
		/* dest always points to where the next unique element goes */
		list[dest] = list[src];
		dest++;
	}
after:
	return dest;
}

3051 3052 3053 3054 3055 3056 3057 3058 3059 3060 3061 3062 3063 3064 3065 3066 3067 3068 3069 3070 3071 3072 3073 3074 3075 3076 3077 3078 3079 3080 3081 3082 3083
/*
 * The two pid files - task and cgroup.procs - guaranteed that the result
 * is sorted, which forced this whole pidlist fiasco.  As pid order is
 * different per namespace, each namespace needs differently sorted list,
 * making it impossible to use, for example, single rbtree of member tasks
 * sorted by task pointer.  As pidlists can be fairly large, allocating one
 * per open file is dangerous, so cgroup had to implement shared pool of
 * pidlists keyed by cgroup and namespace.
 *
 * All this extra complexity was caused by the original implementation
 * committing to an entirely unnecessary property.  In the long term, we
 * want to do away with it.  Explicitly scramble sort order if
 * sane_behavior so that no such expectation exists in the new interface.
 *
 * Scrambling is done by swapping every two consecutive bits, which is
 * non-identity one-to-one mapping which disturbs sort order sufficiently.
 */
static pid_t pid_fry(pid_t pid)
{
	unsigned a = pid & 0x55555555;
	unsigned b = pid & 0xAAAAAAAA;

	return (a << 1) | (b >> 1);
}

static pid_t cgroup_pid_fry(struct cgroup *cgrp, pid_t pid)
{
	if (cgroup_sane_behavior(cgrp))
		return pid_fry(pid);
	else
		return pid;
}

3084 3085 3086 3087 3088
static int cmppid(const void *a, const void *b)
{
	return *(pid_t *)a - *(pid_t *)b;
}

3089 3090 3091 3092 3093
static int fried_cmppid(const void *a, const void *b)
{
	return pid_fry(*(pid_t *)a) - pid_fry(*(pid_t *)b);
}

T
Tejun Heo 已提交
3094 3095 3096 3097 3098 3099 3100 3101 3102 3103 3104 3105 3106 3107 3108
static struct cgroup_pidlist *cgroup_pidlist_find(struct cgroup *cgrp,
						  enum cgroup_filetype type)
{
	struct cgroup_pidlist *l;
	/* don't need task_nsproxy() if we're looking at ourself */
	struct pid_namespace *ns = task_active_pid_ns(current);

	lockdep_assert_held(&cgrp->pidlist_mutex);

	list_for_each_entry(l, &cgrp->pidlists, links)
		if (l->key.type == type && l->key.ns == ns)
			return l;
	return NULL;
}

3109 3110 3111 3112 3113 3114
/*
 * find the appropriate pidlist for our purpose (given procs vs tasks)
 * returns with the lock on that pidlist already held, and takes care
 * of the use count, or returns NULL with no locks held if we're out of
 * memory.
 */
T
Tejun Heo 已提交
3115 3116
static struct cgroup_pidlist *cgroup_pidlist_find_create(struct cgroup *cgrp,
						enum cgroup_filetype type)
3117 3118
{
	struct cgroup_pidlist *l;
3119

T
Tejun Heo 已提交
3120 3121 3122 3123 3124 3125
	lockdep_assert_held(&cgrp->pidlist_mutex);

	l = cgroup_pidlist_find(cgrp, type);
	if (l)
		return l;

3126
	/* entry not found; create a new one */
3127
	l = kzalloc(sizeof(struct cgroup_pidlist), GFP_KERNEL);
T
Tejun Heo 已提交
3128
	if (!l)
3129
		return l;
T
Tejun Heo 已提交
3130

3131
	INIT_DELAYED_WORK(&l->destroy_dwork, cgroup_pidlist_destroy_work_fn);
3132
	l->key.type = type;
T
Tejun Heo 已提交
3133 3134
	/* don't need task_nsproxy() if we're looking at ourself */
	l->key.ns = get_pid_ns(task_active_pid_ns(current));
3135 3136 3137 3138 3139
	l->owner = cgrp;
	list_add(&l->links, &cgrp->pidlists);
	return l;
}

3140 3141 3142
/*
 * Load a cgroup's pidarray with either procs' tgids or tasks' pids
 */
3143 3144
static int pidlist_array_load(struct cgroup *cgrp, enum cgroup_filetype type,
			      struct cgroup_pidlist **lp)
3145 3146 3147 3148
{
	pid_t *array;
	int length;
	int pid, n = 0; /* used for populating the array */
3149
	struct css_task_iter it;
3150
	struct task_struct *tsk;
3151 3152
	struct cgroup_pidlist *l;

3153 3154
	lockdep_assert_held(&cgrp->pidlist_mutex);

3155 3156 3157 3158 3159 3160 3161
	/*
	 * If cgroup gets more users after we read count, we won't have
	 * enough space - tough.  This race is indistinguishable to the
	 * caller from the case that the additional cgroup users didn't
	 * show up until sometime later on.
	 */
	length = cgroup_task_count(cgrp);
3162
	array = pidlist_allocate(length);
3163 3164 3165
	if (!array)
		return -ENOMEM;
	/* now, populate the array */
3166 3167
	css_task_iter_start(&cgrp->dummy_css, &it);
	while ((tsk = css_task_iter_next(&it))) {
3168
		if (unlikely(n == length))
3169
			break;
3170
		/* get tgid or pid for procs or tasks file respectively */
3171 3172 3173 3174
		if (type == CGROUP_FILE_PROCS)
			pid = task_tgid_vnr(tsk);
		else
			pid = task_pid_vnr(tsk);
3175 3176
		if (pid > 0) /* make sure to only use valid results */
			array[n++] = pid;
3177
	}
3178
	css_task_iter_end(&it);
3179 3180
	length = n;
	/* now sort & (if procs) strip out duplicates */
3181 3182 3183 3184
	if (cgroup_sane_behavior(cgrp))
		sort(array, length, sizeof(pid_t), fried_cmppid, NULL);
	else
		sort(array, length, sizeof(pid_t), cmppid, NULL);
3185
	if (type == CGROUP_FILE_PROCS)
3186
		length = pidlist_uniq(array, length);
T
Tejun Heo 已提交
3187 3188

	l = cgroup_pidlist_find_create(cgrp, type);
3189
	if (!l) {
T
Tejun Heo 已提交
3190
		mutex_unlock(&cgrp->pidlist_mutex);
3191
		pidlist_free(array);
3192
		return -ENOMEM;
3193
	}
T
Tejun Heo 已提交
3194 3195

	/* store array, freeing old if necessary */
3196
	pidlist_free(l->list);
3197 3198
	l->list = array;
	l->length = length;
3199
	*lp = l;
3200
	return 0;
3201 3202
}

B
Balbir Singh 已提交
3203
/**
L
Li Zefan 已提交
3204
 * cgroupstats_build - build and fill cgroupstats
B
Balbir Singh 已提交
3205 3206 3207
 * @stats: cgroupstats to fill information into
 * @dentry: A dentry entry belonging to the cgroup for which stats have
 * been requested.
L
Li Zefan 已提交
3208 3209 3210
 *
 * Build and fill cgroupstats so that taskstats can export it to user
 * space.
B
Balbir Singh 已提交
3211 3212 3213
 */
int cgroupstats_build(struct cgroupstats *stats, struct dentry *dentry)
{
T
Tejun Heo 已提交
3214
	struct kernfs_node *kn = kernfs_node_from_dentry(dentry);
3215
	struct cgroup *cgrp;
3216
	struct css_task_iter it;
B
Balbir Singh 已提交
3217
	struct task_struct *tsk;
3218

T
Tejun Heo 已提交
3219 3220 3221 3222 3223
	/* it should be kernfs_node belonging to cgroupfs and is a directory */
	if (dentry->d_sb->s_type != &cgroup_fs_type || !kn ||
	    kernfs_type(kn) != KERNFS_DIR)
		return -EINVAL;

3224 3225
	mutex_lock(&cgroup_mutex);

B
Balbir Singh 已提交
3226
	/*
T
Tejun Heo 已提交
3227 3228 3229
	 * We aren't being called from kernfs and there's no guarantee on
	 * @kn->priv's validity.  For this and css_tryget_from_dir(),
	 * @kn->priv is RCU safe.  Let's do the RCU dancing.
B
Balbir Singh 已提交
3230
	 */
T
Tejun Heo 已提交
3231 3232
	rcu_read_lock();
	cgrp = rcu_dereference(kn->priv);
3233
	if (!cgrp || cgroup_is_dead(cgrp)) {
T
Tejun Heo 已提交
3234
		rcu_read_unlock();
3235
		mutex_unlock(&cgroup_mutex);
T
Tejun Heo 已提交
3236 3237
		return -ENOENT;
	}
3238
	rcu_read_unlock();
B
Balbir Singh 已提交
3239

3240 3241
	css_task_iter_start(&cgrp->dummy_css, &it);
	while ((tsk = css_task_iter_next(&it))) {
B
Balbir Singh 已提交
3242 3243 3244 3245 3246 3247 3248 3249 3250 3251 3252 3253 3254 3255 3256 3257 3258 3259 3260
		switch (tsk->state) {
		case TASK_RUNNING:
			stats->nr_running++;
			break;
		case TASK_INTERRUPTIBLE:
			stats->nr_sleeping++;
			break;
		case TASK_UNINTERRUPTIBLE:
			stats->nr_uninterruptible++;
			break;
		case TASK_STOPPED:
			stats->nr_stopped++;
			break;
		default:
			if (delayacct_is_task_waiting_on_io(tsk))
				stats->nr_io_wait++;
			break;
		}
	}
3261
	css_task_iter_end(&it);
B
Balbir Singh 已提交
3262

3263
	mutex_unlock(&cgroup_mutex);
T
Tejun Heo 已提交
3264
	return 0;
B
Balbir Singh 已提交
3265 3266
}

3267

3268
/*
3269
 * seq_file methods for the tasks/procs files. The seq_file position is the
3270
 * next pid to display; the seq_file iterator is a pointer to the pid
3271
 * in the cgroup->l->list array.
3272
 */
3273

3274
static void *cgroup_pidlist_start(struct seq_file *s, loff_t *pos)
3275
{
3276 3277 3278 3279 3280 3281
	/*
	 * Initially we receive a position value that corresponds to
	 * one more than the last pid shown (or 0 on the first call or
	 * after a seek to the start). Use a binary-search to find the
	 * next pid to display, if any
	 */
T
Tejun Heo 已提交
3282
	struct kernfs_open_file *of = s->private;
3283
	struct cgroup *cgrp = seq_css(s)->cgroup;
3284
	struct cgroup_pidlist *l;
3285
	enum cgroup_filetype type = seq_cft(s)->private;
3286
	int index = 0, pid = *pos;
3287 3288 3289 3290 3291
	int *iter, ret;

	mutex_lock(&cgrp->pidlist_mutex);

	/*
3292
	 * !NULL @of->priv indicates that this isn't the first start()
3293
	 * after open.  If the matching pidlist is around, we can use that.
3294
	 * Look for it.  Note that @of->priv can't be used directly.  It
3295 3296
	 * could already have been destroyed.
	 */
3297 3298
	if (of->priv)
		of->priv = cgroup_pidlist_find(cgrp, type);
3299 3300 3301 3302 3303

	/*
	 * Either this is the first start() after open or the matching
	 * pidlist has been destroyed inbetween.  Create a new one.
	 */
3304 3305 3306
	if (!of->priv) {
		ret = pidlist_array_load(cgrp, type,
					 (struct cgroup_pidlist **)&of->priv);
3307 3308 3309
		if (ret)
			return ERR_PTR(ret);
	}
3310
	l = of->priv;
3311 3312

	if (pid) {
3313
		int end = l->length;
S
Stephen Rothwell 已提交
3314

3315 3316
		while (index < end) {
			int mid = (index + end) / 2;
3317
			if (cgroup_pid_fry(cgrp, l->list[mid]) == pid) {
3318 3319
				index = mid;
				break;
3320
			} else if (cgroup_pid_fry(cgrp, l->list[mid]) <= pid)
3321 3322 3323 3324 3325 3326
				index = mid + 1;
			else
				end = mid;
		}
	}
	/* If we're off the end of the array, we're done */
3327
	if (index >= l->length)
3328 3329
		return NULL;
	/* Update the abstract position to be the actual pid that we found */
3330
	iter = l->list + index;
3331
	*pos = cgroup_pid_fry(cgrp, *iter);
3332 3333 3334
	return iter;
}

3335
static void cgroup_pidlist_stop(struct seq_file *s, void *v)
3336
{
T
Tejun Heo 已提交
3337
	struct kernfs_open_file *of = s->private;
3338
	struct cgroup_pidlist *l = of->priv;
3339

3340 3341
	if (l)
		mod_delayed_work(cgroup_pidlist_destroy_wq, &l->destroy_dwork,
3342
				 CGROUP_PIDLIST_DESTROY_DELAY);
3343
	mutex_unlock(&seq_css(s)->cgroup->pidlist_mutex);
3344 3345
}

static void *cgroup_pidlist_next(struct seq_file *s, void *v, loff_t *pos)
{
	struct kernfs_open_file *of = s->private;
	struct cgroup_pidlist *l = of->priv;
	pid_t *p = v;
	pid_t *end = l->list + l->length;
	/*
	 * Advance to the next pid in the array. If this goes off the
	 * end, we're done
	 */
	p++;
	if (p >= end) {
		return NULL;
	} else {
		*pos = cgroup_pid_fry(seq_css(s)->cgroup, *p);
		return p;
	}
}

static int cgroup_pidlist_show(struct seq_file *s, void *v)
{
	return seq_printf(s, "%d\n", *(int *)v);
}

/*
 * seq_operations functions for iterating on pidlists through seq_file -
 * independent of whether it's tasks or procs
 */
static const struct seq_operations cgroup_pidlist_seq_operations = {
	.start = cgroup_pidlist_start,
	.stop = cgroup_pidlist_stop,
	.next = cgroup_pidlist_next,
	.show = cgroup_pidlist_show,
};
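
/*
 * Hedged illustration (not part of cgroup itself): the same seq_file
 * start/next/stop/show contract as above, applied to a plain static
 * array.  All example_* names are hypothetical.  start() maps *pos to
 * an element or returns NULL past the end, next() advances *pos, and
 * show() emits one record; stop() releases whatever start() pinned
 * (nothing in this sketch).
 */
static const int example_vals[] = { 1, 2, 3 };

static void *example_start(struct seq_file *s, loff_t *pos)
{
	if (*pos >= ARRAY_SIZE(example_vals))
		return NULL;
	return (void *)&example_vals[*pos];
}

static void *example_next(struct seq_file *s, void *v, loff_t *pos)
{
	++*pos;
	return example_start(s, pos);
}

static void example_stop(struct seq_file *s, void *v)
{
	/* nothing was pinned by example_start() */
}

static int example_show(struct seq_file *s, void *v)
{
	return seq_printf(s, "%d\n", *(const int *)v);
}

static const struct seq_operations example_seq_ops __maybe_unused = {
	.start = example_start,
	.stop = example_stop,
	.next = example_next,
	.show = example_show,
};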

static u64 cgroup_read_notify_on_release(struct cgroup_subsys_state *css,
					 struct cftype *cft)
{
	return notify_on_release(css->cgroup);
}

static int cgroup_write_notify_on_release(struct cgroup_subsys_state *css,
					  struct cftype *cft, u64 val)
{
	clear_bit(CGRP_RELEASABLE, &css->cgroup->flags);
	if (val)
		set_bit(CGRP_NOTIFY_ON_RELEASE, &css->cgroup->flags);
	else
		clear_bit(CGRP_NOTIFY_ON_RELEASE, &css->cgroup->flags);
	return 0;
}

static u64 cgroup_clone_children_read(struct cgroup_subsys_state *css,
				      struct cftype *cft)
{
	return test_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags);
}

static int cgroup_clone_children_write(struct cgroup_subsys_state *css,
				       struct cftype *cft, u64 val)
{
	if (val)
		set_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags);
	else
		clear_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags);
	return 0;
}

static struct cftype cgroup_base_files[] = {
	{
		.name = "cgroup.procs",
		.seq_start = cgroup_pidlist_start,
		.seq_next = cgroup_pidlist_next,
		.seq_stop = cgroup_pidlist_stop,
		.seq_show = cgroup_pidlist_show,
		.private = CGROUP_FILE_PROCS,
		.write_u64 = cgroup_procs_write,
		.mode = S_IRUGO | S_IWUSR,
	},
	{
		.name = "cgroup.clone_children",
		.flags = CFTYPE_INSANE,
		.read_u64 = cgroup_clone_children_read,
		.write_u64 = cgroup_clone_children_write,
	},
	{
		.name = "cgroup.sane_behavior",
		.flags = CFTYPE_ONLY_ON_ROOT,
		.seq_show = cgroup_sane_behavior_show,
	},

	/*
	 * Historical crazy stuff.  These don't have "cgroup." prefix and
	 * don't exist if sane_behavior.  If you're depending on these, be
	 * prepared to be burned.
	 */
	{
		.name = "tasks",
		.flags = CFTYPE_INSANE,		/* use "procs" instead */
		.seq_start = cgroup_pidlist_start,
		.seq_next = cgroup_pidlist_next,
		.seq_stop = cgroup_pidlist_stop,
		.seq_show = cgroup_pidlist_show,
		.private = CGROUP_FILE_TASKS,
		.write_u64 = cgroup_tasks_write,
		.mode = S_IRUGO | S_IWUSR,
	},
	{
		.name = "notify_on_release",
		.flags = CFTYPE_INSANE,
		.read_u64 = cgroup_read_notify_on_release,
		.write_u64 = cgroup_write_notify_on_release,
	},
	{
		.name = "release_agent",
		.flags = CFTYPE_INSANE | CFTYPE_ONLY_ON_ROOT,
		.seq_show = cgroup_release_agent_show,
		.write_string = cgroup_release_agent_write,
		.max_write_len = PATH_MAX - 1,
	},
	{ }	/* terminate */
};

/**
 * cgroup_populate_dir - create subsys files in a cgroup directory
 * @cgrp: target cgroup
 * @subsys_mask: mask of the subsystem ids whose files should be added
 *
 * On failure, no file is added.
 */
static int cgroup_populate_dir(struct cgroup *cgrp, unsigned long subsys_mask)
{
	struct cgroup_subsys *ss;
	int i, ret = 0;

	/* process cftsets of each subsystem */
	for_each_subsys(ss, i) {
		struct cftype *cfts;

		if (!test_bit(i, &subsys_mask))
			continue;

		list_for_each_entry(cfts, &ss->cfts, node) {
			ret = cgroup_addrm_files(cgrp, cfts, true);
			if (ret < 0)
				goto err;
		}
	}
	return 0;
err:
	cgroup_clear_dir(cgrp, subsys_mask);
	return ret;
}

/*
 * css destruction is a four-stage process.
 *
 * 1. Destruction starts.  Killing of the percpu_ref is initiated.
 *    Implemented in kill_css().
 *
 * 2. When the percpu_ref is confirmed to be visible as killed on all CPUs
 *    and thus css_tryget() is guaranteed to fail, the css can be offlined
 *    by invoking offline_css().  After offlining, the base ref is put.
 *    Implemented in css_killed_work_fn().
 *
 * 3. When the percpu_ref reaches zero, the only possible remaining
 *    accessors are inside RCU read sections.  css_release() schedules the
 *    RCU callback.
 *
 * 4. After the grace period, the css can be freed.  Implemented in
 *    css_free_work_fn().
 *
 * It is actually hairier because steps 2 and 4 both require process
 * context and thus involve punting to css->destroy_work, adding two
 * additional steps to the already complex sequence.
 */
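
/*
 * Hedged illustration (not part of cgroup itself): the bare percpu_ref
 * kill/confirm pattern that stages 1-3 above build on.  All example_*
 * names are hypothetical; the callback signatures match the percpu_ref
 * API used by css_release() and css_killed_ref_fn() below.
 */
static struct percpu_ref example_ref;

/* invoked once the count reaches zero - the stage 3 analogue */
static void example_release(struct percpu_ref *ref)
{
}

/* invoked once the kill is visible on all CPUs - the stage 2 analogue */
static void example_confirm_kill(struct percpu_ref *ref)
{
}

static int __maybe_unused example_setup(void)
{
	return percpu_ref_init(&example_ref, example_release);
}

static void __maybe_unused example_teardown(void)
{
	/* afterwards, a tryget on example_ref is guaranteed to fail */
	percpu_ref_kill_and_confirm(&example_ref, example_confirm_kill);
}
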
static void css_free_work_fn(struct work_struct *work)
{
	struct cgroup_subsys_state *css =
		container_of(work, struct cgroup_subsys_state, destroy_work);
	struct cgroup *cgrp = css->cgroup;

	if (css->parent)
		css_put(css->parent);

	css->ss->css_free(css);
	cgroup_put(cgrp);
}

static void css_free_rcu_fn(struct rcu_head *rcu_head)
{
	struct cgroup_subsys_state *css =
		container_of(rcu_head, struct cgroup_subsys_state, rcu_head);

	INIT_WORK(&css->destroy_work, css_free_work_fn);
	queue_work(cgroup_destroy_wq, &css->destroy_work);
}

static void css_release(struct percpu_ref *ref)
{
	struct cgroup_subsys_state *css =
		container_of(ref, struct cgroup_subsys_state, refcnt);

	RCU_INIT_POINTER(css->cgroup->subsys[css->ss->id], NULL);
	call_rcu(&css->rcu_head, css_free_rcu_fn);
}

static void init_css(struct cgroup_subsys_state *css, struct cgroup_subsys *ss,
		     struct cgroup *cgrp)
{
	css->cgroup = cgrp;
	css->ss = ss;
	css->flags = 0;

	if (cgrp->parent)
		css->parent = cgroup_css(cgrp->parent, ss);
	else
		css->flags |= CSS_ROOT;

	BUG_ON(cgroup_css(cgrp, ss));
}

/* invoke ->css_online() on a new CSS and mark it online if successful */
static int online_css(struct cgroup_subsys_state *css)
{
	struct cgroup_subsys *ss = css->ss;
	int ret = 0;

	lockdep_assert_held(&cgroup_tree_mutex);
	lockdep_assert_held(&cgroup_mutex);

	if (ss->css_online)
		ret = ss->css_online(css);
	if (!ret) {
		css->flags |= CSS_ONLINE;
		css->cgroup->nr_css++;
		rcu_assign_pointer(css->cgroup->subsys[ss->id], css);
	}
	return ret;
}

/* if the CSS is online, invoke ->css_offline() on it and mark it offline */
static void offline_css(struct cgroup_subsys_state *css)
{
	struct cgroup_subsys *ss = css->ss;

	lockdep_assert_held(&cgroup_tree_mutex);
	lockdep_assert_held(&cgroup_mutex);

	if (!(css->flags & CSS_ONLINE))
		return;

	if (ss->css_offline)
		ss->css_offline(css);

	css->flags &= ~CSS_ONLINE;
	css->cgroup->nr_css--;
	RCU_INIT_POINTER(css->cgroup->subsys[ss->id], css);
}

/**
 * create_css - create a cgroup_subsys_state
 * @cgrp: the cgroup new css will be associated with
 * @ss: the subsys of new css
 *
 * Create a new css associated with @cgrp - @ss pair.  On success, the new
 * css is online and installed in @cgrp with all interface files created.
 * Returns 0 on success, -errno on failure.
 */
static int create_css(struct cgroup *cgrp, struct cgroup_subsys *ss)
{
	struct cgroup *parent = cgrp->parent;
	struct cgroup_subsys_state *css;
	int err;

	lockdep_assert_held(&cgroup_mutex);

	css = ss->css_alloc(cgroup_css(parent, ss));
	if (IS_ERR(css))
		return PTR_ERR(css);

	err = percpu_ref_init(&css->refcnt, css_release);
	if (err)
		goto err_free_css;

	init_css(css, ss, cgrp);

	err = cgroup_populate_dir(cgrp, 1 << ss->id);
	if (err)
		goto err_free_percpu_ref;

	err = online_css(css);
	if (err)
		goto err_clear_dir;

	cgroup_get(cgrp);
	css_get(css->parent);

	cgrp->subsys_mask |= 1 << ss->id;

	if (ss->broken_hierarchy && !ss->warned_broken_hierarchy &&
	    parent->parent) {
		pr_warning("cgroup: %s (%d) created nested cgroup for controller \"%s\" which has incomplete hierarchy support. Nested cgroups may change behavior in the future.\n",
			   current->comm, current->pid, ss->name);
		if (!strcmp(ss->name, "memory"))
			pr_warning("cgroup: \"memory\" requires setting use_hierarchy to 1 on the root.\n");
		ss->warned_broken_hierarchy = true;
	}

	return 0;

err_clear_dir:
	cgroup_clear_dir(css->cgroup, 1 << css->ss->id);
err_free_percpu_ref:
	percpu_ref_cancel_init(&css->refcnt);
err_free_css:
	ss->css_free(css);
	return err;
}

/**
 * cgroup_create - create a cgroup
 * @parent: cgroup that will be parent of the new cgroup
 * @name: name of the new cgroup
 * @mode: mode to set on new cgroup
 */
static long cgroup_create(struct cgroup *parent, const char *name,
			  umode_t mode)
{
	struct cgroup *cgrp;
	struct cgroup_root *root = parent->root;
	int ssid, err;
	struct cgroup_subsys *ss;
	struct kernfs_node *kn;

	/*
	 * XXX: The default hierarchy isn't fully implemented yet.  Block
	 * !root cgroup creation on it for now.
	 */
	if (root == &cgrp_dfl_root)
		return -EINVAL;

	/* allocate the cgroup and its ID, 0 is reserved for the root */
	cgrp = kzalloc(sizeof(*cgrp), GFP_KERNEL);
	if (!cgrp)
		return -ENOMEM;

	mutex_lock(&cgroup_tree_mutex);

	/*
	 * Only live parents can have children.  Note that the liveness
	 * check isn't strictly necessary because cgroup_mkdir() and
	 * cgroup_rmdir() are fully synchronized by i_mutex; however, do it
	 * anyway so that locking is contained inside cgroup proper and we
	 * don't get nasty surprises if we ever grow another caller.
	 */
	if (!cgroup_lock_live_group(parent)) {
		err = -ENODEV;
		goto err_unlock_tree;
	}

	/*
	 * Temporarily set the pointer to NULL, so idr_find() won't return
	 * a half-baked cgroup.
	 */
	cgrp->id = idr_alloc(&root->cgroup_idr, NULL, 1, 0, GFP_KERNEL);
	if (cgrp->id < 0) {
		err = -ENOMEM;
		goto err_unlock;
	}

	init_cgroup_housekeeping(cgrp);

	cgrp->parent = parent;
	cgrp->dummy_css.parent = &parent->dummy_css;
	cgrp->root = parent->root;

	if (notify_on_release(parent))
		set_bit(CGRP_NOTIFY_ON_RELEASE, &cgrp->flags);

	if (test_bit(CGRP_CPUSET_CLONE_CHILDREN, &parent->flags))
		set_bit(CGRP_CPUSET_CLONE_CHILDREN, &cgrp->flags);

	/* create the directory */
	kn = kernfs_create_dir(parent->kn, name, mode, cgrp);
	if (IS_ERR(kn)) {
		err = PTR_ERR(kn);
		goto err_free_id;
	}
	cgrp->kn = kn;

	/*
	 * This extra ref will be put in cgroup_free_fn() and guarantees
	 * that @cgrp->kn is always accessible.
	 */
	kernfs_get(kn);

	cgrp->serial_nr = cgroup_serial_nr_next++;

	/* allocation complete, commit to creation */
	list_add_tail_rcu(&cgrp->sibling, &cgrp->parent->children);
	atomic_inc(&root->nr_cgrps);
	cgroup_get(parent);

	/*
	 * @cgrp is now fully operational.  If something fails after this
	 * point, it'll be released via the normal destruction path.
	 */
	idr_replace(&root->cgroup_idr, cgrp, cgrp->id);

	err = cgroup_addrm_files(cgrp, cgroup_base_files, true);
	if (err)
		goto err_destroy;

	/* let's create and online css's */
	for_each_subsys(ss, ssid) {
		if (root->cgrp.subsys_mask & (1 << ssid)) {
			err = create_css(cgrp, ss);
			if (err)
				goto err_destroy;
		}
	}

	kernfs_activate(kn);

	mutex_unlock(&cgroup_mutex);
	mutex_unlock(&cgroup_tree_mutex);

	return 0;

err_free_id:
	idr_remove(&root->cgroup_idr, cgrp->id);
err_unlock:
	mutex_unlock(&cgroup_mutex);
err_unlock_tree:
	mutex_unlock(&cgroup_tree_mutex);
	kfree(cgrp);
	return err;

err_destroy:
	cgroup_destroy_locked(cgrp);
	mutex_unlock(&cgroup_mutex);
	mutex_unlock(&cgroup_tree_mutex);
	return err;
}

static int cgroup_mkdir(struct kernfs_node *parent_kn, const char *name,
			umode_t mode)
{
	struct cgroup *parent = parent_kn->priv;
	int ret;

	/*
	 * cgroup_create() grabs cgroup_tree_mutex which nests outside
	 * kernfs active_ref and cgroup_create() already synchronizes
	 * properly against removal through cgroup_lock_live_group().
	 * Break it before calling cgroup_create().
	 */
	cgroup_get(parent);
	kernfs_break_active_protection(parent_kn);

	ret = cgroup_create(parent, name, mode);

	kernfs_unbreak_active_protection(parent_kn);
	cgroup_put(parent);
	return ret;
}

/*
 * This is called when the refcnt of a css is confirmed to be killed.
 * css_tryget() is now guaranteed to fail.
 */
static void css_killed_work_fn(struct work_struct *work)
{
	struct cgroup_subsys_state *css =
		container_of(work, struct cgroup_subsys_state, destroy_work);
	struct cgroup *cgrp = css->cgroup;

	mutex_lock(&cgroup_tree_mutex);
	mutex_lock(&cgroup_mutex);

	/*
	 * css_tryget() is guaranteed to fail now.  Tell subsystems to
	 * initiate destruction.
	 */
	offline_css(css);

	/*
	 * If @cgrp is marked dead, it's waiting for refs of all css's to
	 * be disabled before proceeding to the second phase of cgroup
	 * destruction.  If we are the last one, kick it off.
	 */
	if (!cgrp->nr_css && cgroup_is_dead(cgrp))
		cgroup_destroy_css_killed(cgrp);

	mutex_unlock(&cgroup_mutex);
	mutex_unlock(&cgroup_tree_mutex);

	/*
	 * Put the css refs from kill_css().  Each css holds an extra
	 * reference to the cgroup's dentry and cgroup removal proceeds
	 * regardless of css refs.  On the last put of each css, whenever
	 * that may be, the extra dentry ref is put so that dentry
	 * destruction happens only after all css's are released.
	 */
	css_put(css);
}

/* css kill confirmation processing requires process context, bounce */
static void css_killed_ref_fn(struct percpu_ref *ref)
{
	struct cgroup_subsys_state *css =
		container_of(ref, struct cgroup_subsys_state, refcnt);

	INIT_WORK(&css->destroy_work, css_killed_work_fn);
	queue_work(cgroup_destroy_wq, &css->destroy_work);
}

static void __kill_css(struct cgroup_subsys_state *css)
{
	lockdep_assert_held(&cgroup_tree_mutex);

	/*
	 * This must happen before css is disassociated from its cgroup.
	 * See seq_css() for details.
	 */
	cgroup_clear_dir(css->cgroup, 1 << css->ss->id);

	/*
	 * Killing would put the base ref, but we need to keep it alive
	 * until after ->css_offline().
	 */
	css_get(css);

	/*
	 * cgroup core guarantees that, by the time ->css_offline() is
	 * invoked, no new css reference will be given out via
	 * css_tryget().  We can't simply call percpu_ref_kill() and
	 * proceed to offlining css's because percpu_ref_kill() doesn't
	 * guarantee that the ref is seen as killed on all CPUs on return.
	 *
	 * Use percpu_ref_kill_and_confirm() to get notifications as each
	 * css is confirmed to be seen as killed on all CPUs.
	 */
	percpu_ref_kill_and_confirm(&css->refcnt, css_killed_ref_fn);
}

/**
 * kill_css - destroy a css
 * @css: css to destroy
 *
 * This function initiates destruction of @css by removing cgroup interface
 * files and putting its base reference.  ->css_offline() will be invoked
 * asynchronously once css_tryget() is guaranteed to fail and when the
 * reference count reaches zero, @css will be released.
 */
static void kill_css(struct cgroup_subsys_state *css)
{
	struct cgroup *cgrp = css->cgroup;

	lockdep_assert_held(&cgroup_tree_mutex);

	/* if already killed, noop */
	if (cgrp->subsys_mask & (1 << css->ss->id)) {
		cgrp->subsys_mask &= ~(1 << css->ss->id);
		__kill_css(css);
	}
}

/**
 * cgroup_destroy_locked - the first stage of cgroup destruction
 * @cgrp: cgroup to be destroyed
 *
 * css's make use of percpu refcnts whose killing latency shouldn't be
 * exposed to userland and are RCU protected.  Also, cgroup core needs to
 * guarantee that css_tryget() won't succeed by the time ->css_offline() is
 * invoked.  To satisfy all the requirements, destruction is implemented in
 * the following two steps.
 *
 * s1. Verify @cgrp can be destroyed and mark it dying.  Remove all
 *     userland visible parts and start killing the percpu refcnts of
 *     css's.  Set up so that the next stage will be kicked off once all
 *     the percpu refcnts are confirmed to be killed.
 *
 * s2. Invoke ->css_offline(), mark the cgroup dead and proceed with the
 *     rest of destruction.  Once all cgroup references are gone, the
 *     cgroup is RCU-freed.
 *
 * This function implements s1.  After this step, @cgrp is gone as far as
 * the userland is concerned and a new cgroup with the same name may be
 * created.  As cgroup doesn't care about the names internally, this
 * doesn't cause any problem.
 */
static int cgroup_destroy_locked(struct cgroup *cgrp)
	__releases(&cgroup_mutex) __acquires(&cgroup_mutex)
{
	struct cgroup *child;
	struct cgroup_subsys_state *css;
	bool empty;
	int ssid;

	lockdep_assert_held(&cgroup_tree_mutex);
	lockdep_assert_held(&cgroup_mutex);

	/*
	 * css_set_rwsem synchronizes access to ->cset_links and prevents
	 * @cgrp from being removed while put_css_set() is in progress.
	 */
	down_read(&css_set_rwsem);
	empty = list_empty(&cgrp->cset_links);
	up_read(&css_set_rwsem);
	if (!empty)
		return -EBUSY;

	/*
	 * Make sure there are no live children.  We can't test ->children
	 * emptiness as dead children linger on it while being destroyed;
	 * otherwise, "rmdir parent/child parent" may fail with -EBUSY.
	 */
	empty = true;
	rcu_read_lock();
	list_for_each_entry_rcu(child, &cgrp->children, sibling) {
		empty = cgroup_is_dead(child);
		if (!empty)
			break;
	}
	rcu_read_unlock();
	if (!empty)
		return -EBUSY;

	/*
	 * Mark @cgrp dead.  This prevents further task migration and child
	 * creation by disabling cgroup_lock_live_group().  Note that
	 * CGRP_DEAD assertion is depended upon by css_next_child() to
	 * resume iteration after dropping RCU read lock.  See
	 * css_next_child() for details.
	 */
	set_bit(CGRP_DEAD, &cgrp->flags);

	/*
	 * Initiate massacre of all css's.  cgroup_destroy_css_killed()
	 * will be invoked to perform the rest of destruction once the
	 * percpu refs of all css's are confirmed to be killed.  This
	 * involves removing the subsystem's files, so drop cgroup_mutex
	 * across the calls.
	 */
	mutex_unlock(&cgroup_mutex);
	for_each_css(css, ssid, cgrp)
		kill_css(css);
	mutex_lock(&cgroup_mutex);

	/* CGRP_DEAD is set, remove from ->release_list for the last time */
	raw_spin_lock(&release_list_lock);
	if (!list_empty(&cgrp->release_list))
		list_del_init(&cgrp->release_list);
	raw_spin_unlock(&release_list_lock);

	/*
	 * If @cgrp has css's attached, the second stage of cgroup
	 * destruction is kicked off from css_killed_work_fn() after the
	 * refs of all attached css's are killed.  If @cgrp doesn't have
	 * any css, we kick it off here.
	 */
	if (!cgrp->nr_css)
		cgroup_destroy_css_killed(cgrp);

	/* remove @cgrp directory along with the base files */
	mutex_unlock(&cgroup_mutex);

	/*
	 * There are two control paths which try to determine cgroup from
	 * dentry without going through kernfs - cgroupstats_build() and
	 * css_tryget_from_dir().  Those are supported by RCU protecting
	 * clearing of cgrp->kn->priv backpointer, which should happen
	 * after all files under it have been removed.
	 */
	kernfs_remove(cgrp->kn);	/* @cgrp has an extra ref on its kn */
	RCU_INIT_POINTER(*(void __rcu __force **)&cgrp->kn->priv, NULL);

	mutex_lock(&cgroup_mutex);

	return 0;
}

/**
 * cgroup_destroy_css_killed - the second step of cgroup destruction
 * @cgrp: the cgroup being destroyed
 *
 * This function is invoked from a work item for a cgroup which is being
 * destroyed after all css's are offlined and performs the rest of
 * destruction.  This is the second step of destruction described in the
 * comment above cgroup_destroy_locked().
 */
static void cgroup_destroy_css_killed(struct cgroup *cgrp)
{
	struct cgroup *parent = cgrp->parent;

	lockdep_assert_held(&cgroup_tree_mutex);
	lockdep_assert_held(&cgroup_mutex);

	/* delete this cgroup from parent->children */
	list_del_rcu(&cgrp->sibling);

	cgroup_put(cgrp);

	set_bit(CGRP_RELEASABLE, &parent->flags);
	check_for_release(parent);
}

static int cgroup_rmdir(struct kernfs_node *kn)
{
	struct cgroup *cgrp = kn->priv;
	int ret = 0;

	/*
	 * This is self-destruction but @kn can't be removed while this
	 * callback is in progress.  Let's break active protection.  Once
	 * the protection is broken, @cgrp can be destroyed at any point.
	 * Pin it so that it stays accessible.
	 */
	cgroup_get(cgrp);
	kernfs_break_active_protection(kn);

	mutex_lock(&cgroup_tree_mutex);
	mutex_lock(&cgroup_mutex);

	/*
	 * @cgrp might already have been destroyed while we're trying to
	 * grab the mutexes.
	 */
	if (!cgroup_is_dead(cgrp))
		ret = cgroup_destroy_locked(cgrp);

	mutex_unlock(&cgroup_mutex);
	mutex_unlock(&cgroup_tree_mutex);

	kernfs_unbreak_active_protection(kn);
	cgroup_put(cgrp);
	return ret;
}

static struct kernfs_syscall_ops cgroup_kf_syscall_ops = {
	.remount_fs		= cgroup_remount,
	.show_options		= cgroup_show_options,
	.mkdir			= cgroup_mkdir,
	.rmdir			= cgroup_rmdir,
	.rename			= cgroup_rename,
};

static void __init cgroup_init_subsys(struct cgroup_subsys *ss)
{
	struct cgroup_subsys_state *css;

	printk(KERN_INFO "Initializing cgroup subsys %s\n", ss->name);

	mutex_lock(&cgroup_tree_mutex);
	mutex_lock(&cgroup_mutex);

	INIT_LIST_HEAD(&ss->cfts);

	/* Create the root cgroup state for this subsystem */
	ss->root = &cgrp_dfl_root;
	css = ss->css_alloc(cgroup_css(&cgrp_dfl_root.cgrp, ss));
	/* We don't handle early failures gracefully */
	BUG_ON(IS_ERR(css));
	init_css(css, ss, &cgrp_dfl_root.cgrp);

	/*
	 * Update the init_css_set to contain a subsys pointer to this
	 * state - since the subsystem is newly registered, all tasks and
	 * hence the init_css_set is in the subsystem's root cgroup.
	 */
	init_css_set.subsys[ss->id] = css;

	need_forkexit_callback |= ss->fork || ss->exit;

	/*
	 * At system boot, before all subsystems have been registered, no
	 * tasks have been forked, so we don't need to invoke fork
	 * callbacks here.
	 */
	BUG_ON(!list_empty(&init_task.tasks));

	BUG_ON(online_css(css));

	cgrp_dfl_root.cgrp.subsys_mask |= 1 << ss->id;

	mutex_unlock(&cgroup_mutex);
	mutex_unlock(&cgroup_tree_mutex);
}

/**
 * cgroup_init_early - cgroup initialization at system boot
 *
 * Initialize cgroups at system boot, and initialize any
 * subsystems that request early init.
 */
int __init cgroup_init_early(void)
{
	static struct cgroup_sb_opts __initdata opts =
		{ .flags = CGRP_ROOT_SANE_BEHAVIOR };
	struct cgroup_subsys *ss;
	int i;

	init_cgroup_root(&cgrp_dfl_root, &opts);
	RCU_INIT_POINTER(init_task.cgroups, &init_css_set);

	for_each_subsys(ss, i) {
		WARN(!ss->css_alloc || !ss->css_free || ss->name || ss->id,
		     "invalid cgroup_subsys %d:%s css_alloc=%p css_free=%p name:id=%d:%s\n",
		     i, cgroup_subsys_name[i], ss->css_alloc, ss->css_free,
		     ss->id, ss->name);
		WARN(strlen(cgroup_subsys_name[i]) > MAX_CGROUP_TYPE_NAMELEN,
		     "cgroup_subsys_name %s too long\n", cgroup_subsys_name[i]);

		ss->id = i;
		ss->name = cgroup_subsys_name[i];

		if (ss->early_init)
			cgroup_init_subsys(ss);
	}
	return 0;
}

/**
 * cgroup_init - cgroup initialization
 *
 * Register cgroup filesystem and /proc file, and initialize
 * any subsystems that didn't request early init.
 */
int __init cgroup_init(void)
{
	struct cgroup_subsys *ss;
	unsigned long key;
	int ssid, err;

	BUG_ON(cgroup_init_cftypes(NULL, cgroup_base_files));

	mutex_lock(&cgroup_tree_mutex);
	mutex_lock(&cgroup_mutex);

	/* Add init_css_set to the hash table */
	key = css_set_hash(init_css_set.subsys);
	hash_add(css_set_table, &init_css_set.hlist, key);

	BUG_ON(cgroup_setup_root(&cgrp_dfl_root, 0));

	mutex_unlock(&cgroup_mutex);
	mutex_unlock(&cgroup_tree_mutex);

	for_each_subsys(ss, ssid) {
		if (!ss->early_init)
			cgroup_init_subsys(ss);

		/*
		 * cftype registration needs kmalloc and can't be done
		 * during early_init.  Register base cftypes separately.
		 */
		if (ss->base_cftypes)
			WARN_ON(cgroup_add_cftypes(ss, ss->base_cftypes));
	}

	cgroup_kobj = kobject_create_and_add("cgroup", fs_kobj);
	if (!cgroup_kobj)
		return -ENOMEM;

	err = register_filesystem(&cgroup_fs_type);
	if (err < 0) {
		kobject_put(cgroup_kobj);
		return err;
	}

	proc_create("cgroups", 0, NULL, &proc_cgroupstats_operations);
	return 0;
}

static int __init cgroup_wq_init(void)
{
	/*
	 * There isn't much point in executing destruction path in
	 * parallel.  Good chunk is serialized with cgroup_mutex anyway.
	 * Use 1 for @max_active.
	 *
	 * We would prefer to do this in cgroup_init() above, but that
	 * is called before init_workqueues(): so leave this until after.
	 */
	cgroup_destroy_wq = alloc_workqueue("cgroup_destroy", 0, 1);
	BUG_ON(!cgroup_destroy_wq);

	/*
	 * Used to destroy pidlists and separate to serve as flush domain.
	 * Cap @max_active to 1 too.
	 */
	cgroup_pidlist_destroy_wq = alloc_workqueue("cgroup_pidlist_destroy",
						    0, 1);
	BUG_ON(!cgroup_pidlist_destroy_wq);

	return 0;
}
core_initcall(cgroup_wq_init);

/*
 * proc_cgroup_show()
 *  - Print task's cgroup paths into seq_file, one line for each hierarchy
 *  - Used for /proc/<pid>/cgroup.
 */

/* TODO: Use a proper seq_file iterator */
int proc_cgroup_show(struct seq_file *m, void *v)
{
	struct pid *pid;
	struct task_struct *tsk;
	char *buf, *path;
	int retval;
	struct cgroup_root *root;

	retval = -ENOMEM;
	buf = kmalloc(PATH_MAX, GFP_KERNEL);
	if (!buf)
		goto out;

	retval = -ESRCH;
	pid = m->private;
	tsk = get_pid_task(pid, PIDTYPE_PID);
	if (!tsk)
		goto out_free;

	retval = 0;

	mutex_lock(&cgroup_mutex);
	down_read(&css_set_rwsem);

	for_each_root(root) {
		struct cgroup_subsys *ss;
		struct cgroup *cgrp;
		int ssid, count = 0;

		if (root == &cgrp_dfl_root && !cgrp_dfl_root_visible)
			continue;

		seq_printf(m, "%d:", root->hierarchy_id);
		for_each_subsys(ss, ssid)
			if (root->cgrp.subsys_mask & (1 << ssid))
				seq_printf(m, "%s%s", count++ ? "," : "", ss->name);
		if (strlen(root->name))
			seq_printf(m, "%sname=%s", count ? "," : "",
				   root->name);
		seq_putc(m, ':');
		cgrp = task_cgroup_from_root(tsk, root);
		path = cgroup_path(cgrp, buf, PATH_MAX);
		if (!path) {
			retval = -ENAMETOOLONG;
			goto out_unlock;
		}
		seq_puts(m, path);
		seq_putc(m, '\n');
	}

out_unlock:
	up_read(&css_set_rwsem);
	mutex_unlock(&cgroup_mutex);
	put_task_struct(tsk);
out_free:
	kfree(buf);
out:
	return retval;
}
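
/*
 * Example of the resulting /proc/<pid>/cgroup format, one
 * "hierarchy-id:subsystems:path" line per hierarchy (hypothetical mount
 * layout):
 *
 *	3:cpu,cpuacct:/foo/bar
 *	1:name=systemd:/user.slice
 */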

/* Display information about each subsystem and each hierarchy */
static int proc_cgroupstats_show(struct seq_file *m, void *v)
{
	struct cgroup_subsys *ss;
	int i;

	seq_puts(m, "#subsys_name\thierarchy\tnum_cgroups\tenabled\n");
	/*
	 * ideally we don't want subsystems moving around while we do this.
	 * cgroup_mutex is also necessary to guarantee an atomic snapshot of
	 * subsys/hierarchy state.
	 */
	mutex_lock(&cgroup_mutex);

	for_each_subsys(ss, i)
		seq_printf(m, "%s\t%d\t%d\t%d\n",
			   ss->name, ss->root->hierarchy_id,
			   atomic_read(&ss->root->nr_cgrps), !ss->disabled);

	mutex_unlock(&cgroup_mutex);
	return 0;
}

static int cgroupstats_open(struct inode *inode, struct file *file)
{
	return single_open(file, proc_cgroupstats_show, NULL);
}

static const struct file_operations proc_cgroupstats_operations = {
	.open = cgroupstats_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

/**
 * cgroup_fork - initialize cgroup related fields during copy_process()
 * @child: pointer to task_struct of the child process
 *
 * A task is associated with the init_css_set until cgroup_post_fork()
 * attaches it to the parent's css_set.  Empty cg_list indicates that
 * @child isn't holding a reference to its css_set.
 */
void cgroup_fork(struct task_struct *child)
{
	RCU_INIT_POINTER(child->cgroups, &init_css_set);
	INIT_LIST_HEAD(&child->cg_list);
}

/**
 * cgroup_post_fork - called on a new task after adding it to the task list
 * @child: the task in question
 *
 * Adds the task to the list running through its css_set if necessary and
 * calls the subsystem fork() callbacks.  Has to be after the task is
 * visible on the task list in case we race with the first call to
 * css_task_iter_start() - to guarantee that the new task ends up on its
 * list.
 */
void cgroup_post_fork(struct task_struct *child)
{
	struct cgroup_subsys *ss;
	int i;

	/*
	 * This may race against cgroup_enable_task_cg_lists().  As that
	 * function sets use_task_css_set_links before grabbing
	 * tasklist_lock and we just went through tasklist_lock to add
	 * @child, it's guaranteed that either we see the set
	 * use_task_css_set_links or cgroup_enable_task_cg_lists() sees
	 * @child during its iteration.
	 *
	 * If we won the race, @child is associated with %current's
	 * css_set.  Grabbing css_set_rwsem guarantees both that the
	 * association is stable, and, on completion of the parent's
	 * migration, @child is visible in the source of migration or
	 * already in the destination cgroup.  This guarantee is necessary
	 * when implementing operations which need to migrate all tasks of
	 * a cgroup to another.
	 *
	 * Note that if we lose to cgroup_enable_task_cg_lists(), @child
	 * will remain in init_css_set.  This is safe because all tasks are
	 * in the init_css_set before cg_lists is enabled and there's no
	 * operation which transfers all tasks out of init_css_set.
	 */
	if (use_task_css_set_links) {
		struct css_set *cset;

		down_write(&css_set_rwsem);
		cset = task_css_set(current);
		if (list_empty(&child->cg_list)) {
			rcu_assign_pointer(child->cgroups, cset);
			list_add(&child->cg_list, &cset->tasks);
			get_css_set(cset);
		}
		up_write(&css_set_rwsem);
	}

	/*
	 * Call ss->fork().  This must happen after @child is linked on
	 * css_set; otherwise, @child might change state between ->fork()
	 * and addition to css_set.
	 */
	if (need_forkexit_callback) {
		for_each_subsys(ss, i)
			if (ss->fork)
				ss->fork(child);
	}
}

/**
 * cgroup_exit - detach cgroup from exiting task
 * @tsk: pointer to task_struct of exiting process
 *
 * Description: Detach cgroup from @tsk and release it.
 *
 * Note that cgroups marked notify_on_release force every task in
 * them to take the global cgroup_mutex mutex when exiting.
 * This could impact scaling on very large systems.  Be reluctant to
 * use notify_on_release cgroups where very high task exit scaling
 * is required on large systems.
 *
 * We set the exiting task's cgroup to the root cgroup (top_cgroup).  We
 * call cgroup_exit() while the task is still competent to handle
 * notify_on_release(), then leave the task attached to the root cgroup in
 * each hierarchy for the remainder of its exit.  No need to bother with
 * init_css_set refcnting.  init_css_set never goes away and we can't race
 * with migration path - PF_EXITING is visible to migration path.
 */
void cgroup_exit(struct task_struct *tsk)
{
	struct cgroup_subsys *ss;
	struct css_set *cset;
	bool put_cset = false;
	int i;

	/*
	 * Unlink @tsk from its css_set.  As migration path can't race
	 * with us, we can check cg_list without grabbing css_set_rwsem.
	 */
	if (!list_empty(&tsk->cg_list)) {
		down_write(&css_set_rwsem);
		list_del_init(&tsk->cg_list);
		up_write(&css_set_rwsem);
		put_cset = true;
	}

	/* Reassign the task to the init_css_set. */
	cset = task_css_set(tsk);
	RCU_INIT_POINTER(tsk->cgroups, &init_css_set);

	if (need_forkexit_callback) {
		/* see cgroup_post_fork() for details */
		for_each_subsys(ss, i) {
			if (ss->exit) {
				struct cgroup_subsys_state *old_css = cset->subsys[i];
				struct cgroup_subsys_state *css = task_css(tsk, i);

				ss->exit(css, old_css, tsk);
			}
		}
	}

	if (put_cset)
		put_css_set(cset, true);
}

static void check_for_release(struct cgroup *cgrp)
{
	if (cgroup_is_releasable(cgrp) &&
	    list_empty(&cgrp->cset_links) && list_empty(&cgrp->children)) {
		/*
		 * Control Group is currently removable.  If it's not
		 * already queued for a userspace notification, queue
		 * it now.
		 */
		int need_schedule_work = 0;

		raw_spin_lock(&release_list_lock);
		if (!cgroup_is_dead(cgrp) &&
		    list_empty(&cgrp->release_list)) {
			list_add(&cgrp->release_list, &release_list);
			need_schedule_work = 1;
		}
		raw_spin_unlock(&release_list_lock);
		if (need_schedule_work)
			schedule_work(&release_agent_work);
	}
}

/*
 * Notify userspace when a cgroup is released, by running the
 * configured release agent with the name of the cgroup (path
 * relative to the root of cgroup file system) as the argument.
 *
 * Most likely, this user command will try to rmdir this cgroup.
 *
 * This races with the possibility that some other task will be
 * attached to this cgroup before it is removed, or that some other
 * user task will 'mkdir' a child cgroup of this cgroup.  That's ok.
 * The presumed 'rmdir' will fail quietly if this cgroup is no longer
 * unused, and this cgroup will be reprieved from its death sentence,
 * to continue to serve a useful existence.  Next time it's released,
 * we will get notified again, if it still has 'notify_on_release' set.
 *
 * The final arg to call_usermodehelper() is UMH_WAIT_EXEC, which
 * means only wait until the task is successfully execve()'d.  The
 * separate release agent task is forked by call_usermodehelper(),
 * then control in this thread returns here, without waiting for the
 * release agent task.  We don't bother to wait because the caller of
 * this routine has no use for the exit status of the release agent
 * task, so no sense holding our caller up for that.
 */
static void cgroup_release_agent(struct work_struct *work)
{
	BUG_ON(work != &release_agent_work);
	mutex_lock(&cgroup_mutex);
	raw_spin_lock(&release_list_lock);
	while (!list_empty(&release_list)) {
		char *argv[3], *envp[3];
		int i;
		char *pathbuf = NULL, *agentbuf = NULL, *path;
		struct cgroup *cgrp = list_entry(release_list.next,
						    struct cgroup,
						    release_list);
		list_del_init(&cgrp->release_list);
		raw_spin_unlock(&release_list_lock);
		pathbuf = kmalloc(PATH_MAX, GFP_KERNEL);
		if (!pathbuf)
			goto continue_free;
		path = cgroup_path(cgrp, pathbuf, PATH_MAX);
		if (!path)
			goto continue_free;
		agentbuf = kstrdup(cgrp->root->release_agent_path, GFP_KERNEL);
		if (!agentbuf)
			goto continue_free;

		i = 0;
		argv[i++] = agentbuf;
		argv[i++] = path;
		argv[i] = NULL;

		i = 0;
		/* minimal command environment */
		envp[i++] = "HOME=/";
		envp[i++] = "PATH=/sbin:/bin:/usr/sbin:/usr/bin";
		envp[i] = NULL;

		/* Drop the lock while we invoke the usermode helper,
		 * since the exec could involve hitting disk and hence
		 * be a slow process */
		mutex_unlock(&cgroup_mutex);
		call_usermodehelper(argv[0], argv, envp, UMH_WAIT_EXEC);
		mutex_lock(&cgroup_mutex);
 continue_free:
		kfree(pathbuf);
		kfree(agentbuf);
		raw_spin_lock(&release_list_lock);
	}
	raw_spin_unlock(&release_list_lock);
	mutex_unlock(&cgroup_mutex);
}
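
/*
 * Example: with release_agent set to the hypothetical path
 * "/sbin/cgroup-release" and a cgroup at "/foo" being released, the
 * loop above ends up doing roughly:
 *
 *	argv = { "/sbin/cgroup-release", "/foo", NULL }
 *	envp = { "HOME=/", "PATH=/sbin:/bin:/usr/sbin:/usr/bin", NULL }
 *	call_usermodehelper(argv[0], argv, envp, UMH_WAIT_EXEC);
 *
 * and does not wait beyond a successful execve() of the agent.
 */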

static int __init cgroup_disable(char *str)
{
	struct cgroup_subsys *ss;
	char *token;
	int i;

	while ((token = strsep(&str, ",")) != NULL) {
		if (!*token)
			continue;

		for_each_subsys(ss, i) {
			if (!strcmp(token, ss->name)) {
				ss->disabled = 1;
				printk(KERN_INFO "Disabling %s control group"
					" subsystem\n", ss->name);
				break;
			}
		}
	}
	return 1;
}
__setup("cgroup_disable=", cgroup_disable);
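
/*
 * Example: booting with "cgroup_disable=memory,blkio" (any compiled-in
 * subsystem names work) sets ->disabled on those subsystems before any
 * hierarchy can be mounted.
 */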

/**
 * css_tryget_from_dir - get corresponding css from the dentry of a cgroup dir
 * @dentry: directory dentry of interest
 * @ss: subsystem of interest
 *
 * If @dentry is a directory for a cgroup which has @ss enabled on it, try
 * to get the corresponding css and return it.  If such css doesn't exist
 * or can't be pinned, an ERR_PTR value is returned.
 */
struct cgroup_subsys_state *css_tryget_from_dir(struct dentry *dentry,
						struct cgroup_subsys *ss)
{
	struct kernfs_node *kn = kernfs_node_from_dentry(dentry);
	struct cgroup_subsys_state *css = NULL;
	struct cgroup *cgrp;

	/* is @dentry a cgroup dir? */
	if (dentry->d_sb->s_type != &cgroup_fs_type || !kn ||
	    kernfs_type(kn) != KERNFS_DIR)
		return ERR_PTR(-EBADF);

	rcu_read_lock();

	/*
	 * This path doesn't originate from kernfs and @kn could already
	 * have been or be removed at any point.  @kn->priv is RCU
	 * protected for this access.  See cgroup_destroy_locked() for
	 * details.
	 */
	cgrp = rcu_dereference(kn->priv);
	if (cgrp)
		css = cgroup_css(cgrp, ss);

	if (!css || !css_tryget(css))
		css = ERR_PTR(-ENOENT);

	rcu_read_unlock();
	return css;
}
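
/*
 * Hedged usage sketch (hypothetical example_* name): pin the css behind
 * an open cgroup directory, use it, then drop the reference.
 */
static int __maybe_unused example_pin_css(struct dentry *dentry,
					  struct cgroup_subsys *ss)
{
	struct cgroup_subsys_state *css;

	css = css_tryget_from_dir(dentry, ss);
	if (IS_ERR(css))
		return PTR_ERR(css);
	/* ... use css under the reference taken above ... */
	css_put(css);
	return 0;
}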

/**
 * css_from_id - lookup css by id
 * @id: the cgroup id
 * @ss: cgroup subsys to be looked into
 *
 * Returns the css if there's a valid one with @id, otherwise returns NULL.
 * Should be called under rcu_read_lock().
 */
struct cgroup_subsys_state *css_from_id(int id, struct cgroup_subsys *ss)
{
	struct cgroup *cgrp;

	cgroup_assert_mutexes_or_rcu_locked();

	cgrp = idr_find(&ss->root->cgroup_idr, id);
	if (cgrp)
		return cgroup_css(cgrp, ss);
	return NULL;
}
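
/*
 * Hedged usage sketch (hypothetical example_* name): a css returned by
 * css_from_id() is only stable inside the RCU read section unless a
 * reference is taken.
 */
static bool __maybe_unused example_css_is_online(int id,
						 struct cgroup_subsys *ss)
{
	struct cgroup_subsys_state *css;
	bool online = false;

	rcu_read_lock();
	css = css_from_id(id, ss);
	if (css)
		online = css->flags & CSS_ONLINE;
	rcu_read_unlock();
	return online;
}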

#ifdef CONFIG_CGROUP_DEBUG
static struct cgroup_subsys_state *
debug_css_alloc(struct cgroup_subsys_state *parent_css)
{
	struct cgroup_subsys_state *css = kzalloc(sizeof(*css), GFP_KERNEL);

	if (!css)
		return ERR_PTR(-ENOMEM);

	return css;
}

static void debug_css_free(struct cgroup_subsys_state *css)
{
	kfree(css);
}

static u64 debug_taskcount_read(struct cgroup_subsys_state *css,
				struct cftype *cft)
{
	return cgroup_task_count(css->cgroup);
}

static u64 current_css_set_read(struct cgroup_subsys_state *css,
				struct cftype *cft)
{
	return (u64)(unsigned long)current->cgroups;
}

static u64 current_css_set_refcount_read(struct cgroup_subsys_state *css,
					 struct cftype *cft)
{
	u64 count;

	rcu_read_lock();
	count = atomic_read(&task_css_set(current)->refcount);
	rcu_read_unlock();
	return count;
}

static int current_css_set_cg_links_read(struct seq_file *seq, void *v)
{
	struct cgrp_cset_link *link;
	struct css_set *cset;
	char *name_buf;

	name_buf = kmalloc(NAME_MAX + 1, GFP_KERNEL);
	if (!name_buf)
		return -ENOMEM;

	down_read(&css_set_rwsem);
	rcu_read_lock();
	cset = rcu_dereference(current->cgroups);
	list_for_each_entry(link, &cset->cgrp_links, cgrp_link) {
		struct cgroup *c = link->cgrp;

		cgroup_name(c, name_buf, NAME_MAX + 1);
		seq_printf(seq, "Root %d group %s\n",
			   c->root->hierarchy_id, name_buf);
	}
	rcu_read_unlock();
	up_read(&css_set_rwsem);
	kfree(name_buf);
	return 0;
}

#define MAX_TASKS_SHOWN_PER_CSS 25
static int cgroup_css_links_read(struct seq_file *seq, void *v)
{
	struct cgroup_subsys_state *css = seq_css(seq);
	struct cgrp_cset_link *link;

	down_read(&css_set_rwsem);
	list_for_each_entry(link, &css->cgroup->cset_links, cset_link) {
		struct css_set *cset = link->cset;
		struct task_struct *task;
		int count = 0;

		seq_printf(seq, "css_set %p\n", cset);

		list_for_each_entry(task, &cset->tasks, cg_list) {
			if (count++ > MAX_TASKS_SHOWN_PER_CSS)
				goto overflow;
			seq_printf(seq, "  task %d\n", task_pid_vnr(task));
		}

		list_for_each_entry(task, &cset->mg_tasks, cg_list) {
			if (count++ > MAX_TASKS_SHOWN_PER_CSS)
				goto overflow;
			seq_printf(seq, "  task %d\n", task_pid_vnr(task));
		}
		continue;
	overflow:
		seq_puts(seq, "  ...\n");
	}
	up_read(&css_set_rwsem);
	return 0;
}

static u64 releasable_read(struct cgroup_subsys_state *css, struct cftype *cft)
{
	return test_bit(CGRP_RELEASABLE, &css->cgroup->flags);
}

static struct cftype debug_files[] = {
	{
		.name = "taskcount",
		.read_u64 = debug_taskcount_read,
	},

	{
		.name = "current_css_set",
		.read_u64 = current_css_set_read,
	},

	{
		.name = "current_css_set_refcount",
		.read_u64 = current_css_set_refcount_read,
	},

	{
		.name = "current_css_set_cg_links",
		.seq_show = current_css_set_cg_links_read,
	},

	{
		.name = "cgroup_css_links",
		.seq_show = cgroup_css_links_read,
	},

	{
		.name = "releasable",
		.read_u64 = releasable_read,
	},

	{ }	/* terminate */
};

struct cgroup_subsys debug_cgrp_subsys = {
	.css_alloc = debug_css_alloc,
	.css_free = debug_css_free,
	.base_cftypes = debug_files,
};
#endif /* CONFIG_CGROUP_DEBUG */