/*
 *  Generic process-grouping system.
 *
 *  Based originally on the cpuset system, extracted by Paul Menage
 *  Copyright (C) 2006 Google, Inc
 *
 *  Notifications support
 *  Copyright (C) 2009 Nokia Corporation
 *  Author: Kirill A. Shutemov
 *
 *  Copyright notices from the original cpuset code:
 *  --------------------------------------------------
 *  Copyright (C) 2003 BULL SA.
 *  Copyright (C) 2004-2006 Silicon Graphics, Inc.
 *
 *  Portions derived from Patrick Mochel's sysfs code.
 *  sysfs is Copyright (c) 2001-3 Patrick Mochel
 *
 *  2003-10-10 Written by Simon Derr.
 *  2003-10-22 Updates by Stephen Hemminger.
 *  2004 May-July Rework by Paul Jackson.
 *  ---------------------------------------------------
 *
 *  This file is subject to the terms and conditions of the GNU General Public
 *  License.  See the file COPYING in the main directory of the Linux
 *  distribution for more details.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/cgroup.h>
#include <linux/cred.h>
#include <linux/ctype.h>
#include <linux/errno.h>
#include <linux/init_task.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/mount.h>
#include <linux/pagemap.h>
#include <linux/proc_fs.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/rwsem.h>
#include <linux/string.h>
#include <linux/sort.h>
#include <linux/kmod.h>
#include <linux/delayacct.h>
#include <linux/cgroupstats.h>
#include <linux/hashtable.h>
#include <linux/pid_namespace.h>
#include <linux/idr.h>
#include <linux/vmalloc.h> /* TODO: replace with more sophisticated array */
#include <linux/kthread.h>
#include <linux/delay.h>

#include <linux/atomic.h>

/*
 * pidlists linger the following amount before being destroyed.  The goal
 * is avoiding frequent destruction in the middle of consecutive read calls.
 * Expiring in the middle is a performance problem not a correctness one.
 * 1 sec should be enough.
 */
#define CGROUP_PIDLIST_DESTROY_DELAY	HZ

#define CGROUP_FILE_NAME_MAX		(MAX_CGROUP_TYPE_NAMELEN +	\
					 MAX_CFTYPE_NAME + 2)

/*
 * cgroup_mutex is the master lock.  Any modification to cgroup or its
 * hierarchy must be performed while holding it.
 *
 * css_set_rwsem protects task->cgroups pointer, the list of css_set
 * objects, and the chain of tasks off each css_set.
 *
 * These locks are exported if CONFIG_PROVE_RCU so that accessors in
 * cgroup.h can use them for lockdep annotations.
 */
#ifdef CONFIG_PROVE_RCU
DEFINE_MUTEX(cgroup_mutex);
DECLARE_RWSEM(css_set_rwsem);
EXPORT_SYMBOL_GPL(cgroup_mutex);
EXPORT_SYMBOL_GPL(css_set_rwsem);
#else
static DEFINE_MUTEX(cgroup_mutex);
static DECLARE_RWSEM(css_set_rwsem);
#endif

/*
 * Protects cgroup_idr and css_idr so that IDs can be released without
 * grabbing cgroup_mutex.
 */
static DEFINE_SPINLOCK(cgroup_idr_lock);

/*
 * Protects cgroup_root->release_agent_path.  Modifying it also requires
 * cgroup_mutex.  Reading requires either cgroup_mutex or this spinlock.
 */
static DEFINE_SPINLOCK(release_agent_path_lock);

#define cgroup_assert_mutex_or_rcu_locked()				\
	rcu_lockdep_assert(rcu_read_lock_held() ||			\
			   lockdep_is_held(&cgroup_mutex),		\
			   "cgroup_mutex or RCU read lock required");

/*
 * cgroup destruction makes heavy use of work items and there can be a lot
 * of concurrent destructions.  Use a separate workqueue so that cgroup
 * destruction work items don't end up filling up max_active of system_wq
 * which may lead to deadlock.
 */
static struct workqueue_struct *cgroup_destroy_wq;
/*
 * pidlist destructions need to be flushed on cgroup destruction.  Use a
 * separate workqueue as flush domain.
 */
static struct workqueue_struct *cgroup_pidlist_destroy_wq;

/* generate an array of cgroup subsystem pointers */
#define SUBSYS(_x) [_x ## _cgrp_id] = &_x ## _cgrp_subsys,
static struct cgroup_subsys *cgroup_subsys[] = {
#include <linux/cgroup_subsys.h>
};
#undef SUBSYS

/* array of cgroup subsystem names */
#define SUBSYS(_x) [_x ## _cgrp_id] = #_x,
static const char *cgroup_subsys_name[] = {
#include <linux/cgroup_subsys.h>
};
#undef SUBSYS

/*
 * The default hierarchy, reserved for the subsystems that are otherwise
 * unattached - it never has more than a single cgroup, and all tasks are
 * part of that cgroup.
 */
struct cgroup_root cgrp_dfl_root;

/*
 * The default hierarchy always exists but is hidden until mounted for the
 * first time.  This is for backward compatibility.
 */
static bool cgrp_dfl_root_visible;

/* The list of hierarchy roots */

static LIST_HEAD(cgroup_roots);
static int cgroup_root_count;

/* hierarchy ID allocation and mapping, protected by cgroup_mutex */
static DEFINE_IDR(cgroup_hierarchy_idr);

/*
 * Assign a monotonically increasing serial number to cgroups.  It
 * guarantees cgroups with bigger numbers are newer than those with smaller
 * numbers.  Also, as cgroups are always appended to the parent's
 * ->children list, it guarantees that sibling cgroups are always sorted in
 * the ascending serial number order on the list.  Protected by
 * cgroup_mutex.
 */
static u64 cgroup_serial_nr_next = 1;

/* This flag indicates whether tasks in the fork and exit paths should
 * check for fork/exit handlers to call. This avoids us having to do
 * extra work in the fork/exit path if none of the subsystems need to
 * be called.
 */
static int need_forkexit_callback __read_mostly;

static struct cftype cgroup_base_files[];

static void cgroup_put(struct cgroup *cgrp);
static int rebind_subsystems(struct cgroup_root *dst_root,
			     unsigned int ss_mask);
static int cgroup_destroy_locked(struct cgroup *cgrp);
static int create_css(struct cgroup *cgrp, struct cgroup_subsys *ss);
static void kill_css(struct cgroup_subsys_state *css);
static int cgroup_addrm_files(struct cgroup *cgrp, struct cftype cfts[],
			      bool is_add);
static void cgroup_pidlist_destroy_all(struct cgroup *cgrp);

/* IDR wrappers which synchronize using cgroup_idr_lock */
static int cgroup_idr_alloc(struct idr *idr, void *ptr, int start, int end,
			    gfp_t gfp_mask)
{
	int ret;

	idr_preload(gfp_mask);
	spin_lock_bh(&cgroup_idr_lock);
	ret = idr_alloc(idr, ptr, start, end, gfp_mask);
	spin_unlock_bh(&cgroup_idr_lock);
	idr_preload_end();
	return ret;
}

static void *cgroup_idr_replace(struct idr *idr, void *ptr, int id)
{
	void *ret;

	spin_lock_bh(&cgroup_idr_lock);
	ret = idr_replace(idr, ptr, id);
	spin_unlock_bh(&cgroup_idr_lock);
	return ret;
}

static void cgroup_idr_remove(struct idr *idr, int id)
{
	spin_lock_bh(&cgroup_idr_lock);
	idr_remove(idr, id);
	spin_unlock_bh(&cgroup_idr_lock);
}

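/*
 * Illustrative sketch (editorial, not part of the original file): a caller
 * allocating a cgroup ID without holding cgroup_mutex across the idr
 * operation; the surrounding context is an assumption for the example.
 *
 *	id = cgroup_idr_alloc(&root->cgroup_idr, cgrp, 2, 0, GFP_NOWAIT);
 *	if (id < 0)
 *		return id;
 *	cgrp->id = id;
 */
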
/**
 * cgroup_css - obtain a cgroup's css for the specified subsystem
 * @cgrp: the cgroup of interest
 * @ss: the subsystem of interest (%NULL returns @cgrp->self)
 *
 * Return @cgrp's css (cgroup_subsys_state) associated with @ss.  This
 * function must be called either under cgroup_mutex or rcu_read_lock() and
 * the caller is responsible for pinning the returned css if it wants to
 * keep accessing it outside the said locks.  This function may return
 * %NULL if @cgrp doesn't have @ss enabled.
 */
static struct cgroup_subsys_state *cgroup_css(struct cgroup *cgrp,
					      struct cgroup_subsys *ss)
{
	if (ss)
		return rcu_dereference_check(cgrp->subsys[ss->id],
					lockdep_is_held(&cgroup_mutex));
	else
		return &cgrp->self;
}

/**
 * cgroup_e_css - obtain a cgroup's effective css for the specified subsystem
 * @cgrp: the cgroup of interest
 * @ss: the subsystem of interest (%NULL returns @cgrp->self)
 *
 * Similar to cgroup_css() but returns the effective css, which is defined
 * as the matching css of the nearest ancestor including self which has @ss
 * enabled.  If @ss is associated with the hierarchy @cgrp is on, this
 * function is guaranteed to return non-NULL css.
 */
static struct cgroup_subsys_state *cgroup_e_css(struct cgroup *cgrp,
						struct cgroup_subsys *ss)
{
	lockdep_assert_held(&cgroup_mutex);

	if (!ss)
		return &cgrp->self;

	if (!(cgrp->root->subsys_mask & (1 << ss->id)))
		return NULL;

	while (cgrp->parent &&
	       !(cgrp->parent->child_subsys_mask & (1 << ss->id)))
		cgrp = cgrp->parent;

	return cgroup_css(cgrp, ss);
}

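/*
 * Illustrative sketch (an editorial example, not from the original file):
 * pinning a css obtained via cgroup_css() for use outside the RCU section.
 * The subsystem chosen here is an assumption.
 *
 *	rcu_read_lock();
 *	css = cgroup_css(cgrp, &freezer_cgrp_subsys);
 *	if (css && !css_tryget_online(css))
 *		css = NULL;
 *	rcu_read_unlock();
 *	if (css) {
 *		... use css, then css_put(css) ...
 *	}
 */
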
/* convenient tests for these bits */
static inline bool cgroup_is_dead(const struct cgroup *cgrp)
{
	return test_bit(CGRP_DEAD, &cgrp->flags);
}

struct cgroup_subsys_state *of_css(struct kernfs_open_file *of)
{
	struct cgroup *cgrp = of->kn->parent->priv;
	struct cftype *cft = of_cft(of);

	/*
	 * This is open and unprotected implementation of cgroup_css().
	 * of_css() is only called from a kernfs file operation which has
	 * an active reference on the file.  Because all the subsystem
	 * files are drained before a css is disassociated with a cgroup,
	 * the matching css from the cgroup's subsys table is guaranteed to
	 * be and stay valid until the enclosing operation is complete.
	 */
	if (cft->ss)
		return rcu_dereference_raw(cgrp->subsys[cft->ss->id]);
	else
		return &cgrp->self;
}
EXPORT_SYMBOL_GPL(of_css);

/**
 * cgroup_is_descendant - test ancestry
 * @cgrp: the cgroup to be tested
 * @ancestor: possible ancestor of @cgrp
 *
 * Test whether @cgrp is a descendant of @ancestor.  It also returns %true
 * if @cgrp == @ancestor.  This function is safe to call as long as @cgrp
 * and @ancestor are accessible.
 */
bool cgroup_is_descendant(struct cgroup *cgrp, struct cgroup *ancestor)
{
	while (cgrp) {
		if (cgrp == ancestor)
			return true;
		cgrp = cgrp->parent;
	}
	return false;
}

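/*
 * Illustrative use (an assumption, not from the original file): checking
 * whether a task's cgroup falls within a given subtree.
 *
 *	if (cgroup_is_descendant(task_cgrp, subtree_root))
 *		... task_cgrp is subtree_root itself or below it ...
 */
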
static int cgroup_is_releasable(const struct cgroup *cgrp)
{
	const int bits =
		(1 << CGRP_RELEASABLE) |
		(1 << CGRP_NOTIFY_ON_RELEASE);
	return (cgrp->flags & bits) == bits;
}

static int notify_on_release(const struct cgroup *cgrp)
{
	return test_bit(CGRP_NOTIFY_ON_RELEASE, &cgrp->flags);
}

/**
 * for_each_css - iterate all css's of a cgroup
 * @css: the iteration cursor
 * @ssid: the index of the subsystem, CGROUP_SUBSYS_COUNT after reaching the end
 * @cgrp: the target cgroup to iterate css's of
 *
 * Should be called under cgroup_mutex.
 */
#define for_each_css(css, ssid, cgrp)					\
	for ((ssid) = 0; (ssid) < CGROUP_SUBSYS_COUNT; (ssid)++)	\
		if (!((css) = rcu_dereference_check(			\
				(cgrp)->subsys[(ssid)],			\
				lockdep_is_held(&cgroup_mutex)))) { }	\
		else

/**
 * for_each_e_css - iterate all effective css's of a cgroup
 * @css: the iteration cursor
 * @ssid: the index of the subsystem, CGROUP_SUBSYS_COUNT after reaching the end
 * @cgrp: the target cgroup to iterate css's of
 *
 * Should be called under cgroup_mutex.
 */
#define for_each_e_css(css, ssid, cgrp)					\
	for ((ssid) = 0; (ssid) < CGROUP_SUBSYS_COUNT; (ssid)++)	\
		if (!((css) = cgroup_e_css(cgrp, cgroup_subsys[(ssid)]))) \
			;						\
		else

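/*
 * Illustrative sketch (editorial, not from the original file): visiting
 * every attached css of @cgrp under cgroup_mutex.
 *
 *	struct cgroup_subsys_state *css;
 *	int ssid;
 *
 *	lockdep_assert_held(&cgroup_mutex);
 *	for_each_css(css, ssid, cgrp)
 *		pr_debug("%s css is attached\n", cgroup_subsys_name[ssid]);
 */
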
/**
 * for_each_subsys - iterate all enabled cgroup subsystems
 * @ss: the iteration cursor
 * @ssid: the index of @ss, CGROUP_SUBSYS_COUNT after reaching the end
 */
#define for_each_subsys(ss, ssid)					\
	for ((ssid) = 0; (ssid) < CGROUP_SUBSYS_COUNT &&		\
	     (((ss) = cgroup_subsys[ssid]) || true); (ssid)++)

/* iterate across the hierarchies */
#define for_each_root(root)						\
	list_for_each_entry((root), &cgroup_roots, root_list)

/* iterate over child cgrps, lock should be held throughout iteration */
#define cgroup_for_each_live_child(child, cgrp)				\
	list_for_each_entry((child), &(cgrp)->children, sibling)	\
		if (({ lockdep_assert_held(&cgroup_mutex);		\
		       cgroup_is_dead(child); }))			\
			;						\
		else

/* the list of cgroups eligible for automatic release. Protected by
 * release_list_lock */
static LIST_HEAD(release_list);
static DEFINE_RAW_SPINLOCK(release_list_lock);
static void cgroup_release_agent(struct work_struct *work);
static DECLARE_WORK(release_agent_work, cgroup_release_agent);
static void check_for_release(struct cgroup *cgrp);

/*
 * A cgroup can be associated with multiple css_sets as different tasks may
 * belong to different cgroups on different hierarchies.  In the other
 * direction, a css_set is naturally associated with multiple cgroups.
 * This M:N relationship is represented by the following link structure
 * which exists for each association and allows traversing the associations
 * from both sides.
 */
struct cgrp_cset_link {
	/* the cgroup and css_set this link associates */
	struct cgroup		*cgrp;
	struct css_set		*cset;

	/* list of cgrp_cset_links anchored at cgrp->cset_links */
	struct list_head	cset_link;

	/* list of cgrp_cset_links anchored at css_set->cgrp_links */
	struct list_head	cgrp_link;
};

/*
 * The default css_set - used by init and its children prior to any
 * hierarchies being mounted. It contains a pointer to the root state
 * for each subsystem. Also used to anchor the list of css_sets. Not
 * reference-counted, to improve performance when child cgroups
 * haven't been created.
 */
struct css_set init_css_set = {
	.refcount		= ATOMIC_INIT(1),
	.cgrp_links		= LIST_HEAD_INIT(init_css_set.cgrp_links),
	.tasks			= LIST_HEAD_INIT(init_css_set.tasks),
	.mg_tasks		= LIST_HEAD_INIT(init_css_set.mg_tasks),
	.mg_preload_node	= LIST_HEAD_INIT(init_css_set.mg_preload_node),
	.mg_node		= LIST_HEAD_INIT(init_css_set.mg_node),
};

static int css_set_count	= 1;	/* 1 for init_css_set */

/**
 * cgroup_update_populated - update the populated count of a cgroup
 * @cgrp: the target cgroup
 * @populated: inc or dec populated count
 *
 * @cgrp is either getting the first task (css_set) or losing the last.
 * Update @cgrp->populated_cnt accordingly.  The count is propagated
 * towards root so that a given cgroup's populated_cnt is zero iff the
 * cgroup and all its descendants are empty.
 *
 * @cgrp's interface file "cgroup.populated" is zero if
 * @cgrp->populated_cnt is zero and 1 otherwise.  When @cgrp->populated_cnt
 * changes from or to zero, userland is notified that the content of the
 * interface file has changed.  This can be used to detect when @cgrp and
 * its descendants become populated or empty.
 */
static void cgroup_update_populated(struct cgroup *cgrp, bool populated)
{
	lockdep_assert_held(&css_set_rwsem);

	do {
		bool trigger;

		if (populated)
			trigger = !cgrp->populated_cnt++;
		else
			trigger = !--cgrp->populated_cnt;

		if (!trigger)
			break;

		if (cgrp->populated_kn)
			kernfs_notify(cgrp->populated_kn);
		cgrp = cgrp->parent;
	} while (cgrp);
}

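/*
 * Userspace-side sketch (an assumption for illustration): the
 * kernfs_notify() above lets a manager poll "cgroup.populated" to learn
 * when a cgroup subtree becomes or stops being populated.
 *
 *	int fd = open("mygrp/cgroup.populated", O_RDONLY);
 *	struct pollfd pfd = { .fd = fd, .events = POLLPRI };
 *	poll(&pfd, 1, -1);	wakes when the file's content changes
 */
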
/*
 * hash table for css_sets. This improves the performance of finding an
 * existing css_set. This hash doesn't (currently) take into account
 * cgroups in empty hierarchies.
 */
#define CSS_SET_HASH_BITS	7
static DEFINE_HASHTABLE(css_set_table, CSS_SET_HASH_BITS);

static unsigned long css_set_hash(struct cgroup_subsys_state *css[])
{
	unsigned long key = 0UL;
	struct cgroup_subsys *ss;
	int i;

	for_each_subsys(ss, i)
		key += (unsigned long)css[i];
	key = (key >> 16) ^ key;

	return key;
}

static void put_css_set_locked(struct css_set *cset, bool taskexit)
{
	struct cgrp_cset_link *link, *tmp_link;
	struct cgroup_subsys *ss;
	int ssid;

	lockdep_assert_held(&css_set_rwsem);

	if (!atomic_dec_and_test(&cset->refcount))
		return;

	/* This css_set is dead. unlink it and release cgroup refcounts */
	for_each_subsys(ss, ssid)
		list_del(&cset->e_cset_node[ssid]);
	hash_del(&cset->hlist);
	css_set_count--;

	list_for_each_entry_safe(link, tmp_link, &cset->cgrp_links, cgrp_link) {
		struct cgroup *cgrp = link->cgrp;

		list_del(&link->cset_link);
		list_del(&link->cgrp_link);

		/* @cgrp can't go away while we're holding css_set_rwsem */
		if (list_empty(&cgrp->cset_links)) {
			cgroup_update_populated(cgrp, false);
			if (notify_on_release(cgrp)) {
				if (taskexit)
					set_bit(CGRP_RELEASABLE, &cgrp->flags);
				check_for_release(cgrp);
			}
		}

		kfree(link);
	}

	kfree_rcu(cset, rcu_head);
}

static void put_css_set(struct css_set *cset, bool taskexit)
{
	/*
	 * Ensure that the refcount doesn't hit zero while any readers
	 * can see it. Similar to atomic_dec_and_lock(), but for an
	 * rwsem.
	 */
	if (atomic_add_unless(&cset->refcount, -1, 1))
		return;

	down_write(&css_set_rwsem);
	put_css_set_locked(cset, taskexit);
	up_write(&css_set_rwsem);
}

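/*
 * Editorial note on the pattern above: atomic_add_unless(&cset->refcount,
 * -1, 1) only drops the count when it isn't 1, so the common case skips
 * css_set_rwsem entirely, while the final 1 -> 0 transition always happens
 * with the rwsem write-locked, mirroring atomic_dec_and_lock().
 */
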
/*
 * refcounted get/put for css_set objects
 */
static inline void get_css_set(struct css_set *cset)
{
	atomic_inc(&cset->refcount);
}

/**
 * compare_css_sets - helper function for find_existing_css_set().
 * @cset: candidate css_set being tested
 * @old_cset: existing css_set for a task
 * @new_cgrp: cgroup that's being entered by the task
 * @template: desired set of css pointers in css_set (pre-calculated)
 *
 * Returns true if "cset" matches "old_cset" except for the hierarchy
 * which "new_cgrp" belongs to, for which it should match "new_cgrp".
 */
static bool compare_css_sets(struct css_set *cset,
			     struct css_set *old_cset,
			     struct cgroup *new_cgrp,
			     struct cgroup_subsys_state *template[])
{
	struct list_head *l1, *l2;

	/*
	 * On the default hierarchy, there can be csets which are
	 * associated with the same set of cgroups but different csses.
	 * Let's first ensure that csses match.
	 */
	if (memcmp(template, cset->subsys, sizeof(cset->subsys)))
		return false;

	/*
	 * Compare cgroup pointers in order to distinguish between
	 * different cgroups in hierarchies.  As different cgroups may
	 * share the same effective css, this comparison is always
	 * necessary.
	 */
	l1 = &cset->cgrp_links;
	l2 = &old_cset->cgrp_links;
	while (1) {
		struct cgrp_cset_link *link1, *link2;
		struct cgroup *cgrp1, *cgrp2;

		l1 = l1->next;
		l2 = l2->next;
		/* See if we reached the end - both lists are equal length. */
		if (l1 == &cset->cgrp_links) {
			BUG_ON(l2 != &old_cset->cgrp_links);
			break;
		} else {
			BUG_ON(l2 == &old_cset->cgrp_links);
		}
		/* Locate the cgroups associated with these links. */
		link1 = list_entry(l1, struct cgrp_cset_link, cgrp_link);
		link2 = list_entry(l2, struct cgrp_cset_link, cgrp_link);
		cgrp1 = link1->cgrp;
		cgrp2 = link2->cgrp;
		/* Hierarchies should be linked in the same order. */
		BUG_ON(cgrp1->root != cgrp2->root);

		/*
		 * If this hierarchy is the hierarchy of the cgroup
		 * that's changing, then we need to check that this
		 * css_set points to the new cgroup; if it's any other
		 * hierarchy, then this css_set should point to the
		 * same cgroup as the old css_set.
		 */
		if (cgrp1->root == new_cgrp->root) {
			if (cgrp1 != new_cgrp)
				return false;
		} else {
			if (cgrp1 != cgrp2)
				return false;
		}
	}
	return true;
}

/**
 * find_existing_css_set - init css array and find the matching css_set
 * @old_cset: the css_set that we're using before the cgroup transition
 * @cgrp: the cgroup that we're moving into
 * @template: out param for the new set of csses, should be clear on entry
 */
static struct css_set *find_existing_css_set(struct css_set *old_cset,
					struct cgroup *cgrp,
					struct cgroup_subsys_state *template[])
{
	struct cgroup_root *root = cgrp->root;
	struct cgroup_subsys *ss;
	struct css_set *cset;
	unsigned long key;
	int i;

	/*
	 * Build the set of subsystem state objects that we want to see in the
	 * new css_set. While subsystems can change globally, the entries here
	 * won't change, so no need for locking.
	 */
	for_each_subsys(ss, i) {
		if (root->subsys_mask & (1UL << i)) {
			/*
			 * @ss is in this hierarchy, so we want the
			 * effective css from @cgrp.
			 */
			template[i] = cgroup_e_css(cgrp, ss);
		} else {
			/*
			 * @ss is not in this hierarchy, so we don't want
			 * to change the css.
			 */
			template[i] = old_cset->subsys[i];
		}
	}

	key = css_set_hash(template);
	hash_for_each_possible(css_set_table, cset, hlist, key) {
		if (!compare_css_sets(cset, old_cset, cgrp, template))
			continue;

		/* This css_set matches what we need */
		return cset;
	}

	/* No existing css_set matched */
	return NULL;
}

static void free_cgrp_cset_links(struct list_head *links_to_free)
{
	struct cgrp_cset_link *link, *tmp_link;

	list_for_each_entry_safe(link, tmp_link, links_to_free, cset_link) {
		list_del(&link->cset_link);
		kfree(link);
	}
}

/**
 * allocate_cgrp_cset_links - allocate cgrp_cset_links
 * @count: the number of links to allocate
 * @tmp_links: list_head the allocated links are put on
 *
 * Allocate @count cgrp_cset_link structures and chain them on @tmp_links
 * through ->cset_link.  Returns 0 on success or -errno.
 */
static int allocate_cgrp_cset_links(int count, struct list_head *tmp_links)
{
	struct cgrp_cset_link *link;
	int i;

	INIT_LIST_HEAD(tmp_links);

	for (i = 0; i < count; i++) {
		link = kzalloc(sizeof(*link), GFP_KERNEL);
		if (!link) {
			free_cgrp_cset_links(tmp_links);
			return -ENOMEM;
		}
		list_add(&link->cset_link, tmp_links);
	}
	return 0;
}

/**
 * link_css_set - a helper function to link a css_set to a cgroup
 * @tmp_links: cgrp_cset_link objects allocated by allocate_cgrp_cset_links()
 * @cset: the css_set to be linked
 * @cgrp: the destination cgroup
 */
static void link_css_set(struct list_head *tmp_links, struct css_set *cset,
			 struct cgroup *cgrp)
{
	struct cgrp_cset_link *link;

	BUG_ON(list_empty(tmp_links));

	if (cgroup_on_dfl(cgrp))
		cset->dfl_cgrp = cgrp;

	link = list_first_entry(tmp_links, struct cgrp_cset_link, cset_link);
	link->cset = cset;
	link->cgrp = cgrp;

	if (list_empty(&cgrp->cset_links))
		cgroup_update_populated(cgrp, true);
	list_move(&link->cset_link, &cgrp->cset_links);

	/*
	 * Always add links to the tail of the list so that the list
	 * is sorted by order of hierarchy creation
	 */
	list_add_tail(&link->cgrp_link, &cset->cgrp_links);
}

/**
 * find_css_set - return a new css_set with one cgroup updated
 * @old_cset: the baseline css_set
 * @cgrp: the cgroup to be updated
 *
 * Return a new css_set that's equivalent to @old_cset, but with @cgrp
 * substituted into the appropriate hierarchy.
 */
static struct css_set *find_css_set(struct css_set *old_cset,
				    struct cgroup *cgrp)
{
	struct cgroup_subsys_state *template[CGROUP_SUBSYS_COUNT] = { };
	struct css_set *cset;
	struct list_head tmp_links;
	struct cgrp_cset_link *link;
	struct cgroup_subsys *ss;
	unsigned long key;
	int ssid;

	lockdep_assert_held(&cgroup_mutex);

	/* First see if we already have a css_set that matches
	 * the desired set */
	down_read(&css_set_rwsem);
	cset = find_existing_css_set(old_cset, cgrp, template);
	if (cset)
		get_css_set(cset);
	up_read(&css_set_rwsem);

	if (cset)
		return cset;

	cset = kzalloc(sizeof(*cset), GFP_KERNEL);
	if (!cset)
		return NULL;

	/* Allocate all the cgrp_cset_link objects that we'll need */
	if (allocate_cgrp_cset_links(cgroup_root_count, &tmp_links) < 0) {
		kfree(cset);
		return NULL;
	}

	atomic_set(&cset->refcount, 1);
	INIT_LIST_HEAD(&cset->cgrp_links);
	INIT_LIST_HEAD(&cset->tasks);
	INIT_LIST_HEAD(&cset->mg_tasks);
	INIT_LIST_HEAD(&cset->mg_preload_node);
	INIT_LIST_HEAD(&cset->mg_node);
	INIT_HLIST_NODE(&cset->hlist);

	/* Copy the set of subsystem state objects generated in
	 * find_existing_css_set() */
	memcpy(cset->subsys, template, sizeof(cset->subsys));

	down_write(&css_set_rwsem);
	/* Add reference counts and links from the new css_set. */
	list_for_each_entry(link, &old_cset->cgrp_links, cgrp_link) {
		struct cgroup *c = link->cgrp;

		if (c->root == cgrp->root)
			c = cgrp;
		link_css_set(&tmp_links, cset, c);
	}

	BUG_ON(!list_empty(&tmp_links));

	css_set_count++;

	/* Add @cset to the hash table */
	key = css_set_hash(cset->subsys);
	hash_add(css_set_table, &cset->hlist, key);

	for_each_subsys(ss, ssid)
		list_add_tail(&cset->e_cset_node[ssid],
			      &cset->subsys[ssid]->cgroup->e_csets[ssid]);

	up_write(&css_set_rwsem);

	return cset;
}
812
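/*
 * Illustrative sketch (editorial; the migration context is an assumption):
 * how a caller typically uses find_css_set() when moving a task.
 *
 *	old_cset = task_css_set(tsk);
 *	new_cset = find_css_set(old_cset, dst_cgrp);
 *	if (!new_cset)
 *		return -ENOMEM;
 *	... switch tsk->cgroups over to new_cset ...
 *	put_css_set(old_cset, false);
 */
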
static struct cgroup_root *cgroup_root_from_kf(struct kernfs_root *kf_root)
813
{
814
	struct cgroup *root_cgrp = kf_root->kn->priv;
T
Tejun Heo 已提交
815

816
	return root_cgrp->root;
T
Tejun Heo 已提交
817 818
}

819
static int cgroup_init_root_id(struct cgroup_root *root)
820 821 822 823 824
{
	int id;

	lockdep_assert_held(&cgroup_mutex);

825
	id = idr_alloc_cyclic(&cgroup_hierarchy_idr, root, 0, 0, GFP_KERNEL);
826 827 828 829 830 831 832
	if (id < 0)
		return id;

	root->hierarchy_id = id;
	return 0;
}

833
static void cgroup_exit_root_id(struct cgroup_root *root)
834 835 836 837 838 839 840 841 842
{
	lockdep_assert_held(&cgroup_mutex);

	if (root->hierarchy_id) {
		idr_remove(&cgroup_hierarchy_idr, root->hierarchy_id);
		root->hierarchy_id = 0;
	}
}

843
static void cgroup_free_root(struct cgroup_root *root)
844 845 846 847 848 849 850 851 852 853
{
	if (root) {
		/* hierarhcy ID shoulid already have been released */
		WARN_ON_ONCE(root->hierarchy_id);

		idr_destroy(&root->cgroup_idr);
		kfree(root);
	}
}

static void cgroup_destroy_root(struct cgroup_root *root)
{
	struct cgroup *cgrp = &root->cgrp;
	struct cgrp_cset_link *link, *tmp_link;

	mutex_lock(&cgroup_mutex);

	BUG_ON(atomic_read(&root->nr_cgrps));
	BUG_ON(!list_empty(&cgrp->children));

	/* Rebind all subsystems back to the default hierarchy */
	rebind_subsystems(&cgrp_dfl_root, root->subsys_mask);

	/*
	 * Release all the links from cset_links to this hierarchy's
	 * root cgroup
	 */
	down_write(&css_set_rwsem);

	list_for_each_entry_safe(link, tmp_link, &cgrp->cset_links, cset_link) {
		list_del(&link->cset_link);
		list_del(&link->cgrp_link);
		kfree(link);
	}
	up_write(&css_set_rwsem);

	if (!list_empty(&root->root_list)) {
		list_del(&root->root_list);
		cgroup_root_count--;
	}

	cgroup_exit_root_id(root);

	mutex_unlock(&cgroup_mutex);

	kernfs_destroy_root(root->kf_root);
	cgroup_free_root(root);
}

/* look up cgroup associated with given css_set on the specified hierarchy */
static struct cgroup *cset_cgroup_from_root(struct css_set *cset,
					    struct cgroup_root *root)
{
	struct cgroup *res = NULL;

	lockdep_assert_held(&cgroup_mutex);
	lockdep_assert_held(&css_set_rwsem);

	if (cset == &init_css_set) {
		res = &root->cgrp;
	} else {
		struct cgrp_cset_link *link;

		list_for_each_entry(link, &cset->cgrp_links, cgrp_link) {
			struct cgroup *c = link->cgrp;

			if (c->root == root) {
				res = c;
				break;
			}
		}
	}

	BUG_ON(!res);
	return res;
}

/*
 * Return the cgroup for "task" from the given hierarchy. Must be
 * called with cgroup_mutex and css_set_rwsem held.
 */
static struct cgroup *task_cgroup_from_root(struct task_struct *task,
					    struct cgroup_root *root)
{
	/*
	 * No need to lock the task - since we hold cgroup_mutex the
	 * task can't change groups, so the only thing that can happen
	 * is that it exits and its css is set back to init_css_set.
	 */
	return cset_cgroup_from_root(task_css_set(task), root);
}

/*
 * A task must hold cgroup_mutex to modify cgroups.
 *
 * Any task can increment and decrement the count field without lock.
 * So in general, code holding cgroup_mutex can't rely on the count
 * field not changing.  However, if the count goes to zero, then only
 * cgroup_attach_task() can increment it again.  Because a count of zero
 * means that no tasks are currently attached, therefore there is no
 * way a task attached to that cgroup can fork (the other way to
 * increment the count).  So code holding cgroup_mutex can safely
 * assume that if the count is zero, it will stay zero. Similarly, if
 * a task holds cgroup_mutex on a cgroup with zero count, it
 * knows that the cgroup won't be removed, as cgroup_rmdir()
 * needs that mutex.
 *
 * The fork and exit callbacks cgroup_fork() and cgroup_exit(), don't
 * (usually) take cgroup_mutex.  These are the two most performance
 * critical pieces of code here.  The exception occurs on cgroup_exit(),
 * when a task in a notify_on_release cgroup exits.  Then cgroup_mutex
 * is taken, and if the cgroup count is zero, a usermode call made
 * to the release agent with the name of the cgroup (path relative to
 * the root of cgroup file system) as the argument.
 *
 * A cgroup can only be deleted if both its 'count' of using tasks
 * is zero, and its list of 'children' cgroups is empty.  Since all
 * tasks in the system use _some_ cgroup, and since there is always at
 * least one task in the system (init, pid == 1), therefore, root cgroup
 * always has either children cgroups and/or using tasks.  So we don't
 * need a special hack to ensure that root cgroup cannot be deleted.
 *
 * P.S.  One more locking exception.  RCU is used to guard the
 * update of a tasks cgroup pointer by cgroup_attach_task()
 */

static int cgroup_populate_dir(struct cgroup *cgrp, unsigned int subsys_mask);
static struct kernfs_syscall_ops cgroup_kf_syscall_ops;
static const struct file_operations proc_cgroupstats_operations;

static char *cgroup_file_name(struct cgroup *cgrp, const struct cftype *cft,
			      char *buf)
{
	if (cft->ss && !(cft->flags & CFTYPE_NO_PREFIX) &&
	    !(cgrp->root->flags & CGRP_ROOT_NOPREFIX))
		snprintf(buf, CGROUP_FILE_NAME_MAX, "%s.%s",
			 cft->ss->name, cft->name);
	else
		strncpy(buf, cft->name, CGROUP_FILE_NAME_MAX);
	return buf;
}

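/*
 * Example (editorial, not from the original file): with the "memory"
 * controller and a cftype named "limit_in_bytes", cgroup_file_name()
 * yields "memory.limit_in_bytes"; on a hierarchy mounted with the
 * noprefix option it yields plain "limit_in_bytes".
 */
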
/**
 * cgroup_file_mode - deduce file mode of a control file
 * @cft: the control file in question
 *
 * returns cft->mode if ->mode is not 0
 * returns S_IRUGO|S_IWUSR if it has both a read and a write handler
 * returns S_IRUGO if it has only a read handler
 * returns S_IWUSR if it has only a write handler
 */
static umode_t cgroup_file_mode(const struct cftype *cft)
{
	umode_t mode = 0;

	if (cft->mode)
		return cft->mode;

	if (cft->read_u64 || cft->read_s64 || cft->seq_show)
		mode |= S_IRUGO;

	if (cft->write_u64 || cft->write_s64 || cft->write)
		mode |= S_IWUSR;

	return mode;
}

static void cgroup_free_fn(struct work_struct *work)
{
	struct cgroup *cgrp = container_of(work, struct cgroup, destroy_work);

	atomic_dec(&cgrp->root->nr_cgrps);
	cgroup_pidlist_destroy_all(cgrp);

	if (cgrp->parent) {
		/*
		 * We get a ref to the parent, and put the ref when this
		 * cgroup is being freed, so it's guaranteed that the
		 * parent won't be destroyed before its children.
		 */
		cgroup_put(cgrp->parent);
		kernfs_put(cgrp->kn);
		kfree(cgrp);
	} else {
		/*
		 * This is root cgroup's refcnt reaching zero, which
		 * indicates that the root should be released.
		 */
		cgroup_destroy_root(cgrp->root);
	}
}

static void cgroup_free_rcu(struct rcu_head *head)
{
	struct cgroup *cgrp = container_of(head, struct cgroup, rcu_head);

	INIT_WORK(&cgrp->destroy_work, cgroup_free_fn);
	queue_work(cgroup_destroy_wq, &cgrp->destroy_work);
}

static void cgroup_get(struct cgroup *cgrp)
{
	WARN_ON_ONCE(cgroup_is_dead(cgrp));
	WARN_ON_ONCE(atomic_read(&cgrp->refcnt) <= 0);
	atomic_inc(&cgrp->refcnt);
}

static void cgroup_put(struct cgroup *cgrp)
{
	if (!atomic_dec_and_test(&cgrp->refcnt))
		return;
	if (WARN_ON_ONCE(cgrp->parent && !cgroup_is_dead(cgrp)))
		return;

	/* delete this cgroup from parent->children */
	mutex_lock(&cgroup_mutex);
	list_del_rcu(&cgrp->sibling);
	mutex_unlock(&cgroup_mutex);

	cgroup_idr_remove(&cgrp->root->cgroup_idr, cgrp->id);
	cgrp->id = -1;

	call_rcu(&cgrp->rcu_head, cgroup_free_rcu);
}

/**
 * cgroup_kn_unlock - unlocking helper for cgroup kernfs methods
 * @kn: the kernfs_node being serviced
 *
 * This helper undoes cgroup_kn_lock_live() and should be invoked before
 * the method finishes if locking succeeded.  Note that once this function
 * returns the cgroup returned by cgroup_kn_lock_live() may become
 * inaccessible any time.  If the caller intends to continue to access the
 * cgroup, it should pin it before invoking this function.
 */
static void cgroup_kn_unlock(struct kernfs_node *kn)
{
	struct cgroup *cgrp;

	if (kernfs_type(kn) == KERNFS_DIR)
		cgrp = kn->priv;
	else
		cgrp = kn->parent->priv;

	mutex_unlock(&cgroup_mutex);

	kernfs_unbreak_active_protection(kn);
	cgroup_put(cgrp);
}

/**
 * cgroup_kn_lock_live - locking helper for cgroup kernfs methods
 * @kn: the kernfs_node being serviced
 *
 * This helper is to be used by a cgroup kernfs method currently servicing
 * @kn.  It breaks the active protection, performs cgroup locking and
 * verifies that the associated cgroup is alive.  Returns the cgroup if
 * alive; otherwise, %NULL.  A successful return should be undone by a
 * matching cgroup_kn_unlock() invocation.
 *
 * Any cgroup kernfs method implementation which requires locking the
 * associated cgroup should use this helper.  It avoids nesting cgroup
 * locking under kernfs active protection and allows all kernfs operations
 * including self-removal.
 */
static struct cgroup *cgroup_kn_lock_live(struct kernfs_node *kn)
{
	struct cgroup *cgrp;

	if (kernfs_type(kn) == KERNFS_DIR)
		cgrp = kn->priv;
	else
		cgrp = kn->parent->priv;

	/*
	 * We're gonna grab cgroup_mutex which nests outside kernfs
	 * active_ref.  cgroup liveliness check alone provides enough
	 * protection against removal.  Ensure @cgrp stays accessible and
	 * break the active_ref protection.
	 */
	cgroup_get(cgrp);
	kernfs_break_active_protection(kn);

	mutex_lock(&cgroup_mutex);

	if (!cgroup_is_dead(cgrp))
		return cgrp;

	cgroup_kn_unlock(kn);
	return NULL;
}

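/*
 * Typical caller shape (an illustrative sketch; the method name and body
 * are assumptions, not part of the original file):
 *
 *	static ssize_t example_write(struct kernfs_open_file *of, char *buf,
 *				     size_t nbytes, loff_t off)
 *	{
 *		struct cgroup *cgrp = cgroup_kn_lock_live(of->kn);
 *
 *		if (!cgrp)
 *			return -ENODEV;
 *		... operate on @cgrp under cgroup_mutex ...
 *		cgroup_kn_unlock(of->kn);
 *		return nbytes;
 *	}
 */
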
static void cgroup_rm_file(struct cgroup *cgrp, const struct cftype *cft)
{
	char name[CGROUP_FILE_NAME_MAX];

	lockdep_assert_held(&cgroup_mutex);
	kernfs_remove_by_name(cgrp->kn, cgroup_file_name(cgrp, cft, name));
}

/**
 * cgroup_clear_dir - remove subsys files in a cgroup directory
 * @cgrp: target cgroup
 * @subsys_mask: mask of the subsystem ids whose files should be removed
 */
static void cgroup_clear_dir(struct cgroup *cgrp, unsigned int subsys_mask)
{
	struct cgroup_subsys *ss;
	int i;

	for_each_subsys(ss, i) {
		struct cftype *cfts;

		if (!(subsys_mask & (1 << i)))
			continue;
		list_for_each_entry(cfts, &ss->cfts, node)
			cgroup_addrm_files(cgrp, cfts, false);
	}
}

static int rebind_subsystems(struct cgroup_root *dst_root, unsigned int ss_mask)
{
	struct cgroup_subsys *ss;
	int ssid, i, ret;

	lockdep_assert_held(&cgroup_mutex);

	for_each_subsys(ss, ssid) {
		if (!(ss_mask & (1 << ssid)))
			continue;

		/* if @ss has non-root csses attached to it, can't move */
		if (css_next_child(NULL, cgroup_css(&ss->root->cgrp, ss)))
			return -EBUSY;

		/* can't move between two non-dummy roots either */
		if (ss->root != &cgrp_dfl_root && dst_root != &cgrp_dfl_root)
			return -EBUSY;
	}

	ret = cgroup_populate_dir(&dst_root->cgrp, ss_mask);
	if (ret) {
		if (dst_root != &cgrp_dfl_root)
			return ret;

		/*
		 * Rebinding back to the default root is not allowed to
		 * fail.  Using both default and non-default roots should
		 * be rare.  Moving subsystems back and forth even more so.
		 * Just warn about it and continue.
		 */
		if (cgrp_dfl_root_visible) {
			pr_warn("failed to create files (%d) while rebinding 0x%x to default root\n",
				ret, ss_mask);
			pr_warn("you may retry by moving them to a different hierarchy and unbinding\n");
		}
	}

	/*
	 * Nothing can fail from this point on.  Remove files for the
	 * removed subsystems and rebind each subsystem.
	 */
	for_each_subsys(ss, ssid)
		if (ss_mask & (1 << ssid))
			cgroup_clear_dir(&ss->root->cgrp, 1 << ssid);

	for_each_subsys(ss, ssid) {
		struct cgroup_root *src_root;
		struct cgroup_subsys_state *css;
		struct css_set *cset;

		if (!(ss_mask & (1 << ssid)))
			continue;

		src_root = ss->root;
		css = cgroup_css(&src_root->cgrp, ss);

		WARN_ON(!css || cgroup_css(&dst_root->cgrp, ss));

		RCU_INIT_POINTER(src_root->cgrp.subsys[ssid], NULL);
		rcu_assign_pointer(dst_root->cgrp.subsys[ssid], css);
		ss->root = dst_root;
		css->cgroup = &dst_root->cgrp;

		down_write(&css_set_rwsem);
		hash_for_each(css_set_table, i, cset, hlist)
			list_move_tail(&cset->e_cset_node[ss->id],
				       &dst_root->cgrp.e_csets[ss->id]);
		up_write(&css_set_rwsem);

		src_root->subsys_mask &= ~(1 << ssid);
		src_root->cgrp.child_subsys_mask &= ~(1 << ssid);

		/* default hierarchy doesn't enable controllers by default */
		dst_root->subsys_mask |= 1 << ssid;
		if (dst_root != &cgrp_dfl_root)
			dst_root->cgrp.child_subsys_mask |= 1 << ssid;

		if (ss->bind)
			ss->bind(css);
	}

	kernfs_activate(dst_root->cgrp.kn);
	return 0;
}

static int cgroup_show_options(struct seq_file *seq,
			       struct kernfs_root *kf_root)
{
	struct cgroup_root *root = cgroup_root_from_kf(kf_root);
	struct cgroup_subsys *ss;
	int ssid;

	for_each_subsys(ss, ssid)
		if (root->subsys_mask & (1 << ssid))
			seq_printf(seq, ",%s", ss->name);
	if (root->flags & CGRP_ROOT_SANE_BEHAVIOR)
		seq_puts(seq, ",sane_behavior");
	if (root->flags & CGRP_ROOT_NOPREFIX)
		seq_puts(seq, ",noprefix");
	if (root->flags & CGRP_ROOT_XATTR)
		seq_puts(seq, ",xattr");

	spin_lock(&release_agent_path_lock);
	if (strlen(root->release_agent_path))
		seq_printf(seq, ",release_agent=%s", root->release_agent_path);
	spin_unlock(&release_agent_path_lock);

	if (test_bit(CGRP_CPUSET_CLONE_CHILDREN, &root->cgrp.flags))
		seq_puts(seq, ",clone_children");
	if (strlen(root->name))
		seq_printf(seq, ",name=%s", root->name);
	return 0;
}

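/*
 * Example output (editorial sketch, exact formatting not guaranteed): a
 * hierarchy mounted with the cpu and cpuacct controllers and name=demo
 * would appear in /proc/mounts roughly as
 *
 *	cgroup /sys/fs/cgroup/demo cgroup rw,cpu,cpuacct,name=demo 0 0
 */
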
struct cgroup_sb_opts {
	unsigned int subsys_mask;
	unsigned int flags;
	char *release_agent;
	bool cpuset_clone_children;
	char *name;
	/* User explicitly requested empty subsystem */
	bool none;
};

static int parse_cgroupfs_options(char *data, struct cgroup_sb_opts *opts)
{
	char *token, *o = data;
	bool all_ss = false, one_ss = false;
	unsigned int mask = -1U;
	struct cgroup_subsys *ss;
	int i;

#ifdef CONFIG_CPUSETS
	mask = ~(1U << cpuset_cgrp_id);
#endif

	memset(opts, 0, sizeof(*opts));

	while ((token = strsep(&o, ",")) != NULL) {
		if (!*token)
			return -EINVAL;
		if (!strcmp(token, "none")) {
			/* Explicitly have no subsystems */
			opts->none = true;
			continue;
		}
		if (!strcmp(token, "all")) {
			/* Mutually exclusive option 'all' + subsystem name */
			if (one_ss)
				return -EINVAL;
			all_ss = true;
			continue;
		}
		if (!strcmp(token, "__DEVEL__sane_behavior")) {
			opts->flags |= CGRP_ROOT_SANE_BEHAVIOR;
			continue;
		}
		if (!strcmp(token, "noprefix")) {
			opts->flags |= CGRP_ROOT_NOPREFIX;
			continue;
		}
		if (!strcmp(token, "clone_children")) {
			opts->cpuset_clone_children = true;
			continue;
		}
		if (!strcmp(token, "xattr")) {
			opts->flags |= CGRP_ROOT_XATTR;
			continue;
		}
		if (!strncmp(token, "release_agent=", 14)) {
			/* Specifying two release agents is forbidden */
			if (opts->release_agent)
				return -EINVAL;
			opts->release_agent =
				kstrndup(token + 14, PATH_MAX - 1, GFP_KERNEL);
			if (!opts->release_agent)
				return -ENOMEM;
			continue;
		}
		if (!strncmp(token, "name=", 5)) {
			const char *name = token + 5;
			/* Can't specify an empty name */
			if (!strlen(name))
				return -EINVAL;
			/* Must match [\w.-]+ */
			for (i = 0; i < strlen(name); i++) {
				char c = name[i];
				if (isalnum(c))
					continue;
				if ((c == '.') || (c == '-') || (c == '_'))
					continue;
				return -EINVAL;
			}
			/* Specifying two names is forbidden */
			if (opts->name)
				return -EINVAL;
			opts->name = kstrndup(name,
					      MAX_CGROUP_ROOT_NAMELEN - 1,
					      GFP_KERNEL);
			if (!opts->name)
				return -ENOMEM;

			continue;
		}

		for_each_subsys(ss, i) {
			if (strcmp(token, ss->name))
				continue;
			if (ss->disabled)
				continue;

			/* Mutually exclusive option 'all' + subsystem name */
			if (all_ss)
				return -EINVAL;
			opts->subsys_mask |= (1 << i);
			one_ss = true;

			break;
		}
		if (i == CGROUP_SUBSYS_COUNT)
			return -ENOENT;
	}

	/* Consistency checks */

	if (opts->flags & CGRP_ROOT_SANE_BEHAVIOR) {
		pr_warn("sane_behavior: this is still under development and its behaviors will change, proceed at your own risk\n");

		if ((opts->flags & (CGRP_ROOT_NOPREFIX | CGRP_ROOT_XATTR)) ||
		    opts->cpuset_clone_children || opts->release_agent ||
		    opts->name) {
			pr_err("sane_behavior: noprefix, xattr, clone_children, release_agent and name are not allowed\n");
			return -EINVAL;
		}
	} else {
		/*
		 * If the 'all' option was specified select all the
		 * subsystems, otherwise if 'none', 'name=' and a subsystem
		 * name options were not specified, let's default to 'all'
		 */
		if (all_ss || (!one_ss && !opts->none && !opts->name))
			for_each_subsys(ss, i)
				if (!ss->disabled)
					opts->subsys_mask |= (1 << i);

		/*
		 * We either have to specify by name or by subsystems. (So
		 * all empty hierarchies must have a name).
		 */
		if (!opts->subsys_mask && !opts->name)
			return -EINVAL;
	}

	/*
	 * Option noprefix was introduced just for backward compatibility
	 * with the old cpuset, so we allow noprefix only if mounting just
	 * the cpuset subsystem.
	 */
	if ((opts->flags & CGRP_ROOT_NOPREFIX) && (opts->subsys_mask & mask))
		return -EINVAL;

	/* Can't specify "none" and some subsystems */
	if (opts->subsys_mask && opts->none)
		return -EINVAL;

	return 0;
}

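/*
 * Example (editorial sketch): the option string parsed above comes from
 * the mount(2) data argument, e.g.
 *
 *	mount -t cgroup -o cpu,cpuacct,name=demo none /sys/fs/cgroup/demo
 *
 * which would set the cpu and cpuacct bits in opts->subsys_mask and make
 * opts->name "demo".
 */
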
static int cgroup_remount(struct kernfs_root *kf_root, int *flags, char *data)
{
	int ret = 0;
	struct cgroup_root *root = cgroup_root_from_kf(kf_root);
	struct cgroup_sb_opts opts;
	unsigned int added_mask, removed_mask;

	if (root->flags & CGRP_ROOT_SANE_BEHAVIOR) {
		pr_err("sane_behavior: remount is not allowed\n");
		return -EINVAL;
	}

	mutex_lock(&cgroup_mutex);

	/* See what subsystems are wanted */
	ret = parse_cgroupfs_options(data, &opts);
	if (ret)
		goto out_unlock;

	if (opts.subsys_mask != root->subsys_mask || opts.release_agent)
		pr_warn("option changes via remount are deprecated (pid=%d comm=%s)\n",
			task_tgid_nr(current), current->comm);

	added_mask = opts.subsys_mask & ~root->subsys_mask;
	removed_mask = root->subsys_mask & ~opts.subsys_mask;

	/* Don't allow flags or name to change at remount */
	if (((opts.flags ^ root->flags) & CGRP_ROOT_OPTION_MASK) ||
	    (opts.name && strcmp(opts.name, root->name))) {
		pr_err("option or name mismatch, new: 0x%x \"%s\", old: 0x%x \"%s\"\n",
		       opts.flags & CGRP_ROOT_OPTION_MASK, opts.name ?: "",
		       root->flags & CGRP_ROOT_OPTION_MASK, root->name);
		ret = -EINVAL;
		goto out_unlock;
	}

	/* remounting is not allowed for populated hierarchies */
	if (!list_empty(&root->cgrp.children)) {
		ret = -EBUSY;
		goto out_unlock;
	}

	ret = rebind_subsystems(root, added_mask);
	if (ret)
		goto out_unlock;

	rebind_subsystems(&cgrp_dfl_root, removed_mask);

	if (opts.release_agent) {
		spin_lock(&release_agent_path_lock);
		strcpy(root->release_agent_path, opts.release_agent);
		spin_unlock(&release_agent_path_lock);
	}
 out_unlock:
	kfree(opts.release_agent);
	kfree(opts.name);
	mutex_unlock(&cgroup_mutex);
	return ret;
}

/*
 * To reduce the fork() overhead for systems that are not actually using
 * their cgroups capability, we don't maintain the lists running through
 * each css_set to its tasks until we see the list actually used - in other
 * words after the first mount.
 */
static bool use_task_css_set_links __read_mostly;

static void cgroup_enable_task_cg_lists(void)
{
	struct task_struct *p, *g;

	down_write(&css_set_rwsem);

	if (use_task_css_set_links)
		goto out_unlock;

	use_task_css_set_links = true;

	/*
	 * We need tasklist_lock because RCU is not safe against
	 * while_each_thread(). Besides, a forking task that has passed
	 * cgroup_post_fork() without seeing use_task_css_set_links = 1
	 * is not guaranteed to have its child immediately visible in the
	 * tasklist if we walk through it with RCU.
	 */
	read_lock(&tasklist_lock);
	do_each_thread(g, p) {
		WARN_ON_ONCE(!list_empty(&p->cg_list) ||
			     task_css_set(p) != &init_css_set);

		/*
		 * We should check if the process is exiting, otherwise
		 * it will race with cgroup_exit() in that the list
		 * entry won't be deleted though the process has exited.
		 * Do it while holding siglock so that we don't end up
		 * racing against cgroup_exit().
		 */
		spin_lock_irq(&p->sighand->siglock);
		if (!(p->flags & PF_EXITING)) {
			struct css_set *cset = task_css_set(p);

			list_add(&p->cg_list, &cset->tasks);
			get_css_set(cset);
		}
		spin_unlock_irq(&p->sighand->siglock);
	} while_each_thread(g, p);
	read_unlock(&tasklist_lock);
out_unlock:
	up_write(&css_set_rwsem);
}

static void init_cgroup_housekeeping(struct cgroup *cgrp)
{
	struct cgroup_subsys *ss;
	int ssid;

	atomic_set(&cgrp->refcnt, 1);
	INIT_LIST_HEAD(&cgrp->sibling);
	INIT_LIST_HEAD(&cgrp->children);
	INIT_LIST_HEAD(&cgrp->cset_links);
	INIT_LIST_HEAD(&cgrp->release_list);
	INIT_LIST_HEAD(&cgrp->pidlists);
	mutex_init(&cgrp->pidlist_mutex);
	cgrp->self.cgroup = cgrp;

	for_each_subsys(ss, ssid)
		INIT_LIST_HEAD(&cgrp->e_csets[ssid]);

	init_waitqueue_head(&cgrp->offline_waitq);
}

static void init_cgroup_root(struct cgroup_root *root,
			     struct cgroup_sb_opts *opts)
{
	struct cgroup *cgrp = &root->cgrp;

	INIT_LIST_HEAD(&root->root_list);
	atomic_set(&root->nr_cgrps, 1);
	cgrp->root = root;
	init_cgroup_housekeeping(cgrp);
	idr_init(&root->cgroup_idr);

	root->flags = opts->flags;
	if (opts->release_agent)
		strcpy(root->release_agent_path, opts->release_agent);
	if (opts->name)
		strcpy(root->name, opts->name);
	if (opts->cpuset_clone_children)
		set_bit(CGRP_CPUSET_CLONE_CHILDREN, &root->cgrp.flags);
}

static int cgroup_setup_root(struct cgroup_root *root, unsigned int ss_mask)
{
	LIST_HEAD(tmp_links);
	struct cgroup *root_cgrp = &root->cgrp;
	struct css_set *cset;
	int i, ret;

	lockdep_assert_held(&cgroup_mutex);

	ret = cgroup_idr_alloc(&root->cgroup_idr, root_cgrp, 1, 2, GFP_NOWAIT);
	if (ret < 0)
		goto out;
	root_cgrp->id = ret;

	/*
	 * We're accessing css_set_count without locking css_set_rwsem here,
	 * but that's OK - it can only be increased by someone holding
	 * cgroup_lock, and that's us. The worst that can happen is that we
	 * have some link structures left over
	 */
	ret = allocate_cgrp_cset_links(css_set_count, &tmp_links);
	if (ret)
		goto out;

	ret = cgroup_init_root_id(root);
	if (ret)
		goto out;

	root->kf_root = kernfs_create_root(&cgroup_kf_syscall_ops,
					   KERNFS_ROOT_CREATE_DEACTIVATED,
					   root_cgrp);
	if (IS_ERR(root->kf_root)) {
		ret = PTR_ERR(root->kf_root);
		goto exit_root_id;
	}
	root_cgrp->kn = root->kf_root->kn;

	ret = cgroup_addrm_files(root_cgrp, cgroup_base_files, true);
	if (ret)
		goto destroy_root;

	ret = rebind_subsystems(root, ss_mask);
	if (ret)
		goto destroy_root;

	/*
	 * There must be no failure case after here, since rebinding takes
	 * care of subsystems' refcounts, which are explicitly dropped in
	 * the failure exit path.
	 */
	list_add(&root->root_list, &cgroup_roots);
	cgroup_root_count++;

	/*
	 * Link the root cgroup in this hierarchy into all the css_set
	 * objects.
	 */
	down_write(&css_set_rwsem);
	hash_for_each(css_set_table, i, cset, hlist)
		link_css_set(&tmp_links, cset, root_cgrp);
	up_write(&css_set_rwsem);

	BUG_ON(!list_empty(&root_cgrp->children));
	BUG_ON(atomic_read(&root->nr_cgrps) != 1);

	kernfs_activate(root_cgrp->kn);
	ret = 0;
	goto out;

destroy_root:
	kernfs_destroy_root(root->kf_root);
	root->kf_root = NULL;
exit_root_id:
	cgroup_exit_root_id(root);
out:
	free_cgrp_cset_links(&tmp_links);
	return ret;
}

static struct dentry *cgroup_mount(struct file_system_type *fs_type,
			 int flags, const char *unused_dev_name,
			 void *data)
{
	struct cgroup_root *root;
	struct cgroup_sb_opts opts;
	struct dentry *dentry;
	int ret;
	bool new_sb;

	/*
	 * The first time anyone tries to mount a cgroup, enable the list
	 * linking each css_set to its tasks and fix up all existing tasks.
	 */
	if (!use_task_css_set_links)
		cgroup_enable_task_cg_lists();

	mutex_lock(&cgroup_mutex);

	/* First find the desired set of subsystems */
	ret = parse_cgroupfs_options(data, &opts);
	if (ret)
		goto out_unlock;

	/* look for a matching existing root */
	if (!opts.subsys_mask && !opts.none && !opts.name) {
		cgrp_dfl_root_visible = true;
		root = &cgrp_dfl_root;
		cgroup_get(&root->cgrp);
		ret = 0;
		goto out_unlock;
	}

	for_each_root(root) {
		bool name_match = false;

		if (root == &cgrp_dfl_root)
			continue;

		/*
		 * If we asked for a name then it must match.  Also, if
		 * name matches but subsys_mask doesn't, we should fail.
		 * Remember whether name matched.
		 */
		if (opts.name) {
			if (strcmp(opts.name, root->name))
				continue;
			name_match = true;
		}

		/*
		 * If we asked for subsystems (or explicitly for no
		 * subsystems) then they must match.
		 */
		if ((opts.subsys_mask || opts.none) &&
		    (opts.subsys_mask != root->subsys_mask)) {
			if (!name_match)
				continue;
			ret = -EBUSY;
			goto out_unlock;
		}

		if ((root->flags ^ opts.flags) & CGRP_ROOT_OPTION_MASK) {
			if ((root->flags | opts.flags) & CGRP_ROOT_SANE_BEHAVIOR) {
				pr_err("sane_behavior: new mount options should match the existing superblock\n");
				ret = -EINVAL;
				goto out_unlock;
			} else {
				pr_warn("new mount options do not match the existing superblock, will be ignored\n");
			}
		}

		/*
		 * A root's lifetime is governed by its root cgroup.  Zero
		 * ref indicates that the root is being destroyed.  Wait for
		 * destruction to complete so that the subsystems are free.
		 * We could use a wait_queue for the wait but this path is
		 * super cold.  Let's just sleep for a bit and retry.
		 */
		if (!atomic_inc_not_zero(&root->cgrp.refcnt)) {
			mutex_unlock(&cgroup_mutex);
			msleep(10);
			ret = restart_syscall();
			goto out_free;
		}

		ret = 0;
		goto out_unlock;
	}

	/*
	 * No such thing, create a new one.  name= matching without subsys
	 * specification is allowed for already existing hierarchies but we
	 * can't create new one without subsys specification.
	 */
	if (!opts.subsys_mask && !opts.none) {
		ret = -EINVAL;
		goto out_unlock;
	}

	root = kzalloc(sizeof(*root), GFP_KERNEL);
	if (!root) {
		ret = -ENOMEM;
		goto out_unlock;
	}

	init_cgroup_root(root, &opts);

	ret = cgroup_setup_root(root, opts.subsys_mask);
	if (ret)
		cgroup_free_root(root);

out_unlock:
	mutex_unlock(&cgroup_mutex);
out_free:
	kfree(opts.release_agent);
	kfree(opts.name);

	if (ret)
		return ERR_PTR(ret);

	dentry = kernfs_mount(fs_type, flags, root->kf_root, &new_sb);
	if (IS_ERR(dentry) || !new_sb)
		cgroup_put(&root->cgrp);
	return dentry;
}

static void cgroup_kill_sb(struct super_block *sb)
{
	struct kernfs_root *kf_root = kernfs_root_from_sb(sb);
	struct cgroup_root *root = cgroup_root_from_kf(kf_root);

	cgroup_put(&root->cgrp);
	kernfs_kill_sb(sb);
}

static struct file_system_type cgroup_fs_type = {
	.name = "cgroup",
	.mount = cgroup_mount,
	.kill_sb = cgroup_kill_sb,
};

static struct kobject *cgroup_kobj;

/**
 * task_cgroup_path - cgroup path of a task in the first cgroup hierarchy
 * @task: target task
 * @buf: the buffer to write the path into
 * @buflen: the length of the buffer
 *
 * Determine @task's cgroup on the first (the one with the lowest non-zero
 * hierarchy_id) cgroup hierarchy and copy its path into @buf.  This
 * function grabs cgroup_mutex and shouldn't be used inside locks used by
 * cgroup controller callbacks.
 *
 * Return value is the same as kernfs_path().
 */
char *task_cgroup_path(struct task_struct *task, char *buf, size_t buflen)
{
	struct cgroup_root *root;
	struct cgroup *cgrp;
	int hierarchy_id = 1;
	char *path = NULL;

	mutex_lock(&cgroup_mutex);
	down_read(&css_set_rwsem);

	root = idr_get_next(&cgroup_hierarchy_idr, &hierarchy_id);

	if (root) {
		cgrp = task_cgroup_from_root(task, root);
		path = cgroup_path(cgrp, buf, buflen);
	} else {
		/* if no hierarchy exists, everyone is in "/" */
		if (strlcpy(buf, "/", buflen) < buflen)
			path = buf;
	}

	up_read(&css_set_rwsem);
	mutex_unlock(&cgroup_mutex);
	return path;
}
EXPORT_SYMBOL_GPL(task_cgroup_path);
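
/*
 * Example (sketch): callers supply their own buffer, e.g.
 *
 *	char buf[PATH_MAX];
 *
 *	if (task_cgroup_path(current, buf, sizeof(buf)))
 *		pr_info("cgroup: %s\n", buf);
 *
 * The PATH_MAX-sized buffer and the pr_info() are illustrative choices,
 * not requirements of the interface.
 */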

/* used to track tasks and other necessary states during migration */
struct cgroup_taskset {
	/* the src and dst cset list running through cset->mg_node */
	struct list_head	src_csets;
	struct list_head	dst_csets;

	/*
	 * Fields for cgroup_taskset_*() iteration.
	 *
	 * Before migration is committed, the target migration tasks are on
	 * ->mg_tasks of the csets on ->src_csets.  After, on ->mg_tasks of
	 * the csets on ->dst_csets.  ->csets points to either ->src_csets
	 * or ->dst_csets depending on whether migration is committed.
	 *
	 * ->cur_cset and ->cur_task point to the current task position
	 * during iteration.
	 */
	struct list_head	*csets;
	struct css_set		*cur_cset;
	struct task_struct	*cur_task;
};

/**
 * cgroup_taskset_first - reset taskset and return the first task
 * @tset: taskset of interest
 *
 * @tset iteration is initialized and the first task is returned.
 */
struct task_struct *cgroup_taskset_first(struct cgroup_taskset *tset)
{
	tset->cur_cset = list_first_entry(tset->csets, struct css_set, mg_node);
	tset->cur_task = NULL;

	return cgroup_taskset_next(tset);
}

/**
 * cgroup_taskset_next - iterate to the next task in taskset
 * @tset: taskset of interest
 *
 * Return the next task in @tset.  Iteration must have been initialized
 * with cgroup_taskset_first().
 */
struct task_struct *cgroup_taskset_next(struct cgroup_taskset *tset)
{
	struct css_set *cset = tset->cur_cset;
	struct task_struct *task = tset->cur_task;

	while (&cset->mg_node != tset->csets) {
		if (!task)
			task = list_first_entry(&cset->mg_tasks,
						struct task_struct, cg_list);
		else
			task = list_next_entry(task, cg_list);

		if (&task->cg_list != &cset->mg_tasks) {
			tset->cur_cset = cset;
			tset->cur_task = task;
			return task;
		}

		cset = list_next_entry(cset, mg_node);
		task = NULL;
	}

	return NULL;
}
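
/*
 * Example (sketch): a controller's ->can_attach() callback can walk the
 * taskset like this.  my_can_attach() and my_task_is_ok() are made-up
 * names for illustration.
 *
 *	static int my_can_attach(struct cgroup_subsys_state *css,
 *				 struct cgroup_taskset *tset)
 *	{
 *		struct task_struct *task;
 *
 *		for (task = cgroup_taskset_first(tset); task;
 *		     task = cgroup_taskset_next(tset))
 *			if (!my_task_is_ok(task))
 *				return -EINVAL;
 *		return 0;
 *	}
 */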

/**
 * cgroup_task_migrate - move a task from one cgroup to another.
 * @old_cgrp: the cgroup @tsk is being migrated from
 * @tsk: the task being migrated
 * @new_cset: the new css_set @tsk is being attached to
 *
 * Must be called with cgroup_mutex, threadgroup and css_set_rwsem locked.
 */
static void cgroup_task_migrate(struct cgroup *old_cgrp,
				struct task_struct *tsk,
				struct css_set *new_cset)
{
	struct css_set *old_cset;

	lockdep_assert_held(&cgroup_mutex);
	lockdep_assert_held(&css_set_rwsem);

	/*
	 * We are synchronized through threadgroup_lock() against PF_EXITING
	 * setting such that we can't race against cgroup_exit() changing the
	 * css_set to init_css_set and dropping the old one.
	 */
	WARN_ON_ONCE(tsk->flags & PF_EXITING);
	old_cset = task_css_set(tsk);

	get_css_set(new_cset);
	rcu_assign_pointer(tsk->cgroups, new_cset);

	/*
	 * Use move_tail so that cgroup_taskset_first() still returns the
	 * leader after migration.  This works because cgroup_migrate()
	 * ensures that the dst_cset of the leader is the first on the
	 * tset's dst_csets list.
	 */
	list_move_tail(&tsk->cg_list, &new_cset->mg_tasks);

	/*
	 * We just gained a reference on old_cset by taking it from the
	 * task.  As trading it for new_cset is protected by cgroup_mutex,
	 * we're safe to drop it here; it will be freed under RCU.
	 */
	set_bit(CGRP_RELEASABLE, &old_cgrp->flags);
	put_css_set_locked(old_cset, false);
}

/**
 * cgroup_migrate_finish - cleanup after attach
 * @preloaded_csets: list of preloaded css_sets
 *
 * Undo cgroup_migrate_add_src() and cgroup_migrate_prepare_dst().  See
 * those functions for details.
 */
static void cgroup_migrate_finish(struct list_head *preloaded_csets)
{
	struct css_set *cset, *tmp_cset;

	lockdep_assert_held(&cgroup_mutex);

	down_write(&css_set_rwsem);
	list_for_each_entry_safe(cset, tmp_cset, preloaded_csets, mg_preload_node) {
		cset->mg_src_cgrp = NULL;
		cset->mg_dst_cset = NULL;
		list_del_init(&cset->mg_preload_node);
		put_css_set_locked(cset, false);
	}
	up_write(&css_set_rwsem);
}

/**
 * cgroup_migrate_add_src - add a migration source css_set
 * @src_cset: the source css_set to add
 * @dst_cgrp: the destination cgroup
 * @preloaded_csets: list of preloaded css_sets
 *
 * Tasks belonging to @src_cset are about to be migrated to @dst_cgrp.  Pin
 * @src_cset and add it to @preloaded_csets, which should later be cleaned
 * up by cgroup_migrate_finish().
 *
 * This function may be called without holding threadgroup_lock even if the
 * target is a process.  Threads may be created and destroyed but as long
 * as cgroup_mutex is not dropped, no new css_set can be put into play and
 * the preloaded css_sets are guaranteed to cover all migrations.
 */
static void cgroup_migrate_add_src(struct css_set *src_cset,
				   struct cgroup *dst_cgrp,
				   struct list_head *preloaded_csets)
{
	struct cgroup *src_cgrp;

	lockdep_assert_held(&cgroup_mutex);
	lockdep_assert_held(&css_set_rwsem);

	src_cgrp = cset_cgroup_from_root(src_cset, dst_cgrp->root);

	if (!list_empty(&src_cset->mg_preload_node))
		return;

	WARN_ON(src_cset->mg_src_cgrp);
	WARN_ON(!list_empty(&src_cset->mg_tasks));
	WARN_ON(!list_empty(&src_cset->mg_node));

	src_cset->mg_src_cgrp = src_cgrp;
	get_css_set(src_cset);
	list_add(&src_cset->mg_preload_node, preloaded_csets);
}

/**
 * cgroup_migrate_prepare_dst - prepare destination css_sets for migration
 * @dst_cgrp: the destination cgroup (may be %NULL)
 * @preloaded_csets: list of preloaded source css_sets
 *
 * Tasks are about to be moved to @dst_cgrp and all the source css_sets
 * have been preloaded to @preloaded_csets.  This function looks up and
 * pins all destination css_sets, links each to its source, and appends
 * them to @preloaded_csets.  If @dst_cgrp is %NULL, the destination of
 * each source css_set is assumed to be its cgroup on the default
 * hierarchy.
 *
 * This function must be called after cgroup_migrate_add_src() has been
 * called on each migration source css_set.  After migration is performed
 * using cgroup_migrate(), cgroup_migrate_finish() must be called on
 * @preloaded_csets.
 */
static int cgroup_migrate_prepare_dst(struct cgroup *dst_cgrp,
				      struct list_head *preloaded_csets)
{
	LIST_HEAD(csets);
	struct css_set *src_cset, *tmp_cset;

	lockdep_assert_held(&cgroup_mutex);

	/*
	 * Except for the root, child_subsys_mask must be zero for a cgroup
	 * with tasks so that child cgroups don't compete against tasks.
	 */
	if (dst_cgrp && cgroup_on_dfl(dst_cgrp) && dst_cgrp->parent &&
	    dst_cgrp->child_subsys_mask)
		return -EBUSY;

	/* look up the dst cset for each src cset and link it to src */
	list_for_each_entry_safe(src_cset, tmp_cset, preloaded_csets, mg_preload_node) {
		struct css_set *dst_cset;

		dst_cset = find_css_set(src_cset,
					dst_cgrp ?: src_cset->dfl_cgrp);
		if (!dst_cset)
			goto err;

		WARN_ON_ONCE(src_cset->mg_dst_cset || dst_cset->mg_dst_cset);

		/*
		 * If src cset equals dst, it's noop.  Drop the src.
		 * cgroup_migrate() will skip the cset too.  Note that we
		 * can't handle src == dst as some nodes are used by both.
		 */
		if (src_cset == dst_cset) {
			src_cset->mg_src_cgrp = NULL;
			list_del_init(&src_cset->mg_preload_node);
			put_css_set(src_cset, false);
			put_css_set(dst_cset, false);
			continue;
		}

		src_cset->mg_dst_cset = dst_cset;

		if (list_empty(&dst_cset->mg_preload_node))
			list_add(&dst_cset->mg_preload_node, &csets);
		else
			put_css_set(dst_cset, false);
	}

	list_splice_tail(&csets, preloaded_csets);
	return 0;
err:
	cgroup_migrate_finish(&csets);
	return -ENOMEM;
}

/**
 * cgroup_migrate - migrate a process or task to a cgroup
 * @cgrp: the destination cgroup
 * @leader: the leader of the process or the task to migrate
 * @threadgroup: whether @leader points to the whole process or a single task
 *
 * Migrate a process or task denoted by @leader to @cgrp.  If migrating a
 * process, the caller must be holding threadgroup_lock of @leader.  The
 * caller is also responsible for invoking cgroup_migrate_add_src() and
 * cgroup_migrate_prepare_dst() on the targets before invoking this
 * function and following up with cgroup_migrate_finish().
 *
 * As long as a controller's ->can_attach() doesn't fail, this function is
 * guaranteed to succeed.  This means that, excluding ->can_attach()
 * failure, when migrating multiple targets, the success or failure can be
 * decided for all targets by invoking cgroup_migrate_prepare_dst() before
 * actually starting migrating.
 */
static int cgroup_migrate(struct cgroup *cgrp, struct task_struct *leader,
			  bool threadgroup)
{
	struct cgroup_taskset tset = {
		.src_csets	= LIST_HEAD_INIT(tset.src_csets),
		.dst_csets	= LIST_HEAD_INIT(tset.dst_csets),
		.csets		= &tset.src_csets,
	};
	struct cgroup_subsys_state *css, *failed_css = NULL;
	struct css_set *cset, *tmp_cset;
	struct task_struct *task, *tmp_task;
	int i, ret;

	/*
	 * Prevent freeing of tasks while we take a snapshot.  Tasks that are
	 * already PF_EXITING could be freed from underneath us unless we
	 * take an rcu_read_lock.
	 */
	down_write(&css_set_rwsem);
	rcu_read_lock();
	task = leader;
	do {
		/* @task either already exited or can't exit until the end */
		if (task->flags & PF_EXITING)
			goto next;

		/* leave @task alone if post_fork() hasn't linked it yet */
		if (list_empty(&task->cg_list))
			goto next;

		cset = task_css_set(task);
		if (!cset->mg_src_cgrp)
			goto next;

		/*
		 * cgroup_taskset_first() must always return the leader.
		 * Take care to avoid disturbing the ordering.
		 */
		list_move_tail(&task->cg_list, &cset->mg_tasks);
		if (list_empty(&cset->mg_node))
			list_add_tail(&cset->mg_node, &tset.src_csets);
		if (list_empty(&cset->mg_dst_cset->mg_node))
			list_move_tail(&cset->mg_dst_cset->mg_node,
				       &tset.dst_csets);
	next:
		if (!threadgroup)
			break;
	} while_each_thread(leader, task);
	rcu_read_unlock();
	up_write(&css_set_rwsem);

	/* methods shouldn't be called if no task is actually migrating */
	if (list_empty(&tset.src_csets))
		return 0;

	/* check that we can legitimately attach to the cgroup */
	for_each_e_css(css, i, cgrp) {
		if (css->ss->can_attach) {
			ret = css->ss->can_attach(css, &tset);
			if (ret) {
				failed_css = css;
				goto out_cancel_attach;
			}
		}
	}

	/*
	 * Now that we're guaranteed success, proceed to move all tasks to
	 * the new cgroup.  There are no failure cases after here, so this
	 * is the commit point.
	 */
	down_write(&css_set_rwsem);
	list_for_each_entry(cset, &tset.src_csets, mg_node) {
		list_for_each_entry_safe(task, tmp_task, &cset->mg_tasks, cg_list)
			cgroup_task_migrate(cset->mg_src_cgrp, task,
					    cset->mg_dst_cset);
	}
	up_write(&css_set_rwsem);

	/*
	 * Migration is committed, all target tasks are now on dst_csets.
	 * Nothing is sensitive to fork() after this point.  Notify
	 * controllers that migration is complete.
	 */
	tset.csets = &tset.dst_csets;

	for_each_e_css(css, i, cgrp)
		if (css->ss->attach)
			css->ss->attach(css, &tset);

	ret = 0;
	goto out_release_tset;

out_cancel_attach:
	for_each_e_css(css, i, cgrp) {
		if (css == failed_css)
			break;
		if (css->ss->cancel_attach)
			css->ss->cancel_attach(css, &tset);
	}
out_release_tset:
	down_write(&css_set_rwsem);
	list_splice_init(&tset.dst_csets, &tset.src_csets);
	list_for_each_entry_safe(cset, tmp_cset, &tset.src_csets, mg_node) {
		list_splice_tail_init(&cset->mg_tasks, &cset->tasks);
		list_del_init(&cset->mg_node);
	}
	up_write(&css_set_rwsem);
	return ret;
}
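
/*
 * Example (sketch): the expected calling sequence for the migration API
 * above, moving a single task with cgroup_mutex held; error handling is
 * abbreviated and "dst_cgrp"/"task" are placeholders supplied by the
 * caller.
 *
 *	LIST_HEAD(preloaded_csets);
 *	int ret;
 *
 *	down_read(&css_set_rwsem);
 *	cgroup_migrate_add_src(task_css_set(task), dst_cgrp,
 *			       &preloaded_csets);
 *	up_read(&css_set_rwsem);
 *
 *	ret = cgroup_migrate_prepare_dst(dst_cgrp, &preloaded_csets);
 *	if (!ret)
 *		ret = cgroup_migrate(dst_cgrp, task, false);
 *	cgroup_migrate_finish(&preloaded_csets);
 *
 * cgroup_attach_task() below is the canonical user of this sequence.
 */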

/**
 * cgroup_attach_task - attach a task or a whole threadgroup to a cgroup
 * @dst_cgrp: the cgroup to attach to
 * @leader: the task or the leader of the threadgroup to be attached
 * @threadgroup: attach the whole threadgroup?
 *
 * Call holding cgroup_mutex and threadgroup_lock of @leader.
 */
static int cgroup_attach_task(struct cgroup *dst_cgrp,
			      struct task_struct *leader, bool threadgroup)
{
	LIST_HEAD(preloaded_csets);
	struct task_struct *task;
	int ret;

	/* look up all src csets */
	down_read(&css_set_rwsem);
	rcu_read_lock();
	task = leader;
	do {
		cgroup_migrate_add_src(task_css_set(task), dst_cgrp,
				       &preloaded_csets);
		if (!threadgroup)
			break;
	} while_each_thread(leader, task);
	rcu_read_unlock();
	up_read(&css_set_rwsem);

	/* prepare dst csets and commit */
	ret = cgroup_migrate_prepare_dst(dst_cgrp, &preloaded_csets);
	if (!ret)
		ret = cgroup_migrate(dst_cgrp, leader, threadgroup);

	cgroup_migrate_finish(&preloaded_csets);
	return ret;
}

/*
 * Find the task_struct of the task to attach by vpid and pass it along to the
 * function to attach either it or all tasks in its threadgroup. Will lock
 * cgroup_mutex and threadgroup.
 */
static ssize_t __cgroup_procs_write(struct kernfs_open_file *of, char *buf,
				    size_t nbytes, loff_t off, bool threadgroup)
{
	struct task_struct *tsk;
	const struct cred *cred = current_cred(), *tcred;
	struct cgroup *cgrp;
	pid_t pid;
	int ret;

	if (kstrtoint(strstrip(buf), 0, &pid) || pid < 0)
		return -EINVAL;

	cgrp = cgroup_kn_lock_live(of->kn);
	if (!cgrp)
		return -ENODEV;

retry_find_task:
	rcu_read_lock();
	if (pid) {
		tsk = find_task_by_vpid(pid);
		if (!tsk) {
			rcu_read_unlock();
			ret = -ESRCH;
			goto out_unlock_cgroup;
		}
		/*
		 * even if we're attaching all tasks in the thread group, we
		 * only need to check permissions on one of them.
		 */
		tcred = __task_cred(tsk);
		if (!uid_eq(cred->euid, GLOBAL_ROOT_UID) &&
		    !uid_eq(cred->euid, tcred->uid) &&
		    !uid_eq(cred->euid, tcred->suid)) {
			rcu_read_unlock();
			ret = -EACCES;
			goto out_unlock_cgroup;
		}
	} else
		tsk = current;

	if (threadgroup)
		tsk = tsk->group_leader;

	/*
	 * Workqueue threads may acquire PF_NO_SETAFFINITY and become
	 * trapped in a cpuset, or RT worker may be born in a cgroup
	 * with no rt_runtime allocated.  Just say no.
	 */
	if (tsk == kthreadd_task || (tsk->flags & PF_NO_SETAFFINITY)) {
		ret = -EINVAL;
		rcu_read_unlock();
		goto out_unlock_cgroup;
	}

	get_task_struct(tsk);
	rcu_read_unlock();

	threadgroup_lock(tsk);
	if (threadgroup) {
		if (!thread_group_leader(tsk)) {
			/*
			 * a race with de_thread from another thread's exec()
			 * may strip us of our leadership, if this happens,
			 * there is no choice but to throw this task away and
			 * try again; this is
			 * "double-double-toil-and-trouble-check locking".
			 */
			threadgroup_unlock(tsk);
			put_task_struct(tsk);
			goto retry_find_task;
		}
	}

	ret = cgroup_attach_task(cgrp, tsk, threadgroup);

	threadgroup_unlock(tsk);

	put_task_struct(tsk);
out_unlock_cgroup:
	cgroup_kn_unlock(of->kn);
	return ret ?: nbytes;
}

/**
 * cgroup_attach_task_all - attach task 'tsk' to all cgroups of task 'from'
 * @from: attach to all cgroups of a given task
 * @tsk: the task to be attached
 */
int cgroup_attach_task_all(struct task_struct *from, struct task_struct *tsk)
{
	struct cgroup_root *root;
	int retval = 0;

	mutex_lock(&cgroup_mutex);
	for_each_root(root) {
		struct cgroup *from_cgrp;

		if (root == &cgrp_dfl_root)
			continue;

		down_read(&css_set_rwsem);
		from_cgrp = task_cgroup_from_root(from, root);
		up_read(&css_set_rwsem);

		retval = cgroup_attach_task(from_cgrp, tsk, false);
		if (retval)
			break;
	}
	mutex_unlock(&cgroup_mutex);

	return retval;
}
EXPORT_SYMBOL_GPL(cgroup_attach_task_all);
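
/*
 * Example (sketch): a kernel thread doing work on behalf of a user task
 * can join that task's cgroups; "owner" and "worker" are placeholder
 * task_struct pointers supplied by the caller.
 *
 *	err = cgroup_attach_task_all(owner, worker);
 *	if (err)
 *		goto fail;
 */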

static ssize_t cgroup_tasks_write(struct kernfs_open_file *of,
				  char *buf, size_t nbytes, loff_t off)
{
	return __cgroup_procs_write(of, buf, nbytes, off, false);
}

static ssize_t cgroup_procs_write(struct kernfs_open_file *of,
				  char *buf, size_t nbytes, loff_t off)
{
	return __cgroup_procs_write(of, buf, nbytes, off, true);
}

static ssize_t cgroup_release_agent_write(struct kernfs_open_file *of,
					  char *buf, size_t nbytes, loff_t off)
{
	struct cgroup *cgrp;

	BUILD_BUG_ON(sizeof(cgrp->root->release_agent_path) < PATH_MAX);

	cgrp = cgroup_kn_lock_live(of->kn);
	if (!cgrp)
		return -ENODEV;
	spin_lock(&release_agent_path_lock);
	strlcpy(cgrp->root->release_agent_path, strstrip(buf),
		sizeof(cgrp->root->release_agent_path));
	spin_unlock(&release_agent_path_lock);
	cgroup_kn_unlock(of->kn);
	return nbytes;
}

static int cgroup_release_agent_show(struct seq_file *seq, void *v)
{
	struct cgroup *cgrp = seq_css(seq)->cgroup;

	spin_lock(&release_agent_path_lock);
	seq_puts(seq, cgrp->root->release_agent_path);
	spin_unlock(&release_agent_path_lock);
	seq_putc(seq, '\n');
	return 0;
}

static int cgroup_sane_behavior_show(struct seq_file *seq, void *v)
{
	struct cgroup *cgrp = seq_css(seq)->cgroup;

	seq_printf(seq, "%d\n", cgroup_sane_behavior(cgrp));
	return 0;
}

static void cgroup_print_ss_mask(struct seq_file *seq, unsigned int ss_mask)
{
	struct cgroup_subsys *ss;
	bool printed = false;
	int ssid;

	for_each_subsys(ss, ssid) {
		if (ss_mask & (1 << ssid)) {
			if (printed)
				seq_putc(seq, ' ');
			seq_printf(seq, "%s", ss->name);
			printed = true;
		}
	}
	if (printed)
		seq_putc(seq, '\n');
}

/* show controllers which are currently attached to the default hierarchy */
static int cgroup_root_controllers_show(struct seq_file *seq, void *v)
{
	struct cgroup *cgrp = seq_css(seq)->cgroup;

	cgroup_print_ss_mask(seq, cgrp->root->subsys_mask);
	return 0;
}

/* show controllers which are enabled from the parent */
static int cgroup_controllers_show(struct seq_file *seq, void *v)
{
	struct cgroup *cgrp = seq_css(seq)->cgroup;

	cgroup_print_ss_mask(seq, cgrp->parent->child_subsys_mask);
	return 0;
}

/* show controllers which are enabled for a given cgroup's children */
static int cgroup_subtree_control_show(struct seq_file *seq, void *v)
{
	struct cgroup *cgrp = seq_css(seq)->cgroup;

	cgroup_print_ss_mask(seq, cgrp->child_subsys_mask);
	return 0;
}
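
/*
 * From userspace the files backed by the three show callbacks above read
 * as space separated controller lists, e.g. (sketch; available names
 * depend on the kernel configuration):
 *
 *	# cat cgroup.controllers
 *	cpu memory
 */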

/**
 * cgroup_update_dfl_csses - update css assoc of a subtree in default hierarchy
 * @cgrp: root of the subtree to update csses for
 *
 * @cgrp's child_subsys_mask has changed and its subtree's (self excluded)
 * css associations need to be updated accordingly.  This function looks up
 * all css_sets which are attached to the subtree, creates the matching
 * updated css_sets and migrates the tasks to the new ones.
 */
static int cgroup_update_dfl_csses(struct cgroup *cgrp)
{
	LIST_HEAD(preloaded_csets);
	struct cgroup_subsys_state *css;
	struct css_set *src_cset;
	int ret;

	lockdep_assert_held(&cgroup_mutex);

	/* look up all csses currently attached to @cgrp's subtree */
	down_read(&css_set_rwsem);
	css_for_each_descendant_pre(css, cgroup_css(cgrp, NULL)) {
		struct cgrp_cset_link *link;

		/* self is not affected by child_subsys_mask change */
		if (css->cgroup == cgrp)
			continue;

		list_for_each_entry(link, &css->cgroup->cset_links, cset_link)
			cgroup_migrate_add_src(link->cset, cgrp,
					       &preloaded_csets);
	}
	up_read(&css_set_rwsem);

	/* NULL dst indicates self on default hierarchy */
	ret = cgroup_migrate_prepare_dst(NULL, &preloaded_csets);
	if (ret)
		goto out_finish;

	list_for_each_entry(src_cset, &preloaded_csets, mg_preload_node) {
		struct task_struct *last_task = NULL, *task;

		/* src_csets precede dst_csets, break on the first dst_cset */
		if (!src_cset->mg_src_cgrp)
			break;

		/*
		 * All tasks in src_cset need to be migrated to the
		 * matching dst_cset.  Empty it process by process.  We
		 * walk tasks but migrate processes.  The leader might even
		 * belong to a different cset but such src_cset would also
		 * be among the target src_csets because the default
		 * hierarchy enforces per-process membership.
		 */
		while (true) {
			down_read(&css_set_rwsem);
			task = list_first_entry_or_null(&src_cset->tasks,
						struct task_struct, cg_list);
			if (task) {
				task = task->group_leader;
				WARN_ON_ONCE(!task_css_set(task)->mg_src_cgrp);
				get_task_struct(task);
			}
			up_read(&css_set_rwsem);

			if (!task)
				break;

			/* guard against possible infinite loop */
			if (WARN(last_task == task,
				 "cgroup: update_dfl_csses failed to make progress, aborting in inconsistent state\n"))
				goto out_finish;
			last_task = task;

			threadgroup_lock(task);
			/* raced against de_thread() from another thread? */
			if (!thread_group_leader(task)) {
				threadgroup_unlock(task);
				put_task_struct(task);
				continue;
			}

			ret = cgroup_migrate(src_cset->dfl_cgrp, task, true);

			threadgroup_unlock(task);
			put_task_struct(task);

			if (WARN(ret, "cgroup: failed to update controllers for the default hierarchy (%d), further operations may crash or hang\n", ret))
				goto out_finish;
		}
	}

out_finish:
	cgroup_migrate_finish(&preloaded_csets);
	return ret;
}

/* change the enabled child controllers for a cgroup in the default hierarchy */
static ssize_t cgroup_subtree_control_write(struct kernfs_open_file *of,
					    char *buf, size_t nbytes,
					    loff_t off)
{
	unsigned int enable = 0, disable = 0;
	struct cgroup *cgrp, *child;
	struct cgroup_subsys *ss;
	char *tok;
	int ssid, ret;

	/*
	 * Parse input - space separated list of subsystem names prefixed
	 * with either + or -.
	 */
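	/*
	 * For example, writing "+memory -cpu" (sketch; valid names depend
	 * on the kernel config) enables memory and disables cpu for this
	 * cgroup's children.
	 */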
	buf = strstrip(buf);
	while ((tok = strsep(&buf, " "))) {
		if (tok[0] == '\0')
			continue;
		for_each_subsys(ss, ssid) {
			if (ss->disabled || strcmp(tok + 1, ss->name))
				continue;

			if (*tok == '+') {
				enable |= 1 << ssid;
				disable &= ~(1 << ssid);
			} else if (*tok == '-') {
				disable |= 1 << ssid;
				enable &= ~(1 << ssid);
			} else {
				return -EINVAL;
			}
			break;
		}
		if (ssid == CGROUP_SUBSYS_COUNT)
			return -EINVAL;
	}

	cgrp = cgroup_kn_lock_live(of->kn);
	if (!cgrp)
		return -ENODEV;

	for_each_subsys(ss, ssid) {
		if (enable & (1 << ssid)) {
			if (cgrp->child_subsys_mask & (1 << ssid)) {
				enable &= ~(1 << ssid);
				continue;
			}

			/*
			 * Because css offlining is asynchronous, userland
			 * might try to re-enable the same controller while
			 * the previous instance is still around.  In such
			 * cases, wait till it's gone using offline_waitq.
			 */
			cgroup_for_each_live_child(child, cgrp) {
				DEFINE_WAIT(wait);

				if (!cgroup_css(child, ss))
					continue;

				cgroup_get(child);
				prepare_to_wait(&child->offline_waitq, &wait,
						TASK_UNINTERRUPTIBLE);
				cgroup_kn_unlock(of->kn);
				schedule();
				finish_wait(&child->offline_waitq, &wait);
				cgroup_put(child);

				return restart_syscall();
			}

			/* unavailable or not enabled on the parent? */
			if (!(cgrp_dfl_root.subsys_mask & (1 << ssid)) ||
			    (cgrp->parent &&
			     !(cgrp->parent->child_subsys_mask & (1 << ssid)))) {
				ret = -ENOENT;
				goto out_unlock;
			}
		} else if (disable & (1 << ssid)) {
			if (!(cgrp->child_subsys_mask & (1 << ssid))) {
				disable &= ~(1 << ssid);
				continue;
			}

			/* a child has it enabled? */
			cgroup_for_each_live_child(child, cgrp) {
				if (child->child_subsys_mask & (1 << ssid)) {
					ret = -EBUSY;
					goto out_unlock;
				}
			}
		}
	}

	if (!enable && !disable) {
		ret = 0;
		goto out_unlock;
	}

	/*
	 * Except for the root, child_subsys_mask must be zero for a cgroup
	 * with tasks so that child cgroups don't compete against tasks.
	 */
	if (enable && cgrp->parent && !list_empty(&cgrp->cset_links)) {
		ret = -EBUSY;
		goto out_unlock;
	}

	/*
	 * Create csses for enables and update child_subsys_mask.  This
	 * changes cgroup_e_css() results which in turn makes the
	 * subsequent cgroup_update_dfl_csses() associate all tasks in the
	 * subtree to the updated csses.
	 */
	for_each_subsys(ss, ssid) {
		if (!(enable & (1 << ssid)))
			continue;

		cgroup_for_each_live_child(child, cgrp) {
			ret = create_css(child, ss);
			if (ret)
				goto err_undo_css;
		}
	}

	cgrp->child_subsys_mask |= enable;
	cgrp->child_subsys_mask &= ~disable;

	ret = cgroup_update_dfl_csses(cgrp);
	if (ret)
		goto err_undo_css;

	/* all tasks are now migrated away from the old csses, kill them */
	for_each_subsys(ss, ssid) {
		if (!(disable & (1 << ssid)))
			continue;

		cgroup_for_each_live_child(child, cgrp)
			kill_css(cgroup_css(child, ss));
	}

	kernfs_activate(cgrp->kn);
	ret = 0;
out_unlock:
	cgroup_kn_unlock(of->kn);
	return ret ?: nbytes;

err_undo_css:
	cgrp->child_subsys_mask &= ~enable;
	cgrp->child_subsys_mask |= disable;

	for_each_subsys(ss, ssid) {
		if (!(enable & (1 << ssid)))
			continue;

		cgroup_for_each_live_child(child, cgrp) {
			struct cgroup_subsys_state *css = cgroup_css(child, ss);
			if (css)
				kill_css(css);
		}
	}
	goto out_unlock;
}

static int cgroup_populated_show(struct seq_file *seq, void *v)
{
	seq_printf(seq, "%d\n", (bool)seq_css(seq)->cgroup->populated_cnt);
	return 0;
}

static ssize_t cgroup_file_write(struct kernfs_open_file *of, char *buf,
				 size_t nbytes, loff_t off)
{
	struct cgroup *cgrp = of->kn->parent->priv;
	struct cftype *cft = of->kn->priv;
	struct cgroup_subsys_state *css;
	int ret;

	if (cft->write)
		return cft->write(of, buf, nbytes, off);

	/*
	 * kernfs guarantees that a file isn't deleted with operations in
	 * flight, which means that the matching css is and stays alive and
	 * doesn't need to be pinned.  The RCU locking is not necessary
	 * either.  It's just for the convenience of using cgroup_css().
	 */
	rcu_read_lock();
	css = cgroup_css(cgrp, cft->ss);
	rcu_read_unlock();

	if (cft->write_u64) {
		unsigned long long v;
		ret = kstrtoull(buf, 0, &v);
		if (!ret)
			ret = cft->write_u64(css, cft, v);
	} else if (cft->write_s64) {
		long long v;
		ret = kstrtoll(buf, 0, &v);
		if (!ret)
			ret = cft->write_s64(css, cft, v);
	} else {
		ret = -EINVAL;
	}

	return ret ?: nbytes;
}

static void *cgroup_seqfile_start(struct seq_file *seq, loff_t *ppos)
{
	return seq_cft(seq)->seq_start(seq, ppos);
}

static void *cgroup_seqfile_next(struct seq_file *seq, void *v, loff_t *ppos)
{
	return seq_cft(seq)->seq_next(seq, v, ppos);
}

static void cgroup_seqfile_stop(struct seq_file *seq, void *v)
{
	seq_cft(seq)->seq_stop(seq, v);
}

static int cgroup_seqfile_show(struct seq_file *m, void *arg)
{
	struct cftype *cft = seq_cft(m);
	struct cgroup_subsys_state *css = seq_css(m);

	if (cft->seq_show)
		return cft->seq_show(m, arg);

	if (cft->read_u64)
		seq_printf(m, "%llu\n", cft->read_u64(css, cft));
	else if (cft->read_s64)
		seq_printf(m, "%lld\n", cft->read_s64(css, cft));
	else
		return -EINVAL;
	return 0;
}

static struct kernfs_ops cgroup_kf_single_ops = {
	.atomic_write_len	= PAGE_SIZE,
	.write			= cgroup_file_write,
	.seq_show		= cgroup_seqfile_show,
};

static struct kernfs_ops cgroup_kf_ops = {
	.atomic_write_len	= PAGE_SIZE,
	.write			= cgroup_file_write,
	.seq_start		= cgroup_seqfile_start,
	.seq_next		= cgroup_seqfile_next,
	.seq_stop		= cgroup_seqfile_stop,
	.seq_show		= cgroup_seqfile_show,
};

/*
 * cgroup_rename - Only allow simple rename of directories in place.
 */
static int cgroup_rename(struct kernfs_node *kn, struct kernfs_node *new_parent,
			 const char *new_name_str)
{
	struct cgroup *cgrp = kn->priv;
	int ret;

	if (kernfs_type(kn) != KERNFS_DIR)
		return -ENOTDIR;
	if (kn->parent != new_parent)
		return -EIO;

	/*
	 * This isn't a proper migration and its usefulness is very
	 * limited.  Disallow if sane_behavior.
	 */
	if (cgroup_sane_behavior(cgrp))
		return -EPERM;

	/*
	 * We're gonna grab cgroup_mutex which nests outside kernfs
	 * active_ref.  kernfs_rename() doesn't require active_ref
	 * protection.  Break them before grabbing cgroup_mutex.
	 */
	kernfs_break_active_protection(new_parent);
	kernfs_break_active_protection(kn);

	mutex_lock(&cgroup_mutex);

	ret = kernfs_rename(kn, new_parent, new_name_str);

	mutex_unlock(&cgroup_mutex);

	kernfs_unbreak_active_protection(kn);
	kernfs_unbreak_active_protection(new_parent);
	return ret;
}

/* set uid and gid of cgroup dirs and files to that of the creator */
static int cgroup_kn_set_ugid(struct kernfs_node *kn)
{
	struct iattr iattr = { .ia_valid = ATTR_UID | ATTR_GID,
			       .ia_uid = current_fsuid(),
			       .ia_gid = current_fsgid(), };

	if (uid_eq(iattr.ia_uid, GLOBAL_ROOT_UID) &&
	    gid_eq(iattr.ia_gid, GLOBAL_ROOT_GID))
		return 0;

	return kernfs_setattr(kn, &iattr);
}

static int cgroup_add_file(struct cgroup *cgrp, struct cftype *cft)
{
	char name[CGROUP_FILE_NAME_MAX];
	struct kernfs_node *kn;
	struct lock_class_key *key = NULL;
	int ret;

#ifdef CONFIG_DEBUG_LOCK_ALLOC
	key = &cft->lockdep_key;
#endif
	kn = __kernfs_create_file(cgrp->kn, cgroup_file_name(cgrp, cft, name),
				  cgroup_file_mode(cft), 0, cft->kf_ops, cft,
				  NULL, false, key);
	if (IS_ERR(kn))
		return PTR_ERR(kn);

	ret = cgroup_kn_set_ugid(kn);
	if (ret) {
		kernfs_remove(kn);
		return ret;
	}

	if (cft->seq_show == cgroup_populated_show)
		cgrp->populated_kn = kn;
	return 0;
}

/**
 * cgroup_addrm_files - add or remove files to a cgroup directory
 * @cgrp: the target cgroup
 * @cfts: array of cftypes to be added
 * @is_add: whether to add or remove
 *
 * Depending on @is_add, add or remove files defined by @cfts on @cgrp.
 * For removals, this function never fails.  If addition fails, this
 * function doesn't remove files already added.  The caller is responsible
 * for cleaning up.
 */
static int cgroup_addrm_files(struct cgroup *cgrp, struct cftype cfts[],
			      bool is_add)
{
	struct cftype *cft;
	int ret;

	lockdep_assert_held(&cgroup_mutex);

	for (cft = cfts; cft->name[0] != '\0'; cft++) {
		/* does cft->flags tell us to skip this file on @cgrp? */
		if ((cft->flags & CFTYPE_ONLY_ON_DFL) && !cgroup_on_dfl(cgrp))
			continue;
		if ((cft->flags & CFTYPE_INSANE) && cgroup_sane_behavior(cgrp))
			continue;
		if ((cft->flags & CFTYPE_NOT_ON_ROOT) && !cgrp->parent)
			continue;
		if ((cft->flags & CFTYPE_ONLY_ON_ROOT) && cgrp->parent)
			continue;

		if (is_add) {
			ret = cgroup_add_file(cgrp, cft);
			if (ret) {
				pr_warn("%s: failed to add %s, err=%d\n",
					__func__, cft->name, ret);
				return ret;
			}
		} else {
			cgroup_rm_file(cgrp, cft);
		}
	}
	return 0;
}

static int cgroup_apply_cftypes(struct cftype *cfts, bool is_add)
{
	LIST_HEAD(pending);
	struct cgroup_subsys *ss = cfts[0].ss;
	struct cgroup *root = &ss->root->cgrp;
	struct cgroup_subsys_state *css;
	int ret = 0;

	lockdep_assert_held(&cgroup_mutex);

	/* add/rm files for all cgroups created before */
	css_for_each_descendant_pre(css, cgroup_css(root, ss)) {
		struct cgroup *cgrp = css->cgroup;

		if (cgroup_is_dead(cgrp))
			continue;

		ret = cgroup_addrm_files(cgrp, cfts, is_add);
		if (ret)
			break;
	}

	if (is_add && !ret)
		kernfs_activate(root->kn);
	return ret;
}

static void cgroup_exit_cftypes(struct cftype *cfts)
{
	struct cftype *cft;

	for (cft = cfts; cft->name[0] != '\0'; cft++) {
		/* free copy for custom atomic_write_len, see init_cftypes() */
		if (cft->max_write_len && cft->max_write_len != PAGE_SIZE)
			kfree(cft->kf_ops);
		cft->kf_ops = NULL;
		cft->ss = NULL;
	}
}

static int cgroup_init_cftypes(struct cgroup_subsys *ss, struct cftype *cfts)
{
	struct cftype *cft;

	for (cft = cfts; cft->name[0] != '\0'; cft++) {
		struct kernfs_ops *kf_ops;

		WARN_ON(cft->ss || cft->kf_ops);

		if (cft->seq_start)
			kf_ops = &cgroup_kf_ops;
		else
			kf_ops = &cgroup_kf_single_ops;

		/*
		 * Ugh... if @cft wants a custom max_write_len, we need to
		 * make a copy of kf_ops to set its atomic_write_len.
		 */
		if (cft->max_write_len && cft->max_write_len != PAGE_SIZE) {
			kf_ops = kmemdup(kf_ops, sizeof(*kf_ops), GFP_KERNEL);
			if (!kf_ops) {
				cgroup_exit_cftypes(cfts);
				return -ENOMEM;
			}
			kf_ops->atomic_write_len = cft->max_write_len;
		}

		cft->kf_ops = kf_ops;
		cft->ss = ss;
	}

	return 0;
}

static int cgroup_rm_cftypes_locked(struct cftype *cfts)
{
	lockdep_assert_held(&cgroup_mutex);

	if (!cfts || !cfts[0].ss)
		return -ENOENT;

	list_del(&cfts->node);
	cgroup_apply_cftypes(cfts, false);
	cgroup_exit_cftypes(cfts);
	return 0;
}

/**
 * cgroup_rm_cftypes - remove an array of cftypes from a subsystem
 * @cfts: zero-length name terminated array of cftypes
 *
 * Unregister @cfts.  Files described by @cfts are removed from all
 * existing cgroups and all future cgroups won't have them either.  This
 * function can be called anytime whether @cfts' subsys is attached or not.
 *
 * Returns 0 on successful unregistration, -ENOENT if @cfts is not
 * registered.
 */
int cgroup_rm_cftypes(struct cftype *cfts)
{
	int ret;

	mutex_lock(&cgroup_mutex);
	ret = cgroup_rm_cftypes_locked(cfts);
	mutex_unlock(&cgroup_mutex);
	return ret;
}

/**
 * cgroup_add_cftypes - add an array of cftypes to a subsystem
 * @ss: target cgroup subsystem
 * @cfts: zero-length name terminated array of cftypes
 *
 * Register @cfts to @ss.  Files described by @cfts are created for all
 * existing cgroups to which @ss is attached and all future cgroups will
 * have them too.  This function can be called anytime whether @ss is
 * attached or not.
 *
 * Returns 0 on successful registration, -errno on failure.  Note that this
 * function currently returns 0 as long as @cfts registration is successful
 * even if some file creation attempts on existing cgroups fail.
 */
int cgroup_add_cftypes(struct cgroup_subsys *ss, struct cftype *cfts)
{
	int ret;

	if (!cfts || cfts[0].name[0] == '\0')
		return 0;

	ret = cgroup_init_cftypes(ss, cfts);
	if (ret)
		return ret;

	mutex_lock(&cgroup_mutex);

	list_add_tail(&cfts->node, &ss->cfts);
	ret = cgroup_apply_cftypes(cfts, true);
	if (ret)
		cgroup_rm_cftypes_locked(cfts);

	mutex_unlock(&cgroup_mutex);
	return ret;
}
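
/*
 * Example (sketch): a controller registers its interface files with a
 * zero-length-name terminated cftype array.  The file name and the read
 * callback below are made up for illustration.
 *
 *	static u64 my_stat_read(struct cgroup_subsys_state *css,
 *				struct cftype *cft)
 *	{
 *		return 0;
 *	}
 *
 *	static struct cftype my_files[] = {
 *		{
 *			.name = "my.stat",
 *			.read_u64 = my_stat_read,
 *		},
 *		{ }	(the zero-length name terminates the array)
 *	};
 *
 *	ret = cgroup_add_cftypes(&my_subsys, my_files);
 */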

/**
 * cgroup_task_count - count the number of tasks in a cgroup.
 * @cgrp: the cgroup in question
 *
 * Return the number of tasks in the cgroup.
 */
static int cgroup_task_count(const struct cgroup *cgrp)
{
	int count = 0;
	struct cgrp_cset_link *link;

	down_read(&css_set_rwsem);
	list_for_each_entry(link, &cgrp->cset_links, cset_link)
		count += atomic_read(&link->cset->refcount);
	up_read(&css_set_rwsem);
	return count;
}

/**
 * css_next_child - find the next child of a given css
 * @pos_css: the current position (%NULL to initiate traversal)
 * @parent_css: css whose children to walk
 *
 * This function returns the next child of @parent_css and should be called
 * under either cgroup_mutex or RCU read lock.  The only requirement is
 * that @parent_css and @pos_css are accessible.  The next sibling is
 * guaranteed to be returned regardless of their states.
 */
struct cgroup_subsys_state *
css_next_child(struct cgroup_subsys_state *pos_css,
	       struct cgroup_subsys_state *parent_css)
{
	struct cgroup *pos = pos_css ? pos_css->cgroup : NULL;
	struct cgroup *cgrp = parent_css->cgroup;
	struct cgroup *next;

	cgroup_assert_mutex_or_rcu_locked();

	/*
	 * @pos could already have been removed.  Once a cgroup is removed,
	 * its ->sibling.next is no longer updated when its next sibling
	 * changes.  As CGRP_DEAD assertion is serialized and happens
	 * before the cgroup is taken off the ->sibling list, if we see it
	 * unasserted, it's guaranteed that the next sibling hasn't
	 * finished its grace period even if it's already removed, and thus
	 * safe to dereference from this RCU critical section.  If
	 * ->sibling.next is inaccessible, cgroup_is_dead() is guaranteed
	 * to be visible as %true here.
	 *
	 * If @pos is dead, its next pointer can't be dereferenced;
	 * however, as each cgroup is given a monotonically increasing
	 * unique serial number and always appended to the sibling list,
	 * the next one can be found by walking the parent's children until
	 * we see a cgroup with higher serial number than @pos's.  While
	 * this path can be slower, it's taken only when either the current
	 * cgroup is removed or iteration and removal race.
	 */
	if (!pos) {
		next = list_entry_rcu(cgrp->children.next, struct cgroup, sibling);
	} else if (likely(!cgroup_is_dead(pos))) {
		next = list_entry_rcu(pos->sibling.next, struct cgroup, sibling);
	} else {
		list_for_each_entry_rcu(next, &cgrp->children, sibling)
			if (next->serial_nr > pos->serial_nr)
				break;
	}

	/*
	 * @next, if not pointing to the head, can be dereferenced and is
	 * the next sibling; however, it might have @ss disabled.  If so,
	 * fast-forward to the next enabled one.
	 */
	while (&next->sibling != &cgrp->children) {
		struct cgroup_subsys_state *next_css = cgroup_css(next, parent_css->ss);

		if (next_css)
			return next_css;
		next = list_entry_rcu(next->sibling.next, struct cgroup, sibling);
	}
	return NULL;
}

/**
 * css_next_descendant_pre - find the next descendant for pre-order walk
 * @pos: the current position (%NULL to initiate traversal)
 * @root: css whose descendants to walk
 *
 * To be used by css_for_each_descendant_pre().  Find the next descendant
 * to visit for pre-order traversal of @root's descendants.  @root is
 * included in the iteration and the first node to be visited.
 *
 * While this function requires cgroup_mutex or RCU read locking, it
 * doesn't require the whole traversal to be contained in a single critical
 * section.  This function will return the correct next descendant as long
 * as both @pos and @root are accessible and @pos is a descendant of @root.
 */
struct cgroup_subsys_state *
css_next_descendant_pre(struct cgroup_subsys_state *pos,
			struct cgroup_subsys_state *root)
{
	struct cgroup_subsys_state *next;

	cgroup_assert_mutex_or_rcu_locked();

	/* if first iteration, visit @root */
	if (!pos)
		return root;

	/* visit the first child if exists */
	next = css_next_child(NULL, pos);
	if (next)
		return next;

	/* no child, visit my or the closest ancestor's next sibling */
	while (pos != root) {
		next = css_next_child(pos, css_parent(pos));
		if (next)
			return next;
		pos = css_parent(pos);
	}

	return NULL;
}
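
/*
 * Example (sketch): a pre-order walk over a subtree under RCU;
 * do_something() stands in for whatever per-css work the caller needs.
 *
 *	struct cgroup_subsys_state *pos;
 *
 *	rcu_read_lock();
 *	css_for_each_descendant_pre(pos, root_css)
 *		do_something(pos);
 *	rcu_read_unlock();
 */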

/**
 * css_rightmost_descendant - return the rightmost descendant of a css
 * @pos: css of interest
 *
 * Return the rightmost descendant of @pos.  If there's no descendant, @pos
 * is returned.  This can be used during pre-order traversal to skip
 * subtree of @pos.
 *
 * While this function requires cgroup_mutex or RCU read locking, it
 * doesn't require the whole traversal to be contained in a single critical
 * section.  This function will return the correct rightmost descendant as
 * long as @pos is accessible.
 */
struct cgroup_subsys_state *
css_rightmost_descendant(struct cgroup_subsys_state *pos)
{
	struct cgroup_subsys_state *last, *tmp;

	cgroup_assert_mutex_or_rcu_locked();

	do {
		last = pos;
		/* ->prev isn't RCU safe, walk ->next till the end */
		pos = NULL;
		css_for_each_child(tmp, last)
			pos = tmp;
	} while (pos);

	return last;
}

static struct cgroup_subsys_state *
css_leftmost_descendant(struct cgroup_subsys_state *pos)
{
	struct cgroup_subsys_state *last;

	do {
		last = pos;
		pos = css_next_child(NULL, pos);
	} while (pos);

	return last;
}

/**
 * css_next_descendant_post - find the next descendant for post-order walk
 * @pos: the current position (%NULL to initiate traversal)
 * @root: css whose descendants to walk
 *
 * To be used by css_for_each_descendant_post().  Find the next descendant
 * to visit for post-order traversal of @root's descendants.  @root is
 * included in the iteration and the last node to be visited.
 *
 * While this function requires cgroup_mutex or RCU read locking, it
 * doesn't require the whole traversal to be contained in a single critical
 * section.  This function will return the correct next descendant as long
 * as both @pos and @root are accessible and @pos is a descendant of
 * @root.
 */
struct cgroup_subsys_state *
css_next_descendant_post(struct cgroup_subsys_state *pos,
			 struct cgroup_subsys_state *root)
{
	struct cgroup_subsys_state *next;

	cgroup_assert_mutex_or_rcu_locked();

	/* if first iteration, visit leftmost descendant which may be @root */
	if (!pos)
		return css_leftmost_descendant(root);

	/* if we visited @root, we're done */
	if (pos == root)
		return NULL;

	/* if there's an unvisited sibling, visit its leftmost descendant */
	next = css_next_child(pos, css_parent(pos));
	if (next)
		return css_leftmost_descendant(next);

	/* no sibling left, visit parent */
	return css_parent(pos);
}

static bool cgroup_has_live_children(struct cgroup *cgrp)
{
	struct cgroup *child;

	rcu_read_lock();
	list_for_each_entry_rcu(child, &cgrp->children, sibling) {
		if (!cgroup_is_dead(child)) {
			rcu_read_unlock();
			return true;
		}
	}
	rcu_read_unlock();
	return false;
}

/**
 * css_advance_task_iter - advance a task iterator to the next css_set
 * @it: the iterator to advance
 *
 * Advance @it to the next css_set to walk.
 */
static void css_advance_task_iter(struct css_task_iter *it)
{
	struct list_head *l = it->cset_pos;
	struct cgrp_cset_link *link;
	struct css_set *cset;

	/* Advance to the next non-empty css_set */
	do {
		l = l->next;
		if (l == it->cset_head) {
			it->cset_pos = NULL;
			return;
		}

		if (it->ss) {
			cset = container_of(l, struct css_set,
					    e_cset_node[it->ss->id]);
		} else {
			link = list_entry(l, struct cgrp_cset_link, cset_link);
			cset = link->cset;
		}
	} while (list_empty(&cset->tasks) && list_empty(&cset->mg_tasks));

	it->cset_pos = l;

	if (!list_empty(&cset->tasks))
		it->task_pos = cset->tasks.next;
	else
		it->task_pos = cset->mg_tasks.next;

	it->tasks_head = &cset->tasks;
	it->mg_tasks_head = &cset->mg_tasks;
}

3357
/**
3358 3359
 * css_task_iter_start - initiate task iteration
 * @css: the css to walk tasks of
3360 3361
 * @it: the task iterator to use
 *
3362 3363 3364 3365
 * Initiate iteration through the tasks of @css.  The caller can call
 * css_task_iter_next() to walk through the tasks until the function
 * returns NULL.  On completion of iteration, css_task_iter_end() must be
 * called.
3366 3367 3368 3369 3370
 *
 * Note that this function acquires a lock which is released when the
 * iteration finishes.  The caller can't sleep while iteration is in
 * progress.
 */
3371 3372
void css_task_iter_start(struct cgroup_subsys_state *css,
			 struct css_task_iter *it)
3373
	__acquires(css_set_rwsem)
3374
{
3375 3376
	/* no one should try to iterate before mounting cgroups */
	WARN_ON_ONCE(!use_task_css_set_links);
3377

3378
	down_read(&css_set_rwsem);
3379

3380 3381 3382 3383 3384 3385 3386
	it->ss = css->ss;

	if (it->ss)
		it->cset_pos = &css->cgroup->e_csets[css->ss->id];
	else
		it->cset_pos = &css->cgroup->cset_links;

T
Tejun Heo 已提交
3387
	it->cset_head = it->cset_pos;
3388

3389
	css_advance_task_iter(it);
3390 3391
}

/**
 * css_task_iter_next - return the next task for the iterator
 * @it: the task iterator being iterated
 *
 * The "next" function for task iteration.  @it should have been
 * initialized via css_task_iter_start().  Returns NULL when the iteration
 * reaches the end.
 */
struct task_struct *css_task_iter_next(struct css_task_iter *it)
{
	struct task_struct *res;
	struct list_head *l = it->task_pos;

	/* If the iterator cg is NULL, we have no tasks */
	if (!it->cset_pos)
		return NULL;
	res = list_entry(l, struct task_struct, cg_list);

	/*
	 * Advance iterator to find next entry.  cset->tasks is consumed
	 * first and then ->mg_tasks.  After ->mg_tasks, we move onto the
	 * next cset.
	 */
	l = l->next;

	if (l == it->tasks_head)
		l = it->mg_tasks_head->next;

	if (l == it->mg_tasks_head)
		css_advance_task_iter(it);
	else
		it->task_pos = l;

	return res;
}

/**
 * css_task_iter_end - finish task iteration
 * @it: the task iterator to finish
 *
 * Finish task iteration started by css_task_iter_start().
 */
void css_task_iter_end(struct css_task_iter *it)
	__releases(css_set_rwsem)
{
	up_read(&css_set_rwsem);
}
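
/*
 * Typical usage of the iterator API above (an illustrative sketch only;
 * process_task() stands in for whatever the caller does per task):
 *
 *	struct css_task_iter it;
 *	struct task_struct *task;
 *
 *	css_task_iter_start(css, &it);
 *	while ((task = css_task_iter_next(&it)))
 *		process_task(task);
 *	css_task_iter_end(&it);
 *
 * css_set_rwsem is read-held for the whole walk, so the per-task body
 * must not sleep.
 */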

/**
 * cgroup_transfer_tasks - move tasks from one cgroup to another
 * @to: cgroup to which the tasks will be moved
 * @from: cgroup in which the tasks currently reside
 *
 * Locking rules between cgroup_post_fork() and the migration path
 * guarantee that, if a task is forking while being migrated, the new child
 * is guaranteed to be either visible in the source cgroup after the
 * parent's migration is complete or put into the target cgroup.  No task
 * can slip out of migration through forking.
 */
int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from)
{
	LIST_HEAD(preloaded_csets);
	struct cgrp_cset_link *link;
	struct css_task_iter it;
	struct task_struct *task;
	int ret;

	mutex_lock(&cgroup_mutex);

	/* all tasks in @from are being moved, all csets are source */
	down_read(&css_set_rwsem);
	list_for_each_entry(link, &from->cset_links, cset_link)
		cgroup_migrate_add_src(link->cset, to, &preloaded_csets);
	up_read(&css_set_rwsem);

	ret = cgroup_migrate_prepare_dst(to, &preloaded_csets);
	if (ret)
		goto out_err;

	/*
	 * Migrate tasks one-by-one until @from is empty.  This fails iff
	 * ->can_attach() fails.
	 */
	do {
		css_task_iter_start(&from->self, &it);
		task = css_task_iter_next(&it);
		if (task)
			get_task_struct(task);
		css_task_iter_end(&it);

		if (task) {
			ret = cgroup_migrate(to, task, false);
			put_task_struct(task);
		}
	} while (task && !ret);
out_err:
	cgroup_migrate_finish(&preloaded_csets);
	mutex_unlock(&cgroup_mutex);
	return ret;
}
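
/*
 * An illustrative caller sketch: a controller that wants to drain an
 * emptied group could do
 *
 *	ret = cgroup_transfer_tasks(parent, cgrp);
 *
 * moving every task in @cgrp to @parent, the way cpuset evacuates tasks
 * from cpusets left without CPUs or memory nodes.  (Caller names here
 * are hypothetical.)
 */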

/*
 * Stuff for reading the 'tasks'/'procs' files.
 *
 * Reading this file can return large amounts of data if a cgroup has
 * *lots* of attached tasks. So it may need several calls to read(),
 * but we cannot guarantee that the information we produce is correct
 * unless we produce it entirely atomically.
 */

/* which pidlist file are we talking about? */
enum cgroup_filetype {
	CGROUP_FILE_PROCS,
	CGROUP_FILE_TASKS,
};

/*
 * A pidlist is a list of pids that virtually represents the contents of one
 * of the cgroup files ("procs" or "tasks"). We keep a list of such pidlists,
 * a pair (one each for procs, tasks) for each pid namespace that's relevant
 * to the cgroup.
 */
struct cgroup_pidlist {
	/*
	 * used to find which pidlist is wanted. doesn't change as long as
	 * this particular list stays in the list.
	 */
	struct { enum cgroup_filetype type; struct pid_namespace *ns; } key;
	/* array of xids */
	pid_t *list;
	/* how many elements the above list has */
	int length;
	/* each of these stored in a list by its cgroup */
	struct list_head links;
	/* pointer to the cgroup we belong to, for list removal purposes */
	struct cgroup *owner;
	/* for delayed destruction */
	struct delayed_work destroy_dwork;
};

/*
 * The following two functions "fix" the issue where there are more pids
 * than kmalloc will give memory for; in such cases, we use vmalloc/vfree.
 * TODO: replace with a kernel-wide solution to this problem
 */
#define PIDLIST_TOO_LARGE(c) ((c) * sizeof(pid_t) > (PAGE_SIZE * 2))
static void *pidlist_allocate(int count)
{
	if (PIDLIST_TOO_LARGE(count))
		return vmalloc(count * sizeof(pid_t));
	else
		return kmalloc(count * sizeof(pid_t), GFP_KERNEL);
}

static void pidlist_free(void *p)
{
	if (is_vmalloc_addr(p))
		vfree(p);
	else
		kfree(p);
}

/*
 * Used to destroy all pidlists lingering waiting for destroy timer.  None
 * should be left afterwards.
 */
static void cgroup_pidlist_destroy_all(struct cgroup *cgrp)
{
	struct cgroup_pidlist *l, *tmp_l;

	mutex_lock(&cgrp->pidlist_mutex);
	list_for_each_entry_safe(l, tmp_l, &cgrp->pidlists, links)
		mod_delayed_work(cgroup_pidlist_destroy_wq, &l->destroy_dwork, 0);
	mutex_unlock(&cgrp->pidlist_mutex);

	flush_workqueue(cgroup_pidlist_destroy_wq);
	BUG_ON(!list_empty(&cgrp->pidlists));
}

static void cgroup_pidlist_destroy_work_fn(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct cgroup_pidlist *l = container_of(dwork, struct cgroup_pidlist,
						destroy_dwork);
	struct cgroup_pidlist *tofree = NULL;

	mutex_lock(&l->owner->pidlist_mutex);

	/*
	 * Destroy iff we didn't get queued again.  The state won't change
	 * as destroy_dwork can only be queued while locked.
	 */
	if (!delayed_work_pending(dwork)) {
		list_del(&l->links);
		pidlist_free(l->list);
		put_pid_ns(l->key.ns);
		tofree = l;
	}

	mutex_unlock(&l->owner->pidlist_mutex);
	kfree(tofree);
}

/*
 * pidlist_uniq - given a kmalloc()ed list, strip out all duplicate entries
 * Returns the number of unique elements.
 */
static int pidlist_uniq(pid_t *list, int length)
{
	int src, dest = 1;

	/*
	 * we presume the 0th element is unique, so i starts at 1. trivial
	 * edge cases first; no work needs to be done for either
	 */
	if (length == 0 || length == 1)
		return length;
	/* src and dest walk down the list; dest counts unique elements */
	for (src = 1; src < length; src++) {
		/* find next unique element */
		while (list[src] == list[src-1]) {
			src++;
			if (src == length)
				goto after;
		}
		/* dest always points to where the next unique element goes */
		list[dest] = list[src];
		dest++;
	}
after:
	return dest;
}
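
/*
 * Worked example: for the sorted array {3, 3, 5, 7, 7} of length 5, the
 * array is compacted in place so that its first three elements are
 * {3, 5, 7} and 3 is returned; the caller then treats only those leading
 * elements as valid.
 */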

/*
 * The two pid files - tasks and cgroup.procs - guarantee that the result
 * is sorted, which forced this whole pidlist fiasco.  As pid order is
 * different per namespace, each namespace needs a differently sorted list,
 * making it impossible to use, for example, a single rbtree of member tasks
 * sorted by task pointer.  As pidlists can be fairly large, allocating one
 * per open file is dangerous, so cgroup had to implement a shared pool of
 * pidlists keyed by cgroup and namespace.
 *
 * All this extra complexity was caused by the original implementation
 * committing to an entirely unnecessary property.  In the long term, we
 * want to do away with it.  Explicitly scramble sort order if
 * sane_behavior so that no such expectation exists in the new interface.
 *
 * Scrambling is done by swapping every two consecutive bits, which is
 * a non-identity one-to-one mapping which disturbs sort order sufficiently.
 */
static pid_t pid_fry(pid_t pid)
{
	unsigned a = pid & 0x55555555;
	unsigned b = pid & 0xAAAAAAAA;

	return (a << 1) | (b >> 1);
}

static pid_t cgroup_pid_fry(struct cgroup *cgrp, pid_t pid)
{
	if (cgroup_sane_behavior(cgrp))
		return pid_fry(pid);
	else
		return pid;
}
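
/*
 * Worked example: pid_fry(6) swaps the bit pairs of 0b0110 to give
 * 0b1001, i.e. 9.  Swapping adjacent bits is its own inverse, so
 * pid_fry(9) yields 6 again.
 */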

static int cmppid(const void *a, const void *b)
{
	return *(pid_t *)a - *(pid_t *)b;
}

static int fried_cmppid(const void *a, const void *b)
{
	return pid_fry(*(pid_t *)a) - pid_fry(*(pid_t *)b);
}

static struct cgroup_pidlist *cgroup_pidlist_find(struct cgroup *cgrp,
						  enum cgroup_filetype type)
{
	struct cgroup_pidlist *l;
	/* don't need task_nsproxy() if we're looking at ourself */
	struct pid_namespace *ns = task_active_pid_ns(current);

	lockdep_assert_held(&cgrp->pidlist_mutex);

	list_for_each_entry(l, &cgrp->pidlists, links)
		if (l->key.type == type && l->key.ns == ns)
			return l;
	return NULL;
}

/*
 * Find the appropriate pidlist for our purpose (given procs vs tasks),
 * creating one if it doesn't exist yet.  The caller must hold
 * cgrp->pidlist_mutex.  Returns NULL if we're out of memory.
 */
static struct cgroup_pidlist *cgroup_pidlist_find_create(struct cgroup *cgrp,
						enum cgroup_filetype type)
{
	struct cgroup_pidlist *l;

	lockdep_assert_held(&cgrp->pidlist_mutex);

	l = cgroup_pidlist_find(cgrp, type);
	if (l)
		return l;

	/* entry not found; create a new one */
	l = kzalloc(sizeof(struct cgroup_pidlist), GFP_KERNEL);
	if (!l)
		return l;

	INIT_DELAYED_WORK(&l->destroy_dwork, cgroup_pidlist_destroy_work_fn);
	l->key.type = type;
	/* don't need task_nsproxy() if we're looking at ourself */
	l->key.ns = get_pid_ns(task_active_pid_ns(current));
	l->owner = cgrp;
	list_add(&l->links, &cgrp->pidlists);
	return l;
}

/*
 * Load a cgroup's pidarray with either procs' tgids or tasks' pids
 */
static int pidlist_array_load(struct cgroup *cgrp, enum cgroup_filetype type,
			      struct cgroup_pidlist **lp)
{
	pid_t *array;
	int length;
	int pid, n = 0; /* used for populating the array */
	struct css_task_iter it;
	struct task_struct *tsk;
	struct cgroup_pidlist *l;

	lockdep_assert_held(&cgrp->pidlist_mutex);

	/*
	 * If cgroup gets more users after we read count, we won't have
	 * enough space - tough.  This race is indistinguishable to the
	 * caller from the case that the additional cgroup users didn't
	 * show up until sometime later on.
	 */
	length = cgroup_task_count(cgrp);
	array = pidlist_allocate(length);
	if (!array)
		return -ENOMEM;
	/* now, populate the array */
	css_task_iter_start(&cgrp->self, &it);
	while ((tsk = css_task_iter_next(&it))) {
		if (unlikely(n == length))
			break;
		/* get tgid or pid for procs or tasks file respectively */
		if (type == CGROUP_FILE_PROCS)
			pid = task_tgid_vnr(tsk);
		else
			pid = task_pid_vnr(tsk);
		if (pid > 0) /* make sure to only use valid results */
			array[n++] = pid;
	}
	css_task_iter_end(&it);
	length = n;
	/* now sort & (if procs) strip out duplicates */
	if (cgroup_sane_behavior(cgrp))
		sort(array, length, sizeof(pid_t), fried_cmppid, NULL);
	else
		sort(array, length, sizeof(pid_t), cmppid, NULL);
	if (type == CGROUP_FILE_PROCS)
		length = pidlist_uniq(array, length);

	l = cgroup_pidlist_find_create(cgrp, type);
	if (!l) {
		mutex_unlock(&cgrp->pidlist_mutex);
		pidlist_free(array);
		return -ENOMEM;
	}

	/* store array, freeing old if necessary */
	pidlist_free(l->list);
	l->list = array;
	l->length = length;
	*lp = l;
	return 0;
}

/**
 * cgroupstats_build - build and fill cgroupstats
 * @stats: cgroupstats to fill information into
 * @dentry: A dentry entry belonging to the cgroup for which stats have
 * been requested.
 *
 * Build and fill cgroupstats so that taskstats can export it to user
 * space.
 */
int cgroupstats_build(struct cgroupstats *stats, struct dentry *dentry)
{
	struct kernfs_node *kn = kernfs_node_from_dentry(dentry);
	struct cgroup *cgrp;
	struct css_task_iter it;
	struct task_struct *tsk;

	/* it should be kernfs_node belonging to cgroupfs and is a directory */
	if (dentry->d_sb->s_type != &cgroup_fs_type || !kn ||
	    kernfs_type(kn) != KERNFS_DIR)
		return -EINVAL;

	mutex_lock(&cgroup_mutex);

	/*
	 * We aren't being called from kernfs and there's no guarantee on
	 * @kn->priv's validity.  For this and css_tryget_online_from_dir(),
	 * @kn->priv is RCU safe.  Let's do the RCU dancing.
	 */
	rcu_read_lock();
	cgrp = rcu_dereference(kn->priv);
	if (!cgrp || cgroup_is_dead(cgrp)) {
		rcu_read_unlock();
		mutex_unlock(&cgroup_mutex);
		return -ENOENT;
	}
	rcu_read_unlock();

	css_task_iter_start(&cgrp->self, &it);
	while ((tsk = css_task_iter_next(&it))) {
		switch (tsk->state) {
		case TASK_RUNNING:
			stats->nr_running++;
			break;
		case TASK_INTERRUPTIBLE:
			stats->nr_sleeping++;
			break;
		case TASK_UNINTERRUPTIBLE:
			stats->nr_uninterruptible++;
			break;
		case TASK_STOPPED:
			stats->nr_stopped++;
			break;
		default:
			if (delayacct_is_task_waiting_on_io(tsk))
				stats->nr_io_wait++;
			break;
		}
	}
	css_task_iter_end(&it);

	mutex_unlock(&cgroup_mutex);
	return 0;
}

/*
 * seq_file methods for the tasks/procs files. The seq_file position is the
 * next pid to display; the seq_file iterator is a pointer to the pid
 * in the cgroup->l->list array.
 */

static void *cgroup_pidlist_start(struct seq_file *s, loff_t *pos)
{
	/*
	 * Initially we receive a position value that corresponds to
	 * one more than the last pid shown (or 0 on the first call or
	 * after a seek to the start). Use a binary-search to find the
	 * next pid to display, if any
	 */
	struct kernfs_open_file *of = s->private;
	struct cgroup *cgrp = seq_css(s)->cgroup;
	struct cgroup_pidlist *l;
	enum cgroup_filetype type = seq_cft(s)->private;
	int index = 0, pid = *pos;
	int *iter, ret;

	mutex_lock(&cgrp->pidlist_mutex);

	/*
	 * !NULL @of->priv indicates that this isn't the first start()
	 * after open.  If the matching pidlist is around, we can use that.
	 * Look for it.  Note that @of->priv can't be used directly.  It
	 * could already have been destroyed.
	 */
	if (of->priv)
		of->priv = cgroup_pidlist_find(cgrp, type);

	/*
	 * Either this is the first start() after open or the matching
	 * pidlist has been destroyed in between.  Create a new one.
	 */
	if (!of->priv) {
		ret = pidlist_array_load(cgrp, type,
					 (struct cgroup_pidlist **)&of->priv);
		if (ret)
			return ERR_PTR(ret);
	}
	l = of->priv;

	if (pid) {
		int end = l->length;

		while (index < end) {
			int mid = (index + end) / 2;
			if (cgroup_pid_fry(cgrp, l->list[mid]) == pid) {
				index = mid;
				break;
			} else if (cgroup_pid_fry(cgrp, l->list[mid]) <= pid)
				index = mid + 1;
			else
				end = mid;
		}
	}
	/* If we're off the end of the array, we're done */
	if (index >= l->length)
		return NULL;
	/* Update the abstract position to be the actual pid that we found */
	iter = l->list + index;
	*pos = cgroup_pid_fry(cgrp, *iter);
	return iter;
}

static void cgroup_pidlist_stop(struct seq_file *s, void *v)
{
	struct kernfs_open_file *of = s->private;
	struct cgroup_pidlist *l = of->priv;

	if (l)
		mod_delayed_work(cgroup_pidlist_destroy_wq, &l->destroy_dwork,
				 CGROUP_PIDLIST_DESTROY_DELAY);
	mutex_unlock(&seq_css(s)->cgroup->pidlist_mutex);
}

static void *cgroup_pidlist_next(struct seq_file *s, void *v, loff_t *pos)
{
	struct kernfs_open_file *of = s->private;
	struct cgroup_pidlist *l = of->priv;
	pid_t *p = v;
	pid_t *end = l->list + l->length;
	/*
	 * Advance to the next pid in the array. If this goes off the
	 * end, we're done
	 */
	p++;
	if (p >= end) {
		return NULL;
	} else {
		*pos = cgroup_pid_fry(seq_css(s)->cgroup, *p);
		return p;
	}
}

static int cgroup_pidlist_show(struct seq_file *s, void *v)
{
	return seq_printf(s, "%d\n", *(int *)v);
}

static u64 cgroup_read_notify_on_release(struct cgroup_subsys_state *css,
					 struct cftype *cft)
{
	return notify_on_release(css->cgroup);
}

static int cgroup_write_notify_on_release(struct cgroup_subsys_state *css,
					  struct cftype *cft, u64 val)
{
	clear_bit(CGRP_RELEASABLE, &css->cgroup->flags);
	if (val)
		set_bit(CGRP_NOTIFY_ON_RELEASE, &css->cgroup->flags);
	else
		clear_bit(CGRP_NOTIFY_ON_RELEASE, &css->cgroup->flags);
	return 0;
}

static u64 cgroup_clone_children_read(struct cgroup_subsys_state *css,
				      struct cftype *cft)
{
	return test_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags);
}

static int cgroup_clone_children_write(struct cgroup_subsys_state *css,
				       struct cftype *cft, u64 val)
{
	if (val)
		set_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags);
	else
		clear_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags);
	return 0;
}

static struct cftype cgroup_base_files[] = {
	{
		.name = "cgroup.procs",
		.seq_start = cgroup_pidlist_start,
		.seq_next = cgroup_pidlist_next,
		.seq_stop = cgroup_pidlist_stop,
		.seq_show = cgroup_pidlist_show,
		.private = CGROUP_FILE_PROCS,
		.write = cgroup_procs_write,
		.mode = S_IRUGO | S_IWUSR,
	},
	{
		.name = "cgroup.clone_children",
		.flags = CFTYPE_INSANE,
		.read_u64 = cgroup_clone_children_read,
		.write_u64 = cgroup_clone_children_write,
	},
	{
		.name = "cgroup.sane_behavior",
		.flags = CFTYPE_ONLY_ON_ROOT,
		.seq_show = cgroup_sane_behavior_show,
	},
	{
		.name = "cgroup.controllers",
		.flags = CFTYPE_ONLY_ON_DFL | CFTYPE_ONLY_ON_ROOT,
		.seq_show = cgroup_root_controllers_show,
	},
	{
		.name = "cgroup.controllers",
		.flags = CFTYPE_ONLY_ON_DFL | CFTYPE_NOT_ON_ROOT,
		.seq_show = cgroup_controllers_show,
	},
	{
		.name = "cgroup.subtree_control",
		.flags = CFTYPE_ONLY_ON_DFL,
		.seq_show = cgroup_subtree_control_show,
		.write = cgroup_subtree_control_write,
	},
	{
		.name = "cgroup.populated",
		.flags = CFTYPE_ONLY_ON_DFL | CFTYPE_NOT_ON_ROOT,
		.seq_show = cgroup_populated_show,
	},

	/*
	 * Historical crazy stuff.  These don't have "cgroup."  prefix and
	 * don't exist if sane_behavior.  If you're depending on these, be
	 * prepared to be burned.
	 */
	{
		.name = "tasks",
		.flags = CFTYPE_INSANE,		/* use "procs" instead */
		.seq_start = cgroup_pidlist_start,
		.seq_next = cgroup_pidlist_next,
		.seq_stop = cgroup_pidlist_stop,
		.seq_show = cgroup_pidlist_show,
		.private = CGROUP_FILE_TASKS,
		.write = cgroup_tasks_write,
		.mode = S_IRUGO | S_IWUSR,
	},
	{
		.name = "notify_on_release",
		.flags = CFTYPE_INSANE,
		.read_u64 = cgroup_read_notify_on_release,
		.write_u64 = cgroup_write_notify_on_release,
	},
	{
		.name = "release_agent",
		.flags = CFTYPE_INSANE | CFTYPE_ONLY_ON_ROOT,
		.seq_show = cgroup_release_agent_show,
		.write = cgroup_release_agent_write,
		.max_write_len = PATH_MAX - 1,
	},
	{ }	/* terminate */
};

/**
 * cgroup_populate_dir - create subsys files in a cgroup directory
 * @cgrp: target cgroup
 * @subsys_mask: mask of the subsystem ids whose files should be added
 *
 * On failure, no file is added.
 */
static int cgroup_populate_dir(struct cgroup *cgrp, unsigned int subsys_mask)
{
	struct cgroup_subsys *ss;
	int i, ret = 0;

	/* process cftsets of each subsystem */
	for_each_subsys(ss, i) {
		struct cftype *cfts;

		if (!(subsys_mask & (1 << i)))
			continue;

		list_for_each_entry(cfts, &ss->cfts, node) {
			ret = cgroup_addrm_files(cgrp, cfts, true);
			if (ret < 0)
				goto err;
		}
	}
	return 0;
err:
	cgroup_clear_dir(cgrp, subsys_mask);
	return ret;
}

/*
 * css destruction is a four-stage process.
 *
 * 1. Destruction starts.  Killing of the percpu_ref is initiated.
 *    Implemented in kill_css().
 *
 * 2. When the percpu_ref is confirmed to be visible as killed on all CPUs
 *    and thus css_tryget_online() is guaranteed to fail, the css can be
 *    offlined by invoking offline_css().  After offlining, the base ref is
 *    put.  Implemented in css_killed_work_fn().
 *
 * 3. When the percpu_ref reaches zero, the only possible remaining
 *    accessors are inside RCU read sections.  css_release() schedules the
 *    RCU callback.
 *
 * 4. After the grace period, the css can be freed.  Implemented in
 *    css_free_work_fn().
 *
 * It is actually hairier because both steps 2 and 4 require process
 * context and thus involve punting to css->destroy_work, adding two
 * additional steps to the already complex sequence.
 */
static void css_free_work_fn(struct work_struct *work)
{
	struct cgroup_subsys_state *css =
		container_of(work, struct cgroup_subsys_state, destroy_work);
	struct cgroup *cgrp = css->cgroup;

	if (css->parent)
		css_put(css->parent);

	css->ss->css_free(css);
	cgroup_put(cgrp);
}

static void css_free_rcu_fn(struct rcu_head *rcu_head)
{
	struct cgroup_subsys_state *css =
		container_of(rcu_head, struct cgroup_subsys_state, rcu_head);

	INIT_WORK(&css->destroy_work, css_free_work_fn);
	queue_work(cgroup_destroy_wq, &css->destroy_work);
}

static void css_release_work_fn(struct work_struct *work)
{
	struct cgroup_subsys_state *css =
		container_of(work, struct cgroup_subsys_state, destroy_work);
	struct cgroup_subsys *ss = css->ss;

	cgroup_idr_remove(&ss->css_idr, css->id);

	call_rcu(&css->rcu_head, css_free_rcu_fn);
}

static void css_release(struct percpu_ref *ref)
{
	struct cgroup_subsys_state *css =
		container_of(ref, struct cgroup_subsys_state, refcnt);

	INIT_WORK(&css->destroy_work, css_release_work_fn);
	queue_work(cgroup_destroy_wq, &css->destroy_work);
}

static void init_and_link_css(struct cgroup_subsys_state *css,
			      struct cgroup_subsys *ss, struct cgroup *cgrp)
{
	cgroup_get(cgrp);

	css->cgroup = cgrp;
	css->ss = ss;
	css->flags = 0;

	if (cgrp->parent) {
		css->parent = cgroup_css(cgrp->parent, ss);
		css_get(css->parent);
	} else {
		css->flags |= CSS_ROOT;
	}

	BUG_ON(cgroup_css(cgrp, ss));
}

/* invoke ->css_online() on a new CSS and mark it online if successful */
static int online_css(struct cgroup_subsys_state *css)
{
	struct cgroup_subsys *ss = css->ss;
	int ret = 0;

	lockdep_assert_held(&cgroup_mutex);

	if (ss->css_online)
		ret = ss->css_online(css);
	if (!ret) {
		css->flags |= CSS_ONLINE;
		rcu_assign_pointer(css->cgroup->subsys[ss->id], css);
	}
	return ret;
}

/* if the CSS is online, invoke ->css_offline() on it and mark it offline */
static void offline_css(struct cgroup_subsys_state *css)
{
	struct cgroup_subsys *ss = css->ss;

	lockdep_assert_held(&cgroup_mutex);

	if (!(css->flags & CSS_ONLINE))
		return;

	if (ss->css_offline)
		ss->css_offline(css);

	css->flags &= ~CSS_ONLINE;
	RCU_INIT_POINTER(css->cgroup->subsys[ss->id], NULL);

	wake_up_all(&css->cgroup->offline_waitq);
}

/**
 * create_css - create a cgroup_subsys_state
 * @cgrp: the cgroup new css will be associated with
 * @ss: the subsys of new css
 *
 * Create a new css associated with @cgrp - @ss pair.  On success, the new
 * css is online and installed in @cgrp with all interface files created.
 * Returns 0 on success, -errno on failure.
 */
static int create_css(struct cgroup *cgrp, struct cgroup_subsys *ss)
{
	struct cgroup *parent = cgrp->parent;
	struct cgroup_subsys_state *css;
	int err;

	lockdep_assert_held(&cgroup_mutex);

	css = ss->css_alloc(cgroup_css(parent, ss));
	if (IS_ERR(css))
		return PTR_ERR(css);

	init_and_link_css(css, ss, cgrp);

	err = percpu_ref_init(&css->refcnt, css_release);
	if (err)
		goto err_free_css;

	err = cgroup_idr_alloc(&ss->css_idr, NULL, 2, 0, GFP_NOWAIT);
	if (err < 0)
		goto err_free_percpu_ref;
	css->id = err;

	err = cgroup_populate_dir(cgrp, 1 << ss->id);
	if (err)
		goto err_free_id;

	/* @css is ready to be brought online now, make it visible */
	cgroup_idr_replace(&ss->css_idr, css, css->id);

	err = online_css(css);
	if (err)
		goto err_clear_dir;

	if (ss->broken_hierarchy && !ss->warned_broken_hierarchy &&
	    parent->parent) {
		pr_warn("%s (%d) created nested cgroup for controller \"%s\" which has incomplete hierarchy support. Nested cgroups may change behavior in the future.\n",
			current->comm, current->pid, ss->name);
		if (!strcmp(ss->name, "memory"))
			pr_warn("\"memory\" requires setting use_hierarchy to 1 on the root\n");
		ss->warned_broken_hierarchy = true;
	}

	return 0;

err_clear_dir:
	cgroup_clear_dir(css->cgroup, 1 << css->ss->id);
err_free_id:
	cgroup_idr_remove(&ss->css_idr, css->id);
err_free_percpu_ref:
	percpu_ref_cancel_init(&css->refcnt);
err_free_css:
	call_rcu(&css->rcu_head, css_free_rcu_fn);
	return err;
}

static int cgroup_mkdir(struct kernfs_node *parent_kn, const char *name,
			umode_t mode)
{
	struct cgroup *parent, *cgrp;
	struct cgroup_root *root;
	struct cgroup_subsys *ss;
	struct kernfs_node *kn;
	int ssid, ret;

	parent = cgroup_kn_lock_live(parent_kn);
	if (!parent)
		return -ENODEV;
	root = parent->root;

	/* allocate the cgroup and its ID, 0 is reserved for the root */
	cgrp = kzalloc(sizeof(*cgrp), GFP_KERNEL);
	if (!cgrp) {
		ret = -ENOMEM;
		goto out_unlock;
	}

	/*
	 * Temporarily set the pointer to NULL, so idr_find() won't return
	 * a half-baked cgroup.
	 */
	cgrp->id = cgroup_idr_alloc(&root->cgroup_idr, NULL, 2, 0, GFP_NOWAIT);
	if (cgrp->id < 0) {
		ret = -ENOMEM;
		goto out_free_cgrp;
	}

	init_cgroup_housekeeping(cgrp);

	cgrp->parent = parent;
	cgrp->self.parent = &parent->self;
	cgrp->root = root;

	if (notify_on_release(parent))
		set_bit(CGRP_NOTIFY_ON_RELEASE, &cgrp->flags);

	if (test_bit(CGRP_CPUSET_CLONE_CHILDREN, &parent->flags))
		set_bit(CGRP_CPUSET_CLONE_CHILDREN, &cgrp->flags);

	/* create the directory */
	kn = kernfs_create_dir(parent->kn, name, mode, cgrp);
	if (IS_ERR(kn)) {
		ret = PTR_ERR(kn);
		goto out_free_id;
	}
	cgrp->kn = kn;

	/*
	 * This extra ref will be put in cgroup_free_fn() and guarantees
	 * that @cgrp->kn is always accessible.
	 */
	kernfs_get(kn);

	cgrp->serial_nr = cgroup_serial_nr_next++;

	/* allocation complete, commit to creation */
	list_add_tail_rcu(&cgrp->sibling, &cgrp->parent->children);
	atomic_inc(&root->nr_cgrps);
	cgroup_get(parent);

	/*
	 * @cgrp is now fully operational.  If something fails after this
	 * point, it'll be released via the normal destruction path.
	 */
	cgroup_idr_replace(&root->cgroup_idr, cgrp, cgrp->id);

	ret = cgroup_kn_set_ugid(kn);
	if (ret)
		goto out_destroy;

	ret = cgroup_addrm_files(cgrp, cgroup_base_files, true);
	if (ret)
		goto out_destroy;

	/* let's create and online css's */
	for_each_subsys(ss, ssid) {
		if (parent->child_subsys_mask & (1 << ssid)) {
			ret = create_css(cgrp, ss);
			if (ret)
				goto out_destroy;
		}
	}

	/*
	 * On the default hierarchy, a child doesn't automatically inherit
	 * child_subsys_mask from the parent.  Each is configured manually.
	 */
	if (!cgroup_on_dfl(cgrp))
		cgrp->child_subsys_mask = parent->child_subsys_mask;

	kernfs_activate(kn);

	ret = 0;
	goto out_unlock;

out_free_id:
	cgroup_idr_remove(&root->cgroup_idr, cgrp->id);
out_free_cgrp:
	kfree(cgrp);
out_unlock:
	cgroup_kn_unlock(parent_kn);
	return ret;

out_destroy:
	cgroup_destroy_locked(cgrp);
	goto out_unlock;
}

/*
 * This is called when the refcnt of a css is confirmed to be killed.
 * css_tryget_online() is now guaranteed to fail.  Tell the subsystem to
 * initiate destruction and put the css ref from kill_css().
 */
static void css_killed_work_fn(struct work_struct *work)
{
	struct cgroup_subsys_state *css =
		container_of(work, struct cgroup_subsys_state, destroy_work);

	mutex_lock(&cgroup_mutex);
	offline_css(css);
	mutex_unlock(&cgroup_mutex);

	css_put(css);
}

/* css kill confirmation processing requires process context, bounce */
static void css_killed_ref_fn(struct percpu_ref *ref)
{
	struct cgroup_subsys_state *css =
		container_of(ref, struct cgroup_subsys_state, refcnt);

	INIT_WORK(&css->destroy_work, css_killed_work_fn);
	queue_work(cgroup_destroy_wq, &css->destroy_work);
}

/**
 * kill_css - destroy a css
 * @css: css to destroy
 *
 * This function initiates destruction of @css by removing cgroup interface
 * files and putting its base reference.  ->css_offline() will be invoked
 * asynchronously once css_tryget_online() is guaranteed to fail and when
 * the reference count reaches zero, @css will be released.
 */
static void kill_css(struct cgroup_subsys_state *css)
{
	lockdep_assert_held(&cgroup_mutex);

	/*
	 * This must happen before css is disassociated with its cgroup.
	 * See seq_css() for details.
	 */
	cgroup_clear_dir(css->cgroup, 1 << css->ss->id);

	/*
	 * Killing would put the base ref, but we need to keep it alive
	 * until after ->css_offline().
	 */
	css_get(css);

	/*
	 * cgroup core guarantees that, by the time ->css_offline() is
	 * invoked, no new css reference will be given out via
	 * css_tryget_online().  We can't simply call percpu_ref_kill() and
	 * proceed to offlining css's because percpu_ref_kill() doesn't
	 * guarantee that the ref is seen as killed on all CPUs on return.
	 *
	 * Use percpu_ref_kill_and_confirm() to get notifications as each
	 * css is confirmed to be seen as killed on all CPUs.
	 */
	percpu_ref_kill_and_confirm(&css->refcnt, css_killed_ref_fn);
}

/**
 * cgroup_destroy_locked - the first stage of cgroup destruction
 * @cgrp: cgroup to be destroyed
 *
 * css's make use of percpu refcnts whose killing latency shouldn't be
 * exposed to userland and are RCU protected.  Also, cgroup core needs to
 * guarantee that css_tryget_online() won't succeed by the time
 * ->css_offline() is invoked.  To satisfy all the requirements,
 * destruction is implemented in the following two steps.
 *
 * s1. Verify @cgrp can be destroyed and mark it dying.  Remove all
 *     userland visible parts and start killing the percpu refcnts of
 *     css's.  Set up so that the next stage will be kicked off once all
 *     the percpu refcnts are confirmed to be killed.
 *
 * s2. Invoke ->css_offline(), mark the cgroup dead and proceed with the
 *     rest of destruction.  Once all cgroup references are gone, the
 *     cgroup is RCU-freed.
 *
 * This function implements s1.  After this step, @cgrp is gone as far as
 * the userland is concerned and a new cgroup with the same name may be
 * created.  As cgroup doesn't care about the names internally, this
 * doesn't cause any problem.
 */
static int cgroup_destroy_locked(struct cgroup *cgrp)
	__releases(&cgroup_mutex) __acquires(&cgroup_mutex)
{
	struct cgroup_subsys_state *css;
	bool empty;
	int ssid;

	lockdep_assert_held(&cgroup_mutex);

	/*
	 * css_set_rwsem synchronizes access to ->cset_links and prevents
	 * @cgrp from being removed while put_css_set() is in progress.
	 */
	down_read(&css_set_rwsem);
	empty = list_empty(&cgrp->cset_links);
	up_read(&css_set_rwsem);
	if (!empty)
		return -EBUSY;

	/*
	 * Make sure there are no live children.  We can't test ->children
	 * emptiness as dead children linger on it while being destroyed;
	 * otherwise, "rmdir parent/child parent" may fail with -EBUSY.
	 */
	if (cgroup_has_live_children(cgrp))
		return -EBUSY;

	/*
	 * Mark @cgrp dead.  This prevents further task migration and child
	 * creation by disabling cgroup_lock_live_group().  Note that
	 * CGRP_DEAD assertion is depended upon by css_next_child() to
	 * resume iteration after dropping RCU read lock.  See
	 * css_next_child() for details.
	 */
	set_bit(CGRP_DEAD, &cgrp->flags);

	/* initiate massacre of all css's */
	for_each_css(css, ssid, cgrp)
		kill_css(css);

	/* CGRP_DEAD is set, remove from ->release_list for the last time */
	raw_spin_lock(&release_list_lock);
	if (!list_empty(&cgrp->release_list))
		list_del_init(&cgrp->release_list);
	raw_spin_unlock(&release_list_lock);

	/*
	 * Remove @cgrp directory along with the base files.  @cgrp has an
	 * extra ref on its kn.
	 */
	kernfs_remove(cgrp->kn);

	set_bit(CGRP_RELEASABLE, &cgrp->parent->flags);
	check_for_release(cgrp->parent);

	/* put the base reference */
	cgroup_put(cgrp);

	return 0;
}

static int cgroup_rmdir(struct kernfs_node *kn)
{
	struct cgroup *cgrp;
	int ret = 0;

	cgrp = cgroup_kn_lock_live(kn);
	if (!cgrp)
		return 0;
	cgroup_get(cgrp);	/* for @kn->priv clearing */

	ret = cgroup_destroy_locked(cgrp);

	cgroup_kn_unlock(kn);

	/*
	 * There are two control paths which try to determine cgroup from
	 * dentry without going through kernfs - cgroupstats_build() and
	 * css_tryget_online_from_dir().  Those are supported by RCU
	 * protecting clearing of cgrp->kn->priv backpointer, which should
	 * happen after all files under it have been removed.
	 */
	if (!ret)
		RCU_INIT_POINTER(*(void __rcu __force **)&kn->priv, NULL);

	cgroup_put(cgrp);
	return ret;
}

static struct kernfs_syscall_ops cgroup_kf_syscall_ops = {
	.remount_fs		= cgroup_remount,
	.show_options		= cgroup_show_options,
	.mkdir			= cgroup_mkdir,
	.rmdir			= cgroup_rmdir,
	.rename			= cgroup_rename,
};

static void __init cgroup_init_subsys(struct cgroup_subsys *ss, bool early)
{
	struct cgroup_subsys_state *css;

	printk(KERN_INFO "Initializing cgroup subsys %s\n", ss->name);

	mutex_lock(&cgroup_mutex);

	idr_init(&ss->css_idr);
	INIT_LIST_HEAD(&ss->cfts);

	/* Create the root cgroup state for this subsystem */
	ss->root = &cgrp_dfl_root;
	css = ss->css_alloc(cgroup_css(&cgrp_dfl_root.cgrp, ss));
	/* We don't handle early failures gracefully */
	BUG_ON(IS_ERR(css));
	init_and_link_css(css, ss, &cgrp_dfl_root.cgrp);
	if (early) {
		/* idr_alloc() can't be called safely during early init */
		css->id = 1;
	} else {
		css->id = cgroup_idr_alloc(&ss->css_idr, css, 1, 2, GFP_KERNEL);
		BUG_ON(css->id < 0);
	}

	/*
	 * Update the init_css_set to contain a subsys pointer to this
	 * state - since the subsystem is newly registered, all tasks and
	 * hence the init_css_set is in the subsystem's root cgroup.
	 */
	init_css_set.subsys[ss->id] = css;

	need_forkexit_callback |= ss->fork || ss->exit;

	/*
	 * At system boot, before all subsystems have been registered, no
	 * tasks have been forked, so we don't need to invoke fork
	 * callbacks here.
	 */
	BUG_ON(!list_empty(&init_task.tasks));

	BUG_ON(online_css(css));

	cgrp_dfl_root.subsys_mask |= 1 << ss->id;

	mutex_unlock(&cgroup_mutex);
}

/**
 * cgroup_init_early - cgroup initialization at system boot
 *
 * Initialize cgroups at system boot, and initialize any
 * subsystems that request early init.
 */
int __init cgroup_init_early(void)
{
	static struct cgroup_sb_opts __initdata opts =
		{ .flags = CGRP_ROOT_SANE_BEHAVIOR };
	struct cgroup_subsys *ss;
	int i;

	init_cgroup_root(&cgrp_dfl_root, &opts);
	RCU_INIT_POINTER(init_task.cgroups, &init_css_set);

	for_each_subsys(ss, i) {
		WARN(!ss->css_alloc || !ss->css_free || ss->name || ss->id,
		     "invalid cgroup_subsys %d:%s css_alloc=%p css_free=%p name:id=%d:%s\n",
		     i, cgroup_subsys_name[i], ss->css_alloc, ss->css_free,
		     ss->id, ss->name);
		WARN(strlen(cgroup_subsys_name[i]) > MAX_CGROUP_TYPE_NAMELEN,
		     "cgroup_subsys_name %s too long\n", cgroup_subsys_name[i]);

		ss->id = i;
		ss->name = cgroup_subsys_name[i];

		if (ss->early_init)
			cgroup_init_subsys(ss, true);
	}
	return 0;
}

/**
 * cgroup_init - cgroup initialization
 *
 * Register cgroup filesystem and /proc file, and initialize
 * any subsystems that didn't request early init.
 */
int __init cgroup_init(void)
{
	struct cgroup_subsys *ss;
	unsigned long key;
	int ssid, err;

	BUG_ON(cgroup_init_cftypes(NULL, cgroup_base_files));

	mutex_lock(&cgroup_mutex);

	/* Add init_css_set to the hash table */
	key = css_set_hash(init_css_set.subsys);
	hash_add(css_set_table, &init_css_set.hlist, key);

	BUG_ON(cgroup_setup_root(&cgrp_dfl_root, 0));

	mutex_unlock(&cgroup_mutex);

	for_each_subsys(ss, ssid) {
		if (ss->early_init) {
			struct cgroup_subsys_state *css =
				init_css_set.subsys[ss->id];

			css->id = cgroup_idr_alloc(&ss->css_idr, css, 1, 2,
						   GFP_KERNEL);
			BUG_ON(css->id < 0);
		} else {
			cgroup_init_subsys(ss, false);
		}

		list_add_tail(&init_css_set.e_cset_node[ssid],
			      &cgrp_dfl_root.cgrp.e_csets[ssid]);

		/*
		 * cftype registration needs kmalloc and can't be done
		 * during early_init.  Register base cftypes separately.
		 */
		if (ss->base_cftypes)
			WARN_ON(cgroup_add_cftypes(ss, ss->base_cftypes));
	}

	cgroup_kobj = kobject_create_and_add("cgroup", fs_kobj);
	if (!cgroup_kobj)
		return -ENOMEM;

	err = register_filesystem(&cgroup_fs_type);
	if (err < 0) {
		kobject_put(cgroup_kobj);
		return err;
	}

	proc_create("cgroups", 0, NULL, &proc_cgroupstats_operations);
	return 0;
}

static int __init cgroup_wq_init(void)
{
	/*
	 * There isn't much point in executing the destruction path in
	 * parallel.  A good chunk of it is serialized with cgroup_mutex
	 * anyway.  Use 1 for @max_active.
	 *
	 * We would prefer to do this in cgroup_init() above, but that
	 * is called before init_workqueues(): so leave this until after.
	 */
	cgroup_destroy_wq = alloc_workqueue("cgroup_destroy", 0, 1);
	BUG_ON(!cgroup_destroy_wq);

	/*
	 * Used to destroy pidlists and separate to serve as flush domain.
	 * Cap @max_active to 1 too.
	 */
	cgroup_pidlist_destroy_wq = alloc_workqueue("cgroup_pidlist_destroy",
						    0, 1);
	BUG_ON(!cgroup_pidlist_destroy_wq);

	return 0;
}
core_initcall(cgroup_wq_init);

/*
 * proc_cgroup_show()
 *  - Print task's cgroup paths into seq_file, one line for each hierarchy
 *  - Used for /proc/<pid>/cgroup.
 */
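
/*
 * Each line has the form
 *
 *	hierarchy-ID:comma-separated-subsystems:cgroup-path
 *
 * e.g. "4:cpu,cpuacct:/user" (IDs and paths are illustrative only).
 * Named hierarchies additionally carry a "name=<x>" entry in the middle
 * field.
 */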

/* TODO: Use a proper seq_file iterator */
int proc_cgroup_show(struct seq_file *m, void *v)
{
	struct pid *pid;
	struct task_struct *tsk;
	char *buf, *path;
	int retval;
	struct cgroup_root *root;

	retval = -ENOMEM;
	buf = kmalloc(PATH_MAX, GFP_KERNEL);
	if (!buf)
		goto out;

	retval = -ESRCH;
	pid = m->private;
	tsk = get_pid_task(pid, PIDTYPE_PID);
	if (!tsk)
		goto out_free;

	retval = 0;

	mutex_lock(&cgroup_mutex);
	down_read(&css_set_rwsem);

	for_each_root(root) {
		struct cgroup_subsys *ss;
		struct cgroup *cgrp;
		int ssid, count = 0;

		if (root == &cgrp_dfl_root && !cgrp_dfl_root_visible)
			continue;

		seq_printf(m, "%d:", root->hierarchy_id);
		for_each_subsys(ss, ssid)
			if (root->subsys_mask & (1 << ssid))
				seq_printf(m, "%s%s", count++ ? "," : "", ss->name);
		if (strlen(root->name))
			seq_printf(m, "%sname=%s", count ? "," : "",
				   root->name);
		seq_putc(m, ':');
		cgrp = task_cgroup_from_root(tsk, root);
		path = cgroup_path(cgrp, buf, PATH_MAX);
		if (!path) {
			retval = -ENAMETOOLONG;
			goto out_unlock;
		}
		seq_puts(m, path);
		seq_putc(m, '\n');
	}

out_unlock:
	up_read(&css_set_rwsem);
	mutex_unlock(&cgroup_mutex);
	put_task_struct(tsk);
out_free:
	kfree(buf);
out:
	return retval;
}

/* Display information about each subsystem and each hierarchy */
static int proc_cgroupstats_show(struct seq_file *m, void *v)
{
	struct cgroup_subsys *ss;
	int i;

	seq_puts(m, "#subsys_name\thierarchy\tnum_cgroups\tenabled\n");
	/*
	 * ideally we don't want subsystems moving around while we do this.
	 * cgroup_mutex is also necessary to guarantee an atomic snapshot of
	 * subsys/hierarchy state.
	 */
	mutex_lock(&cgroup_mutex);

	for_each_subsys(ss, i)
		seq_printf(m, "%s\t%d\t%d\t%d\n",
			   ss->name, ss->root->hierarchy_id,
			   atomic_read(&ss->root->nr_cgrps), !ss->disabled);

	mutex_unlock(&cgroup_mutex);
	return 0;
}
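
/*
 * The resulting /proc/cgroups is one tab-separated row per subsystem,
 * e.g. (values are illustrative only):
 *
 *	#subsys_name	hierarchy	num_cgroups	enabled
 *	cpuset		1		4		1
 */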

static int cgroupstats_open(struct inode *inode, struct file *file)
{
	return single_open(file, proc_cgroupstats_show, NULL);
}

static const struct file_operations proc_cgroupstats_operations = {
	.open = cgroupstats_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

/**
 * cgroup_fork - initialize cgroup related fields during copy_process()
 * @child: pointer to task_struct of the newly forked child process.
 *
 * A task is associated with the init_css_set until cgroup_post_fork()
 * attaches it to the parent's css_set.  Empty cg_list indicates that
 * @child isn't holding a reference to its css_set.
 */
void cgroup_fork(struct task_struct *child)
{
	RCU_INIT_POINTER(child->cgroups, &init_css_set);
	INIT_LIST_HEAD(&child->cg_list);
}

/**
 * cgroup_post_fork - called on a new task after adding it to the task list
 * @child: the task in question
 *
 * Adds the task to the list running through its css_set if necessary and
 * calls the subsystem fork() callbacks.  Has to be after the task is
 * visible on the task list in case we race with the first call to
 * css_task_iter_start() - to guarantee that the new task ends up on its
 * list.
 */
void cgroup_post_fork(struct task_struct *child)
{
	struct cgroup_subsys *ss;
	int i;

	/*
	 * This may race against cgroup_enable_task_cg_lists().  As that
	 * function sets use_task_css_set_links before grabbing
	 * tasklist_lock and we just went through tasklist_lock to add
	 * @child, it's guaranteed that either we see the set
	 * use_task_css_set_links or cgroup_enable_task_cg_lists() sees
	 * @child during its iteration.
	 *
	 * If we won the race, @child is associated with %current's
	 * css_set.  Grabbing css_set_rwsem guarantees both that the
	 * association is stable, and, on completion of the parent's
	 * migration, @child is visible in the source of migration or
	 * already in the destination cgroup.  This guarantee is necessary
	 * when implementing operations which need to migrate all tasks of
	 * a cgroup to another.
	 *
	 * Note that if we lose to cgroup_enable_task_cg_lists(), @child
	 * will remain in init_css_set.  This is safe because all tasks are
	 * in the init_css_set before cg_lists is enabled and there's no
	 * operation which transfers all tasks out of init_css_set.
	 */
	if (use_task_css_set_links) {
		struct css_set *cset;

		down_write(&css_set_rwsem);
		cset = task_css_set(current);
		if (list_empty(&child->cg_list)) {
			rcu_assign_pointer(child->cgroups, cset);
			list_add(&child->cg_list, &cset->tasks);
			get_css_set(cset);
		}
		up_write(&css_set_rwsem);
	}

	/*
	 * Call ss->fork().  This must happen after @child is linked on
	 * css_set; otherwise, @child might change state between ->fork()
	 * and addition to css_set.
	 */
	if (need_forkexit_callback) {
		for_each_subsys(ss, i)
			if (ss->fork)
				ss->fork(child);
	}
}

/**
 * cgroup_exit - detach cgroup from exiting task
 * @tsk: pointer to task_struct of exiting process
 *
 * Description: Detach cgroup from @tsk and release it.
 *
 * Note that cgroups marked notify_on_release force every task in
 * them to take the global cgroup_mutex mutex when exiting.
 * This could impact scaling on very large systems.  Be reluctant to
 * use notify_on_release cgroups where very high task exit scaling
 * is required on large systems.
 *
 * We set the exiting task's cgroup to the root cgroup (top_cgroup).  We
 * call cgroup_exit() while the task is still competent to handle
 * notify_on_release(), then leave the task attached to the root cgroup in
 * each hierarchy for the remainder of its exit.  No need to bother with
 * init_css_set refcnting.  init_css_set never goes away and we can't race
 * with migration path - PF_EXITING is visible to migration path.
 */
void cgroup_exit(struct task_struct *tsk)
{
	struct cgroup_subsys *ss;
	struct css_set *cset;
	bool put_cset = false;
	int i;

	/*
	 * Unlink @tsk from its css_set.  As migration path can't race
	 * with us, we can check cg_list without grabbing css_set_rwsem.
	 */
	if (!list_empty(&tsk->cg_list)) {
		down_write(&css_set_rwsem);
		list_del_init(&tsk->cg_list);
		up_write(&css_set_rwsem);
		put_cset = true;
	}

	/* Reassign the task to the init_css_set. */
	cset = task_css_set(tsk);
	RCU_INIT_POINTER(tsk->cgroups, &init_css_set);

	if (need_forkexit_callback) {
		/* see cgroup_post_fork() for details */
		for_each_subsys(ss, i) {
			if (ss->exit) {
				struct cgroup_subsys_state *old_css = cset->subsys[i];
				struct cgroup_subsys_state *css = task_css(tsk, i);

				ss->exit(css, old_css, tsk);
			}
		}
	}

	if (put_cset)
		put_css_set(cset, true);
}

static void check_for_release(struct cgroup *cgrp)
{
	if (cgroup_is_releasable(cgrp) &&
	    list_empty(&cgrp->cset_links) && !cgroup_has_live_children(cgrp)) {
		/*
		 * Control Group is currently removable. If it's not
		 * already queued for a userspace notification, queue
		 * it now.
		 */
		int need_schedule_work = 0;

		raw_spin_lock(&release_list_lock);
		if (!cgroup_is_dead(cgrp) &&
		    list_empty(&cgrp->release_list)) {
			list_add(&cgrp->release_list, &release_list);
			need_schedule_work = 1;
		}
		raw_spin_unlock(&release_list_lock);
		if (need_schedule_work)
			schedule_work(&release_agent_work);
	}
}

/*
 * Notify userspace when a cgroup is released, by running the
 * configured release agent with the name of the cgroup (path
 * relative to the root of cgroup file system) as the argument.
 *
 * Most likely, this user command will try to rmdir this cgroup.
 *
 * This races with the possibility that some other task will be
 * attached to this cgroup before it is removed, or that some other
 * user task will 'mkdir' a child cgroup of this cgroup.  That's ok.
 * The presumed 'rmdir' will fail quietly if this cgroup is no longer
 * unused, and this cgroup will be reprieved from its death sentence,
 * to continue to serve a useful existence.  Next time it's released,
 * we will get notified again, if it still has 'notify_on_release' set.
 *
 * The final arg to call_usermodehelper() is UMH_WAIT_EXEC, which
 * means only wait until the task is successfully execve()'d.  The
 * separate release agent task is forked by call_usermodehelper(),
 * then control in this thread returns here, without waiting for the
 * release agent task.  We don't bother to wait because the caller of
 * this routine has no use for the exit status of the release agent
 * task, so no sense holding our caller up for that.
 */
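
/*
 * For example, with release_agent set to "/sbin/cgroup-release" (a
 * hypothetical helper) and cgroup "/foo" being released, the kernel
 * effectively runs
 *
 *	/sbin/cgroup-release /foo
 *
 * with the minimal HOME/PATH environment set up below.
 */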
static void cgroup_release_agent(struct work_struct *work)
{
	BUG_ON(work != &release_agent_work);
	mutex_lock(&cgroup_mutex);
	raw_spin_lock(&release_list_lock);
	while (!list_empty(&release_list)) {
		char *argv[3], *envp[3];
		int i;
		char *pathbuf = NULL, *agentbuf = NULL, *path;
		struct cgroup *cgrp = list_entry(release_list.next,
						    struct cgroup,
						    release_list);
		list_del_init(&cgrp->release_list);
		raw_spin_unlock(&release_list_lock);
		pathbuf = kmalloc(PATH_MAX, GFP_KERNEL);
		if (!pathbuf)
			goto continue_free;
		path = cgroup_path(cgrp, pathbuf, PATH_MAX);
		if (!path)
			goto continue_free;
		agentbuf = kstrdup(cgrp->root->release_agent_path, GFP_KERNEL);
		if (!agentbuf)
			goto continue_free;

		i = 0;
		argv[i++] = agentbuf;
		argv[i++] = path;
		argv[i] = NULL;

		i = 0;
		/* minimal command environment */
		envp[i++] = "HOME=/";
		envp[i++] = "PATH=/sbin:/bin:/usr/sbin:/usr/bin";
		envp[i] = NULL;

		/*
		 * Drop the lock while we invoke the usermode helper,
		 * since the exec could involve hitting disk and hence
		 * be a slow process.
		 */
		mutex_unlock(&cgroup_mutex);
		call_usermodehelper(argv[0], argv, envp, UMH_WAIT_EXEC);
		mutex_lock(&cgroup_mutex);
 continue_free:
		kfree(pathbuf);
		kfree(agentbuf);
		raw_spin_lock(&release_list_lock);
	}
	raw_spin_unlock(&release_list_lock);
	mutex_unlock(&cgroup_mutex);
}

static int __init cgroup_disable(char *str)
{
	struct cgroup_subsys *ss;
	char *token;
	int i;

	while ((token = strsep(&str, ",")) != NULL) {
		if (!*token)
			continue;

		for_each_subsys(ss, i) {
			if (!strcmp(token, ss->name)) {
				ss->disabled = 1;
				printk(KERN_INFO "Disabling %s control group"
					" subsystem\n", ss->name);
				break;
			}
		}
	}
	return 1;
}
__setup("cgroup_disable=", cgroup_disable);
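
/*
 * For example, booting with "cgroup_disable=memory" on the kernel command
 * line keeps the memory controller from being used (subsystem name shown
 * for illustration; any registered subsystem name works, and several may
 * be given separated by commas).
 */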

5084
/**
 * css_tryget_online_from_dir - get corresponding css from a cgroup dentry
 * @dentry: directory dentry of interest
 * @ss: subsystem of interest
 *
 * If @dentry is a directory for a cgroup which has @ss enabled on it, try
 * to get the corresponding css and return it.  If such css doesn't exist
 * or can't be pinned, an ERR_PTR value is returned.
 */
struct cgroup_subsys_state *css_tryget_online_from_dir(struct dentry *dentry,
						       struct cgroup_subsys *ss)
{
	struct kernfs_node *kn = kernfs_node_from_dentry(dentry);
	struct cgroup_subsys_state *css = NULL;
	struct cgroup *cgrp;

	/* is @dentry a cgroup dir? */
	if (dentry->d_sb->s_type != &cgroup_fs_type || !kn ||
	    kernfs_type(kn) != KERNFS_DIR)
		return ERR_PTR(-EBADF);

	rcu_read_lock();

	/*
	 * This path doesn't originate from kernfs and @kn could already
	 * have been or be removed at any point.  @kn->priv is RCU
	 * protected for this access.  See cgroup_rmdir() for details.
	 */
	cgrp = rcu_dereference(kn->priv);
	if (cgrp)
		css = cgroup_css(cgrp, ss);

	if (!css || !css_tryget_online(css))
		css = ERR_PTR(-ENOENT);

	rcu_read_unlock();
	return css;
}
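
/*
 * Usage sketch (assumption: a caller holding an open cgroup directory,
 * similar to perf's cgroup attachment path).  The returned css is
 * pinned and must be released with css_put():
 *
 *	struct cgroup_subsys_state *css;
 *
 *	css = css_tryget_online_from_dir(f.file->f_path.dentry,
 *					 &perf_event_cgrp_subsys);
 *	if (IS_ERR(css))
 *		return PTR_ERR(css);
 *	...
 *	css_put(css);
 */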

/**
 * css_from_id - lookup css by id
 * @id: the cgroup id
 * @ss: cgroup subsys to be looked into
 *
 * Returns the css if there's valid one with @id, otherwise returns NULL.
 * Should be called under rcu_read_lock().
 */
struct cgroup_subsys_state *css_from_id(int id, struct cgroup_subsys *ss)
{
	WARN_ON_ONCE(!rcu_read_lock_held());
	return idr_find(&ss->css_idr, id);
}
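
/*
 * Usage sketch (illustrative; the subsystem chosen here is just an
 * example): the lookup itself is only valid under RCU, and the css must
 * be pinned before use outside the read-side section:
 *
 *	rcu_read_lock();
 *	css = css_from_id(id, &memory_cgrp_subsys);
 *	if (css && !css_tryget_online(css))
 *		css = NULL;
 *	rcu_read_unlock();
 */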

#ifdef CONFIG_CGROUP_DEBUG
static struct cgroup_subsys_state *
debug_css_alloc(struct cgroup_subsys_state *parent_css)
{
	struct cgroup_subsys_state *css = kzalloc(sizeof(*css), GFP_KERNEL);

	if (!css)
		return ERR_PTR(-ENOMEM);

	return css;
}

static void debug_css_free(struct cgroup_subsys_state *css)
{
	kfree(css);
}

static u64 debug_taskcount_read(struct cgroup_subsys_state *css,
				struct cftype *cft)
{
	return cgroup_task_count(css->cgroup);
}

static u64 current_css_set_read(struct cgroup_subsys_state *css,
				struct cftype *cft)
{
	return (u64)(unsigned long)current->cgroups;
}

static u64 current_css_set_refcount_read(struct cgroup_subsys_state *css,
					 struct cftype *cft)
{
	u64 count;

	rcu_read_lock();
	count = atomic_read(&task_css_set(current)->refcount);
	rcu_read_unlock();
	return count;
}

static int current_css_set_cg_links_read(struct seq_file *seq, void *v)
{
	struct cgrp_cset_link *link;
	struct css_set *cset;
	char *name_buf;

	name_buf = kmalloc(NAME_MAX + 1, GFP_KERNEL);
	if (!name_buf)
		return -ENOMEM;

	down_read(&css_set_rwsem);
	rcu_read_lock();
	cset = rcu_dereference(current->cgroups);
	list_for_each_entry(link, &cset->cgrp_links, cgrp_link) {
		struct cgroup *c = link->cgrp;

		cgroup_name(c, name_buf, NAME_MAX + 1);
		seq_printf(seq, "Root %d group %s\n",
			   c->root->hierarchy_id, name_buf);
	}
	rcu_read_unlock();
	up_read(&css_set_rwsem);
	kfree(name_buf);
	return 0;
}

#define MAX_TASKS_SHOWN_PER_CSS 25
static int cgroup_css_links_read(struct seq_file *seq, void *v)
{
	struct cgroup_subsys_state *css = seq_css(seq);
	struct cgrp_cset_link *link;

	down_read(&css_set_rwsem);
	list_for_each_entry(link, &css->cgroup->cset_links, cset_link) {
		struct css_set *cset = link->cset;
		struct task_struct *task;
		int count = 0;

		seq_printf(seq, "css_set %p\n", cset);

		list_for_each_entry(task, &cset->tasks, cg_list) {
			if (count++ > MAX_TASKS_SHOWN_PER_CSS)
				goto overflow;
			seq_printf(seq, "  task %d\n", task_pid_vnr(task));
		}

		list_for_each_entry(task, &cset->mg_tasks, cg_list) {
			if (count++ > MAX_TASKS_SHOWN_PER_CSS)
				goto overflow;
			seq_printf(seq, "  task %d\n", task_pid_vnr(task));
		}
		continue;
	overflow:
		seq_puts(seq, "  ...\n");
	}
	up_read(&css_set_rwsem);
	return 0;
}

static u64 releasable_read(struct cgroup_subsys_state *css, struct cftype *cft)
{
	return test_bit(CGRP_RELEASABLE, &css->cgroup->flags);
}

static struct cftype debug_files[] =  {
	{
		.name = "taskcount",
		.read_u64 = debug_taskcount_read,
	},

	{
		.name = "current_css_set",
		.read_u64 = current_css_set_read,
	},

	{
		.name = "current_css_set_refcount",
		.read_u64 = current_css_set_refcount_read,
	},

	{
		.name = "current_css_set_cg_links",
		.seq_show = current_css_set_cg_links_read,
	},

	{
		.name = "cgroup_css_links",
		.seq_show = cgroup_css_links_read,
	},

	{
		.name = "releasable",
		.read_u64 = releasable_read,
	},

	{ }	/* terminate */
};

struct cgroup_subsys debug_cgrp_subsys = {
	.css_alloc = debug_css_alloc,
	.css_free = debug_css_free,
	.base_cftypes = debug_files,
};
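
/*
 * Example (illustrative; mount point assumed): with CONFIG_CGROUP_DEBUG
 * enabled, the files above are exposed with the "debug." prefix once
 * the controller is mounted:
 *
 *	# mount -t cgroup -o debug none /mnt
 *	# cat /mnt/debug.taskcount
 */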
#endif /* CONFIG_CGROUP_DEBUG */