/*
 *  Generic process-grouping system.
 *
 *  Based originally on the cpuset system, extracted by Paul Menage
 *  Copyright (C) 2006 Google, Inc
 *
 *  Notifications support
 *  Copyright (C) 2009 Nokia Corporation
 *  Author: Kirill A. Shutemov
 *
 *  Copyright notices from the original cpuset code:
 *  --------------------------------------------------
 *  Copyright (C) 2003 BULL SA.
 *  Copyright (C) 2004-2006 Silicon Graphics, Inc.
 *
 *  Portions derived from Patrick Mochel's sysfs code.
 *  sysfs is Copyright (c) 2001-3 Patrick Mochel
 *
 *  2003-10-10 Written by Simon Derr.
 *  2003-10-22 Updates by Stephen Hemminger.
 *  2004 May-July Rework by Paul Jackson.
 *  ---------------------------------------------------
 *
 *  This file is subject to the terms and conditions of the GNU General Public
 *  License.  See the file COPYING in the main directory of the Linux
 *  distribution for more details.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/cgroup.h>
#include <linux/cred.h>
#include <linux/ctype.h>
#include <linux/errno.h>
#include <linux/init_task.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/magic.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/mount.h>
#include <linux/pagemap.h>
#include <linux/proc_fs.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/percpu-rwsem.h>
#include <linux/string.h>
#include <linux/sort.h>
#include <linux/kmod.h>
#include <linux/delayacct.h>
#include <linux/cgroupstats.h>
#include <linux/hashtable.h>
#include <linux/pid_namespace.h>
#include <linux/idr.h>
#include <linux/vmalloc.h> /* TODO: replace with more sophisticated array */
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/atomic.h>
#include <linux/cpuset.h>
#include <net/sock.h>

/*
 * pidlists linger the following amount before being destroyed.  The goal
 * is avoiding frequent destruction in the middle of consecutive read calls.
 * Expiring in the middle is a performance problem, not a correctness one.
 * 1 sec should be enough.
 */
#define CGROUP_PIDLIST_DESTROY_DELAY	HZ

#define CGROUP_FILE_NAME_MAX		(MAX_CGROUP_TYPE_NAMELEN +	\
					 MAX_CFTYPE_NAME + 2)

/*
 * cgroup_mutex is the master lock.  Any modification to cgroup or its
 * hierarchy must be performed while holding it.
 *
 * css_set_lock protects task->cgroups pointer, the list of css_set
 * objects, and the chain of tasks off each css_set.
 *
 * These locks are exported if CONFIG_PROVE_RCU so that accessors in
 * cgroup.h can use them for lockdep annotations.
 */
#ifdef CONFIG_PROVE_RCU
DEFINE_MUTEX(cgroup_mutex);
DEFINE_SPINLOCK(css_set_lock);
EXPORT_SYMBOL_GPL(cgroup_mutex);
EXPORT_SYMBOL_GPL(css_set_lock);
#else
static DEFINE_MUTEX(cgroup_mutex);
static DEFINE_SPINLOCK(css_set_lock);
#endif

/*
 * Protects cgroup_idr and css_idr so that IDs can be released without
 * grabbing cgroup_mutex.
 */
static DEFINE_SPINLOCK(cgroup_idr_lock);

/*
 * Protects cgroup_file->kn for !self csses.  It synchronizes notifications
 * against file removal/re-creation across css hiding.
 */
static DEFINE_SPINLOCK(cgroup_file_kn_lock);

/*
 * Protects cgroup_root->release_agent_path.  Modifying it also requires
 * cgroup_mutex.  Reading requires either cgroup_mutex or this spinlock.
 */
static DEFINE_SPINLOCK(release_agent_path_lock);

struct percpu_rw_semaphore cgroup_threadgroup_rwsem;

#define cgroup_assert_mutex_or_rcu_locked()				\
	RCU_LOCKDEP_WARN(!rcu_read_lock_held() &&			\
			   !lockdep_is_held(&cgroup_mutex),		\
			   "cgroup_mutex or RCU read lock required");

/*
 * cgroup destruction makes heavy use of work items and there can be a lot
 * of concurrent destructions.  Use a separate workqueue so that cgroup
 * destruction work items don't end up filling up max_active of system_wq
 * which may lead to deadlock.
 */
static struct workqueue_struct *cgroup_destroy_wq;

/*
 * pidlist destructions need to be flushed on cgroup destruction.  Use a
 * separate workqueue as flush domain.
 */
static struct workqueue_struct *cgroup_pidlist_destroy_wq;

/* generate an array of cgroup subsystem pointers */
#define SUBSYS(_x) [_x ## _cgrp_id] = &_x ## _cgrp_subsys,
static struct cgroup_subsys *cgroup_subsys[] = {
#include <linux/cgroup_subsys.h>
};
#undef SUBSYS

/* array of cgroup subsystem names */
#define SUBSYS(_x) [_x ## _cgrp_id] = #_x,
static const char *cgroup_subsys_name[] = {
#include <linux/cgroup_subsys.h>
};
#undef SUBSYS

/* array of static_keys for cgroup_subsys_enabled() and cgroup_subsys_on_dfl() */
#define SUBSYS(_x)								\
	DEFINE_STATIC_KEY_TRUE(_x ## _cgrp_subsys_enabled_key);			\
	DEFINE_STATIC_KEY_TRUE(_x ## _cgrp_subsys_on_dfl_key);			\
	EXPORT_SYMBOL_GPL(_x ## _cgrp_subsys_enabled_key);			\
	EXPORT_SYMBOL_GPL(_x ## _cgrp_subsys_on_dfl_key);
#include <linux/cgroup_subsys.h>
#undef SUBSYS

#define SUBSYS(_x) [_x ## _cgrp_id] = &_x ## _cgrp_subsys_enabled_key,
static struct static_key_true *cgroup_subsys_enabled_key[] = {
#include <linux/cgroup_subsys.h>
};
#undef SUBSYS

#define SUBSYS(_x) [_x ## _cgrp_id] = &_x ## _cgrp_subsys_on_dfl_key,
static struct static_key_true *cgroup_subsys_on_dfl_key[] = {
#include <linux/cgroup_subsys.h>
};
#undef SUBSYS

/*
 * The default hierarchy, reserved for the subsystems that are otherwise
 * unattached - it never has more than a single cgroup, and all tasks are
 * part of that cgroup.
 */
struct cgroup_root cgrp_dfl_root;
EXPORT_SYMBOL_GPL(cgrp_dfl_root);

/*
 * The default hierarchy always exists but is hidden until mounted for the
 * first time.  This is for backward compatibility.
 */
static bool cgrp_dfl_root_visible;

/* Controllers blocked by the commandline in v1 */
static unsigned long cgroup_no_v1_mask;

/* some controllers are not supported in the default hierarchy */
static unsigned long cgrp_dfl_root_inhibit_ss_mask;

/* The list of hierarchy roots */

static LIST_HEAD(cgroup_roots);
static int cgroup_root_count;

/* hierarchy ID allocation and mapping, protected by cgroup_mutex */
static DEFINE_IDR(cgroup_hierarchy_idr);

/*
 * Assign a monotonically increasing serial number to csses.  It guarantees
 * cgroups with bigger numbers are newer than those with smaller numbers.
 * Also, as csses are always appended to the parent's ->children list, it
 * guarantees that sibling csses are always sorted in the ascending serial
 * number order on the list.  Protected by cgroup_mutex.
 */
static u64 css_serial_nr_next = 1;

/*
 * These bitmask flags indicate whether tasks in the fork and exit paths have
 * fork/exit handlers to call. This avoids us having to do extra work in the
 * fork/exit path to check which subsystems have fork/exit callbacks.
 */
static unsigned long have_fork_callback __read_mostly;
static unsigned long have_exit_callback __read_mostly;
static unsigned long have_free_callback __read_mostly;

/* Ditto for the can_fork callback. */
static unsigned long have_canfork_callback __read_mostly;

static struct file_system_type cgroup2_fs_type;
static struct cftype cgroup_dfl_base_files[];
static struct cftype cgroup_legacy_base_files[];

static int rebind_subsystems(struct cgroup_root *dst_root,
			     unsigned long ss_mask);
static void css_task_iter_advance(struct css_task_iter *it);
static int cgroup_destroy_locked(struct cgroup *cgrp);
static int create_css(struct cgroup *cgrp, struct cgroup_subsys *ss,
		      bool visible);
static void css_release(struct percpu_ref *ref);
static void kill_css(struct cgroup_subsys_state *css);
static int cgroup_addrm_files(struct cgroup_subsys_state *css,
			      struct cgroup *cgrp, struct cftype cfts[],
			      bool is_add);

/**
 * cgroup_ssid_enabled - cgroup subsys enabled test by subsys ID
 * @ssid: subsys ID of interest
 *
 * cgroup_subsys_enabled() can only be used with literal subsys names which
 * is fine for individual subsystems but unsuitable for cgroup core.  This
 * is a slower static_key_enabled() based test indexed by @ssid.
 */
static bool cgroup_ssid_enabled(int ssid)
{
	return static_key_enabled(cgroup_subsys_enabled_key[ssid]);
}

static bool cgroup_ssid_no_v1(int ssid)
{
	return cgroup_no_v1_mask & (1 << ssid);
}

/**
 * cgroup_on_dfl - test whether a cgroup is on the default hierarchy
 * @cgrp: the cgroup of interest
 *
 * The default hierarchy is the v2 interface of cgroup and this function
 * can be used to test whether a cgroup is on the default hierarchy for
 * cases where a subsystem should behave differently depending on the
 * interface version.
 *
 * The set of behaviors which change on the default hierarchy are still
 * being determined and the mount option is prefixed with __DEVEL__.
 *
 * List of changed behaviors:
 *
 * - Mount options "noprefix", "xattr", "clone_children", "release_agent"
 *   and "name" are disallowed.
 *
 * - When mounting an existing superblock, mount options should match.
 *
 * - Remount is disallowed.
 *
 * - rename(2) is disallowed.
 *
 * - "tasks" is removed.  Everything should be at process granularity.  Use
 *   "cgroup.procs" instead.
 *
 * - "cgroup.procs" is not sorted.  pids will be unique unless they got
 *   recycled in between reads.
 *
 * - "release_agent" and "notify_on_release" are removed.  Replacement
 *   notification mechanism will be implemented.
 *
 * - "cgroup.clone_children" is removed.
 *
 * - "cgroup.subtree_populated" is available.  Its value is 0 if the cgroup
 *   and its descendants contain no task; otherwise, 1.  The file also
 *   generates kernfs notification which can be monitored through poll and
 *   [di]notify when the value of the file changes.
 *
 * - cpuset: tasks will be kept in empty cpusets when hotplug happens and
 *   take masks of ancestors with non-empty cpus/mems, instead of being
 *   moved to an ancestor.
 *
 * - cpuset: a task can be moved into an empty cpuset, and again it takes
 *   masks of ancestors.
 *
 * - memcg: use_hierarchy is on by default and the cgroup file for the flag
 *   is not created.
 *
 * - blkcg: blk-throttle becomes properly hierarchical.
 *
 * - debug: disallowed on the default hierarchy.
 */
static bool cgroup_on_dfl(const struct cgroup *cgrp)
{
	return cgrp->root == &cgrp_dfl_root;
}

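/*
 * Illustrative sketch: a controller that must behave differently on the
 * two interface versions can branch on the test above, e.g.:
 *
 *	if (cgroup_on_dfl(cgrp))
 *		use_v2_behavior();
 *	else
 *		use_legacy_behavior();
 *
 * (use_v2_behavior()/use_legacy_behavior() are hypothetical helpers.)
 */
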
/* IDR wrappers which synchronize using cgroup_idr_lock */
static int cgroup_idr_alloc(struct idr *idr, void *ptr, int start, int end,
			    gfp_t gfp_mask)
{
	int ret;

	idr_preload(gfp_mask);
	spin_lock_bh(&cgroup_idr_lock);
	ret = idr_alloc(idr, ptr, start, end, gfp_mask & ~__GFP_DIRECT_RECLAIM);
	spin_unlock_bh(&cgroup_idr_lock);
	idr_preload_end();
	return ret;
}

static void *cgroup_idr_replace(struct idr *idr, void *ptr, int id)
{
	void *ret;

	spin_lock_bh(&cgroup_idr_lock);
	ret = idr_replace(idr, ptr, id);
	spin_unlock_bh(&cgroup_idr_lock);
	return ret;
}

static void cgroup_idr_remove(struct idr *idr, int id)
{
	spin_lock_bh(&cgroup_idr_lock);
	idr_remove(idr, id);
	spin_unlock_bh(&cgroup_idr_lock);
}

static struct cgroup *cgroup_parent(struct cgroup *cgrp)
{
	struct cgroup_subsys_state *parent_css = cgrp->self.parent;

	if (parent_css)
		return container_of(parent_css, struct cgroup, self);
	return NULL;
}

/**
 * cgroup_css - obtain a cgroup's css for the specified subsystem
 * @cgrp: the cgroup of interest
 * @ss: the subsystem of interest (%NULL returns @cgrp->self)
 *
 * Return @cgrp's css (cgroup_subsys_state) associated with @ss.  This
 * function must be called either under cgroup_mutex or rcu_read_lock() and
 * the caller is responsible for pinning the returned css if it wants to
 * keep accessing it outside the said locks.  This function may return
 * %NULL if @cgrp doesn't have @ss enabled.
 */
static struct cgroup_subsys_state *cgroup_css(struct cgroup *cgrp,
					      struct cgroup_subsys *ss)
{
	if (ss)
		return rcu_dereference_check(cgrp->subsys[ss->id],
					lockdep_is_held(&cgroup_mutex));
	else
		return &cgrp->self;
}

/**
 * cgroup_e_css - obtain a cgroup's effective css for the specified subsystem
 * @cgrp: the cgroup of interest
 * @ss: the subsystem of interest (%NULL returns @cgrp->self)
 *
 * Similar to cgroup_css() but returns the effective css, which is defined
 * as the matching css of the nearest ancestor including self which has @ss
 * enabled.  If @ss is associated with the hierarchy @cgrp is on, this
 * function is guaranteed to return non-NULL css.
 */
static struct cgroup_subsys_state *cgroup_e_css(struct cgroup *cgrp,
						struct cgroup_subsys *ss)
{
	lockdep_assert_held(&cgroup_mutex);

	if (!ss)
		return &cgrp->self;

	if (!(cgrp->root->subsys_mask & (1 << ss->id)))
		return NULL;

	/*
	 * This function is used while updating css associations and thus
	 * can't test the csses directly.  Use ->subtree_ss_mask.
	 */
	while (cgroup_parent(cgrp) &&
	       !(cgroup_parent(cgrp)->subtree_ss_mask & (1 << ss->id)))
		cgrp = cgroup_parent(cgrp);

	return cgroup_css(cgrp, ss);
}

/**
 * cgroup_get_e_css - get a cgroup's effective css for the specified subsystem
 * @cgrp: the cgroup of interest
 * @ss: the subsystem of interest
 *
 * Find and get the effective css of @cgrp for @ss.  The effective css is
 * defined as the matching css of the nearest ancestor including self which
 * has @ss enabled.  If @ss is not mounted on the hierarchy @cgrp is on,
 * the root css is returned, so this function always returns a valid css.
 * The returned css must be put using css_put().
 */
struct cgroup_subsys_state *cgroup_get_e_css(struct cgroup *cgrp,
					     struct cgroup_subsys *ss)
{
	struct cgroup_subsys_state *css;

	rcu_read_lock();

	do {
		css = cgroup_css(cgrp, ss);

		if (css && css_tryget_online(css))
			goto out_unlock;
		cgrp = cgroup_parent(cgrp);
	} while (cgrp);

	css = init_css_set.subsys[ss->id];
	css_get(css);
out_unlock:
	rcu_read_unlock();
	return css;
}

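/*
 * Illustrative usage sketch: a caller that wants to pin and inspect the
 * effective css of a subsystem (memory_cgrp_subsys is just an example)
 * would pair the get with css_put():
 *
 *	struct cgroup_subsys_state *css;
 *
 *	css = cgroup_get_e_css(cgrp, &memory_cgrp_subsys);
 *	... use css ...
 *	css_put(css);
 */
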
/* convenient tests for these bits */
static inline bool cgroup_is_dead(const struct cgroup *cgrp)
{
	return !(cgrp->self.flags & CSS_ONLINE);
}

static void cgroup_get(struct cgroup *cgrp)
{
	WARN_ON_ONCE(cgroup_is_dead(cgrp));
	css_get(&cgrp->self);
}

static bool cgroup_tryget(struct cgroup *cgrp)
{
	return css_tryget(&cgrp->self);
}

struct cgroup_subsys_state *of_css(struct kernfs_open_file *of)
{
	struct cgroup *cgrp = of->kn->parent->priv;
	struct cftype *cft = of_cft(of);

	/*
	 * This is open and unprotected implementation of cgroup_css().
	 * seq_css() is only called from a kernfs file operation which has
	 * an active reference on the file.  Because all the subsystem
	 * files are drained before a css is disassociated with a cgroup,
	 * the matching css from the cgroup's subsys table is guaranteed to
	 * be and stay valid until the enclosing operation is complete.
	 */
	if (cft->ss)
		return rcu_dereference_raw(cgrp->subsys[cft->ss->id]);
	else
		return &cgrp->self;
}
EXPORT_SYMBOL_GPL(of_css);

static int notify_on_release(const struct cgroup *cgrp)
{
	return test_bit(CGRP_NOTIFY_ON_RELEASE, &cgrp->flags);
}

/**
 * for_each_css - iterate all css's of a cgroup
 * @css: the iteration cursor
 * @ssid: the index of the subsystem, CGROUP_SUBSYS_COUNT after reaching the end
 * @cgrp: the target cgroup to iterate css's of
 *
 * Should be called under cgroup_[tree_]mutex.
 */
#define for_each_css(css, ssid, cgrp)					\
	for ((ssid) = 0; (ssid) < CGROUP_SUBSYS_COUNT; (ssid)++)	\
		if (!((css) = rcu_dereference_check(			\
				(cgrp)->subsys[(ssid)],			\
				lockdep_is_held(&cgroup_mutex)))) { }	\
		else

/**
 * for_each_e_css - iterate all effective css's of a cgroup
 * @css: the iteration cursor
 * @ssid: the index of the subsystem, CGROUP_SUBSYS_COUNT after reaching the end
 * @cgrp: the target cgroup to iterate css's of
 *
 * Should be called under cgroup_[tree_]mutex.
 */
#define for_each_e_css(css, ssid, cgrp)					\
	for ((ssid) = 0; (ssid) < CGROUP_SUBSYS_COUNT; (ssid)++)	\
		if (!((css) = cgroup_e_css(cgrp, cgroup_subsys[(ssid)]))) \
			;						\
		else

/**
 * for_each_subsys - iterate all enabled cgroup subsystems
 * @ss: the iteration cursor
 * @ssid: the index of @ss, CGROUP_SUBSYS_COUNT after reaching the end
 */
#define for_each_subsys(ss, ssid)					\
	for ((ssid) = 0; (ssid) < CGROUP_SUBSYS_COUNT &&		\
	     (((ss) = cgroup_subsys[ssid]) || true); (ssid)++)

/**
 * for_each_subsys_which - filter for_each_subsys with a bitmask
 * @ss: the iteration cursor
 * @ssid: the index of @ss, CGROUP_SUBSYS_COUNT after reaching the end
 * @ss_maskp: a pointer to the bitmask
 *
 * The block will only run for cases where the ssid-th bit (1 << ssid) of
 * mask is set to 1.
 */
#define for_each_subsys_which(ss, ssid, ss_maskp)			\
	if (!CGROUP_SUBSYS_COUNT) /* to avoid spurious gcc warning */	\
		(ssid) = 0;						\
	else								\
		for_each_set_bit(ssid, ss_maskp, CGROUP_SUBSYS_COUNT)	\
			if (((ss) = cgroup_subsys[ssid]) && false)	\
				break;					\
			else

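/*
 * Illustrative usage sketch: visit only the subsystems whose bits are set
 * in a mask, e.g.:
 *
 *	unsigned long ss_mask = cgrp->subtree_ss_mask;
 *	struct cgroup_subsys *ss;
 *	int ssid;
 *
 *	for_each_subsys_which(ss, ssid, &ss_mask)
 *		pr_debug("%s enabled in subtree\n", ss->name);
 */
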
/* iterate across the hierarchies */
#define for_each_root(root)						\
	list_for_each_entry((root), &cgroup_roots, root_list)

/* iterate over child cgrps, lock should be held throughout iteration */
#define cgroup_for_each_live_child(child, cgrp)				\
	list_for_each_entry((child), &(cgrp)->self.children, self.sibling) \
		if (({ lockdep_assert_held(&cgroup_mutex);		\
		       cgroup_is_dead(child); }))			\
			;						\
		else

static void cgroup_release_agent(struct work_struct *work);
static void check_for_release(struct cgroup *cgrp);

/*
 * A cgroup can be associated with multiple css_sets as different tasks may
 * belong to different cgroups on different hierarchies.  In the other
 * direction, a css_set is naturally associated with multiple cgroups.
 * This M:N relationship is represented by the following link structure
 * which exists for each association and allows traversing the associations
 * from both sides.
 */
struct cgrp_cset_link {
	/* the cgroup and css_set this link associates */
	struct cgroup		*cgrp;
	struct css_set		*cset;

	/* list of cgrp_cset_links anchored at cgrp->cset_links */
	struct list_head	cset_link;

	/* list of cgrp_cset_links anchored at css_set->cgrp_links */
	struct list_head	cgrp_link;

569 570
/*
 * The default css_set - used by init and its children prior to any
571 572 573 574 575
 * hierarchies being mounted. It contains a pointer to the root state
 * for each subsystem. Also used to anchor the list of css_sets. Not
 * reference-counted, to improve performance when child cgroups
 * haven't been created.
 */
T
Tejun Heo 已提交
576
struct css_set init_css_set = {
577 578 579 580 581 582
	.refcount		= ATOMIC_INIT(1),
	.cgrp_links		= LIST_HEAD_INIT(init_css_set.cgrp_links),
	.tasks			= LIST_HEAD_INIT(init_css_set.tasks),
	.mg_tasks		= LIST_HEAD_INIT(init_css_set.mg_tasks),
	.mg_preload_node	= LIST_HEAD_INIT(init_css_set.mg_preload_node),
	.mg_node		= LIST_HEAD_INIT(init_css_set.mg_node),
583
	.task_iters		= LIST_HEAD_INIT(init_css_set.task_iters),
584
};
585

586
static int css_set_count	= 1;	/* 1 for init_css_set */
587

/**
 * css_set_populated - does a css_set contain any tasks?
 * @cset: target css_set
 */
static bool css_set_populated(struct css_set *cset)
{
	lockdep_assert_held(&css_set_lock);

	return !list_empty(&cset->tasks) || !list_empty(&cset->mg_tasks);
}

/**
 * cgroup_update_populated - update the populated count of a cgroup
 * @cgrp: the target cgroup
 * @populated: inc or dec populated count
 *
 * One of the css_sets associated with @cgrp is either getting its first
 * task or losing the last.  Update @cgrp->populated_cnt accordingly.  The
 * count is propagated towards root so that a given cgroup's populated_cnt
 * is zero iff the cgroup and all its descendants don't contain any tasks.
 *
 * @cgrp's interface file "cgroup.populated" is zero if
 * @cgrp->populated_cnt is zero and 1 otherwise.  When @cgrp->populated_cnt
 * changes from or to zero, userland is notified that the content of the
 * interface file has changed.  This can be used to detect when @cgrp and
 * its descendants become populated or empty.
 */
static void cgroup_update_populated(struct cgroup *cgrp, bool populated)
{
	lockdep_assert_held(&css_set_lock);

	do {
		bool trigger;

		if (populated)
			trigger = !cgrp->populated_cnt++;
		else
			trigger = !--cgrp->populated_cnt;

		if (!trigger)
			break;

		check_for_release(cgrp);
		cgroup_file_notify(&cgrp->events_file);

		cgrp = cgroup_parent(cgrp);
	} while (cgrp);
}

/**
 * css_set_update_populated - update populated state of a css_set
 * @cset: target css_set
 * @populated: whether @cset is populated or depopulated
 *
 * @cset is either getting the first task or losing the last.  Update the
 * ->populated_cnt of all associated cgroups accordingly.
 */
static void css_set_update_populated(struct css_set *cset, bool populated)
{
	struct cgrp_cset_link *link;

	lockdep_assert_held(&css_set_lock);

	list_for_each_entry(link, &cset->cgrp_links, cgrp_link)
		cgroup_update_populated(link->cgrp, populated);
}

/**
 * css_set_move_task - move a task from one css_set to another
 * @task: task being moved
 * @from_cset: css_set @task currently belongs to (may be NULL)
 * @to_cset: new css_set @task is being moved to (may be NULL)
 * @use_mg_tasks: move to @to_cset->mg_tasks instead of ->tasks
 *
 * Move @task from @from_cset to @to_cset.  If @task didn't belong to any
 * css_set, @from_cset can be NULL.  If @task is being disassociated
 * instead of moved, @to_cset can be NULL.
 *
 * This function automatically handles populated_cnt updates and
 * css_task_iter adjustments but the caller is responsible for managing
 * @from_cset and @to_cset's reference counts.
 */
static void css_set_move_task(struct task_struct *task,
			      struct css_set *from_cset, struct css_set *to_cset,
			      bool use_mg_tasks)
{
	lockdep_assert_held(&css_set_lock);

	if (from_cset) {
		struct css_task_iter *it, *pos;

		WARN_ON_ONCE(list_empty(&task->cg_list));

		/*
		 * @task is leaving, advance task iterators which are
		 * pointing to it so that they can resume at the next
		 * position.  Advancing an iterator might remove it from
		 * the list, use safe walk.  See css_task_iter_advance*()
		 * for details.
		 */
		list_for_each_entry_safe(it, pos, &from_cset->task_iters,
					 iters_node)
			if (it->task_pos == &task->cg_list)
				css_task_iter_advance(it);

		list_del_init(&task->cg_list);
		if (!css_set_populated(from_cset))
			css_set_update_populated(from_cset, false);
	} else {
		WARN_ON_ONCE(!list_empty(&task->cg_list));
	}

	if (to_cset) {
		/*
		 * We are synchronized through cgroup_threadgroup_rwsem
		 * against PF_EXITING setting such that we can't race
		 * against cgroup_exit() changing the css_set to
		 * init_css_set and dropping the old one.
		 */
		WARN_ON_ONCE(task->flags & PF_EXITING);

		if (!css_set_populated(to_cset))
			css_set_update_populated(to_cset, true);
		rcu_assign_pointer(task->cgroups, to_cset);
		list_add_tail(&task->cg_list, use_mg_tasks ? &to_cset->mg_tasks :
							     &to_cset->tasks);
	}
}

/*
 * hash table for cgroup groups. This improves the performance of finding
 * an existing css_set. This hash doesn't (currently) take into
 * account cgroups in empty hierarchies.
 */
#define CSS_SET_HASH_BITS	7
static DEFINE_HASHTABLE(css_set_table, CSS_SET_HASH_BITS);

static unsigned long css_set_hash(struct cgroup_subsys_state *css[])
{
	unsigned long key = 0UL;
	struct cgroup_subsys *ss;
	int i;

	for_each_subsys(ss, i)
		key += (unsigned long)css[i];
	key = (key >> 16) ^ key;

	return key;
}

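/*
 * For illustration: with two enabled subsystems whose css pointers are p0
 * and p1, the key computed above is ((p0 + p1) >> 16) ^ (p0 + p1) -- the
 * xor-fold mixes high-order pointer bits into the lower bits of the key
 * before it is fed to the CSS_SET_HASH_BITS-wide hash table.
 */
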
static void put_css_set_locked(struct css_set *cset)
{
	struct cgrp_cset_link *link, *tmp_link;
	struct cgroup_subsys *ss;
	int ssid;

	lockdep_assert_held(&css_set_lock);

	if (!atomic_dec_and_test(&cset->refcount))
		return;

	/* This css_set is dead. unlink it and release cgroup and css refs */
	for_each_subsys(ss, ssid) {
		list_del(&cset->e_cset_node[ssid]);
		css_put(cset->subsys[ssid]);
	}
	hash_del(&cset->hlist);
	css_set_count--;

	list_for_each_entry_safe(link, tmp_link, &cset->cgrp_links, cgrp_link) {
		list_del(&link->cset_link);
		list_del(&link->cgrp_link);
		if (cgroup_parent(link->cgrp))
			cgroup_put(link->cgrp);
		kfree(link);
	}

	kfree_rcu(cset, rcu_head);
}

static void put_css_set(struct css_set *cset)
{
	/*
	 * Ensure that the refcount doesn't hit zero while any readers
	 * can see it. Similar to atomic_dec_and_lock(), but for a
	 * spinlock.
	 */
	if (atomic_add_unless(&cset->refcount, -1, 1))
		return;

	spin_lock_bh(&css_set_lock);
	put_css_set_locked(cset);
	spin_unlock_bh(&css_set_lock);
}

/*
 * refcounted get/put for css_set objects
 */
static inline void get_css_set(struct css_set *cset)
{
	atomic_inc(&cset->refcount);
}

/**
 * compare_css_sets - helper function for find_existing_css_set().
 * @cset: candidate css_set being tested
 * @old_cset: existing css_set for a task
 * @new_cgrp: cgroup that's being entered by the task
 * @template: desired set of css pointers in css_set (pre-calculated)
 *
 * Returns true if "cset" matches "old_cset" except for the hierarchy
 * which "new_cgrp" belongs to, for which it should match "new_cgrp".
 */
static bool compare_css_sets(struct css_set *cset,
			     struct css_set *old_cset,
			     struct cgroup *new_cgrp,
			     struct cgroup_subsys_state *template[])
{
	struct list_head *l1, *l2;

	/*
	 * On the default hierarchy, there can be csets which are
	 * associated with the same set of cgroups but different csses.
	 * Let's first ensure that csses match.
	 */
	if (memcmp(template, cset->subsys, sizeof(cset->subsys)))
		return false;

	/*
	 * Compare cgroup pointers in order to distinguish between
	 * different cgroups in hierarchies.  As different cgroups may
	 * share the same effective css, this comparison is always
	 * necessary.
	 */
	l1 = &cset->cgrp_links;
	l2 = &old_cset->cgrp_links;
	while (1) {
		struct cgrp_cset_link *link1, *link2;
		struct cgroup *cgrp1, *cgrp2;

		l1 = l1->next;
		l2 = l2->next;
		/* See if we reached the end - both lists are equal length. */
		if (l1 == &cset->cgrp_links) {
			BUG_ON(l2 != &old_cset->cgrp_links);
			break;
		} else {
			BUG_ON(l2 == &old_cset->cgrp_links);
		}
		/* Locate the cgroups associated with these links. */
		link1 = list_entry(l1, struct cgrp_cset_link, cgrp_link);
		link2 = list_entry(l2, struct cgrp_cset_link, cgrp_link);
		cgrp1 = link1->cgrp;
		cgrp2 = link2->cgrp;
		/* Hierarchies should be linked in the same order. */
		BUG_ON(cgrp1->root != cgrp2->root);

		/*
		 * If this hierarchy is the hierarchy of the cgroup
		 * that's changing, then we need to check that this
		 * css_set points to the new cgroup; if it's any other
		 * hierarchy, then this css_set should point to the
		 * same cgroup as the old css_set.
		 */
		if (cgrp1->root == new_cgrp->root) {
			if (cgrp1 != new_cgrp)
				return false;
		} else {
			if (cgrp1 != cgrp2)
				return false;
		}
	}
	return true;
}

/**
 * find_existing_css_set - init css array and find the matching css_set
 * @old_cset: the css_set that we're using before the cgroup transition
 * @cgrp: the cgroup that we're moving into
 * @template: out param for the new set of csses, should be clear on entry
 */
static struct css_set *find_existing_css_set(struct css_set *old_cset,
					struct cgroup *cgrp,
					struct cgroup_subsys_state *template[])
{
	struct cgroup_root *root = cgrp->root;
	struct cgroup_subsys *ss;
	struct css_set *cset;
	unsigned long key;
	int i;

	/*
	 * Build the set of subsystem state objects that we want to see in the
	 * new css_set. While subsystems can change globally, the entries here
	 * won't change, so no need for locking.
	 */
	for_each_subsys(ss, i) {
		if (root->subsys_mask & (1UL << i)) {
			/*
			 * @ss is in this hierarchy, so we want the
			 * effective css from @cgrp.
			 */
			template[i] = cgroup_e_css(cgrp, ss);
		} else {
			/*
			 * @ss is not in this hierarchy, so we don't want
			 * to change the css.
			 */
			template[i] = old_cset->subsys[i];
		}
	}

	key = css_set_hash(template);
	hash_for_each_possible(css_set_table, cset, hlist, key) {
		if (!compare_css_sets(cset, old_cset, cgrp, template))
			continue;

		/* This css_set matches what we need */
		return cset;
	}

	/* No existing cgroup group matched */
	return NULL;
}

static void free_cgrp_cset_links(struct list_head *links_to_free)
{
	struct cgrp_cset_link *link, *tmp_link;

	list_for_each_entry_safe(link, tmp_link, links_to_free, cset_link) {
		list_del(&link->cset_link);
		kfree(link);
	}
}

/**
 * allocate_cgrp_cset_links - allocate cgrp_cset_links
 * @count: the number of links to allocate
 * @tmp_links: list_head the allocated links are put on
 *
 * Allocate @count cgrp_cset_link structures and chain them on @tmp_links
 * through ->cset_link.  Returns 0 on success or -errno.
 */
static int allocate_cgrp_cset_links(int count, struct list_head *tmp_links)
{
	struct cgrp_cset_link *link;
	int i;

	INIT_LIST_HEAD(tmp_links);

	for (i = 0; i < count; i++) {
		link = kzalloc(sizeof(*link), GFP_KERNEL);
		if (!link) {
			free_cgrp_cset_links(tmp_links);
			return -ENOMEM;
		}
		list_add(&link->cset_link, tmp_links);
	}
	return 0;
}

/**
 * link_css_set - a helper function to link a css_set to a cgroup
 * @tmp_links: cgrp_cset_link objects allocated by allocate_cgrp_cset_links()
 * @cset: the css_set to be linked
 * @cgrp: the destination cgroup
 */
static void link_css_set(struct list_head *tmp_links, struct css_set *cset,
			 struct cgroup *cgrp)
{
	struct cgrp_cset_link *link;

	BUG_ON(list_empty(tmp_links));

	if (cgroup_on_dfl(cgrp))
		cset->dfl_cgrp = cgrp;

	link = list_first_entry(tmp_links, struct cgrp_cset_link, cset_link);
	link->cset = cset;
	link->cgrp = cgrp;

	/*
	 * Always add links to the tail of the lists so that the lists are
	 * in chronological order.
	 */
	list_move_tail(&link->cset_link, &cgrp->cset_links);
	list_add_tail(&link->cgrp_link, &cset->cgrp_links);

	if (cgroup_parent(cgrp))
		cgroup_get(cgrp);
}

/**
 * find_css_set - return a new css_set with one cgroup updated
 * @old_cset: the baseline css_set
 * @cgrp: the cgroup to be updated
 *
 * Return a new css_set that's equivalent to @old_cset, but with @cgrp
 * substituted into the appropriate hierarchy.
 */
static struct css_set *find_css_set(struct css_set *old_cset,
				    struct cgroup *cgrp)
{
	struct cgroup_subsys_state *template[CGROUP_SUBSYS_COUNT] = { };
	struct css_set *cset;
	struct list_head tmp_links;
	struct cgrp_cset_link *link;
	struct cgroup_subsys *ss;
	unsigned long key;
	int ssid;

	lockdep_assert_held(&cgroup_mutex);

	/* First see if we already have a cgroup group that matches
	 * the desired set */
	spin_lock_bh(&css_set_lock);
	cset = find_existing_css_set(old_cset, cgrp, template);
	if (cset)
		get_css_set(cset);
	spin_unlock_bh(&css_set_lock);

	if (cset)
		return cset;

	cset = kzalloc(sizeof(*cset), GFP_KERNEL);
	if (!cset)
		return NULL;

	/* Allocate all the cgrp_cset_link objects that we'll need */
	if (allocate_cgrp_cset_links(cgroup_root_count, &tmp_links) < 0) {
		kfree(cset);
		return NULL;
	}

	atomic_set(&cset->refcount, 1);
	INIT_LIST_HEAD(&cset->cgrp_links);
	INIT_LIST_HEAD(&cset->tasks);
	INIT_LIST_HEAD(&cset->mg_tasks);
	INIT_LIST_HEAD(&cset->mg_preload_node);
	INIT_LIST_HEAD(&cset->mg_node);
	INIT_LIST_HEAD(&cset->task_iters);
	INIT_HLIST_NODE(&cset->hlist);

	/* Copy the set of subsystem state objects generated in
	 * find_existing_css_set() */
	memcpy(cset->subsys, template, sizeof(cset->subsys));

	spin_lock_bh(&css_set_lock);
	/* Add reference counts and links from the new css_set. */
	list_for_each_entry(link, &old_cset->cgrp_links, cgrp_link) {
		struct cgroup *c = link->cgrp;

		if (c->root == cgrp->root)
			c = cgrp;
		link_css_set(&tmp_links, cset, c);
	}

	BUG_ON(!list_empty(&tmp_links));

	css_set_count++;

	/* Add @cset to the hash table */
	key = css_set_hash(cset->subsys);
	hash_add(css_set_table, &cset->hlist, key);

	for_each_subsys(ss, ssid) {
		struct cgroup_subsys_state *css = cset->subsys[ssid];

		list_add_tail(&cset->e_cset_node[ssid],
			      &css->cgroup->e_csets[ssid]);
		css_get(css);
	}

	spin_unlock_bh(&css_set_lock);

	return cset;
}

static struct cgroup_root *cgroup_root_from_kf(struct kernfs_root *kf_root)
{
	struct cgroup *root_cgrp = kf_root->kn->priv;

	return root_cgrp->root;
}

static int cgroup_init_root_id(struct cgroup_root *root)
{
	int id;

	lockdep_assert_held(&cgroup_mutex);

	id = idr_alloc_cyclic(&cgroup_hierarchy_idr, root, 0, 0, GFP_KERNEL);
	if (id < 0)
		return id;

	root->hierarchy_id = id;
	return 0;
}

static void cgroup_exit_root_id(struct cgroup_root *root)
{
	lockdep_assert_held(&cgroup_mutex);

	if (root->hierarchy_id) {
		idr_remove(&cgroup_hierarchy_idr, root->hierarchy_id);
		root->hierarchy_id = 0;
	}
}

static void cgroup_free_root(struct cgroup_root *root)
{
	if (root) {
		/* hierarchy ID should already have been released */
		WARN_ON_ONCE(root->hierarchy_id);

		idr_destroy(&root->cgroup_idr);
		kfree(root);
	}
}

static void cgroup_destroy_root(struct cgroup_root *root)
{
	struct cgroup *cgrp = &root->cgrp;
	struct cgrp_cset_link *link, *tmp_link;

	mutex_lock(&cgroup_mutex);

	BUG_ON(atomic_read(&root->nr_cgrps));
	BUG_ON(!list_empty(&cgrp->self.children));

	/* Rebind all subsystems back to the default hierarchy */
	rebind_subsystems(&cgrp_dfl_root, root->subsys_mask);

	/*
	 * Release all the links from cset_links to this hierarchy's
	 * root cgroup
	 */
	spin_lock_bh(&css_set_lock);

	list_for_each_entry_safe(link, tmp_link, &cgrp->cset_links, cset_link) {
		list_del(&link->cset_link);
		list_del(&link->cgrp_link);
		kfree(link);
	}

	spin_unlock_bh(&css_set_lock);

	if (!list_empty(&root->root_list)) {
		list_del(&root->root_list);
		cgroup_root_count--;
	}

	cgroup_exit_root_id(root);

	mutex_unlock(&cgroup_mutex);

	kernfs_destroy_root(root->kf_root);
	cgroup_free_root(root);
}

/* look up cgroup associated with given css_set on the specified hierarchy */
static struct cgroup *cset_cgroup_from_root(struct css_set *cset,
					    struct cgroup_root *root)
{
	struct cgroup *res = NULL;

	lockdep_assert_held(&cgroup_mutex);
	lockdep_assert_held(&css_set_lock);

	if (cset == &init_css_set) {
		res = &root->cgrp;
	} else {
		struct cgrp_cset_link *link;

		list_for_each_entry(link, &cset->cgrp_links, cgrp_link) {
			struct cgroup *c = link->cgrp;

			if (c->root == root) {
				res = c;
				break;
			}
		}
	}

	BUG_ON(!res);
	return res;
}

/*
 * Return the cgroup for "task" from the given hierarchy. Must be
 * called with cgroup_mutex and css_set_lock held.
 */
static struct cgroup *task_cgroup_from_root(struct task_struct *task,
					    struct cgroup_root *root)
{
	/*
	 * No need to lock the task - since we hold cgroup_mutex the
	 * task can't change groups, so the only thing that can happen
	 * is that it exits and its css is set back to init_css_set.
	 */
	return cset_cgroup_from_root(task_css_set(task), root);
}

/*
 * A task must hold cgroup_mutex to modify cgroups.
 *
 * Any task can increment and decrement the count field without lock.
 * So in general, code holding cgroup_mutex can't rely on the count
 * field not changing.  However, if the count goes to zero, then only
 * cgroup_attach_task() can increment it again.  Because a count of zero
 * means that no tasks are currently attached, therefore there is no
 * way a task attached to that cgroup can fork (the other way to
 * increment the count).  So code holding cgroup_mutex can safely
 * assume that if the count is zero, it will stay zero. Similarly, if
 * a task holds cgroup_mutex on a cgroup with zero count, it
 * knows that the cgroup won't be removed, as cgroup_rmdir()
 * needs that mutex.
 *
 * A cgroup can only be deleted if both its 'count' of using tasks
 * is zero, and its list of 'children' cgroups is empty.  Since all
 * tasks in the system use _some_ cgroup, and since there is always at
 * least one task in the system (init, pid == 1), the root cgroup
 * always has either children cgroups and/or using tasks.  So we don't
 * need a special hack to ensure that the root cgroup cannot be deleted.
 *
 * P.S.  One more locking exception.  RCU is used to guard the
 * update of a task's cgroup pointer by cgroup_attach_task().
 */

static struct kernfs_syscall_ops cgroup_kf_syscall_ops;
static const struct file_operations proc_cgroupstats_operations;

static char *cgroup_file_name(struct cgroup *cgrp, const struct cftype *cft,
			      char *buf)
{
	struct cgroup_subsys *ss = cft->ss;

	if (cft->ss && !(cft->flags & CFTYPE_NO_PREFIX) &&
	    !(cgrp->root->flags & CGRP_ROOT_NOPREFIX))
		snprintf(buf, CGROUP_FILE_NAME_MAX, "%s.%s",
			 cgroup_on_dfl(cgrp) ? ss->name : ss->legacy_name,
			 cft->name);
	else
		strncpy(buf, cft->name, CGROUP_FILE_NAME_MAX);
	return buf;
}

/**
 * cgroup_file_mode - deduce file mode of a control file
 * @cft: the control file in question
 *
 * S_IRUGO for read, S_IWUSR for write.
 */
static umode_t cgroup_file_mode(const struct cftype *cft)
{
	umode_t mode = 0;

	if (cft->read_u64 || cft->read_s64 || cft->seq_show)
		mode |= S_IRUGO;

	if (cft->write_u64 || cft->write_s64 || cft->write) {
		if (cft->flags & CFTYPE_WORLD_WRITABLE)
			mode |= S_IWUGO;
		else
			mode |= S_IWUSR;
	}

	return mode;
}

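/*
 * For illustration: a cftype with a seq_show and a plain write callback
 * gets S_IRUGO | S_IWUSR (0644); with CFTYPE_WORLD_WRITABLE set it would
 * get S_IRUGO | S_IWUGO (0666) instead.
 */
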
/**
 * cgroup_calc_subtree_ss_mask - calculate subtree_ss_mask
 * @cgrp: the target cgroup
 * @subtree_control: the new subtree_control mask to consider
 *
 * On the default hierarchy, a subsystem may request other subsystems to be
 * enabled together through its ->depends_on mask.  In such cases, more
 * subsystems than specified in "cgroup.subtree_control" may be enabled.
 *
 * This function calculates which subsystems need to be enabled if
 * @subtree_control is to be applied to @cgrp.  The returned mask is always
 * a superset of @subtree_control and follows the usual hierarchy rules.
 */
static unsigned long cgroup_calc_subtree_ss_mask(struct cgroup *cgrp,
						 unsigned long subtree_control)
{
	struct cgroup *parent = cgroup_parent(cgrp);
	unsigned long cur_ss_mask = subtree_control;
	struct cgroup_subsys *ss;
	int ssid;

	lockdep_assert_held(&cgroup_mutex);

	if (!cgroup_on_dfl(cgrp))
		return cur_ss_mask;

	while (true) {
		unsigned long new_ss_mask = cur_ss_mask;

		for_each_subsys_which(ss, ssid, &cur_ss_mask)
			new_ss_mask |= ss->depends_on;

		/*
		 * Mask out subsystems which aren't available.  This can
		 * happen only if some depended-upon subsystems were bound
		 * to non-default hierarchies.
		 */
		if (parent)
			new_ss_mask &= parent->subtree_ss_mask;
		else
			new_ss_mask &= cgrp->root->subsys_mask;

		if (new_ss_mask == cur_ss_mask)
			break;
		cur_ss_mask = new_ss_mask;
	}

	return cur_ss_mask;
}

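/*
 * Worked example: the block I/O controller declares a ->depends_on
 * dependency on the memory controller for writeback accounting, so
 * writing "+io" to "cgroup.subtree_control" makes the loop above pull
 * the memory controller's bit into the returned mask as well, iterating
 * until no new dependencies appear.
 */
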
/**
 * cgroup_refresh_subtree_ss_mask - update subtree_ss_mask
 * @cgrp: the target cgroup
 *
 * Update @cgrp->subtree_ss_mask according to the current
 * @cgrp->subtree_control using cgroup_calc_subtree_ss_mask().
 */
static void cgroup_refresh_subtree_ss_mask(struct cgroup *cgrp)
{
	cgrp->subtree_ss_mask =
		cgroup_calc_subtree_ss_mask(cgrp, cgrp->subtree_control);
}

/**
 * cgroup_kn_unlock - unlocking helper for cgroup kernfs methods
 * @kn: the kernfs_node being serviced
 *
 * This helper undoes cgroup_kn_lock_live() and should be invoked before
 * the method finishes if locking succeeded.  Note that once this function
 * returns the cgroup returned by cgroup_kn_lock_live() may become
 * inaccessible any time.  If the caller intends to continue to access the
 * cgroup, it should pin it before invoking this function.
 */
static void cgroup_kn_unlock(struct kernfs_node *kn)
{
	struct cgroup *cgrp;

	if (kernfs_type(kn) == KERNFS_DIR)
		cgrp = kn->priv;
	else
		cgrp = kn->parent->priv;

	mutex_unlock(&cgroup_mutex);

	kernfs_unbreak_active_protection(kn);
	cgroup_put(cgrp);
}

/**
 * cgroup_kn_lock_live - locking helper for cgroup kernfs methods
 * @kn: the kernfs_node being serviced
 *
 * This helper is to be used by a cgroup kernfs method currently servicing
 * @kn.  It breaks the active protection, performs cgroup locking and
 * verifies that the associated cgroup is alive.  Returns the cgroup if
 * alive; otherwise, %NULL.  A successful return should be undone by a
 * matching cgroup_kn_unlock() invocation.
 *
 * Any cgroup kernfs method implementation which requires locking the
 * associated cgroup should use this helper.  It avoids nesting cgroup
 * locking under kernfs active protection and allows all kernfs operations
 * including self-removal.
 */
static struct cgroup *cgroup_kn_lock_live(struct kernfs_node *kn)
{
	struct cgroup *cgrp;

	if (kernfs_type(kn) == KERNFS_DIR)
		cgrp = kn->priv;
	else
		cgrp = kn->parent->priv;

	/*
	 * We're gonna grab cgroup_mutex which nests outside kernfs
	 * active_ref.  cgroup liveliness check alone provides enough
	 * protection against removal.  Ensure @cgrp stays accessible and
	 * break the active_ref protection.
	 */
	if (!cgroup_tryget(cgrp))
		return NULL;
	kernfs_break_active_protection(kn);

	mutex_lock(&cgroup_mutex);

	if (!cgroup_is_dead(cgrp))
		return cgrp;

	cgroup_kn_unlock(kn);
	return NULL;
}

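/*
 * Illustrative usage sketch, the pattern the cgroup kernfs write handlers
 * in this file follow:
 *
 *	cgrp = cgroup_kn_lock_live(of->kn);
 *	if (!cgrp)
 *		return -ENODEV;
 *	... @cgrp is alive and cgroup_mutex is held ...
 *	cgroup_kn_unlock(of->kn);
 */
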
static void cgroup_rm_file(struct cgroup *cgrp, const struct cftype *cft)
{
	char name[CGROUP_FILE_NAME_MAX];

	lockdep_assert_held(&cgroup_mutex);

	if (cft->file_offset) {
		struct cgroup_subsys_state *css = cgroup_css(cgrp, cft->ss);
		struct cgroup_file *cfile = (void *)css + cft->file_offset;

		spin_lock_irq(&cgroup_file_kn_lock);
		cfile->kn = NULL;
		spin_unlock_irq(&cgroup_file_kn_lock);
	}

	kernfs_remove_by_name(cgrp->kn, cgroup_file_name(cgrp, cft, name));
}

/**
 * css_clear_dir - remove subsys files in a cgroup directory
 * @css: target css
 * @cgrp_override: specify if target cgroup is different from css->cgroup
 */
static void css_clear_dir(struct cgroup_subsys_state *css,
			  struct cgroup *cgrp_override)
{
	struct cgroup *cgrp = cgrp_override ?: css->cgroup;
	struct cftype *cfts;

	list_for_each_entry(cfts, &css->ss->cfts, node)
		cgroup_addrm_files(css, cgrp, cfts, false);
}

/**
 * css_populate_dir - create subsys files in a cgroup directory
 * @css: target css
 * @cgrp_override: specify if target cgroup is different from css->cgroup
 *
 * On failure, no file is added.
 */
static int css_populate_dir(struct cgroup_subsys_state *css,
			    struct cgroup *cgrp_override)
{
	struct cgroup *cgrp = cgrp_override ?: css->cgroup;
	struct cftype *cfts, *failed_cfts;
	int ret;

	if (!css->ss) {
		if (cgroup_on_dfl(cgrp))
			cfts = cgroup_dfl_base_files;
		else
			cfts = cgroup_legacy_base_files;

		return cgroup_addrm_files(&cgrp->self, cgrp, cfts, true);
	}

	list_for_each_entry(cfts, &css->ss->cfts, node) {
		ret = cgroup_addrm_files(css, cgrp, cfts, true);
		if (ret < 0) {
			failed_cfts = cfts;
			goto err;
		}
	}
	return 0;
err:
	list_for_each_entry(cfts, &css->ss->cfts, node) {
		if (cfts == failed_cfts)
			break;
		cgroup_addrm_files(css, cgrp, cfts, false);
	}
	return ret;
}

static int rebind_subsystems(struct cgroup_root *dst_root,
			     unsigned long ss_mask)
{
	struct cgroup *dcgrp = &dst_root->cgrp;
	struct cgroup_subsys *ss;
	unsigned long tmp_ss_mask;
	int ssid, i, ret;

	lockdep_assert_held(&cgroup_mutex);

	for_each_subsys_which(ss, ssid, &ss_mask) {
		/* if @ss has non-root csses attached to it, can't move */
		if (css_next_child(NULL, cgroup_css(&ss->root->cgrp, ss)))
			return -EBUSY;

		/* can't move between two non-dummy roots either */
		if (ss->root != &cgrp_dfl_root && dst_root != &cgrp_dfl_root)
			return -EBUSY;
	}

	/* skip creating root files on dfl_root for inhibited subsystems */
	tmp_ss_mask = ss_mask;
	if (dst_root == &cgrp_dfl_root)
		tmp_ss_mask &= ~cgrp_dfl_root_inhibit_ss_mask;

	for_each_subsys_which(ss, ssid, &tmp_ss_mask) {
		struct cgroup *scgrp = &ss->root->cgrp;
		int tssid;

		ret = css_populate_dir(cgroup_css(scgrp, ss), dcgrp);
		if (!ret)
			continue;

		/*
		 * Rebinding back to the default root is not allowed to
		 * fail.  Using both default and non-default roots should
		 * be rare.  Moving subsystems back and forth even more so.
		 * Just warn about it and continue.
		 */
		if (dst_root == &cgrp_dfl_root) {
			if (cgrp_dfl_root_visible) {
				pr_warn("failed to create files (%d) while rebinding 0x%lx to default root\n",
					ret, ss_mask);
				pr_warn("you may retry by moving them to a different hierarchy and unbinding\n");
			}
			continue;
		}

		for_each_subsys_which(ss, tssid, &tmp_ss_mask) {
			if (tssid == ssid)
				break;
			css_clear_dir(cgroup_css(scgrp, ss), dcgrp);
		}
		return ret;
	}

	/*
	 * Nothing can fail from this point on.  Remove files for the
	 * removed subsystems and rebind each subsystem.
	 */
	for_each_subsys_which(ss, ssid, &ss_mask) {
		struct cgroup_root *src_root = ss->root;
		struct cgroup *scgrp = &src_root->cgrp;
		struct cgroup_subsys_state *css = cgroup_css(scgrp, ss);
		struct css_set *cset;

		WARN_ON(!css || cgroup_css(dcgrp, ss));

		css_clear_dir(css, NULL);

		RCU_INIT_POINTER(scgrp->subsys[ssid], NULL);
		rcu_assign_pointer(dcgrp->subsys[ssid], css);
		ss->root = dst_root;
		css->cgroup = dcgrp;

		spin_lock_bh(&css_set_lock);
		hash_for_each(css_set_table, i, cset, hlist)
			list_move_tail(&cset->e_cset_node[ss->id],
				       &dcgrp->e_csets[ss->id]);
		spin_unlock_bh(&css_set_lock);

		src_root->subsys_mask &= ~(1 << ssid);
		scgrp->subtree_control &= ~(1 << ssid);
		cgroup_refresh_subtree_ss_mask(scgrp);

		/* default hierarchy doesn't enable controllers by default */
		dst_root->subsys_mask |= 1 << ssid;
		if (dst_root == &cgrp_dfl_root) {
			static_branch_enable(cgroup_subsys_on_dfl_key[ssid]);
		} else {
			dcgrp->subtree_control |= 1 << ssid;
			cgroup_refresh_subtree_ss_mask(dcgrp);
			static_branch_disable(cgroup_subsys_on_dfl_key[ssid]);
		}

		if (ss->bind)
			ss->bind(css);
	}

	kernfs_activate(dcgrp->kn);
	return 0;
}

static int cgroup_show_options(struct seq_file *seq,
			       struct kernfs_root *kf_root)
{
	struct cgroup_root *root = cgroup_root_from_kf(kf_root);
	struct cgroup_subsys *ss;
	int ssid;

	if (root != &cgrp_dfl_root)
		for_each_subsys(ss, ssid)
			if (root->subsys_mask & (1 << ssid))
				seq_show_option(seq, ss->legacy_name, NULL);
	if (root->flags & CGRP_ROOT_NOPREFIX)
		seq_puts(seq, ",noprefix");
	if (root->flags & CGRP_ROOT_XATTR)
		seq_puts(seq, ",xattr");

	spin_lock(&release_agent_path_lock);
	if (strlen(root->release_agent_path))
		seq_show_option(seq, "release_agent",
				root->release_agent_path);
	spin_unlock(&release_agent_path_lock);

	if (test_bit(CGRP_CPUSET_CLONE_CHILDREN, &root->cgrp.flags))
		seq_puts(seq, ",clone_children");
	if (strlen(root->name))
		seq_show_option(seq, "name", root->name);
	return 0;
}

struct cgroup_sb_opts {
	unsigned long subsys_mask;
	unsigned int flags;
	char *release_agent;
	bool cpuset_clone_children;
	char *name;
	/* User explicitly requested empty subsystem */
	bool none;
};

static int parse_cgroupfs_options(char *data, struct cgroup_sb_opts *opts)
{
	char *token, *o = data;
	bool all_ss = false, one_ss = false;
	unsigned long mask = -1UL;
	struct cgroup_subsys *ss;
	int nr_opts = 0;
	int i;

#ifdef CONFIG_CPUSETS
	mask = ~(1U << cpuset_cgrp_id);
#endif

	memset(opts, 0, sizeof(*opts));

	while ((token = strsep(&o, ",")) != NULL) {
		nr_opts++;

		if (!*token)
			return -EINVAL;
		if (!strcmp(token, "none")) {
			/* Explicitly have no subsystems */
			opts->none = true;
			continue;
		}
		if (!strcmp(token, "all")) {
			/* Mutually exclusive option 'all' + subsystem name */
			if (one_ss)
				return -EINVAL;
			all_ss = true;
			continue;
		}
		if (!strcmp(token, "noprefix")) {
			opts->flags |= CGRP_ROOT_NOPREFIX;
			continue;
		}
		if (!strcmp(token, "clone_children")) {
			opts->cpuset_clone_children = true;
			continue;
		}
		if (!strcmp(token, "xattr")) {
			opts->flags |= CGRP_ROOT_XATTR;
			continue;
		}
		if (!strncmp(token, "release_agent=", 14)) {
			/* Specifying two release agents is forbidden */
			if (opts->release_agent)
				return -EINVAL;
			opts->release_agent =
				kstrndup(token + 14, PATH_MAX - 1, GFP_KERNEL);
			if (!opts->release_agent)
				return -ENOMEM;
			continue;
		}
		if (!strncmp(token, "name=", 5)) {
			const char *name = token + 5;
			/* Can't specify an empty name */
			if (!strlen(name))
				return -EINVAL;
			/* Must match [\w.-]+ */
			for (i = 0; i < strlen(name); i++) {
				char c = name[i];
				if (isalnum(c))
					continue;
				if ((c == '.') || (c == '-') || (c == '_'))
					continue;
				return -EINVAL;
			}
			/* Specifying two names is forbidden */
			if (opts->name)
				return -EINVAL;
			opts->name = kstrndup(name,
					      MAX_CGROUP_ROOT_NAMELEN - 1,
					      GFP_KERNEL);
			if (!opts->name)
				return -ENOMEM;

			continue;
		}

		for_each_subsys(ss, i) {
			if (strcmp(token, ss->legacy_name))
				continue;
			if (!cgroup_ssid_enabled(i))
				continue;
			if (cgroup_ssid_no_v1(i))
				continue;

			/* Mutually exclusive option 'all' + subsystem name */
			if (all_ss)
				return -EINVAL;
			opts->subsys_mask |= (1 << i);
			one_ss = true;

			break;
		}
		if (i == CGROUP_SUBSYS_COUNT)
			return -ENOENT;
	}

	/*
	 * If the 'all' option was specified select all the subsystems,
	 * otherwise if 'none', 'name=' and a subsystem name options were
	 * not specified, let's default to 'all'
	 */
	if (all_ss || (!one_ss && !opts->none && !opts->name))
		for_each_subsys(ss, i)
			if (cgroup_ssid_enabled(i) && !cgroup_ssid_no_v1(i))
				opts->subsys_mask |= (1 << i);

	/*
	 * We either have to specify by name or by subsystems. (So all
	 * empty hierarchies must have a name).
	 */
	if (!opts->subsys_mask && !opts->name)
		return -EINVAL;

	/*
	 * Option noprefix was introduced just for backward compatibility
	 * with the old cpuset, so we allow noprefix only if mounting just
	 * the cpuset subsystem.
	 */
	if ((opts->flags & CGRP_ROOT_NOPREFIX) && (opts->subsys_mask & mask))
		return -EINVAL;

	/* Can't specify "none" and some subsystems */
	if (opts->subsys_mask && opts->none)
		return -EINVAL;

	return 0;
}
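
/*
 * Worked example (illustrative, not from the original file): a v1 mount
 * such as
 *
 *	mount -t cgroup -o cpu,cpuacct,name=build none /mnt
 *
 * hands "cpu,cpuacct,name=build" to parse_cgroupfs_options() as @data,
 * which sets the cpu and cpuacct bits in opts->subsys_mask and makes
 * opts->name a copy of "build"; every other field stays zeroed by the
 * memset() above.
 */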

static int cgroup_remount(struct kernfs_root *kf_root, int *flags, char *data)
{
	int ret = 0;
	struct cgroup_root *root = cgroup_root_from_kf(kf_root);
	struct cgroup_sb_opts opts;
	unsigned long added_mask, removed_mask;

	if (root == &cgrp_dfl_root) {
		pr_err("remount is not allowed\n");
		return -EINVAL;
	}

	mutex_lock(&cgroup_mutex);

	/* See what subsystems are wanted */
	ret = parse_cgroupfs_options(data, &opts);
	if (ret)
		goto out_unlock;

	if (opts.subsys_mask != root->subsys_mask || opts.release_agent)
		pr_warn("option changes via remount are deprecated (pid=%d comm=%s)\n",
			task_tgid_nr(current), current->comm);

	added_mask = opts.subsys_mask & ~root->subsys_mask;
	removed_mask = root->subsys_mask & ~opts.subsys_mask;

	/* Don't allow flags or name to change at remount */
	if ((opts.flags ^ root->flags) ||
	    (opts.name && strcmp(opts.name, root->name))) {
		pr_err("option or name mismatch, new: 0x%x \"%s\", old: 0x%x \"%s\"\n",
		       opts.flags, opts.name ?: "", root->flags, root->name);
		ret = -EINVAL;
		goto out_unlock;
	}

	/* remounting is not allowed for populated hierarchies */
	if (!list_empty(&root->cgrp.self.children)) {
		ret = -EBUSY;
		goto out_unlock;
	}

	ret = rebind_subsystems(root, added_mask);
	if (ret)
		goto out_unlock;

	rebind_subsystems(&cgrp_dfl_root, removed_mask);

	if (opts.release_agent) {
		spin_lock(&release_agent_path_lock);
		strcpy(root->release_agent_path, opts.release_agent);
		spin_unlock(&release_agent_path_lock);
	}
 out_unlock:
	kfree(opts.release_agent);
	kfree(opts.name);
	mutex_unlock(&cgroup_mutex);
	return ret;
}
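
/*
 * Sketch of the remount flow (assumed semantics, derived from the checks
 * above): "mount -o remount,cpu,cpuacct,name=build /mnt" re-parses the
 * option string, rejects any flag or name change and any populated
 * hierarchy, then rebinds newly requested subsystems onto @root via
 * added_mask and returns dropped ones to the default root via
 * removed_mask.
 */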

/*
 * To reduce the fork() overhead for systems that are not actually using
 * their cgroups capability, we don't maintain the lists running through
 * each css_set to its tasks until we see the list actually used - in other
 * words after the first mount.
 */
static bool use_task_css_set_links __read_mostly;

static void cgroup_enable_task_cg_lists(void)
{
	struct task_struct *p, *g;

	spin_lock_bh(&css_set_lock);

	if (use_task_css_set_links)
		goto out_unlock;

	use_task_css_set_links = true;

	/*
	 * We need tasklist_lock because RCU is not safe against
	 * while_each_thread(). Besides, a forking task that has passed
	 * cgroup_post_fork() without seeing use_task_css_set_links = 1
	 * is not guaranteed to have its child immediately visible in the
	 * tasklist if we walk through it with RCU.
	 */
	read_lock(&tasklist_lock);
	do_each_thread(g, p) {
		WARN_ON_ONCE(!list_empty(&p->cg_list) ||
			     task_css_set(p) != &init_css_set);

		/*
		 * We should check if the process is exiting, otherwise
		 * it will race with cgroup_exit() in that the list
		 * entry won't be deleted though the process has exited.
		 * Do it while holding siglock so that we don't end up
		 * racing against cgroup_exit().
		 */
		spin_lock_irq(&p->sighand->siglock);
		if (!(p->flags & PF_EXITING)) {
			struct css_set *cset = task_css_set(p);

			if (!css_set_populated(cset))
				css_set_update_populated(cset, true);
			list_add_tail(&p->cg_list, &cset->tasks);
			get_css_set(cset);
		}
		spin_unlock_irq(&p->sighand->siglock);
	} while_each_thread(g, p);
	read_unlock(&tasklist_lock);
out_unlock:
	spin_unlock_bh(&css_set_lock);
}

static void init_cgroup_housekeeping(struct cgroup *cgrp)
{
	struct cgroup_subsys *ss;
	int ssid;

	INIT_LIST_HEAD(&cgrp->self.sibling);
	INIT_LIST_HEAD(&cgrp->self.children);
	INIT_LIST_HEAD(&cgrp->cset_links);
	INIT_LIST_HEAD(&cgrp->pidlists);
	mutex_init(&cgrp->pidlist_mutex);
	cgrp->self.cgroup = cgrp;
	cgrp->self.flags |= CSS_ONLINE;

	for_each_subsys(ss, ssid)
		INIT_LIST_HEAD(&cgrp->e_csets[ssid]);

	init_waitqueue_head(&cgrp->offline_waitq);
	INIT_WORK(&cgrp->release_agent_work, cgroup_release_agent);
}

static void init_cgroup_root(struct cgroup_root *root,
			     struct cgroup_sb_opts *opts)
{
	struct cgroup *cgrp = &root->cgrp;

	INIT_LIST_HEAD(&root->root_list);
	atomic_set(&root->nr_cgrps, 1);
	cgrp->root = root;
	init_cgroup_housekeeping(cgrp);
	idr_init(&root->cgroup_idr);

	root->flags = opts->flags;
	if (opts->release_agent)
		strcpy(root->release_agent_path, opts->release_agent);
	if (opts->name)
		strcpy(root->name, opts->name);
	if (opts->cpuset_clone_children)
		set_bit(CGRP_CPUSET_CLONE_CHILDREN, &root->cgrp.flags);
}

static int cgroup_setup_root(struct cgroup_root *root, unsigned long ss_mask)
{
	LIST_HEAD(tmp_links);
	struct cgroup *root_cgrp = &root->cgrp;
	struct css_set *cset;
	int i, ret;

	lockdep_assert_held(&cgroup_mutex);

	ret = cgroup_idr_alloc(&root->cgroup_idr, root_cgrp, 1, 2, GFP_KERNEL);
	if (ret < 0)
		goto out;
	root_cgrp->id = ret;
	root_cgrp->ancestor_ids[0] = ret;

	ret = percpu_ref_init(&root_cgrp->self.refcnt, css_release, 0,
			      GFP_KERNEL);
	if (ret)
		goto out;

	/*
	 * We're accessing css_set_count without locking css_set_lock here,
	 * but that's OK - it can only be increased by someone holding
	 * cgroup_lock, and that's us. The worst that can happen is that we
	 * have some link structures left over
	 */
	ret = allocate_cgrp_cset_links(css_set_count, &tmp_links);
	if (ret)
		goto cancel_ref;

	ret = cgroup_init_root_id(root);
	if (ret)
		goto cancel_ref;

	root->kf_root = kernfs_create_root(&cgroup_kf_syscall_ops,
					   KERNFS_ROOT_CREATE_DEACTIVATED,
					   root_cgrp);
	if (IS_ERR(root->kf_root)) {
		ret = PTR_ERR(root->kf_root);
		goto exit_root_id;
	}
	root_cgrp->kn = root->kf_root->kn;

	ret = css_populate_dir(&root_cgrp->self, NULL);
	if (ret)
		goto destroy_root;

	ret = rebind_subsystems(root, ss_mask);
	if (ret)
		goto destroy_root;

	/*
	 * There must be no failure case after here, since rebinding takes
	 * care of subsystems' refcounts, which are explicitly dropped in
	 * the failure exit path.
	 */
	list_add(&root->root_list, &cgroup_roots);
	cgroup_root_count++;

	/*
	 * Link the root cgroup in this hierarchy into all the css_set
	 * objects.
	 */
	spin_lock_bh(&css_set_lock);
	hash_for_each(css_set_table, i, cset, hlist) {
		link_css_set(&tmp_links, cset, root_cgrp);
		if (css_set_populated(cset))
			cgroup_update_populated(root_cgrp, true);
	}
	spin_unlock_bh(&css_set_lock);

	BUG_ON(!list_empty(&root_cgrp->self.children));
	BUG_ON(atomic_read(&root->nr_cgrps) != 1);

	kernfs_activate(root_cgrp->kn);
	ret = 0;
	goto out;

destroy_root:
	kernfs_destroy_root(root->kf_root);
	root->kf_root = NULL;
exit_root_id:
	cgroup_exit_root_id(root);
cancel_ref:
	percpu_ref_exit(&root_cgrp->self.refcnt);
out:
	free_cgrp_cset_links(&tmp_links);
	return ret;
}

static struct dentry *cgroup_mount(struct file_system_type *fs_type,
			 int flags, const char *unused_dev_name,
			 void *data)
{
	bool is_v2 = fs_type == &cgroup2_fs_type;
	struct super_block *pinned_sb = NULL;
	struct cgroup_subsys *ss;
	struct cgroup_root *root;
	struct cgroup_sb_opts opts;
	struct dentry *dentry;
	int ret;
	int i;
	bool new_sb;

	/*
	 * The first time anyone tries to mount a cgroup, enable the list
	 * linking each css_set to its tasks and fix up all existing tasks.
	 */
	if (!use_task_css_set_links)
		cgroup_enable_task_cg_lists();

	if (is_v2) {
		if (data) {
			pr_err("cgroup2: unknown option \"%s\"\n", (char *)data);
			return ERR_PTR(-EINVAL);
		}
		cgrp_dfl_root_visible = true;
		root = &cgrp_dfl_root;
		cgroup_get(&root->cgrp);
		goto out_mount;
	}

	mutex_lock(&cgroup_mutex);

	/* First find the desired set of subsystems */
	ret = parse_cgroupfs_options(data, &opts);
	if (ret)
		goto out_unlock;

	/*
	 * Destruction of cgroup root is asynchronous, so subsystems may
	 * still be dying after the previous unmount.  Let's drain the
	 * dying subsystems.  We just need to ensure that the ones
	 * unmounted previously finish dying and don't care about new ones
	 * starting.  Testing ref liveliness is good enough.
	 */
	for_each_subsys(ss, i) {
		if (!(opts.subsys_mask & (1 << i)) ||
		    ss->root == &cgrp_dfl_root)
			continue;

		if (!percpu_ref_tryget_live(&ss->root->cgrp.self.refcnt)) {
			mutex_unlock(&cgroup_mutex);
			msleep(10);
			ret = restart_syscall();
			goto out_free;
		}
		cgroup_put(&ss->root->cgrp);
	}

	for_each_root(root) {
		bool name_match = false;

		if (root == &cgrp_dfl_root)
			continue;

		/*
		 * If we asked for a name then it must match.  Also, if
		 * name matches but subsys_mask doesn't, we should fail.
		 * Remember whether name matched.
		 */
		if (opts.name) {
			if (strcmp(opts.name, root->name))
				continue;
			name_match = true;
		}

		/*
		 * If we asked for subsystems (or explicitly for no
		 * subsystems) then they must match.
		 */
		if ((opts.subsys_mask || opts.none) &&
		    (opts.subsys_mask != root->subsys_mask)) {
			if (!name_match)
				continue;
			ret = -EBUSY;
			goto out_unlock;
		}

		if (root->flags ^ opts.flags)
			pr_warn("new mount options do not match the existing superblock, will be ignored\n");

		/*
		 * We want to reuse @root whose lifetime is governed by its
		 * ->cgrp.  Let's check whether @root is alive and keep it
		 * that way.  As cgroup_kill_sb() can happen anytime, we
		 * want to block it by pinning the sb so that @root doesn't
		 * get killed before mount is complete.
		 *
		 * With the sb pinned, tryget_live can reliably indicate
		 * whether @root can be reused.  If it's being killed,
		 * drain it.  We can use wait_queue for the wait but this
		 * path is super cold.  Let's just sleep a bit and retry.
		 */
		pinned_sb = kernfs_pin_sb(root->kf_root, NULL);
		if (IS_ERR(pinned_sb) ||
		    !percpu_ref_tryget_live(&root->cgrp.self.refcnt)) {
			mutex_unlock(&cgroup_mutex);
			if (!IS_ERR_OR_NULL(pinned_sb))
				deactivate_super(pinned_sb);
			msleep(10);
			ret = restart_syscall();
			goto out_free;
		}

		ret = 0;
		goto out_unlock;
	}

	/*
	 * No such thing, create a new one.  name= matching without subsys
	 * specification is allowed for already existing hierarchies but we
	 * can't create a new one without subsys specification.
	 */
	if (!opts.subsys_mask && !opts.none) {
		ret = -EINVAL;
		goto out_unlock;
	}

	root = kzalloc(sizeof(*root), GFP_KERNEL);
	if (!root) {
		ret = -ENOMEM;
		goto out_unlock;
	}

	init_cgroup_root(root, &opts);

	ret = cgroup_setup_root(root, opts.subsys_mask);
	if (ret)
		cgroup_free_root(root);

out_unlock:
	mutex_unlock(&cgroup_mutex);
out_free:
	kfree(opts.release_agent);
	kfree(opts.name);

	if (ret)
		return ERR_PTR(ret);
out_mount:
	dentry = kernfs_mount(fs_type, flags, root->kf_root,
			      is_v2 ? CGROUP2_SUPER_MAGIC : CGROUP_SUPER_MAGIC,
			      &new_sb);
	if (IS_ERR(dentry) || !new_sb)
		cgroup_put(&root->cgrp);

	/*
	 * If @pinned_sb, we're reusing an existing root and holding an
	 * extra ref on its sb.  Mount is complete.  Put the extra ref.
	 */
	if (pinned_sb) {
		WARN_ON(new_sb);
		deactivate_super(pinned_sb);
	}

	return dentry;
}

static void cgroup_kill_sb(struct super_block *sb)
{
	struct kernfs_root *kf_root = kernfs_root_from_sb(sb);
	struct cgroup_root *root = cgroup_root_from_kf(kf_root);

	/*
	 * If @root doesn't have any mounts or children, start killing it.
	 * This prevents new mounts by disabling percpu_ref_tryget_live().
	 * cgroup_mount() may wait for @root's release.
	 *
	 * And don't kill the default root.
	 */
	if (!list_empty(&root->cgrp.self.children) ||
	    root == &cgrp_dfl_root)
		cgroup_put(&root->cgrp);
	else
		percpu_ref_kill(&root->cgrp.self.refcnt);

	kernfs_kill_sb(sb);
}

static struct file_system_type cgroup_fs_type = {
	.name = "cgroup",
	.mount = cgroup_mount,
	.kill_sb = cgroup_kill_sb,
};

static struct file_system_type cgroup2_fs_type = {
	.name = "cgroup2",
	.mount = cgroup_mount,
	.kill_sb = cgroup_kill_sb,
};
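
/*
 * Illustrative usage (not part of this file): the v2 hierarchy is
 * mounted without options, e.g.
 *
 *	mount -t cgroup2 none /sys/fs/cgroup/unified
 *
 * Any option string is rejected by cgroup_mount() above with -EINVAL.
 */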

/**
 * task_cgroup_path - cgroup path of a task in the first cgroup hierarchy
 * @task: target task
 * @buf: the buffer to write the path into
 * @buflen: the length of the buffer
 *
 * Determine @task's cgroup on the first (the one with the lowest non-zero
 * hierarchy_id) cgroup hierarchy and copy its path into @buf.  This
 * function grabs cgroup_mutex and shouldn't be used inside locks used by
 * cgroup controller callbacks.
 *
 * Return value is the same as kernfs_path().
 */
char *task_cgroup_path(struct task_struct *task, char *buf, size_t buflen)
{
	struct cgroup_root *root;
	struct cgroup *cgrp;
	int hierarchy_id = 1;
	char *path = NULL;

	mutex_lock(&cgroup_mutex);
	spin_lock_bh(&css_set_lock);

	root = idr_get_next(&cgroup_hierarchy_idr, &hierarchy_id);

	if (root) {
		cgrp = task_cgroup_from_root(task, root);
		path = cgroup_path(cgrp, buf, buflen);
	} else {
		/* if no hierarchy exists, everyone is in "/" */
		if (strlcpy(buf, "/", buflen) < buflen)
			path = buf;
	}

	spin_unlock_bh(&css_set_lock);
	mutex_unlock(&cgroup_mutex);
	return path;
}
EXPORT_SYMBOL_GPL(task_cgroup_path);
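
/*
 * Minimal usage sketch (hypothetical caller, not from the original
 * file); the returned pointer is @buf on success and NULL if the path
 * didn't fit:
 *
 *	char buf[PATH_MAX];
 *
 *	if (task_cgroup_path(current, buf, sizeof(buf)))
 *		pr_info("first-hierarchy cgroup: %s\n", buf);
 */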

/* used to track tasks and other necessary states during migration */
struct cgroup_taskset {
	/* the src and dst cset list running through cset->mg_node */
	struct list_head	src_csets;
	struct list_head	dst_csets;

	/* the subsys currently being processed */
	int			ssid;

	/*
	 * Fields for cgroup_taskset_*() iteration.
	 *
	 * Before migration is committed, the target migration tasks are on
	 * ->mg_tasks of the csets on ->src_csets.  After, on ->mg_tasks of
	 * the csets on ->dst_csets.  ->csets point to either ->src_csets
	 * or ->dst_csets depending on whether migration is committed.
	 *
	 * ->cur_cset and ->cur_task point to the current task position
	 * during iteration.
	 */
	struct list_head	*csets;
	struct css_set		*cur_cset;
	struct task_struct	*cur_task;
};

#define CGROUP_TASKSET_INIT(tset)	(struct cgroup_taskset){	\
	.src_csets		= LIST_HEAD_INIT(tset.src_csets),	\
	.dst_csets		= LIST_HEAD_INIT(tset.dst_csets),	\
	.csets			= &tset.src_csets,			\
}

/**
 * cgroup_taskset_add - try to add a migration target task to a taskset
 * @task: target task
 * @tset: target taskset
 *
 * Add @task, which is a migration target, to @tset.  This function becomes
 * a noop if @task doesn't need to be migrated.  @task's css_set should have
 * been added as a migration source and @task->cg_list will be moved from
 * the css_set's tasks list to the mg_tasks one.
 */
static void cgroup_taskset_add(struct task_struct *task,
			       struct cgroup_taskset *tset)
{
	struct css_set *cset;

	lockdep_assert_held(&css_set_lock);

	/* @task either already exited or can't exit until the end */
	if (task->flags & PF_EXITING)
		return;

	/* leave @task alone if post_fork() hasn't linked it yet */
	if (list_empty(&task->cg_list))
		return;

	cset = task_css_set(task);
	if (!cset->mg_src_cgrp)
		return;

	list_move_tail(&task->cg_list, &cset->mg_tasks);
	if (list_empty(&cset->mg_node))
		list_add_tail(&cset->mg_node, &tset->src_csets);
	if (list_empty(&cset->mg_dst_cset->mg_node))
		list_move_tail(&cset->mg_dst_cset->mg_node,
			       &tset->dst_csets);
}

/**
 * cgroup_taskset_first - reset taskset and return the first task
 * @tset: taskset of interest
 * @dst_cssp: output variable for the destination css
 *
 * @tset iteration is initialized and the first task is returned.
 */
struct task_struct *cgroup_taskset_first(struct cgroup_taskset *tset,
					 struct cgroup_subsys_state **dst_cssp)
{
	tset->cur_cset = list_first_entry(tset->csets, struct css_set, mg_node);
	tset->cur_task = NULL;

	return cgroup_taskset_next(tset, dst_cssp);
}

/**
 * cgroup_taskset_next - iterate to the next task in taskset
 * @tset: taskset of interest
 * @dst_cssp: output variable for the destination css
 *
 * Return the next task in @tset.  Iteration must have been initialized
 * with cgroup_taskset_first().
 */
struct task_struct *cgroup_taskset_next(struct cgroup_taskset *tset,
					struct cgroup_subsys_state **dst_cssp)
{
	struct css_set *cset = tset->cur_cset;
	struct task_struct *task = tset->cur_task;

	while (&cset->mg_node != tset->csets) {
		if (!task)
			task = list_first_entry(&cset->mg_tasks,
						struct task_struct, cg_list);
		else
			task = list_next_entry(task, cg_list);

		if (&task->cg_list != &cset->mg_tasks) {
			tset->cur_cset = cset;
			tset->cur_task = task;

			/*
			 * This function may be called both before and
			 * after cgroup_taskset_migrate().  The two cases
			 * can be distinguished by looking at whether @cset
			 * has its ->mg_dst_cset set.
			 */
			if (cset->mg_dst_cset)
				*dst_cssp = cset->mg_dst_cset->subsys[tset->ssid];
			else
				*dst_cssp = cset->subsys[tset->ssid];

			return task;
		}

		cset = list_next_entry(cset, mg_node);
		task = NULL;
	}

	return NULL;
}
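
/*
 * Iteration sketch using only the two helpers above (a hedged example,
 * not from the original file; do_something() is a placeholder, and real
 * controllers would normally go through the cgroup_taskset_for_each()
 * wrapper in the header instead):
 *
 *	struct cgroup_subsys_state *dst_css;
 *	struct task_struct *task;
 *
 *	for (task = cgroup_taskset_first(tset, &dst_css); task;
 *	     task = cgroup_taskset_next(tset, &dst_css))
 *		do_something(task, dst_css);
 */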

/**
 * cgroup_taskset_migrate - migrate a taskset to a cgroup
 * @tset: target taskset
 * @dst_cgrp: destination cgroup
 *
 * Migrate tasks in @tset to @dst_cgrp.  This function fails iff one of the
 * ->can_attach callbacks fails and guarantees that either all or none of
 * the tasks in @tset are migrated.  @tset is consumed regardless of
 * success.
 */
static int cgroup_taskset_migrate(struct cgroup_taskset *tset,
				  struct cgroup *dst_cgrp)
{
	struct cgroup_subsys_state *css, *failed_css = NULL;
	struct task_struct *task, *tmp_task;
	struct css_set *cset, *tmp_cset;
	int i, ret;

	/* methods shouldn't be called if no task is actually migrating */
	if (list_empty(&tset->src_csets))
		return 0;

	/* check that we can legitimately attach to the cgroup */
	for_each_e_css(css, i, dst_cgrp) {
		if (css->ss->can_attach) {
			tset->ssid = i;
			ret = css->ss->can_attach(tset);
			if (ret) {
				failed_css = css;
				goto out_cancel_attach;
			}
		}
	}

	/*
	 * Now that we're guaranteed success, proceed to move all tasks to
	 * the new cgroup.  There are no failure cases after here, so this
	 * is the commit point.
	 */
	spin_lock_bh(&css_set_lock);
	list_for_each_entry(cset, &tset->src_csets, mg_node) {
		list_for_each_entry_safe(task, tmp_task, &cset->mg_tasks, cg_list) {
			struct css_set *from_cset = task_css_set(task);
			struct css_set *to_cset = cset->mg_dst_cset;

			get_css_set(to_cset);
			css_set_move_task(task, from_cset, to_cset, true);
			put_css_set_locked(from_cset);
		}
	}
	spin_unlock_bh(&css_set_lock);

	/*
	 * Migration is committed, all target tasks are now on dst_csets.
	 * Nothing is sensitive to fork() after this point.  Notify
	 * controllers that migration is complete.
	 */
	tset->csets = &tset->dst_csets;

	for_each_e_css(css, i, dst_cgrp) {
		if (css->ss->attach) {
			tset->ssid = i;
			css->ss->attach(tset);
		}
	}

	ret = 0;
	goto out_release_tset;

out_cancel_attach:
	for_each_e_css(css, i, dst_cgrp) {
		if (css == failed_css)
			break;
		if (css->ss->cancel_attach) {
			tset->ssid = i;
			css->ss->cancel_attach(tset);
		}
	}
out_release_tset:
	spin_lock_bh(&css_set_lock);
	list_splice_init(&tset->dst_csets, &tset->src_csets);
	list_for_each_entry_safe(cset, tmp_cset, &tset->src_csets, mg_node) {
		list_splice_tail_init(&cset->mg_tasks, &cset->tasks);
		list_del_init(&cset->mg_node);
	}
	spin_unlock_bh(&css_set_lock);
	return ret;
}

/**
 * cgroup_migrate_finish - cleanup after attach
 * @preloaded_csets: list of preloaded css_sets
 *
 * Undo cgroup_migrate_add_src() and cgroup_migrate_prepare_dst().  See
 * those functions for details.
 */
static void cgroup_migrate_finish(struct list_head *preloaded_csets)
{
	struct css_set *cset, *tmp_cset;

	lockdep_assert_held(&cgroup_mutex);

	spin_lock_bh(&css_set_lock);
	list_for_each_entry_safe(cset, tmp_cset, preloaded_csets, mg_preload_node) {
		cset->mg_src_cgrp = NULL;
		cset->mg_dst_cset = NULL;
		list_del_init(&cset->mg_preload_node);
		put_css_set_locked(cset);
	}
	spin_unlock_bh(&css_set_lock);
}

/**
 * cgroup_migrate_add_src - add a migration source css_set
 * @src_cset: the source css_set to add
 * @dst_cgrp: the destination cgroup
 * @preloaded_csets: list of preloaded css_sets
 *
 * Tasks belonging to @src_cset are about to be migrated to @dst_cgrp.  Pin
 * @src_cset and add it to @preloaded_csets, which should later be cleaned
 * up by cgroup_migrate_finish().
 *
 * This function may be called without holding cgroup_threadgroup_rwsem
 * even if the target is a process.  Threads may be created and destroyed
 * but as long as cgroup_mutex is not dropped, no new css_set can be put
 * into play and the preloaded css_sets are guaranteed to cover all
 * migrations.
 */
static void cgroup_migrate_add_src(struct css_set *src_cset,
				   struct cgroup *dst_cgrp,
				   struct list_head *preloaded_csets)
{
	struct cgroup *src_cgrp;

	lockdep_assert_held(&cgroup_mutex);
	lockdep_assert_held(&css_set_lock);

	src_cgrp = cset_cgroup_from_root(src_cset, dst_cgrp->root);

	if (!list_empty(&src_cset->mg_preload_node))
		return;

	WARN_ON(src_cset->mg_src_cgrp);
	WARN_ON(!list_empty(&src_cset->mg_tasks));
	WARN_ON(!list_empty(&src_cset->mg_node));

	src_cset->mg_src_cgrp = src_cgrp;
	get_css_set(src_cset);
	list_add(&src_cset->mg_preload_node, preloaded_csets);
}

/**
 * cgroup_migrate_prepare_dst - prepare destination css_sets for migration
 * @dst_cgrp: the destination cgroup (may be %NULL)
 * @preloaded_csets: list of preloaded source css_sets
 *
 * Tasks are about to be moved to @dst_cgrp and all the source css_sets
 * have been preloaded to @preloaded_csets.  This function looks up and
 * pins all destination css_sets, links each to its source, and appends them
 * to @preloaded_csets.  If @dst_cgrp is %NULL, the destination of each
 * source css_set is assumed to be its cgroup on the default hierarchy.
 *
 * This function must be called after cgroup_migrate_add_src() has been
 * called on each migration source css_set.  After migration is performed
 * using cgroup_migrate(), cgroup_migrate_finish() must be called on
 * @preloaded_csets.
 */
static int cgroup_migrate_prepare_dst(struct cgroup *dst_cgrp,
				      struct list_head *preloaded_csets)
{
	LIST_HEAD(csets);
	struct css_set *src_cset, *tmp_cset;

	lockdep_assert_held(&cgroup_mutex);

	/*
	 * Except for the root, subtree_ss_mask must be zero for a cgroup
	 * with tasks so that child cgroups don't compete against tasks.
	 */
	if (dst_cgrp && cgroup_on_dfl(dst_cgrp) && cgroup_parent(dst_cgrp) &&
	    dst_cgrp->subtree_ss_mask)
		return -EBUSY;

	/* look up the dst cset for each src cset and link it to src */
	list_for_each_entry_safe(src_cset, tmp_cset, preloaded_csets, mg_preload_node) {
		struct css_set *dst_cset;

		dst_cset = find_css_set(src_cset,
					dst_cgrp ?: src_cset->dfl_cgrp);
		if (!dst_cset)
			goto err;

		WARN_ON_ONCE(src_cset->mg_dst_cset || dst_cset->mg_dst_cset);

		/*
		 * If src cset equals dst, it's noop.  Drop the src.
		 * cgroup_migrate() will skip the cset too.  Note that we
		 * can't handle src == dst as some nodes are used by both.
		 */
		if (src_cset == dst_cset) {
			src_cset->mg_src_cgrp = NULL;
			list_del_init(&src_cset->mg_preload_node);
			put_css_set(src_cset);
			put_css_set(dst_cset);
			continue;
		}

		src_cset->mg_dst_cset = dst_cset;

		if (list_empty(&dst_cset->mg_preload_node))
			list_add(&dst_cset->mg_preload_node, &csets);
		else
			put_css_set(dst_cset);
	}

	list_splice_tail(&csets, preloaded_csets);
	return 0;
err:
	cgroup_migrate_finish(&csets);
	return -ENOMEM;
}
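
/*
 * End-to-end protocol sketch (mirrors cgroup_attach_task() further
 * below; shown here only to tie the helpers together, not an additional
 * API):
 *
 *	LIST_HEAD(preloaded_csets);
 *
 *	spin_lock_bh(&css_set_lock);
 *	cgroup_migrate_add_src(task_css_set(task), dst_cgrp,
 *			       &preloaded_csets);
 *	spin_unlock_bh(&css_set_lock);
 *
 *	ret = cgroup_migrate_prepare_dst(dst_cgrp, &preloaded_csets);
 *	if (!ret)
 *		ret = cgroup_migrate(task, false, dst_cgrp);
 *	cgroup_migrate_finish(&preloaded_csets);
 */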

/**
 * cgroup_migrate - migrate a process or task to a cgroup
 * @leader: the leader of the process or the task to migrate
 * @threadgroup: whether @leader points to the whole process or a single task
 * @cgrp: the destination cgroup
 *
 * Migrate a process or task denoted by @leader to @cgrp.  If migrating a
 * process, the caller must be holding cgroup_threadgroup_rwsem.  The
 * caller is also responsible for invoking cgroup_migrate_add_src() and
 * cgroup_migrate_prepare_dst() on the targets before invoking this
 * function and following up with cgroup_migrate_finish().
 *
 * As long as a controller's ->can_attach() doesn't fail, this function is
 * guaranteed to succeed.  This means that, excluding ->can_attach()
 * failure, when migrating multiple targets, the success or failure can be
 * decided for all targets by invoking cgroup_migrate_prepare_dst() before
 * actually starting the migration.
 */
static int cgroup_migrate(struct task_struct *leader, bool threadgroup,
			  struct cgroup *cgrp)
{
	struct cgroup_taskset tset = CGROUP_TASKSET_INIT(tset);
	struct task_struct *task;

	/*
	 * Prevent freeing of tasks while we take a snapshot. Tasks that are
	 * already PF_EXITING could be freed from underneath us unless we
	 * take an rcu_read_lock.
	 */
	spin_lock_bh(&css_set_lock);
	rcu_read_lock();
	task = leader;
	do {
		cgroup_taskset_add(task, &tset);
		if (!threadgroup)
			break;
	} while_each_thread(leader, task);
	rcu_read_unlock();
	spin_unlock_bh(&css_set_lock);

	return cgroup_taskset_migrate(&tset, cgrp);
}

/**
 * cgroup_attach_task - attach a task or a whole threadgroup to a cgroup
 * @dst_cgrp: the cgroup to attach to
 * @leader: the task or the leader of the threadgroup to be attached
 * @threadgroup: attach the whole threadgroup?
 *
 * Call holding cgroup_mutex and cgroup_threadgroup_rwsem.
 */
static int cgroup_attach_task(struct cgroup *dst_cgrp,
			      struct task_struct *leader, bool threadgroup)
{
	LIST_HEAD(preloaded_csets);
	struct task_struct *task;
	int ret;

	/* look up all src csets */
	spin_lock_bh(&css_set_lock);
	rcu_read_lock();
	task = leader;
	do {
		cgroup_migrate_add_src(task_css_set(task), dst_cgrp,
				       &preloaded_csets);
		if (!threadgroup)
			break;
	} while_each_thread(leader, task);
	rcu_read_unlock();
	spin_unlock_bh(&css_set_lock);

	/* prepare dst csets and commit */
	ret = cgroup_migrate_prepare_dst(dst_cgrp, &preloaded_csets);
	if (!ret)
		ret = cgroup_migrate(leader, threadgroup, dst_cgrp);

	cgroup_migrate_finish(&preloaded_csets);
	return ret;
}

static int cgroup_procs_write_permission(struct task_struct *task,
					 struct cgroup *dst_cgrp,
					 struct kernfs_open_file *of)
{
	const struct cred *cred = current_cred();
	const struct cred *tcred = get_task_cred(task);
	int ret = 0;

	/*
	 * even if we're attaching all tasks in the thread group, we only
	 * need to check permissions on one of them.
	 */
	if (!uid_eq(cred->euid, GLOBAL_ROOT_UID) &&
	    !uid_eq(cred->euid, tcred->uid) &&
	    !uid_eq(cred->euid, tcred->suid))
		ret = -EACCES;

	if (!ret && cgroup_on_dfl(dst_cgrp)) {
		struct super_block *sb = of->file->f_path.dentry->d_sb;
		struct cgroup *cgrp;
		struct inode *inode;

		spin_lock_bh(&css_set_lock);
		cgrp = task_cgroup_from_root(task, &cgrp_dfl_root);
		spin_unlock_bh(&css_set_lock);

		while (!cgroup_is_descendant(dst_cgrp, cgrp))
			cgrp = cgroup_parent(cgrp);

		ret = -ENOMEM;
		inode = kernfs_get_inode(sb, cgrp->procs_file.kn);
		if (inode) {
			ret = inode_permission(inode, MAY_WRITE);
			iput(inode);
		}
	}

	put_cred(tcred);
	return ret;
}

/*
 * Find the task_struct of the task to attach by vpid and pass it along to the
 * function to attach either it or all tasks in its threadgroup. Will lock
 * cgroup_mutex and threadgroup.
 */
static ssize_t __cgroup_procs_write(struct kernfs_open_file *of, char *buf,
				    size_t nbytes, loff_t off, bool threadgroup)
{
	struct task_struct *tsk;
	struct cgroup *cgrp;
	pid_t pid;
	int ret;

	if (kstrtoint(strstrip(buf), 0, &pid) || pid < 0)
		return -EINVAL;

	cgrp = cgroup_kn_lock_live(of->kn);
	if (!cgrp)
		return -ENODEV;

	percpu_down_write(&cgroup_threadgroup_rwsem);
	rcu_read_lock();
	if (pid) {
		tsk = find_task_by_vpid(pid);
		if (!tsk) {
			ret = -ESRCH;
			goto out_unlock_rcu;
		}
	} else {
		tsk = current;
	}

	if (threadgroup)
		tsk = tsk->group_leader;

	/*
	 * Workqueue threads may acquire PF_NO_SETAFFINITY and become
	 * trapped in a cpuset, or an RT worker may be born in a cgroup
	 * with no rt_runtime allocated.  Just say no.
	 */
	if (tsk == kthreadd_task || (tsk->flags & PF_NO_SETAFFINITY)) {
		ret = -EINVAL;
		goto out_unlock_rcu;
	}

	get_task_struct(tsk);
	rcu_read_unlock();

	ret = cgroup_procs_write_permission(tsk, cgrp, of);
	if (!ret)
		ret = cgroup_attach_task(cgrp, tsk, threadgroup);

	put_task_struct(tsk);
	goto out_unlock_threadgroup;

out_unlock_rcu:
	rcu_read_unlock();
out_unlock_threadgroup:
	percpu_up_write(&cgroup_threadgroup_rwsem);
	cgroup_kn_unlock(of->kn);
	cpuset_post_attach_flush();
	return ret ?: nbytes;
}

/**
 * cgroup_attach_task_all - attach task 'tsk' to all cgroups of task 'from'
 * @from: attach to all cgroups of a given task
 * @tsk: the task to be attached
 */
int cgroup_attach_task_all(struct task_struct *from, struct task_struct *tsk)
{
	struct cgroup_root *root;
	int retval = 0;

	mutex_lock(&cgroup_mutex);
	for_each_root(root) {
		struct cgroup *from_cgrp;

		if (root == &cgrp_dfl_root)
			continue;

		spin_lock_bh(&css_set_lock);
		from_cgrp = task_cgroup_from_root(from, root);
		spin_unlock_bh(&css_set_lock);

		retval = cgroup_attach_task(from_cgrp, tsk, false);
		if (retval)
			break;
	}
	mutex_unlock(&cgroup_mutex);

	return retval;
}
EXPORT_SYMBOL_GPL(cgroup_attach_task_all);

static ssize_t cgroup_tasks_write(struct kernfs_open_file *of,
				  char *buf, size_t nbytes, loff_t off)
{
	return __cgroup_procs_write(of, buf, nbytes, off, false);
}

static ssize_t cgroup_procs_write(struct kernfs_open_file *of,
				  char *buf, size_t nbytes, loff_t off)
{
	return __cgroup_procs_write(of, buf, nbytes, off, true);
}
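
/*
 * Illustrative userspace view (an assumed example matching the two
 * writers above):
 *
 *	echo $$ > /sys/fs/cgroup/.../cgroup.procs	# whole threadgroup
 *	echo $TID > /sys/fs/cgroup/.../tasks		# single thread (v1)
 *
 * Both paths funnel into __cgroup_procs_write(), differing only in the
 * @threadgroup argument.
 */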

static ssize_t cgroup_release_agent_write(struct kernfs_open_file *of,
					  char *buf, size_t nbytes, loff_t off)
{
	struct cgroup *cgrp;

	BUILD_BUG_ON(sizeof(cgrp->root->release_agent_path) < PATH_MAX);

	cgrp = cgroup_kn_lock_live(of->kn);
	if (!cgrp)
		return -ENODEV;
	spin_lock(&release_agent_path_lock);
	strlcpy(cgrp->root->release_agent_path, strstrip(buf),
		sizeof(cgrp->root->release_agent_path));
	spin_unlock(&release_agent_path_lock);
	cgroup_kn_unlock(of->kn);
	return nbytes;
}

static int cgroup_release_agent_show(struct seq_file *seq, void *v)
{
	struct cgroup *cgrp = seq_css(seq)->cgroup;

	spin_lock(&release_agent_path_lock);
	seq_puts(seq, cgrp->root->release_agent_path);
	spin_unlock(&release_agent_path_lock);
	seq_putc(seq, '\n');
	return 0;
}

static int cgroup_sane_behavior_show(struct seq_file *seq, void *v)
{
	seq_puts(seq, "0\n");
	return 0;
}

static void cgroup_print_ss_mask(struct seq_file *seq, unsigned long ss_mask)
{
	struct cgroup_subsys *ss;
	bool printed = false;
	int ssid;

	for_each_subsys_which(ss, ssid, &ss_mask) {
		if (printed)
			seq_putc(seq, ' ');
		seq_printf(seq, "%s", ss->name);
		printed = true;
	}
	if (printed)
		seq_putc(seq, '\n');
}

/* show controllers which are currently attached to the default hierarchy */
static int cgroup_root_controllers_show(struct seq_file *seq, void *v)
{
	struct cgroup *cgrp = seq_css(seq)->cgroup;

	cgroup_print_ss_mask(seq, cgrp->root->subsys_mask &
			     ~cgrp_dfl_root_inhibit_ss_mask);
	return 0;
}

/* show controllers which are enabled from the parent */
static int cgroup_controllers_show(struct seq_file *seq, void *v)
{
	struct cgroup *cgrp = seq_css(seq)->cgroup;

	cgroup_print_ss_mask(seq, cgroup_parent(cgrp)->subtree_control);
	return 0;
}

/* show controllers which are enabled for a given cgroup's children */
static int cgroup_subtree_control_show(struct seq_file *seq, void *v)
{
	struct cgroup *cgrp = seq_css(seq)->cgroup;

	cgroup_print_ss_mask(seq, cgrp->subtree_control);
	return 0;
}

/**
 * cgroup_update_dfl_csses - update css assoc of a subtree in default hierarchy
 * @cgrp: root of the subtree to update csses for
 *
 * @cgrp's subtree_ss_mask has changed and its subtree's (self excluded)
 * css associations need to be updated accordingly.  This function looks up
 * all css_sets which are attached to the subtree, creates the matching
 * updated css_sets and migrates the tasks to the new ones.
 */
static int cgroup_update_dfl_csses(struct cgroup *cgrp)
{
	LIST_HEAD(preloaded_csets);
	struct cgroup_taskset tset = CGROUP_TASKSET_INIT(tset);
	struct cgroup_subsys_state *css;
	struct css_set *src_cset;
	int ret;

	lockdep_assert_held(&cgroup_mutex);

	percpu_down_write(&cgroup_threadgroup_rwsem);

	/* look up all csses currently attached to @cgrp's subtree */
	spin_lock_bh(&css_set_lock);
	css_for_each_descendant_pre(css, cgroup_css(cgrp, NULL)) {
		struct cgrp_cset_link *link;

		/* self is not affected by subtree_ss_mask change */
		if (css->cgroup == cgrp)
			continue;

		list_for_each_entry(link, &css->cgroup->cset_links, cset_link)
			cgroup_migrate_add_src(link->cset, cgrp,
					       &preloaded_csets);
	}
	spin_unlock_bh(&css_set_lock);

	/* NULL dst indicates self on default hierarchy */
	ret = cgroup_migrate_prepare_dst(NULL, &preloaded_csets);
	if (ret)
		goto out_finish;

	spin_lock_bh(&css_set_lock);
	list_for_each_entry(src_cset, &preloaded_csets, mg_preload_node) {
		struct task_struct *task, *ntask;

		/* src_csets precede dst_csets, break on the first dst_cset */
		if (!src_cset->mg_src_cgrp)
			break;

		/* all tasks in src_csets need to be migrated */
		list_for_each_entry_safe(task, ntask, &src_cset->tasks, cg_list)
			cgroup_taskset_add(task, &tset);
	}
	spin_unlock_bh(&css_set_lock);

	ret = cgroup_taskset_migrate(&tset, cgrp);
out_finish:
	cgroup_migrate_finish(&preloaded_csets);
	percpu_up_write(&cgroup_threadgroup_rwsem);
	return ret;
}

/* change the enabled child controllers for a cgroup in the default hierarchy */
static ssize_t cgroup_subtree_control_write(struct kernfs_open_file *of,
					    char *buf, size_t nbytes,
					    loff_t off)
{
	unsigned long enable = 0, disable = 0;
	unsigned long css_enable, css_disable, old_sc, new_sc, old_ss, new_ss;
	struct cgroup *cgrp, *child;
	struct cgroup_subsys *ss;
	char *tok;
	int ssid, ret;

	/*
	 * Parse input - space separated list of subsystem names prefixed
	 * with either + or -.
	 */
	buf = strstrip(buf);
	while ((tok = strsep(&buf, " "))) {
		unsigned long tmp_ss_mask = ~cgrp_dfl_root_inhibit_ss_mask;

		if (tok[0] == '\0')
			continue;
		for_each_subsys_which(ss, ssid, &tmp_ss_mask) {
			if (!cgroup_ssid_enabled(ssid) ||
			    strcmp(tok + 1, ss->name))
				continue;

			if (*tok == '+') {
				enable |= 1 << ssid;
				disable &= ~(1 << ssid);
			} else if (*tok == '-') {
				disable |= 1 << ssid;
				enable &= ~(1 << ssid);
			} else {
				return -EINVAL;
			}
			break;
		}
		if (ssid == CGROUP_SUBSYS_COUNT)
			return -EINVAL;
	}

	cgrp = cgroup_kn_lock_live(of->kn);
	if (!cgrp)
		return -ENODEV;

	for_each_subsys(ss, ssid) {
		if (enable & (1 << ssid)) {
			if (cgrp->subtree_control & (1 << ssid)) {
				enable &= ~(1 << ssid);
				continue;
			}

			/* unavailable or not enabled on the parent? */
			if (!(cgrp_dfl_root.subsys_mask & (1 << ssid)) ||
			    (cgroup_parent(cgrp) &&
			     !(cgroup_parent(cgrp)->subtree_control & (1 << ssid)))) {
				ret = -ENOENT;
				goto out_unlock;
			}
		} else if (disable & (1 << ssid)) {
			if (!(cgrp->subtree_control & (1 << ssid))) {
				disable &= ~(1 << ssid);
				continue;
			}

			/* a child has it enabled? */
			cgroup_for_each_live_child(child, cgrp) {
				if (child->subtree_control & (1 << ssid)) {
					ret = -EBUSY;
					goto out_unlock;
				}
			}
		}
	}

	if (!enable && !disable) {
		ret = 0;
		goto out_unlock;
	}

	/*
	 * Except for the root, subtree_control must be zero for a cgroup
	 * with tasks so that child cgroups don't compete against tasks.
	 */
	if (enable && cgroup_parent(cgrp) && !list_empty(&cgrp->cset_links)) {
		ret = -EBUSY;
		goto out_unlock;
	}

	/*
	 * Update subsys masks and calculate what needs to be done.  More
	 * subsystems than specified may need to be enabled or disabled
	 * depending on subsystem dependencies.
	 */
	old_sc = cgrp->subtree_control;
	old_ss = cgrp->subtree_ss_mask;
	new_sc = (old_sc | enable) & ~disable;
	new_ss = cgroup_calc_subtree_ss_mask(cgrp, new_sc);

	css_enable = ~old_ss & new_ss;
	css_disable = old_ss & ~new_ss;
	enable |= css_enable;
	disable |= css_disable;

	/*
	 * Because css offlining is asynchronous, userland might try to
	 * re-enable the same controller while the previous instance is
	 * still around.  In such cases, wait till it's gone using
	 * offline_waitq.
	 */
	for_each_subsys_which(ss, ssid, &css_enable) {
		cgroup_for_each_live_child(child, cgrp) {
			DEFINE_WAIT(wait);

			if (!cgroup_css(child, ss))
				continue;

			cgroup_get(child);
			prepare_to_wait(&child->offline_waitq, &wait,
					TASK_UNINTERRUPTIBLE);
			cgroup_kn_unlock(of->kn);
			schedule();
			finish_wait(&child->offline_waitq, &wait);
			cgroup_put(child);

			return restart_syscall();
		}
	}

	cgrp->subtree_control = new_sc;
	cgrp->subtree_ss_mask = new_ss;

	/*
	 * Create new csses or make the existing ones visible.  A css is
	 * created invisible if it's being implicitly enabled through
	 * dependency.  An invisible css is made visible when the userland
	 * explicitly enables it.
	 */
	for_each_subsys(ss, ssid) {
		if (!(enable & (1 << ssid)))
			continue;

		cgroup_for_each_live_child(child, cgrp) {
			if (css_enable & (1 << ssid))
				ret = create_css(child, ss,
					cgrp->subtree_control & (1 << ssid));
			else
				ret = css_populate_dir(cgroup_css(child, ss),
						       NULL);
			if (ret)
				goto err_undo_css;
		}
	}

	/*
	 * At this point, cgroup_e_css() results reflect the new csses
	 * making the following cgroup_update_dfl_csses() properly update
	 * css associations of all tasks in the subtree.
	 */
	ret = cgroup_update_dfl_csses(cgrp);
	if (ret)
		goto err_undo_css;

	/*
	 * All tasks are migrated out of disabled csses.  Kill or hide
	 * them.  A css is hidden when the userland requests it to be
	 * disabled while other subsystems are still depending on it.  The
	 * css must not actively control resources and be in the vanilla
	 * state if it's made visible again later.  Controllers which may
	 * be depended upon should provide ->css_reset() for this purpose.
	 */
	for_each_subsys(ss, ssid) {
		if (!(disable & (1 << ssid)))
			continue;

		cgroup_for_each_live_child(child, cgrp) {
			struct cgroup_subsys_state *css = cgroup_css(child, ss);

			if (css_disable & (1 << ssid)) {
				kill_css(css);
			} else {
				css_clear_dir(css, NULL);
				if (ss->css_reset)
					ss->css_reset(css);
			}
		}
	}

	kernfs_activate(cgrp->kn);
	ret = 0;
out_unlock:
	cgroup_kn_unlock(of->kn);
	return ret ?: nbytes;

err_undo_css:
	cgrp->subtree_control = old_sc;
	cgrp->subtree_ss_mask = old_ss;

	for_each_subsys(ss, ssid) {
		if (!(enable & (1 << ssid)))
			continue;

		cgroup_for_each_live_child(child, cgrp) {
			struct cgroup_subsys_state *css = cgroup_css(child, ss);

			if (!css)
				continue;

			if (css_enable & (1 << ssid))
				kill_css(css);
			else
				css_clear_dir(css, NULL);
		}
	}
	goto out_unlock;
}
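
/*
 * Illustrative write (an assumed example): "echo '+memory -pids' >
 * cgroup.subtree_control" arrives here as @buf, setting the memory bit
 * in @enable and the pids bit in @disable before the new masks are
 * applied to cgrp->subtree_control above.
 */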

static int cgroup_events_show(struct seq_file *seq, void *v)
{
	seq_printf(seq, "populated %d\n",
		   cgroup_is_populated(seq_css(seq)->cgroup));
	return 0;
}
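
/*
 * Reading cgroup.events therefore yields a single line, e.g.
 * "populated 1" while any task lives in the subtree (an illustrative
 * value, not taken from the original file).
 */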

static ssize_t cgroup_file_write(struct kernfs_open_file *of, char *buf,
				 size_t nbytes, loff_t off)
{
	struct cgroup *cgrp = of->kn->parent->priv;
	struct cftype *cft = of->kn->priv;
	struct cgroup_subsys_state *css;
	int ret;

	if (cft->write)
		return cft->write(of, buf, nbytes, off);

	/*
	 * kernfs guarantees that a file isn't deleted with operations in
	 * flight, which means that the matching css is and stays alive and
	 * doesn't need to be pinned.  The RCU locking is not necessary
	 * either.  It's just for the convenience of using cgroup_css().
	 */
	rcu_read_lock();
	css = cgroup_css(cgrp, cft->ss);
	rcu_read_unlock();

	if (cft->write_u64) {
		unsigned long long v;
		ret = kstrtoull(buf, 0, &v);
		if (!ret)
			ret = cft->write_u64(css, cft, v);
	} else if (cft->write_s64) {
		long long v;
		ret = kstrtoll(buf, 0, &v);
		if (!ret)
			ret = cft->write_s64(css, cft, v);
	} else {
		ret = -EINVAL;
	}

	return ret ?: nbytes;
}

static void *cgroup_seqfile_start(struct seq_file *seq, loff_t *ppos)
{
	return seq_cft(seq)->seq_start(seq, ppos);
}

static void *cgroup_seqfile_next(struct seq_file *seq, void *v, loff_t *ppos)
{
	return seq_cft(seq)->seq_next(seq, v, ppos);
}

static void cgroup_seqfile_stop(struct seq_file *seq, void *v)
{
	seq_cft(seq)->seq_stop(seq, v);
}

static int cgroup_seqfile_show(struct seq_file *m, void *arg)
{
	struct cftype *cft = seq_cft(m);
	struct cgroup_subsys_state *css = seq_css(m);

	if (cft->seq_show)
		return cft->seq_show(m, arg);

	if (cft->read_u64)
		seq_printf(m, "%llu\n", cft->read_u64(css, cft));
	else if (cft->read_s64)
		seq_printf(m, "%lld\n", cft->read_s64(css, cft));
	else
		return -EINVAL;
	return 0;
}

static struct kernfs_ops cgroup_kf_single_ops = {
	.atomic_write_len	= PAGE_SIZE,
	.write			= cgroup_file_write,
	.seq_show		= cgroup_seqfile_show,
};

static struct kernfs_ops cgroup_kf_ops = {
	.atomic_write_len	= PAGE_SIZE,
	.write			= cgroup_file_write,
	.seq_start		= cgroup_seqfile_start,
	.seq_next		= cgroup_seqfile_next,
	.seq_stop		= cgroup_seqfile_stop,
	.seq_show		= cgroup_seqfile_show,
};

/*
 * cgroup_rename - Only allow simple rename of directories in place.
 */
static int cgroup_rename(struct kernfs_node *kn, struct kernfs_node *new_parent,
			 const char *new_name_str)
{
	struct cgroup *cgrp = kn->priv;
	int ret;

	if (kernfs_type(kn) != KERNFS_DIR)
		return -ENOTDIR;
	if (kn->parent != new_parent)
		return -EIO;

	/*
	 * This isn't a proper migration and its usefulness is very
	 * limited.  Disallow on the default hierarchy.
	 */
	if (cgroup_on_dfl(cgrp))
		return -EPERM;

	/*
	 * We're gonna grab cgroup_mutex which nests outside kernfs
	 * active_ref.  kernfs_rename() doesn't require active_ref
	 * protection.  Break them before grabbing cgroup_mutex.
	 */
	kernfs_break_active_protection(new_parent);
	kernfs_break_active_protection(kn);

	mutex_lock(&cgroup_mutex);

	ret = kernfs_rename(kn, new_parent, new_name_str);

	mutex_unlock(&cgroup_mutex);

	kernfs_unbreak_active_protection(kn);
	kernfs_unbreak_active_protection(new_parent);
	return ret;
}

/* set uid and gid of cgroup dirs and files to that of the creator */
static int cgroup_kn_set_ugid(struct kernfs_node *kn)
{
	struct iattr iattr = { .ia_valid = ATTR_UID | ATTR_GID,
			       .ia_uid = current_fsuid(),
			       .ia_gid = current_fsgid(), };

	if (uid_eq(iattr.ia_uid, GLOBAL_ROOT_UID) &&
	    gid_eq(iattr.ia_gid, GLOBAL_ROOT_GID))
		return 0;

	return kernfs_setattr(kn, &iattr);
}

static int cgroup_add_file(struct cgroup_subsys_state *css, struct cgroup *cgrp,
			   struct cftype *cft)
{
	char name[CGROUP_FILE_NAME_MAX];
	struct kernfs_node *kn;
	struct lock_class_key *key = NULL;
	int ret;

#ifdef CONFIG_DEBUG_LOCK_ALLOC
	key = &cft->lockdep_key;
#endif
	kn = __kernfs_create_file(cgrp->kn, cgroup_file_name(cgrp, cft, name),
				  cgroup_file_mode(cft), 0, cft->kf_ops, cft,
				  NULL, key);
	if (IS_ERR(kn))
		return PTR_ERR(kn);

	ret = cgroup_kn_set_ugid(kn);
	if (ret) {
		kernfs_remove(kn);
		return ret;
	}

	if (cft->file_offset) {
		struct cgroup_file *cfile = (void *)css + cft->file_offset;

		spin_lock_irq(&cgroup_file_kn_lock);
		cfile->kn = kn;
		spin_unlock_irq(&cgroup_file_kn_lock);
	}

	return 0;
}

/**
 * cgroup_addrm_files - add or remove files to a cgroup directory
 * @css: the target css
 * @cgrp: the target cgroup (usually css->cgroup)
 * @cfts: array of cftypes to be added
 * @is_add: whether to add or remove
 *
 * Depending on @is_add, add or remove files defined by @cfts on @cgrp.
 * For removals, this function never fails.
 */
static int cgroup_addrm_files(struct cgroup_subsys_state *css,
			      struct cgroup *cgrp, struct cftype cfts[],
			      bool is_add)
{
	struct cftype *cft, *cft_end = NULL;
	int ret = 0;

	lockdep_assert_held(&cgroup_mutex);

restart:
	for (cft = cfts; cft != cft_end && cft->name[0] != '\0'; cft++) {
		/* does cft->flags tell us to skip this file on @cgrp? */
		if ((cft->flags & __CFTYPE_ONLY_ON_DFL) && !cgroup_on_dfl(cgrp))
			continue;
		if ((cft->flags & __CFTYPE_NOT_ON_DFL) && cgroup_on_dfl(cgrp))
			continue;
		if ((cft->flags & CFTYPE_NOT_ON_ROOT) && !cgroup_parent(cgrp))
			continue;
		if ((cft->flags & CFTYPE_ONLY_ON_ROOT) && cgroup_parent(cgrp))
			continue;

		if (is_add) {
			ret = cgroup_add_file(css, cgrp, cft);
			if (ret) {
				pr_warn("%s: failed to add %s, err=%d\n",
					__func__, cft->name, ret);
				cft_end = cft;
				is_add = false;
				goto restart;
			}
		} else {
			cgroup_rm_file(cgrp, cft);
		}
	}
	return ret;
}

static int cgroup_apply_cftypes(struct cftype *cfts, bool is_add)
{
	LIST_HEAD(pending);
	struct cgroup_subsys *ss = cfts[0].ss;
	struct cgroup *root = &ss->root->cgrp;
	struct cgroup_subsys_state *css;
	int ret = 0;

	lockdep_assert_held(&cgroup_mutex);

	/* add/rm files for all cgroups created before */
	css_for_each_descendant_pre(css, cgroup_css(root, ss)) {
		struct cgroup *cgrp = css->cgroup;

		if (cgroup_is_dead(cgrp))
			continue;

		ret = cgroup_addrm_files(css, cgrp, cfts, is_add);
		if (ret)
			break;
	}

	if (is_add && !ret)
		kernfs_activate(root->kn);
	return ret;
}

static void cgroup_exit_cftypes(struct cftype *cfts)
{
	struct cftype *cft;

	for (cft = cfts; cft->name[0] != '\0'; cft++) {
		/* free copy for custom atomic_write_len, see init_cftypes() */
		if (cft->max_write_len && cft->max_write_len != PAGE_SIZE)
			kfree(cft->kf_ops);
		cft->kf_ops = NULL;
		cft->ss = NULL;

		/* revert flags set by cgroup core while adding @cfts */
		cft->flags &= ~(__CFTYPE_ONLY_ON_DFL | __CFTYPE_NOT_ON_DFL);
	}
}

static int cgroup_init_cftypes(struct cgroup_subsys *ss, struct cftype *cfts)
{
	struct cftype *cft;

	for (cft = cfts; cft->name[0] != '\0'; cft++) {
		struct kernfs_ops *kf_ops;

		WARN_ON(cft->ss || cft->kf_ops);

		if (cft->seq_start)
			kf_ops = &cgroup_kf_ops;
		else
			kf_ops = &cgroup_kf_single_ops;

		/*
		 * Ugh... if @cft wants a custom max_write_len, we need to
		 * make a copy of kf_ops to set its atomic_write_len.
		 */
		if (cft->max_write_len && cft->max_write_len != PAGE_SIZE) {
			kf_ops = kmemdup(kf_ops, sizeof(*kf_ops), GFP_KERNEL);
			if (!kf_ops) {
				cgroup_exit_cftypes(cfts);
				return -ENOMEM;
			}
			kf_ops->atomic_write_len = cft->max_write_len;
		}

		cft->kf_ops = kf_ops;
		cft->ss = ss;
	}

	return 0;
}

static int cgroup_rm_cftypes_locked(struct cftype *cfts)
{
	lockdep_assert_held(&cgroup_mutex);

	if (!cfts || !cfts[0].ss)
		return -ENOENT;

	list_del(&cfts->node);
	cgroup_apply_cftypes(cfts, false);
	cgroup_exit_cftypes(cfts);
	return 0;
}

/**
 * cgroup_rm_cftypes - remove an array of cftypes from a subsystem
 * @cfts: zero-length name terminated array of cftypes
 *
 * Unregister @cfts.  Files described by @cfts are removed from all
 * existing cgroups and all future cgroups won't have them either.  This
 * function can be called anytime whether @cfts' subsys is attached or not.
 *
 * Returns 0 on successful unregistration, -ENOENT if @cfts is not
 * registered.
 */
int cgroup_rm_cftypes(struct cftype *cfts)
{
	int ret;

	mutex_lock(&cgroup_mutex);
	ret = cgroup_rm_cftypes_locked(cfts);
	mutex_unlock(&cgroup_mutex);
	return ret;
}

/**
 * cgroup_add_cftypes - add an array of cftypes to a subsystem
 * @ss: target cgroup subsystem
 * @cfts: zero-length name terminated array of cftypes
 *
 * Register @cfts to @ss.  Files described by @cfts are created for all
 * existing cgroups to which @ss is attached and all future cgroups will
 * have them too.  This function can be called anytime whether @ss is
 * attached or not.
 *
 * Returns 0 on successful registration, -errno on failure.  Note that this
 * function currently returns 0 as long as @cfts registration is successful
 * even if some file creation attempts on existing cgroups fail.
 */
static int cgroup_add_cftypes(struct cgroup_subsys *ss, struct cftype *cfts)
{
	int ret;

	if (!cgroup_ssid_enabled(ss->id))
		return 0;

	if (!cfts || cfts[0].name[0] == '\0')
		return 0;

	ret = cgroup_init_cftypes(ss, cfts);
	if (ret)
		return ret;

	mutex_lock(&cgroup_mutex);

	list_add_tail(&cfts->node, &ss->cfts);
	ret = cgroup_apply_cftypes(cfts, true);
	if (ret)
		cgroup_rm_cftypes_locked(cfts);

	mutex_unlock(&cgroup_mutex);
	return ret;
}

/**
 * cgroup_add_dfl_cftypes - add an array of cftypes for default hierarchy
 * @ss: target cgroup subsystem
 * @cfts: zero-length name terminated array of cftypes
 *
 * Similar to cgroup_add_cftypes() but the added files are only used for
 * the default hierarchy.
 */
int cgroup_add_dfl_cftypes(struct cgroup_subsys *ss, struct cftype *cfts)
{
	struct cftype *cft;

	for (cft = cfts; cft && cft->name[0] != '\0'; cft++)
		cft->flags |= __CFTYPE_ONLY_ON_DFL;
	return cgroup_add_cftypes(ss, cfts);
}

/**
 * cgroup_add_legacy_cftypes - add an array of cftypes for legacy hierarchies
 * @ss: target cgroup subsystem
 * @cfts: zero-length name terminated array of cftypes
 *
 * Similar to cgroup_add_cftypes() but the added files are only used for
 * the legacy hierarchies.
 */
int cgroup_add_legacy_cftypes(struct cgroup_subsys *ss, struct cftype *cfts)
{
	struct cftype *cft;

	for (cft = cfts; cft && cft->name[0] != '\0'; cft++)
		cft->flags |= __CFTYPE_NOT_ON_DFL;
	return cgroup_add_cftypes(ss, cfts);
}
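
/*
 * Example (illustrative sketch, not part of this file): a controller
 * typically describes its interface files in a cftype array terminated
 * by a zero-length name and registers it once during initialization.
 * The "demo" names below are hypothetical.
 *
 *	static struct cftype demo_files[] = {
 *		{
 *			.name = "demo.value",
 *			.read_u64 = demo_read_u64,
 *			.write_u64 = demo_write_u64,
 *		},
 *		{ }
 *	};
 *
 *	cgroup_add_legacy_cftypes(&demo_cgrp_subsys, demo_files);
 */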

/**
 * cgroup_file_notify - generate a file modified event for a cgroup_file
 * @cfile: target cgroup_file
 *
 * @cfile must have been obtained by setting cftype->file_offset.
 */
void cgroup_file_notify(struct cgroup_file *cfile)
{
	unsigned long flags;

	spin_lock_irqsave(&cgroup_file_kn_lock, flags);
	if (cfile->kn)
		kernfs_notify(cfile->kn);
	spin_unlock_irqrestore(&cgroup_file_kn_lock, flags);
}
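
/*
 * Example (illustrative sketch): a controller that wants to generate
 * modification events on one of its files embeds a cgroup_file in its
 * css object and points cftype->file_offset at it; the "demo" names are
 * hypothetical:
 *
 *	struct demo_css {
 *		struct cgroup_subsys_state css;
 *		struct cgroup_file events_file;
 *	};
 *
 *	static struct cftype demo_files[] = {
 *		{
 *			.name = "demo.events",
 *			.file_offset = offsetof(struct demo_css, events_file),
 *			.seq_show = demo_events_show,
 *		},
 *		{ }
 *	};
 *
 * After a state change, the controller would call
 * cgroup_file_notify(&dcss->events_file), where dcss is the affected
 * demo_css instance, to wake up pollers.
 */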

/**
 * cgroup_task_count - count the number of tasks in a cgroup.
 * @cgrp: the cgroup in question
 *
 * Return the number of tasks in the cgroup.
 */
static int cgroup_task_count(const struct cgroup *cgrp)
{
	int count = 0;
	struct cgrp_cset_link *link;

	spin_lock_bh(&css_set_lock);
	list_for_each_entry(link, &cgrp->cset_links, cset_link)
		count += atomic_read(&link->cset->refcount);
	spin_unlock_bh(&css_set_lock);
	return count;
}

/**
 * css_next_child - find the next child of a given css
 * @pos: the current position (%NULL to initiate traversal)
 * @parent: css whose children to walk
 *
 * This function returns the next child of @parent and should be called
 * under either cgroup_mutex or RCU read lock.  The only requirement is
 * that @parent and @pos are accessible.  The next sibling is guaranteed to
 * be returned regardless of their states.
 *
 * If a subsystem synchronizes ->css_online() and the start of iteration, a
 * css which finished ->css_online() is guaranteed to be visible in the
 * future iterations and will stay visible until the last reference is put.
 * A css which hasn't finished ->css_online() or already finished
 * ->css_offline() may show up during traversal.  It's each subsystem's
 * responsibility to synchronize against on/offlining.
 */
struct cgroup_subsys_state *css_next_child(struct cgroup_subsys_state *pos,
					   struct cgroup_subsys_state *parent)
{
	struct cgroup_subsys_state *next;

	cgroup_assert_mutex_or_rcu_locked();

	/*
	 * @pos could already have been unlinked from the sibling list.
	 * Once a cgroup is removed, its ->sibling.next is no longer
	 * updated when its next sibling changes.  CSS_RELEASED is set when
	 * @pos is taken off list, at which time its next pointer is valid,
	 * and, as releases are serialized, the one pointed to by the next
	 * pointer is guaranteed to not have started release yet.  This
	 * implies that if we observe !CSS_RELEASED on @pos in this RCU
	 * critical section, the one pointed to by its next pointer is
	 * guaranteed to not have finished its RCU grace period even if we
	 * have dropped rcu_read_lock() in between iterations.
	 *
	 * If @pos has CSS_RELEASED set, its next pointer can't be
	 * dereferenced; however, as each css is given a monotonically
	 * increasing unique serial number and always appended to the
	 * sibling list, the next one can be found by walking the parent's
	 * children until the first css with higher serial number than
	 * @pos's.  While this path can be slower, it happens iff iteration
	 * races against release and the race window is very small.
	 */
	if (!pos) {
		next = list_entry_rcu(parent->children.next, struct cgroup_subsys_state, sibling);
	} else if (likely(!(pos->flags & CSS_RELEASED))) {
		next = list_entry_rcu(pos->sibling.next, struct cgroup_subsys_state, sibling);
	} else {
		list_for_each_entry_rcu(next, &parent->children, sibling)
			if (next->serial_nr > pos->serial_nr)
				break;
	}

	/*
	 * @next, if not pointing to the head, can be dereferenced and is
	 * the next sibling.
	 */
	if (&next->sibling != &parent->children)
		return next;
	return NULL;
}
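
/*
 * Example (illustrative sketch): css_next_child() is normally consumed
 * through the css_for_each_child() iterator, typically under the RCU
 * read lock; "parent_css" and "visit" below are placeholders:
 *
 *	struct cgroup_subsys_state *child;
 *
 *	rcu_read_lock();
 *	css_for_each_child(child, parent_css)
 *		visit(child);
 *	rcu_read_unlock();
 */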

/**
 * css_next_descendant_pre - find the next descendant for pre-order walk
 * @pos: the current position (%NULL to initiate traversal)
 * @root: css whose descendants to walk
 *
 * To be used by css_for_each_descendant_pre().  Find the next descendant
 * to visit for pre-order traversal of @root's descendants.  @root is
 * included in the iteration and the first node to be visited.
 *
 * While this function requires cgroup_mutex or RCU read locking, it
 * doesn't require the whole traversal to be contained in a single critical
 * section.  This function will return the correct next descendant as long
 * as both @pos and @root are accessible and @pos is a descendant of @root.
 *
 * If a subsystem synchronizes ->css_online() and the start of iteration, a
 * css which finished ->css_online() is guaranteed to be visible in the
 * future iterations and will stay visible until the last reference is put.
 * A css which hasn't finished ->css_online() or already finished
 * ->css_offline() may show up during traversal.  It's each subsystem's
 * responsibility to synchronize against on/offlining.
 */
struct cgroup_subsys_state *
css_next_descendant_pre(struct cgroup_subsys_state *pos,
			struct cgroup_subsys_state *root)
{
	struct cgroup_subsys_state *next;

	cgroup_assert_mutex_or_rcu_locked();

	/* if first iteration, visit @root */
	if (!pos)
		return root;

	/* visit the first child if exists */
	next = css_next_child(NULL, pos);
	if (next)
		return next;

	/* no child, visit my or the closest ancestor's next sibling */
	while (pos != root) {
		next = css_next_child(pos, pos->parent);
		if (next)
			return next;
		pos = pos->parent;
	}

	return NULL;
}
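
/*
 * Example (illustrative sketch): a pre-order walk visits every css
 * before any of its descendants, which suits top-down operations such
 * as propagating configuration; "root_css" and "visit" are placeholders:
 *
 *	struct cgroup_subsys_state *pos;
 *
 *	rcu_read_lock();
 *	css_for_each_descendant_pre(pos, root_css)
 *		visit(pos);
 *	rcu_read_unlock();
 *
 * Ancestors are always visited before their descendants, so state can be
 * pushed downward as the walk proceeds.
 */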

/**
 * css_rightmost_descendant - return the rightmost descendant of a css
 * @pos: css of interest
 *
 * Return the rightmost descendant of @pos.  If there's no descendant, @pos
 * is returned.  This can be used during pre-order traversal to skip
 * subtree of @pos.
 *
 * While this function requires cgroup_mutex or RCU read locking, it
 * doesn't require the whole traversal to be contained in a single critical
 * section.  This function will return the correct rightmost descendant as
 * long as @pos is accessible.
 */
struct cgroup_subsys_state *
css_rightmost_descendant(struct cgroup_subsys_state *pos)
{
	struct cgroup_subsys_state *last, *tmp;

	cgroup_assert_mutex_or_rcu_locked();

	do {
		last = pos;
		/* ->prev isn't RCU safe, walk ->next till the end */
		pos = NULL;
		css_for_each_child(tmp, last)
			pos = tmp;
	} while (pos);

	return last;
}

static struct cgroup_subsys_state *
css_leftmost_descendant(struct cgroup_subsys_state *pos)
{
	struct cgroup_subsys_state *last;

	do {
		last = pos;
		pos = css_next_child(NULL, pos);
	} while (pos);

	return last;
}

/**
 * css_next_descendant_post - find the next descendant for post-order walk
 * @pos: the current position (%NULL to initiate traversal)
 * @root: css whose descendants to walk
 *
 * To be used by css_for_each_descendant_post().  Find the next descendant
 * to visit for post-order traversal of @root's descendants.  @root is
 * included in the iteration and the last node to be visited.
 *
 * While this function requires cgroup_mutex or RCU read locking, it
 * doesn't require the whole traversal to be contained in a single critical
 * section.  This function will return the correct next descendant as long
 * as both @pos and @root are accessible and @pos is a descendant of
 * @root.
 *
 * If a subsystem synchronizes ->css_online() and the start of iteration, a
 * css which finished ->css_online() is guaranteed to be visible in the
 * future iterations and will stay visible until the last reference is put.
 * A css which hasn't finished ->css_online() or already finished
 * ->css_offline() may show up during traversal.  It's each subsystem's
 * responsibility to synchronize against on/offlining.
 */
struct cgroup_subsys_state *
css_next_descendant_post(struct cgroup_subsys_state *pos,
			 struct cgroup_subsys_state *root)
{
	struct cgroup_subsys_state *next;

	cgroup_assert_mutex_or_rcu_locked();

	/* if first iteration, visit leftmost descendant which may be @root */
	if (!pos)
		return css_leftmost_descendant(root);

	/* if we visited @root, we're done */
	if (pos == root)
		return NULL;

	/* if there's an unvisited sibling, visit its leftmost descendant */
	next = css_next_child(pos, pos->parent);
	if (next)
		return css_leftmost_descendant(next);

	/* no sibling left, visit parent */
	return pos->parent;
}
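
/*
 * Example (illustrative sketch): a post-order walk visits all of a css's
 * descendants before the css itself, which suits bottom-up operations
 * such as aggregating or tearing down state; names are placeholders:
 *
 *	struct cgroup_subsys_state *pos;
 *
 *	rcu_read_lock();
 *	css_for_each_descendant_post(pos, root_css)
 *		visit(pos);
 *	rcu_read_unlock();
 */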

/**
 * css_has_online_children - does a css have online children
 * @css: the target css
 *
 * Returns %true if @css has any online children; otherwise, %false.  This
 * function can be called from any context but the caller is responsible
 * for synchronizing against on/offlining as necessary.
 */
bool css_has_online_children(struct cgroup_subsys_state *css)
{
	struct cgroup_subsys_state *child;
	bool ret = false;

	rcu_read_lock();
	css_for_each_child(child, css) {
		if (child->flags & CSS_ONLINE) {
			ret = true;
			break;
		}
	}
	rcu_read_unlock();
	return ret;
}

/**
 * css_task_iter_advance_css_set - advance a task iterator to the next css_set
 * @it: the iterator to advance
 *
 * Advance @it to the next css_set to walk.
 */
static void css_task_iter_advance_css_set(struct css_task_iter *it)
{
	struct list_head *l = it->cset_pos;
	struct cgrp_cset_link *link;
	struct css_set *cset;

	lockdep_assert_held(&css_set_lock);

	/* Advance to the next non-empty css_set */
	do {
		l = l->next;
		if (l == it->cset_head) {
			it->cset_pos = NULL;
			it->task_pos = NULL;
			return;
		}

		if (it->ss) {
			cset = container_of(l, struct css_set,
					    e_cset_node[it->ss->id]);
		} else {
			link = list_entry(l, struct cgrp_cset_link, cset_link);
			cset = link->cset;
		}
	} while (!css_set_populated(cset));

	it->cset_pos = l;

	if (!list_empty(&cset->tasks))
		it->task_pos = cset->tasks.next;
	else
		it->task_pos = cset->mg_tasks.next;

	it->tasks_head = &cset->tasks;
	it->mg_tasks_head = &cset->mg_tasks;

	/*
	 * We don't keep css_sets locked across iteration steps and thus
	 * need to take steps to ensure that iteration can be resumed after
	 * the lock is re-acquired.  Iteration is performed at two levels -
	 * css_sets and tasks in them.
	 *
	 * Once created, a css_set never leaves its cgroup lists, so a
	 * pinned css_set is guaranteed to stay put and we can resume
	 * iteration afterwards.
	 *
	 * Tasks may leave @cset across iteration steps.  This is resolved
	 * by registering each iterator with the css_set currently being
	 * walked and making css_set_move_task() advance iterators whose
	 * next task is leaving.
	 */
	if (it->cur_cset) {
		list_del(&it->iters_node);
		put_css_set_locked(it->cur_cset);
	}
	get_css_set(cset);
	it->cur_cset = cset;
	list_add(&it->iters_node, &cset->task_iters);
}

static void css_task_iter_advance(struct css_task_iter *it)
{
	struct list_head *l = it->task_pos;

	lockdep_assert_held(&css_set_lock);
	WARN_ON_ONCE(!l);

	/*
	 * Advance iterator to find next entry.  cset->tasks is consumed
	 * first and then ->mg_tasks.  After ->mg_tasks, we move onto the
	 * next cset.
	 */
	l = l->next;

	if (l == it->tasks_head)
		l = it->mg_tasks_head->next;

	if (l == it->mg_tasks_head)
		css_task_iter_advance_css_set(it);
	else
		it->task_pos = l;
}

/**
 * css_task_iter_start - initiate task iteration
 * @css: the css to walk tasks of
 * @it: the task iterator to use
 *
 * Initiate iteration through the tasks of @css.  The caller can call
 * css_task_iter_next() to walk through the tasks until the function
 * returns NULL.  On completion of iteration, css_task_iter_end() must be
 * called.
 */
void css_task_iter_start(struct cgroup_subsys_state *css,
			 struct css_task_iter *it)
{
	/* no one should try to iterate before mounting cgroups */
	WARN_ON_ONCE(!use_task_css_set_links);

	memset(it, 0, sizeof(*it));

	spin_lock_bh(&css_set_lock);

	it->ss = css->ss;

	if (it->ss)
		it->cset_pos = &css->cgroup->e_csets[css->ss->id];
	else
		it->cset_pos = &css->cgroup->cset_links;

	it->cset_head = it->cset_pos;

	css_task_iter_advance_css_set(it);

	spin_unlock_bh(&css_set_lock);
}

/**
 * css_task_iter_next - return the next task for the iterator
 * @it: the task iterator being iterated
 *
 * The "next" function for task iteration.  @it should have been
 * initialized via css_task_iter_start().  Returns NULL when the iteration
 * reaches the end.
 */
struct task_struct *css_task_iter_next(struct css_task_iter *it)
{
	if (it->cur_task) {
		put_task_struct(it->cur_task);
		it->cur_task = NULL;
	}

	spin_lock_bh(&css_set_lock);

	if (it->task_pos) {
		it->cur_task = list_entry(it->task_pos, struct task_struct,
					  cg_list);
		get_task_struct(it->cur_task);
		css_task_iter_advance(it);
	}

	spin_unlock_bh(&css_set_lock);

	return it->cur_task;
}

/**
 * css_task_iter_end - finish task iteration
 * @it: the task iterator to finish
 *
 * Finish task iteration started by css_task_iter_start().
 */
void css_task_iter_end(struct css_task_iter *it)
{
	if (it->cur_cset) {
		spin_lock_bh(&css_set_lock);
		list_del(&it->iters_node);
		put_css_set_locked(it->cur_cset);
		spin_unlock_bh(&css_set_lock);
	}

	if (it->cur_task)
		put_task_struct(it->cur_task);
}
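
/*
 * Example (illustrative sketch): the canonical start/next/end pattern.
 * css_task_iter_next() drops the previous task's reference on the
 * following call, so the loop body doesn't need an explicit put;
 * "visit" is a placeholder:
 *
 *	struct css_task_iter it;
 *	struct task_struct *task;
 *
 *	css_task_iter_start(css, &it);
 *	while ((task = css_task_iter_next(&it)))
 *		visit(task);
 *	css_task_iter_end(&it);
 *
 * cgroup_transfer_tasks() below is a real user of this pattern.
 */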

/**
 * cgroup_transfer_tasks - move tasks from one cgroup to another
 * @to: cgroup to which the tasks will be moved
 * @from: cgroup in which the tasks currently reside
 *
 * Locking rules between cgroup_post_fork() and the migration path
 * guarantee that, if a task is forking while being migrated, the new child
 * is guaranteed to be either visible in the source cgroup after the
 * parent's migration is complete or put into the target cgroup.  No task
 * can slip out of migration through forking.
 */
int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from)
{
	LIST_HEAD(preloaded_csets);
	struct cgrp_cset_link *link;
	struct css_task_iter it;
	struct task_struct *task;
	int ret;

	mutex_lock(&cgroup_mutex);

	/* all tasks in @from are being moved, all csets are source */
	spin_lock_bh(&css_set_lock);
	list_for_each_entry(link, &from->cset_links, cset_link)
		cgroup_migrate_add_src(link->cset, to, &preloaded_csets);
	spin_unlock_bh(&css_set_lock);

	ret = cgroup_migrate_prepare_dst(to, &preloaded_csets);
	if (ret)
		goto out_err;

	/*
	 * Migrate tasks one-by-one until @from is empty.  This fails iff
	 * ->can_attach() fails.
	 */
	do {
		css_task_iter_start(&from->self, &it);
		task = css_task_iter_next(&it);
		if (task)
			get_task_struct(task);
		css_task_iter_end(&it);

		if (task) {
			ret = cgroup_migrate(task, false, to);
			put_task_struct(task);
		}
	} while (task && !ret);
out_err:
	cgroup_migrate_finish(&preloaded_csets);
	mutex_unlock(&cgroup_mutex);
	return ret;
}

/*
 * Stuff for reading the 'tasks'/'procs' files.
 *
 * Reading this file can return large amounts of data if a cgroup has
 * *lots* of attached tasks. So it may need several calls to read(),
 * but we cannot guarantee that the information we produce is correct
 * unless we produce it entirely atomically.
 *
 */

/* which pidlist file are we talking about? */
enum cgroup_filetype {
	CGROUP_FILE_PROCS,
	CGROUP_FILE_TASKS,
};

/*
 * A pidlist is a list of pids that virtually represents the contents of one
 * of the cgroup files ("procs" or "tasks"). We keep a list of such pidlists,
 * a pair (one each for procs, tasks) for each pid namespace that's relevant
 * to the cgroup.
 */
struct cgroup_pidlist {
	/*
	 * used to find which pidlist is wanted. doesn't change as long as
	 * this particular list stays in the list.
	 */
	struct { enum cgroup_filetype type; struct pid_namespace *ns; } key;
	/* array of xids */
	pid_t *list;
	/* how many elements the above list has */
	int length;
	/* each of these stored in a list by its cgroup */
	struct list_head links;
	/* pointer to the cgroup we belong to, for list removal purposes */
	struct cgroup *owner;
	/* for delayed destruction */
	struct delayed_work destroy_dwork;
};

/*
 * The following two functions "fix" the issue where there are more pids
 * than kmalloc will give memory for; in such cases, we use vmalloc/vfree.
 * TODO: replace with a kernel-wide solution to this problem
 */
#define PIDLIST_TOO_LARGE(c) ((c) * sizeof(pid_t) > (PAGE_SIZE * 2))
static void *pidlist_allocate(int count)
{
	if (PIDLIST_TOO_LARGE(count))
		return vmalloc(count * sizeof(pid_t));
	else
		return kmalloc(count * sizeof(pid_t), GFP_KERNEL);
}

static void pidlist_free(void *p)
{
	kvfree(p);
}

/*
 * Used to destroy all pidlists lingering waiting for destroy timer.  None
 * should be left afterwards.
 */
static void cgroup_pidlist_destroy_all(struct cgroup *cgrp)
{
	struct cgroup_pidlist *l, *tmp_l;

	mutex_lock(&cgrp->pidlist_mutex);
	list_for_each_entry_safe(l, tmp_l, &cgrp->pidlists, links)
		mod_delayed_work(cgroup_pidlist_destroy_wq, &l->destroy_dwork, 0);
	mutex_unlock(&cgrp->pidlist_mutex);

	flush_workqueue(cgroup_pidlist_destroy_wq);
	BUG_ON(!list_empty(&cgrp->pidlists));
}

static void cgroup_pidlist_destroy_work_fn(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct cgroup_pidlist *l = container_of(dwork, struct cgroup_pidlist,
						destroy_dwork);
	struct cgroup_pidlist *tofree = NULL;

	mutex_lock(&l->owner->pidlist_mutex);

	/*
	 * Destroy iff we didn't get queued again.  The state won't change
	 * as destroy_dwork can only be queued while locked.
	 */
	if (!delayed_work_pending(dwork)) {
		list_del(&l->links);
		pidlist_free(l->list);
		put_pid_ns(l->key.ns);
		tofree = l;
	}

	mutex_unlock(&l->owner->pidlist_mutex);
	kfree(tofree);
}

/*
 * pidlist_uniq - given a kmalloc()ed list, strip out all duplicate entries
 * Returns the number of unique elements.
 */
static int pidlist_uniq(pid_t *list, int length)
{
	int src, dest = 1;

	/*
	 * we presume the 0th element is unique, so src starts at 1. trivial
	 * edge cases first; no work needs to be done for either
	 */
	if (length == 0 || length == 1)
		return length;
	/* src and dest walk down the list; dest counts unique elements */
	for (src = 1; src < length; src++) {
		/* find next unique element */
		while (list[src] == list[src-1]) {
			src++;
			if (src == length)
				goto after;
		}
		/* dest always points to where the next unique element goes */
		list[dest] = list[src];
		dest++;
	}
after:
	return dest;
}
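
/*
 * Worked example (illustrative): given the sorted array {1, 1, 2, 3, 3},
 * pidlist_uniq() compacts it in place so the first three entries are
 * {1, 2, 3} and returns 3; only those entries are valid afterwards.
 */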

/*
 * The two pid files - tasks and cgroup.procs - guaranteed that the result
 * is sorted, which forced this whole pidlist fiasco.  As pid order is
 * different per namespace, each namespace needs differently sorted list,
 * making it impossible to use, for example, single rbtree of member tasks
 * sorted by task pointer.  As pidlists can be fairly large, allocating one
 * per open file is dangerous, so cgroup had to implement shared pool of
 * pidlists keyed by cgroup and namespace.
 *
 * All this extra complexity was caused by the original implementation
 * committing to an entirely unnecessary property.  In the long term, we
 * want to do away with it.  Explicitly scramble sort order if on the
 * default hierarchy so that no such expectation exists in the new
 * interface.
 *
 * Scrambling is done by swapping every two consecutive bits, which is
 * non-identity one-to-one mapping which disturbs sort order sufficiently.
 */
static pid_t pid_fry(pid_t pid)
{
	unsigned a = pid & 0x55555555;
	unsigned b = pid & 0xAAAAAAAA;

	return (a << 1) | (b >> 1);
}
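
/*
 * Worked example (illustrative): pid_fry() swaps each pair of adjacent
 * bits, so 1 (0b01) and 2 (0b10) trade places, 3 (0b11) maps to itself
 * and 5 (0b0101) becomes 10 (0b1010).  The mapping is its own inverse.
 */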

static pid_t cgroup_pid_fry(struct cgroup *cgrp, pid_t pid)
{
	if (cgroup_on_dfl(cgrp))
		return pid_fry(pid);
	else
		return pid;
}

static int cmppid(const void *a, const void *b)
{
	return *(pid_t *)a - *(pid_t *)b;
}

static int fried_cmppid(const void *a, const void *b)
{
	return pid_fry(*(pid_t *)a) - pid_fry(*(pid_t *)b);
}

static struct cgroup_pidlist *cgroup_pidlist_find(struct cgroup *cgrp,
						  enum cgroup_filetype type)
{
	struct cgroup_pidlist *l;
	/* don't need task_nsproxy() if we're looking at ourself */
	struct pid_namespace *ns = task_active_pid_ns(current);

	lockdep_assert_held(&cgrp->pidlist_mutex);

	list_for_each_entry(l, &cgrp->pidlists, links)
		if (l->key.type == type && l->key.ns == ns)
			return l;
	return NULL;
}

/*
 * find the appropriate pidlist for our purpose (given procs vs tasks)
 * returns with the lock on that pidlist already held, and takes care
 * of the use count, or returns NULL with no locks held if we're out of
 * memory.
 */
static struct cgroup_pidlist *cgroup_pidlist_find_create(struct cgroup *cgrp,
						enum cgroup_filetype type)
{
	struct cgroup_pidlist *l;

	lockdep_assert_held(&cgrp->pidlist_mutex);

	l = cgroup_pidlist_find(cgrp, type);
	if (l)
		return l;

	/* entry not found; create a new one */
	l = kzalloc(sizeof(struct cgroup_pidlist), GFP_KERNEL);
	if (!l)
		return l;

	INIT_DELAYED_WORK(&l->destroy_dwork, cgroup_pidlist_destroy_work_fn);
	l->key.type = type;
	/* don't need task_nsproxy() if we're looking at ourself */
	l->key.ns = get_pid_ns(task_active_pid_ns(current));
	l->owner = cgrp;
	list_add(&l->links, &cgrp->pidlists);
	return l;
}

/*
 * Load a cgroup's pidarray with either procs' tgids or tasks' pids
 */
static int pidlist_array_load(struct cgroup *cgrp, enum cgroup_filetype type,
			      struct cgroup_pidlist **lp)
{
	pid_t *array;
	int length;
	int pid, n = 0; /* used for populating the array */
	struct css_task_iter it;
	struct task_struct *tsk;
	struct cgroup_pidlist *l;

	lockdep_assert_held(&cgrp->pidlist_mutex);

	/*
	 * If cgroup gets more users after we read count, we won't have
	 * enough space - tough.  This race is indistinguishable to the
	 * caller from the case that the additional cgroup users didn't
	 * show up until sometime later on.
	 */
	length = cgroup_task_count(cgrp);
	array = pidlist_allocate(length);
	if (!array)
		return -ENOMEM;
	/* now, populate the array */
	css_task_iter_start(&cgrp->self, &it);
	while ((tsk = css_task_iter_next(&it))) {
		if (unlikely(n == length))
			break;
		/* get tgid or pid for procs or tasks file respectively */
		if (type == CGROUP_FILE_PROCS)
			pid = task_tgid_vnr(tsk);
		else
			pid = task_pid_vnr(tsk);
		if (pid > 0) /* make sure to only use valid results */
			array[n++] = pid;
	}
	css_task_iter_end(&it);
	length = n;
	/* now sort & (if procs) strip out duplicates */
	if (cgroup_on_dfl(cgrp))
		sort(array, length, sizeof(pid_t), fried_cmppid, NULL);
	else
		sort(array, length, sizeof(pid_t), cmppid, NULL);
	if (type == CGROUP_FILE_PROCS)
		length = pidlist_uniq(array, length);

	l = cgroup_pidlist_find_create(cgrp, type);
	if (!l) {
		pidlist_free(array);
		return -ENOMEM;
	}

	/* store array, freeing old if necessary */
	pidlist_free(l->list);
	l->list = array;
	l->length = length;
	*lp = l;
	return 0;
}

/**
 * cgroupstats_build - build and fill cgroupstats
 * @stats: cgroupstats to fill information into
 * @dentry: A dentry entry belonging to the cgroup for which stats have
 * been requested.
 *
 * Build and fill cgroupstats so that taskstats can export it to user
 * space.
 */
int cgroupstats_build(struct cgroupstats *stats, struct dentry *dentry)
{
	struct kernfs_node *kn = kernfs_node_from_dentry(dentry);
	struct cgroup *cgrp;
	struct css_task_iter it;
	struct task_struct *tsk;

	/* it should be kernfs_node belonging to cgroupfs and is a directory */
	if (dentry->d_sb->s_type != &cgroup_fs_type || !kn ||
	    kernfs_type(kn) != KERNFS_DIR)
		return -EINVAL;

	mutex_lock(&cgroup_mutex);

	/*
	 * We aren't being called from kernfs and there's no guarantee on
	 * @kn->priv's validity.  For this and css_tryget_online_from_dir(),
	 * @kn->priv is RCU safe.  Let's do the RCU dancing.
	 */
	rcu_read_lock();
	cgrp = rcu_dereference(kn->priv);
	if (!cgrp || cgroup_is_dead(cgrp)) {
		rcu_read_unlock();
		mutex_unlock(&cgroup_mutex);
		return -ENOENT;
	}
	rcu_read_unlock();

	css_task_iter_start(&cgrp->self, &it);
	while ((tsk = css_task_iter_next(&it))) {
		switch (tsk->state) {
		case TASK_RUNNING:
			stats->nr_running++;
			break;
		case TASK_INTERRUPTIBLE:
			stats->nr_sleeping++;
			break;
		case TASK_UNINTERRUPTIBLE:
			stats->nr_uninterruptible++;
			break;
		case TASK_STOPPED:
			stats->nr_stopped++;
			break;
		default:
			if (delayacct_is_task_waiting_on_io(tsk))
				stats->nr_io_wait++;
			break;
		}
	}
	css_task_iter_end(&it);

	mutex_unlock(&cgroup_mutex);
	return 0;
}

/*
 * seq_file methods for the tasks/procs files. The seq_file position is the
 * next pid to display; the seq_file iterator is a pointer to the pid
 * in the cgroup->l->list array.
 */

static void *cgroup_pidlist_start(struct seq_file *s, loff_t *pos)
{
	/*
	 * Initially we receive a position value that corresponds to
	 * one more than the last pid shown (or 0 on the first call or
	 * after a seek to the start). Use a binary-search to find the
	 * next pid to display, if any
	 */
	struct kernfs_open_file *of = s->private;
	struct cgroup *cgrp = seq_css(s)->cgroup;
	struct cgroup_pidlist *l;
	enum cgroup_filetype type = seq_cft(s)->private;
	int index = 0, pid = *pos;
	int *iter, ret;

	mutex_lock(&cgrp->pidlist_mutex);

	/*
	 * !NULL @of->priv indicates that this isn't the first start()
	 * after open.  If the matching pidlist is around, we can use that.
	 * Look for it.  Note that @of->priv can't be used directly.  It
	 * could already have been destroyed.
	 */
	if (of->priv)
		of->priv = cgroup_pidlist_find(cgrp, type);

	/*
	 * Either this is the first start() after open or the matching
	 * pidlist has been destroyed in between.  Create a new one.
	 */
	if (!of->priv) {
		ret = pidlist_array_load(cgrp, type,
					 (struct cgroup_pidlist **)&of->priv);
		if (ret)
			return ERR_PTR(ret);
	}
	l = of->priv;

	if (pid) {
		int end = l->length;

		while (index < end) {
			int mid = (index + end) / 2;
			if (cgroup_pid_fry(cgrp, l->list[mid]) == pid) {
				index = mid;
				break;
			} else if (cgroup_pid_fry(cgrp, l->list[mid]) <= pid)
				index = mid + 1;
			else
				end = mid;
		}
	}
	/* If we're off the end of the array, we're done */
	if (index >= l->length)
		return NULL;
	/* Update the abstract position to be the actual pid that we found */
	iter = l->list + index;
	*pos = cgroup_pid_fry(cgrp, *iter);
	return iter;
}

static void cgroup_pidlist_stop(struct seq_file *s, void *v)
{
	struct kernfs_open_file *of = s->private;
	struct cgroup_pidlist *l = of->priv;

	if (l)
		mod_delayed_work(cgroup_pidlist_destroy_wq, &l->destroy_dwork,
				 CGROUP_PIDLIST_DESTROY_DELAY);
	mutex_unlock(&seq_css(s)->cgroup->pidlist_mutex);
}

static void *cgroup_pidlist_next(struct seq_file *s, void *v, loff_t *pos)
{
	struct kernfs_open_file *of = s->private;
	struct cgroup_pidlist *l = of->priv;
	pid_t *p = v;
	pid_t *end = l->list + l->length;
	/*
	 * Advance to the next pid in the array. If this goes off the
	 * end, we're done
	 */
	p++;
	if (p >= end) {
		return NULL;
	} else {
		*pos = cgroup_pid_fry(seq_css(s)->cgroup, *p);
		return p;
	}
}

static int cgroup_pidlist_show(struct seq_file *s, void *v)
{
	seq_printf(s, "%d\n", *(int *)v);

	return 0;
}

static u64 cgroup_read_notify_on_release(struct cgroup_subsys_state *css,
					 struct cftype *cft)
{
	return notify_on_release(css->cgroup);
}

static int cgroup_write_notify_on_release(struct cgroup_subsys_state *css,
					  struct cftype *cft, u64 val)
{
	if (val)
		set_bit(CGRP_NOTIFY_ON_RELEASE, &css->cgroup->flags);
	else
		clear_bit(CGRP_NOTIFY_ON_RELEASE, &css->cgroup->flags);
	return 0;
}

static u64 cgroup_clone_children_read(struct cgroup_subsys_state *css,
				      struct cftype *cft)
{
	return test_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags);
}

static int cgroup_clone_children_write(struct cgroup_subsys_state *css,
				       struct cftype *cft, u64 val)
{
	if (val)
		set_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags);
	else
		clear_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags);
	return 0;
}

/* cgroup core interface files for the default hierarchy */
static struct cftype cgroup_dfl_base_files[] = {
	{
		.name = "cgroup.procs",
		.file_offset = offsetof(struct cgroup, procs_file),
		.seq_start = cgroup_pidlist_start,
		.seq_next = cgroup_pidlist_next,
		.seq_stop = cgroup_pidlist_stop,
		.seq_show = cgroup_pidlist_show,
		.private = CGROUP_FILE_PROCS,
		.write = cgroup_procs_write,
	},
	{
		.name = "cgroup.controllers",
		.flags = CFTYPE_ONLY_ON_ROOT,
		.seq_show = cgroup_root_controllers_show,
	},
	{
		.name = "cgroup.controllers",
		.flags = CFTYPE_NOT_ON_ROOT,
		.seq_show = cgroup_controllers_show,
	},
	{
		.name = "cgroup.subtree_control",
		.seq_show = cgroup_subtree_control_show,
		.write = cgroup_subtree_control_write,
	},
	{
		.name = "cgroup.events",
		.flags = CFTYPE_NOT_ON_ROOT,
		.file_offset = offsetof(struct cgroup, events_file),
		.seq_show = cgroup_events_show,
	},
	{ }	/* terminate */
};

/* cgroup core interface files for the legacy hierarchies */
static struct cftype cgroup_legacy_base_files[] = {
	{
		.name = "cgroup.procs",
		.seq_start = cgroup_pidlist_start,
		.seq_next = cgroup_pidlist_next,
		.seq_stop = cgroup_pidlist_stop,
		.seq_show = cgroup_pidlist_show,
		.private = CGROUP_FILE_PROCS,
		.write = cgroup_procs_write,
	},
	{
		.name = "cgroup.clone_children",
		.read_u64 = cgroup_clone_children_read,
		.write_u64 = cgroup_clone_children_write,
	},
	{
		.name = "cgroup.sane_behavior",
		.flags = CFTYPE_ONLY_ON_ROOT,
		.seq_show = cgroup_sane_behavior_show,
	},
	{
		.name = "tasks",
		.seq_start = cgroup_pidlist_start,
		.seq_next = cgroup_pidlist_next,
		.seq_stop = cgroup_pidlist_stop,
		.seq_show = cgroup_pidlist_show,
		.private = CGROUP_FILE_TASKS,
		.write = cgroup_tasks_write,
	},
	{
		.name = "notify_on_release",
		.read_u64 = cgroup_read_notify_on_release,
		.write_u64 = cgroup_write_notify_on_release,
	},
	{
		.name = "release_agent",
		.flags = CFTYPE_ONLY_ON_ROOT,
		.seq_show = cgroup_release_agent_show,
		.write = cgroup_release_agent_write,
		.max_write_len = PATH_MAX - 1,
	},
	{ }	/* terminate */
};

/*
 * css destruction is a four-stage process.
 *
 * 1. Destruction starts.  Killing of the percpu_ref is initiated.
 *    Implemented in kill_css().
 *
 * 2. When the percpu_ref is confirmed to be visible as killed on all CPUs
 *    and thus css_tryget_online() is guaranteed to fail, the css can be
 *    offlined by invoking offline_css().  After offlining, the base ref is
 *    put.  Implemented in css_killed_work_fn().
 *
 * 3. When the percpu_ref reaches zero, the only possible remaining
 *    accessors are inside RCU read sections.  css_release() schedules the
 *    RCU callback.
 *
 * 4. After the grace period, the css can be freed.  Implemented in
 *    css_free_work_fn().
 *
 * It is actually hairier because both step 2 and 4 require process context
 * and thus involve punting to css->destroy_work adding two additional
 * steps to the already complex sequence.
 */
static void css_free_work_fn(struct work_struct *work)
{
	struct cgroup_subsys_state *css =
		container_of(work, struct cgroup_subsys_state, destroy_work);
	struct cgroup_subsys *ss = css->ss;
	struct cgroup *cgrp = css->cgroup;

	percpu_ref_exit(&css->refcnt);

	if (ss) {
		/* css free path */
		struct cgroup_subsys_state *parent = css->parent;
		int id = css->id;

		ss->css_free(css);
		cgroup_idr_remove(&ss->css_idr, id);
		cgroup_put(cgrp);

		if (parent)
			css_put(parent);
	} else {
		/* cgroup free path */
		atomic_dec(&cgrp->root->nr_cgrps);
		cgroup_pidlist_destroy_all(cgrp);
		cancel_work_sync(&cgrp->release_agent_work);

		if (cgroup_parent(cgrp)) {
			/*
			 * We get a ref to the parent, and put the ref when
			 * this cgroup is being freed, so it's guaranteed
			 * that the parent won't be destroyed before its
			 * children.
			 */
			cgroup_put(cgroup_parent(cgrp));
			kernfs_put(cgrp->kn);
			kfree(cgrp);
		} else {
			/*
			 * This is root cgroup's refcnt reaching zero,
			 * which indicates that the root should be
			 * released.
			 */
			cgroup_destroy_root(cgrp->root);
		}
	}
}

static void css_free_rcu_fn(struct rcu_head *rcu_head)
{
	struct cgroup_subsys_state *css =
		container_of(rcu_head, struct cgroup_subsys_state, rcu_head);

	INIT_WORK(&css->destroy_work, css_free_work_fn);
	queue_work(cgroup_destroy_wq, &css->destroy_work);
}

static void css_release_work_fn(struct work_struct *work)
{
	struct cgroup_subsys_state *css =
		container_of(work, struct cgroup_subsys_state, destroy_work);
	struct cgroup_subsys *ss = css->ss;
	struct cgroup *cgrp = css->cgroup;

	mutex_lock(&cgroup_mutex);

	css->flags |= CSS_RELEASED;
	list_del_rcu(&css->sibling);

	if (ss) {
		/* css release path */
		cgroup_idr_replace(&ss->css_idr, NULL, css->id);
		if (ss->css_released)
			ss->css_released(css);
	} else {
		/* cgroup release path */
		cgroup_idr_remove(&cgrp->root->cgroup_idr, cgrp->id);
		cgrp->id = -1;

		/*
		 * There are two control paths which try to determine
		 * cgroup from dentry without going through kernfs -
		 * cgroupstats_build() and css_tryget_online_from_dir().
		 * Those are supported by RCU protecting clearing of
		 * cgrp->kn->priv backpointer.
		 */
		RCU_INIT_POINTER(*(void __rcu __force **)&cgrp->kn->priv, NULL);
	}

	mutex_unlock(&cgroup_mutex);

	call_rcu(&css->rcu_head, css_free_rcu_fn);
}

static void css_release(struct percpu_ref *ref)
{
	struct cgroup_subsys_state *css =
		container_of(ref, struct cgroup_subsys_state, refcnt);

	INIT_WORK(&css->destroy_work, css_release_work_fn);
	queue_work(cgroup_destroy_wq, &css->destroy_work);
}

static void init_and_link_css(struct cgroup_subsys_state *css,
			      struct cgroup_subsys *ss, struct cgroup *cgrp)
{
	lockdep_assert_held(&cgroup_mutex);

	cgroup_get(cgrp);

	memset(css, 0, sizeof(*css));
	css->cgroup = cgrp;
	css->ss = ss;
	INIT_LIST_HEAD(&css->sibling);
	INIT_LIST_HEAD(&css->children);
	css->serial_nr = css_serial_nr_next++;
	atomic_set(&css->online_cnt, 0);

	if (cgroup_parent(cgrp)) {
		css->parent = cgroup_css(cgroup_parent(cgrp), ss);
		css_get(css->parent);
	}

	BUG_ON(cgroup_css(cgrp, ss));
}

/* invoke ->css_online() on a new CSS and mark it online if successful */
static int online_css(struct cgroup_subsys_state *css)
{
	struct cgroup_subsys *ss = css->ss;
	int ret = 0;

	lockdep_assert_held(&cgroup_mutex);

	if (ss->css_online)
		ret = ss->css_online(css);
	if (!ret) {
		css->flags |= CSS_ONLINE;
		rcu_assign_pointer(css->cgroup->subsys[ss->id], css);

		atomic_inc(&css->online_cnt);
		if (css->parent)
			atomic_inc(&css->parent->online_cnt);
	}
	return ret;
}

/* if the CSS is online, invoke ->css_offline() on it and mark it offline */
static void offline_css(struct cgroup_subsys_state *css)
{
	struct cgroup_subsys *ss = css->ss;

	lockdep_assert_held(&cgroup_mutex);

	if (!(css->flags & CSS_ONLINE))
		return;

	if (ss->css_offline)
		ss->css_offline(css);

	css->flags &= ~CSS_ONLINE;
	RCU_INIT_POINTER(css->cgroup->subsys[ss->id], NULL);

	wake_up_all(&css->cgroup->offline_waitq);
}

/**
 * create_css - create a cgroup_subsys_state
 * @cgrp: the cgroup new css will be associated with
 * @ss: the subsys of new css
 * @visible: whether to create control knobs for the new css or not
 *
 * Create a new css associated with @cgrp - @ss pair.  On success, the new
 * css is online and installed in @cgrp with all interface files created if
 * @visible.  Returns 0 on success, -errno on failure.
 */
static int create_css(struct cgroup *cgrp, struct cgroup_subsys *ss,
		      bool visible)
{
	struct cgroup *parent = cgroup_parent(cgrp);
	struct cgroup_subsys_state *parent_css = cgroup_css(parent, ss);
	struct cgroup_subsys_state *css;
	int err;

	lockdep_assert_held(&cgroup_mutex);

	css = ss->css_alloc(parent_css);
	if (IS_ERR(css))
		return PTR_ERR(css);

	init_and_link_css(css, ss, cgrp);

	err = percpu_ref_init(&css->refcnt, css_release, 0, GFP_KERNEL);
	if (err)
		goto err_free_css;

	err = cgroup_idr_alloc(&ss->css_idr, NULL, 2, 0, GFP_KERNEL);
	if (err < 0)
		goto err_free_percpu_ref;
	css->id = err;

	if (visible) {
		err = css_populate_dir(css, NULL);
		if (err)
			goto err_free_id;
	}

	/* @css is ready to be brought online now, make it visible */
	list_add_tail_rcu(&css->sibling, &parent_css->children);
	cgroup_idr_replace(&ss->css_idr, css, css->id);

	err = online_css(css);
	if (err)
		goto err_list_del;

	if (ss->broken_hierarchy && !ss->warned_broken_hierarchy &&
	    cgroup_parent(parent)) {
		pr_warn("%s (%d) created nested cgroup for controller \"%s\" which has incomplete hierarchy support. Nested cgroups may change behavior in the future.\n",
			current->comm, current->pid, ss->name);
		if (!strcmp(ss->name, "memory"))
			pr_warn("\"memory\" requires setting use_hierarchy to 1 on the root\n");
		ss->warned_broken_hierarchy = true;
	}

	return 0;

err_list_del:
	list_del_rcu(&css->sibling);
	css_clear_dir(css, NULL);
err_free_id:
	cgroup_idr_remove(&ss->css_idr, css->id);
err_free_percpu_ref:
	percpu_ref_exit(&css->refcnt);
err_free_css:
	call_rcu(&css->rcu_head, css_free_rcu_fn);
	return err;
}

static int cgroup_mkdir(struct kernfs_node *parent_kn, const char *name,
			umode_t mode)
{
	struct cgroup *parent, *cgrp, *tcgrp;
	struct cgroup_root *root;
	struct cgroup_subsys *ss;
	struct kernfs_node *kn;
	int level, ssid, ret;

	/* Do not accept '\n' to prevent making /proc/<pid>/cgroup unparsable.
	 */
	if (strchr(name, '\n'))
		return -EINVAL;

	parent = cgroup_kn_lock_live(parent_kn);
	if (!parent)
		return -ENODEV;
	root = parent->root;
	level = parent->level + 1;

	/* allocate the cgroup and its ID, 0 is reserved for the root */
	cgrp = kzalloc(sizeof(*cgrp) +
		       sizeof(cgrp->ancestor_ids[0]) * (level + 1), GFP_KERNEL);
	if (!cgrp) {
		ret = -ENOMEM;
		goto out_unlock;
	}

	ret = percpu_ref_init(&cgrp->self.refcnt, css_release, 0, GFP_KERNEL);
	if (ret)
		goto out_free_cgrp;

	/*
	 * Temporarily set the pointer to NULL, so idr_find() won't return
	 * a half-baked cgroup.
	 */
	cgrp->id = cgroup_idr_alloc(&root->cgroup_idr, NULL, 2, 0, GFP_KERNEL);
	if (cgrp->id < 0) {
		ret = -ENOMEM;
		goto out_cancel_ref;
	}

	init_cgroup_housekeeping(cgrp);

	cgrp->self.parent = &parent->self;
	cgrp->root = root;
	cgrp->level = level;

	for (tcgrp = cgrp; tcgrp; tcgrp = cgroup_parent(tcgrp))
		cgrp->ancestor_ids[tcgrp->level] = tcgrp->id;

	if (notify_on_release(parent))
		set_bit(CGRP_NOTIFY_ON_RELEASE, &cgrp->flags);

	if (test_bit(CGRP_CPUSET_CLONE_CHILDREN, &parent->flags))
		set_bit(CGRP_CPUSET_CLONE_CHILDREN, &cgrp->flags);

	/* create the directory */
	kn = kernfs_create_dir(parent->kn, name, mode, cgrp);
	if (IS_ERR(kn)) {
		ret = PTR_ERR(kn);
		goto out_free_id;
	}
	cgrp->kn = kn;

	/*
	 * This extra ref will be put in cgroup_free_fn() and guarantees
	 * that @cgrp->kn is always accessible.
	 */
	kernfs_get(kn);

	cgrp->self.serial_nr = css_serial_nr_next++;

	/* allocation complete, commit to creation */
	list_add_tail_rcu(&cgrp->self.sibling, &cgroup_parent(cgrp)->self.children);
	atomic_inc(&root->nr_cgrps);
	cgroup_get(parent);

	/*
	 * @cgrp is now fully operational.  If something fails after this
	 * point, it'll be released via the normal destruction path.
	 */
	cgroup_idr_replace(&root->cgroup_idr, cgrp, cgrp->id);

	ret = cgroup_kn_set_ugid(kn);
	if (ret)
		goto out_destroy;

	ret = css_populate_dir(&cgrp->self, NULL);
	if (ret)
		goto out_destroy;

	/* let's create and online css's */
	for_each_subsys(ss, ssid) {
		if (parent->subtree_ss_mask & (1 << ssid)) {
			ret = create_css(cgrp, ss,
					 parent->subtree_control & (1 << ssid));
			if (ret)
				goto out_destroy;
		}
	}

	/*
	 * On the default hierarchy, a child doesn't automatically inherit
	 * subtree_control from the parent.  Each is configured manually.
	 */
	if (!cgroup_on_dfl(cgrp)) {
		cgrp->subtree_control = parent->subtree_control;
		cgroup_refresh_subtree_ss_mask(cgrp);
	}

	kernfs_activate(kn);

	ret = 0;
	goto out_unlock;

out_free_id:
	cgroup_idr_remove(&root->cgroup_idr, cgrp->id);
out_cancel_ref:
	percpu_ref_exit(&cgrp->self.refcnt);
out_free_cgrp:
	kfree(cgrp);
out_unlock:
	cgroup_kn_unlock(parent_kn);
	return ret;

out_destroy:
	cgroup_destroy_locked(cgrp);
	goto out_unlock;
}

/*
 * This is called when the refcnt of a css is confirmed to be killed.
 * css_tryget_online() is now guaranteed to fail.  Tell the subsystem to
 * initiate destruction and put the css ref from kill_css().
 */
static void css_killed_work_fn(struct work_struct *work)
{
	struct cgroup_subsys_state *css =
		container_of(work, struct cgroup_subsys_state, destroy_work);

	mutex_lock(&cgroup_mutex);

	do {
		offline_css(css);
		css_put(css);
		/* @css can't go away while we're holding cgroup_mutex */
		css = css->parent;
	} while (css && atomic_dec_and_test(&css->online_cnt));

	mutex_unlock(&cgroup_mutex);
}

/* css kill confirmation processing requires process context, bounce */
static void css_killed_ref_fn(struct percpu_ref *ref)
{
	struct cgroup_subsys_state *css =
		container_of(ref, struct cgroup_subsys_state, refcnt);

	if (atomic_dec_and_test(&css->online_cnt)) {
		INIT_WORK(&css->destroy_work, css_killed_work_fn);
		queue_work(cgroup_destroy_wq, &css->destroy_work);
	}
}

/**
 * kill_css - destroy a css
 * @css: css to destroy
 *
 * This function initiates destruction of @css by removing cgroup interface
 * files and putting its base reference.  ->css_offline() will be invoked
 * asynchronously once css_tryget_online() is guaranteed to fail and when
 * the reference count reaches zero, @css will be released.
 */
static void kill_css(struct cgroup_subsys_state *css)
{
	lockdep_assert_held(&cgroup_mutex);

	/*
	 * This must happen before css is disassociated with its cgroup.
	 * See seq_css() for details.
	 */
	css_clear_dir(css, NULL);

	/*
	 * Killing would put the base ref, but we need to keep it alive
	 * until after ->css_offline().
	 */
	css_get(css);

	/*
	 * cgroup core guarantees that, by the time ->css_offline() is
	 * invoked, no new css reference will be given out via
	 * css_tryget_online().  We can't simply call percpu_ref_kill() and
	 * proceed to offlining css's because percpu_ref_kill() doesn't
	 * guarantee that the ref is seen as killed on all CPUs on return.
	 *
	 * Use percpu_ref_kill_and_confirm() to get notifications as each
	 * css is confirmed to be seen as killed on all CPUs.
	 */
	percpu_ref_kill_and_confirm(&css->refcnt, css_killed_ref_fn);
}

/**
 * cgroup_destroy_locked - the first stage of cgroup destruction
 * @cgrp: cgroup to be destroyed
 *
 * css's make use of percpu refcnts whose killing latency shouldn't be
 * exposed to userland and are RCU protected.  Also, cgroup core needs to
5087 5088 5089
 * guarantee that css_tryget_online() won't succeed by the time
 * ->css_offline() is invoked.  To satisfy all the requirements,
 * destruction is implemented in the following two steps.
5090 5091 5092 5093 5094 5095 5096 5097 5098 5099 5100 5101 5102 5103 5104
 *
 * s1. Verify @cgrp can be destroyed and mark it dying.  Remove all
 *     userland visible parts and start killing the percpu refcnts of
 *     css's.  Set up so that the next stage will be kicked off once all
 *     the percpu refcnts are confirmed to be killed.
 *
 * s2. Invoke ->css_offline(), mark the cgroup dead and proceed with the
 *     rest of destruction.  Once all cgroup references are gone, the
 *     cgroup is RCU-freed.
 *
 * This function implements s1.  After this step, @cgrp is gone as far as
 * the userland is concerned and a new cgroup with the same name may be
 * created.  As cgroup doesn't care about the names internally, this
 * doesn't cause any problem.
 */
static int cgroup_destroy_locked(struct cgroup *cgrp)
	__releases(&cgroup_mutex) __acquires(&cgroup_mutex)
{
	struct cgroup_subsys_state *css;
	int ssid;

	lockdep_assert_held(&cgroup_mutex);

	/*
	 * Only migration can raise populated from zero and we're already
	 * holding cgroup_mutex.
	 */
	if (cgroup_is_populated(cgrp))
		return -EBUSY;

	/*
	 * Make sure there are no live children.  We can't test emptiness of
	 * ->self.children as dead children linger on it while being
	 * drained; otherwise, "rmdir parent/child parent" may fail.
	 */
	if (css_has_online_children(&cgrp->self))
		return -EBUSY;

	/*
	 * Mark @cgrp dead.  This prevents further task migration and child
	 * creation by disabling cgroup_lock_live_group().
	 */
	cgrp->self.flags &= ~CSS_ONLINE;

	/* initiate massacre of all css's */
	for_each_css(css, ssid, cgrp)
		kill_css(css);

	/*
	 * Remove @cgrp directory along with the base files.  @cgrp has an
	 * extra ref on its kn.
	 */
	kernfs_remove(cgrp->kn);

	check_for_release(cgroup_parent(cgrp));

	/* put the base reference */
	percpu_ref_kill(&cgrp->self.refcnt);

	return 0;
}

static int cgroup_rmdir(struct kernfs_node *kn)
{
	struct cgroup *cgrp;
	int ret = 0;

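	/* cgroup_kn_lock_live() returns NULL if @kn is already being removed */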
	cgrp = cgroup_kn_lock_live(kn);
	if (!cgrp)
		return 0;

	ret = cgroup_destroy_locked(cgrp);

	cgroup_kn_unlock(kn);
	return ret;
}

static struct kernfs_syscall_ops cgroup_kf_syscall_ops = {
	.remount_fs		= cgroup_remount,
	.show_options		= cgroup_show_options,
	.mkdir			= cgroup_mkdir,
	.rmdir			= cgroup_rmdir,
	.rename			= cgroup_rename,
};

static void __init cgroup_init_subsys(struct cgroup_subsys *ss, bool early)
{
	struct cgroup_subsys_state *css;

	pr_debug("Initializing cgroup subsys %s\n", ss->name);

	mutex_lock(&cgroup_mutex);

	idr_init(&ss->css_idr);
	INIT_LIST_HEAD(&ss->cfts);

	/* Create the root cgroup state for this subsystem */
	ss->root = &cgrp_dfl_root;
	css = ss->css_alloc(cgroup_css(&cgrp_dfl_root.cgrp, ss));
	/* We don't handle early failures gracefully */
	BUG_ON(IS_ERR(css));
	init_and_link_css(css, ss, &cgrp_dfl_root.cgrp);

	/*
	 * Root csses are never destroyed and we can't initialize
	 * percpu_ref during early init.  Disable refcnting.
	 */
	css->flags |= CSS_NO_REF;

	if (early) {
		/* allocation can't be done safely during early init */
		css->id = 1;
	} else {
		css->id = cgroup_idr_alloc(&ss->css_idr, css, 1, 2, GFP_KERNEL);
		BUG_ON(css->id < 0);
	}

	/*
	 * Update the init_css_set to contain a subsys pointer to this
	 * state - since the subsystem is newly registered, all tasks and
	 * hence the init_css_set is in the subsystem's root cgroup.
	 */
	init_css_set.subsys[ss->id] = css;

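	/*
	 * Cache which optional callbacks each subsystem implements so
	 * the fork/exit hot paths can cheaply skip the ones it lacks.
	 */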
	have_fork_callback |= (bool)ss->fork << ss->id;
	have_exit_callback |= (bool)ss->exit << ss->id;
	have_free_callback |= (bool)ss->free << ss->id;
	have_canfork_callback |= (bool)ss->can_fork << ss->id;

	/*
	 * At system boot, before all subsystems have been registered,
	 * no tasks have been forked, so we don't need to invoke fork
	 * callbacks here.
	 */
	BUG_ON(!list_empty(&init_task.tasks));

	BUG_ON(online_css(css));

	mutex_unlock(&cgroup_mutex);
}

/**
 * cgroup_init_early - cgroup initialization at system boot
 *
 * Initialize cgroups at system boot, and initialize any
 * subsystems that request early init.
 */
int __init cgroup_init_early(void)
{
	static struct cgroup_sb_opts __initdata opts;
	struct cgroup_subsys *ss;
	int i;

	init_cgroup_root(&cgrp_dfl_root, &opts);
	cgrp_dfl_root.cgrp.self.flags |= CSS_NO_REF;

	RCU_INIT_POINTER(init_task.cgroups, &init_css_set);

	for_each_subsys(ss, i) {
		WARN(!ss->css_alloc || !ss->css_free || ss->name || ss->id,
		     "invalid cgroup_subsys %d:%s css_alloc=%p css_free=%p name:id=%d:%s\n",
		     i, cgroup_subsys_name[i], ss->css_alloc, ss->css_free,
		     ss->id, ss->name);
		WARN(strlen(cgroup_subsys_name[i]) > MAX_CGROUP_TYPE_NAMELEN,
		     "cgroup_subsys_name %s too long\n", cgroup_subsys_name[i]);

		ss->id = i;
		ss->name = cgroup_subsys_name[i];
		if (!ss->legacy_name)
			ss->legacy_name = cgroup_subsys_name[i];

		if (ss->early_init)
			cgroup_init_subsys(ss, true);
	}
	return 0;
}

static unsigned long cgroup_disable_mask __initdata;

/**
 * cgroup_init - cgroup initialization
 *
 * Register cgroup filesystem and /proc file, and initialize
 * any subsystems that didn't request early init.
 */
int __init cgroup_init(void)
{
	struct cgroup_subsys *ss;
	unsigned long key;
	int ssid;

	BUG_ON(percpu_init_rwsem(&cgroup_threadgroup_rwsem));
	BUG_ON(cgroup_init_cftypes(NULL, cgroup_dfl_base_files));
	BUG_ON(cgroup_init_cftypes(NULL, cgroup_legacy_base_files));

	mutex_lock(&cgroup_mutex);

	/* Add init_css_set to the hash table */
	key = css_set_hash(init_css_set.subsys);
	hash_add(css_set_table, &init_css_set.hlist, key);

	BUG_ON(cgroup_setup_root(&cgrp_dfl_root, 0));

	mutex_unlock(&cgroup_mutex);

	for_each_subsys(ss, ssid) {
		if (ss->early_init) {
			struct cgroup_subsys_state *css =
				init_css_set.subsys[ss->id];

			css->id = cgroup_idr_alloc(&ss->css_idr, css, 1, 2,
						   GFP_KERNEL);
			BUG_ON(css->id < 0);
		} else {
			cgroup_init_subsys(ss, false);
		}

		list_add_tail(&init_css_set.e_cset_node[ssid],
			      &cgrp_dfl_root.cgrp.e_csets[ssid]);

		/*
		 * Setting dfl_root subsys_mask needs to consider the
		 * disabled flag and cftype registration needs kmalloc,
		 * both of which aren't available during early_init.
		 */
		if (cgroup_disable_mask & (1 << ssid)) {
			static_branch_disable(cgroup_subsys_enabled_key[ssid]);
			printk(KERN_INFO "Disabling %s control group subsystem\n",
			       ss->name);
			continue;
		}

		if (cgroup_ssid_no_v1(ssid))
			printk(KERN_INFO "Disabling %s control group subsystem in v1 mounts\n",
			       ss->name);

		cgrp_dfl_root.subsys_mask |= 1 << ss->id;

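		/*
		 * A controller without v2 interface files stays inhibited
		 * on the default hierarchy.
		 */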
		if (!ss->dfl_cftypes)
			cgrp_dfl_root_inhibit_ss_mask |= 1 << ss->id;

		if (ss->dfl_cftypes == ss->legacy_cftypes) {
			WARN_ON(cgroup_add_cftypes(ss, ss->dfl_cftypes));
		} else {
			WARN_ON(cgroup_add_dfl_cftypes(ss, ss->dfl_cftypes));
			WARN_ON(cgroup_add_legacy_cftypes(ss, ss->legacy_cftypes));
		}

		if (ss->bind)
			ss->bind(init_css_set.subsys[ssid]);
	}

	WARN_ON(sysfs_create_mount_point(fs_kobj, "cgroup"));
	WARN_ON(register_filesystem(&cgroup_fs_type));
	WARN_ON(register_filesystem(&cgroup2_fs_type));
	WARN_ON(!proc_create("cgroups", 0, NULL, &proc_cgroupstats_operations));

	return 0;
}

static int __init cgroup_wq_init(void)
{
	/*
	 * There isn't much point in executing destruction path in
	 * parallel.  Good chunk is serialized with cgroup_mutex anyway.
	 * Use 1 for @max_active.
	 *
	 * We would prefer to do this in cgroup_init() above, but that
	 * is called before init_workqueues(): so leave this until after.
	 */
	cgroup_destroy_wq = alloc_workqueue("cgroup_destroy", 0, 1);
	BUG_ON(!cgroup_destroy_wq);

	/*
	 * Used to destroy pidlists and separate to serve as flush domain.
	 * Cap @max_active to 1 too.
	 */
	cgroup_pidlist_destroy_wq = alloc_workqueue("cgroup_pidlist_destroy",
						    0, 1);
	BUG_ON(!cgroup_pidlist_destroy_wq);

	return 0;
}
core_initcall(cgroup_wq_init);

/*
 * proc_cgroup_show()
 *  - Print task's cgroup paths into seq_file, one line for each hierarchy
 *  - Used for /proc/<pid>/cgroup.
 */
int proc_cgroup_show(struct seq_file *m, struct pid_namespace *ns,
		     struct pid *pid, struct task_struct *tsk)
{
	char *buf, *path;
	int retval;
	struct cgroup_root *root;

	retval = -ENOMEM;
	buf = kmalloc(PATH_MAX, GFP_KERNEL);
	if (!buf)
		goto out;

	mutex_lock(&cgroup_mutex);
	spin_lock_bh(&css_set_lock);

	for_each_root(root) {
		struct cgroup_subsys *ss;
		struct cgroup *cgrp;
		int ssid, count = 0;

		if (root == &cgrp_dfl_root && !cgrp_dfl_root_visible)
			continue;

		seq_printf(m, "%d:", root->hierarchy_id);
		if (root != &cgrp_dfl_root)
			for_each_subsys(ss, ssid)
				if (root->subsys_mask & (1 << ssid))
					seq_printf(m, "%s%s", count++ ? "," : "",
						   ss->legacy_name);
		if (strlen(root->name))
			seq_printf(m, "%sname=%s", count ? "," : "",
				   root->name);
		seq_putc(m, ':');

		cgrp = task_cgroup_from_root(tsk, root);

		/*
		 * On traditional hierarchies, all zombie tasks show up as
		 * belonging to the root cgroup.  On the default hierarchy,
		 * while a zombie doesn't show up in "cgroup.procs" and
		 * thus can't be migrated, its /proc/PID/cgroup keeps
		 * reporting the cgroup it belonged to before exiting.  If
		 * the cgroup is removed before the zombie is reaped,
		 * " (deleted)" is appended to the cgroup path.
		 */
		if (cgroup_on_dfl(cgrp) || !(tsk->flags & PF_EXITING)) {
			path = cgroup_path(cgrp, buf, PATH_MAX);
			if (!path) {
				retval = -ENAMETOOLONG;
				goto out_unlock;
			}
		} else {
			path = "/";
		}

		seq_puts(m, path);

		if (cgroup_on_dfl(cgrp) && cgroup_is_dead(cgrp))
			seq_puts(m, " (deleted)\n");
		else
			seq_putc(m, '\n');
	}

	retval = 0;
out_unlock:
	spin_unlock_bh(&css_set_lock);
	mutex_unlock(&cgroup_mutex);
	kfree(buf);
out:
	return retval;
}

/* Display information about each subsystem and each hierarchy */
static int proc_cgroupstats_show(struct seq_file *m, void *v)
{
	struct cgroup_subsys *ss;
	int i;

	seq_puts(m, "#subsys_name\thierarchy\tnum_cgroups\tenabled\n");
	/*
	 * ideally we don't want subsystems moving around while we do this.
	 * cgroup_mutex is also necessary to guarantee an atomic snapshot of
	 * subsys/hierarchy state.
	 */
	mutex_lock(&cgroup_mutex);

	for_each_subsys(ss, i)
		seq_printf(m, "%s\t%d\t%d\t%d\n",
			   ss->legacy_name, ss->root->hierarchy_id,
			   atomic_read(&ss->root->nr_cgrps),
			   cgroup_ssid_enabled(i));

	mutex_unlock(&cgroup_mutex);
	return 0;
}

static int cgroupstats_open(struct inode *inode, struct file *file)
{
	return single_open(file, proc_cgroupstats_show, NULL);
}

static const struct file_operations proc_cgroupstats_operations = {
	.open = cgroupstats_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

/**
 * cgroup_fork - initialize cgroup related fields during copy_process()
 * @child: pointer to task_struct of the newly forked child process.
 *
 * A task is associated with the init_css_set until cgroup_post_fork()
 * attaches it to the parent's css_set.  Empty cg_list indicates that
 * @child isn't holding reference to its css_set.
 */
void cgroup_fork(struct task_struct *child)
{
	RCU_INIT_POINTER(child->cgroups, &init_css_set);
	INIT_LIST_HEAD(&child->cg_list);
}

/**
 * cgroup_can_fork - called on a new task before the process is exposed
 * @child: the task in question.
 *
 * This calls the subsystem can_fork() callbacks. If the can_fork() callback
 * returns an error, the fork aborts with that error code. This allows for
 * a cgroup subsystem to conditionally allow or deny new forks.
 */
int cgroup_can_fork(struct task_struct *child)
{
	struct cgroup_subsys *ss;
	int i, j, ret;

	for_each_subsys_which(ss, i, &have_canfork_callback) {
		ret = ss->can_fork(child);
		if (ret)
			goto out_revert;
	}

	return 0;

out_revert:
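	/*
	 * Revert only the subsystems that already granted the fork;
	 * subsystem @i is the one that just refused it.
	 */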
	for_each_subsys(ss, j) {
		if (j >= i)
			break;
		if (ss->cancel_fork)
			ss->cancel_fork(child);
	}

	return ret;
}

/**
 * cgroup_cancel_fork - called if a fork failed after cgroup_can_fork()
 * @child: the task in question
 *
 * This calls the cancel_fork() callbacks if a fork failed *after*
 * cgroup_can_fork() succeeded.
 */
void cgroup_cancel_fork(struct task_struct *child)
{
	struct cgroup_subsys *ss;
	int i;

	for_each_subsys(ss, i)
		if (ss->cancel_fork)
			ss->cancel_fork(child);
}

/**
 * cgroup_post_fork - called on a new task after adding it to the task list
 * @child: the task in question
 *
 * Adds the task to the list running through its css_set if necessary and
 * calls the subsystem fork() callbacks.  Has to be after the task is
 * visible on the task list in case we race with the first call to
 * css_task_iter_start() - to guarantee that the new task ends up on its
 * list.
 */
void cgroup_post_fork(struct task_struct *child)
{
	struct cgroup_subsys *ss;
	int i;

	/*
	 * This may race against cgroup_enable_task_cg_lists().  As that
	 * function sets use_task_css_set_links before grabbing
	 * tasklist_lock and we just went through tasklist_lock to add
	 * @child, it's guaranteed that either we see the set
	 * use_task_css_set_links or cgroup_enable_task_cg_lists() sees
	 * @child during its iteration.
	 *
	 * If we won the race, @child is associated with %current's
	 * css_set.  Grabbing css_set_lock guarantees both that the
	 * association is stable, and, on completion of the parent's
	 * migration, @child is visible in the source of migration or
	 * already in the destination cgroup.  This guarantee is necessary
	 * when implementing operations which need to migrate all tasks of
	 * a cgroup to another.
	 *
	 * Note that if we lose to cgroup_enable_task_cg_lists(), @child
	 * will remain in init_css_set.  This is safe because all tasks are
	 * in the init_css_set before cg_links is enabled and there's no
	 * operation which transfers all tasks out of init_css_set.
	 */
	if (use_task_css_set_links) {
		struct css_set *cset;

		spin_lock_bh(&css_set_lock);
		cset = task_css_set(current);
		if (list_empty(&child->cg_list)) {
			get_css_set(cset);
			css_set_move_task(child, NULL, cset, false);
		}
		spin_unlock_bh(&css_set_lock);
	}

	/*
	 * Call ss->fork().  This must happen after @child is linked on
	 * css_set; otherwise, @child might change state between ->fork()
	 * and addition to css_set.
	 */
	for_each_subsys_which(ss, i, &have_fork_callback)
		ss->fork(child);
}

/**
 * cgroup_exit - detach cgroup from exiting task
 * @tsk: pointer to task_struct of exiting process
 *
 * Description: Detach cgroup from @tsk and release it.
 *
 * Note that cgroups marked notify_on_release force every task in
 * them to take the global cgroup_mutex mutex when exiting.
 * This could impact scaling on very large systems.  Be reluctant to
 * use notify_on_release cgroups where very high task exit scaling
 * is required on large systems.
 *
 * We set the exiting tasks cgroup to the root cgroup (top_cgroup).  We
 * call cgroup_exit() while the task is still competent to handle
 * notify_on_release(), then leave the task attached to the root cgroup in
 * each hierarchy for the remainder of its exit.  No need to bother with
 * init_css_set refcnting.  init_css_set never goes away and we can't race
 * with migration path - PF_EXITING is visible to migration path.
 */
void cgroup_exit(struct task_struct *tsk)
{
	struct cgroup_subsys *ss;
	struct css_set *cset;
	int i;

	/*
	 * Unlink @tsk from its css_set.  As migration path can't race
	 * with us, we can check css_set and cg_list without synchronization.
	 */
	cset = task_css_set(tsk);

	if (!list_empty(&tsk->cg_list)) {
		spin_lock_bh(&css_set_lock);
		css_set_move_task(tsk, cset, NULL, false);
		spin_unlock_bh(&css_set_lock);
	} else {
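		/*
		 * @tsk never got onto a cg_list and thus holds no css_set
		 * reference; take one so the put in cgroup_free() is
		 * balanced either way.
		 */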
		get_css_set(cset);
	}

	/* see cgroup_post_fork() for details */
	for_each_subsys_which(ss, i, &have_exit_callback)
		ss->exit(tsk);
}

void cgroup_free(struct task_struct *task)
{
	struct css_set *cset = task_css_set(task);
	struct cgroup_subsys *ss;
	int ssid;

	for_each_subsys_which(ss, ssid, &have_free_callback)
		ss->free(task);

	put_css_set(cset);
}

static void check_for_release(struct cgroup *cgrp)
{
	if (notify_on_release(cgrp) && !cgroup_is_populated(cgrp) &&
	    !css_has_online_children(&cgrp->self) && !cgroup_is_dead(cgrp))
		schedule_work(&cgrp->release_agent_work);
}

/*
 * Notify userspace when a cgroup is released, by running the
 * configured release agent with the name of the cgroup (path
 * relative to the root of cgroup file system) as the argument.
 *
 * Most likely, this user command will try to rmdir this cgroup.
 *
 * This races with the possibility that some other task will be
 * attached to this cgroup before it is removed, or that some other
 * user task will 'mkdir' a child cgroup of this cgroup.  That's ok.
 * The presumed 'rmdir' will fail quietly if this cgroup is no longer
 * unused, and this cgroup will be reprieved from its death sentence,
 * to continue to serve a useful existence.  Next time it's released,
 * we will get notified again, if it still has 'notify_on_release' set.
 *
 * The final arg to call_usermodehelper() is UMH_WAIT_EXEC, which
 * means only wait until the task is successfully execve()'d.  The
 * separate release agent task is forked by call_usermodehelper(),
 * then control in this thread returns here, without waiting for the
 * release agent task.  We don't bother to wait because the caller of
 * this routine has no use for the exit status of the release agent
 * task, so no sense holding our caller up for that.
 */
static void cgroup_release_agent(struct work_struct *work)
{
	struct cgroup *cgrp =
		container_of(work, struct cgroup, release_agent_work);
	char *pathbuf = NULL, *agentbuf = NULL, *path;
	char *argv[3], *envp[3];

	mutex_lock(&cgroup_mutex);

	pathbuf = kmalloc(PATH_MAX, GFP_KERNEL);
	agentbuf = kstrdup(cgrp->root->release_agent_path, GFP_KERNEL);
	if (!pathbuf || !agentbuf)
		goto out;

	path = cgroup_path(cgrp, pathbuf, PATH_MAX);
	if (!path)
		goto out;

	argv[0] = agentbuf;
	argv[1] = path;
	argv[2] = NULL;

	/* minimal command environment */
	envp[0] = "HOME=/";
	envp[1] = "PATH=/sbin:/bin:/usr/sbin:/usr/bin";
	envp[2] = NULL;

	mutex_unlock(&cgroup_mutex);
	call_usermodehelper(argv[0], argv, envp, UMH_WAIT_EXEC);
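	/* cgroup_mutex is already dropped; skip the unlock on the error path */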
	goto out_free;
out:
	mutex_unlock(&cgroup_mutex);
out_free:
	kfree(agentbuf);
	kfree(pathbuf);
}

static int __init cgroup_disable(char *str)
{
	struct cgroup_subsys *ss;
	char *token;
	int i;

	while ((token = strsep(&str, ",")) != NULL) {
		if (!*token)
			continue;

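		/* a token may match either the v2 name or the legacy v1 name */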
		for_each_subsys(ss, i) {
			if (strcmp(token, ss->name) &&
			    strcmp(token, ss->legacy_name))
				continue;
			cgroup_disable_mask |= 1 << i;
		}
	}
	return 1;
}
__setup("cgroup_disable=", cgroup_disable);

static int __init cgroup_no_v1(char *str)
{
	struct cgroup_subsys *ss;
	char *token;
	int i;

	while ((token = strsep(&str, ",")) != NULL) {
		if (!*token)
			continue;

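		/* "all" hides every controller from v1 mounts */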
		if (!strcmp(token, "all")) {
			cgroup_no_v1_mask = ~0UL;
			break;
		}

		for_each_subsys(ss, i) {
			if (strcmp(token, ss->name) &&
			    strcmp(token, ss->legacy_name))
				continue;

			cgroup_no_v1_mask |= 1 << i;
		}
	}
	return 1;
}
__setup("cgroup_no_v1=", cgroup_no_v1);

/**
 * css_tryget_online_from_dir - get corresponding css from a cgroup dentry
 * @dentry: directory dentry of interest
 * @ss: subsystem of interest
 *
 * If @dentry is a directory for a cgroup which has @ss enabled on it, try
 * to get the corresponding css and return it.  If such css doesn't exist
 * or can't be pinned, an ERR_PTR value is returned.
 */
struct cgroup_subsys_state *css_tryget_online_from_dir(struct dentry *dentry,
						       struct cgroup_subsys *ss)
{
	struct kernfs_node *kn = kernfs_node_from_dentry(dentry);
	struct cgroup_subsys_state *css = NULL;
	struct cgroup *cgrp;

	/* is @dentry a cgroup dir? */
	if (dentry->d_sb->s_type != &cgroup_fs_type || !kn ||
	    kernfs_type(kn) != KERNFS_DIR)
		return ERR_PTR(-EBADF);

	rcu_read_lock();

	/*
	 * This path doesn't originate from kernfs and @kn could already
	 * have been or be removed at any point.  @kn->priv is RCU
	 * protected for this access.  See css_release_work_fn() for details.
	 */
	cgrp = rcu_dereference(kn->priv);
	if (cgrp)
		css = cgroup_css(cgrp, ss);

	if (!css || !css_tryget_online(css))
		css = ERR_PTR(-ENOENT);

	rcu_read_unlock();
	return css;
}

/**
 * css_from_id - lookup css by id
 * @id: the cgroup id
 * @ss: cgroup subsys to be looked into
 *
 * Returns the css if there's valid one with @id, otherwise returns NULL.
 * Should be called under rcu_read_lock().
 */
struct cgroup_subsys_state *css_from_id(int id, struct cgroup_subsys *ss)
{
	WARN_ON_ONCE(!rcu_read_lock_held());
	return id > 0 ? idr_find(&ss->css_idr, id) : NULL;
}

/**
 * cgroup_get_from_path - lookup and get a cgroup from its default hierarchy path
 * @path: path on the default hierarchy
 *
 * Find the cgroup at @path on the default hierarchy, increment its
 * reference count and return it.  Returns pointer to the found cgroup on
 * success, ERR_PTR(-ENOENT) if @path doesn't exist and ERR_PTR(-ENOTDIR)
 * if @path points to a non-directory.
 */
struct cgroup *cgroup_get_from_path(const char *path)
{
	struct kernfs_node *kn;
	struct cgroup *cgrp;

	mutex_lock(&cgroup_mutex);

	kn = kernfs_walk_and_get(cgrp_dfl_root.cgrp.kn, path);
	if (kn) {
		if (kernfs_type(kn) == KERNFS_DIR) {
			cgrp = kn->priv;
			cgroup_get(cgrp);
		} else {
			cgrp = ERR_PTR(-ENOTDIR);
		}
		kernfs_put(kn);
	} else {
		cgrp = ERR_PTR(-ENOENT);
	}

	mutex_unlock(&cgroup_mutex);
	return cgrp;
}
EXPORT_SYMBOL_GPL(cgroup_get_from_path);

/*
 * sock->sk_cgrp_data handling.  For more info, see sock_cgroup_data
 * definition in cgroup-defs.h.
 */
#ifdef CONFIG_SOCK_CGROUP_DATA

#if defined(CONFIG_CGROUP_NET_PRIO) || defined(CONFIG_CGROUP_NET_CLASSID)

DEFINE_SPINLOCK(cgroup_sk_update_lock);
static bool cgroup_sk_alloc_disabled __read_mostly;

void cgroup_sk_alloc_disable(void)
{
	if (cgroup_sk_alloc_disabled)
		return;
	pr_info("cgroup: disabling cgroup2 socket matching due to net_prio or net_cls activation\n");
	cgroup_sk_alloc_disabled = true;
}

#else

#define cgroup_sk_alloc_disabled	false

#endif

void cgroup_sk_alloc(struct sock_cgroup_data *skcd)
{
	if (cgroup_sk_alloc_disabled)
		return;

	rcu_read_lock();

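	/*
	 * current may be migrated concurrently, making the css_set's
	 * default-hierarchy cgroup go away underneath us; retry until
	 * a reference on a live cgroup is acquired.
	 */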
	while (true) {
		struct css_set *cset;

		cset = task_css_set(current);
		if (likely(cgroup_tryget(cset->dfl_cgrp))) {
			skcd->val = (unsigned long)cset->dfl_cgrp;
			break;
		}
		cpu_relax();
	}

	rcu_read_unlock();
}

void cgroup_sk_free(struct sock_cgroup_data *skcd)
{
	cgroup_put(sock_cgroup_ptr(skcd));
}

#endif	/* CONFIG_SOCK_CGROUP_DATA */

#ifdef CONFIG_CGROUP_DEBUG
static struct cgroup_subsys_state *
debug_css_alloc(struct cgroup_subsys_state *parent_css)
{
	struct cgroup_subsys_state *css = kzalloc(sizeof(*css), GFP_KERNEL);

	if (!css)
		return ERR_PTR(-ENOMEM);

	return css;
}

static void debug_css_free(struct cgroup_subsys_state *css)
{
	kfree(css);
}

static u64 debug_taskcount_read(struct cgroup_subsys_state *css,
				struct cftype *cft)
{
	return cgroup_task_count(css->cgroup);
}

static u64 current_css_set_read(struct cgroup_subsys_state *css,
				struct cftype *cft)
5941 5942 5943 5944
{
	return (u64)(unsigned long)current->cgroups;
}

static u64 current_css_set_refcount_read(struct cgroup_subsys_state *css,
					 struct cftype *cft)
5947 5948 5949 5950
{
	u64 count;

	rcu_read_lock();
	count = atomic_read(&task_css_set(current)->refcount);
	rcu_read_unlock();
	return count;
}

static int current_css_set_cg_links_read(struct seq_file *seq, void *v)
{
	struct cgrp_cset_link *link;
	struct css_set *cset;
	char *name_buf;

	name_buf = kmalloc(NAME_MAX + 1, GFP_KERNEL);
	if (!name_buf)
		return -ENOMEM;

	spin_lock_bh(&css_set_lock);
	rcu_read_lock();
	cset = rcu_dereference(current->cgroups);
	list_for_each_entry(link, &cset->cgrp_links, cgrp_link) {
		struct cgroup *c = link->cgrp;

		cgroup_name(c, name_buf, NAME_MAX + 1);
		seq_printf(seq, "Root %d group %s\n",
			   c->root->hierarchy_id, name_buf);
	}
	rcu_read_unlock();
	spin_unlock_bh(&css_set_lock);
	kfree(name_buf);
	return 0;
}

#define MAX_TASKS_SHOWN_PER_CSS 25
static int cgroup_css_links_read(struct seq_file *seq, void *v)
{
	struct cgroup_subsys_state *css = seq_css(seq);
	struct cgrp_cset_link *link;

	spin_lock_bh(&css_set_lock);
	list_for_each_entry(link, &css->cgroup->cset_links, cset_link) {
		struct css_set *cset = link->cset;
		struct task_struct *task;
		int count = 0;

		seq_printf(seq, "css_set %p\n", cset);

		list_for_each_entry(task, &cset->tasks, cg_list) {
			if (count++ > MAX_TASKS_SHOWN_PER_CSS)
				goto overflow;
			seq_printf(seq, "  task %d\n", task_pid_vnr(task));
		}

		list_for_each_entry(task, &cset->mg_tasks, cg_list) {
			if (count++ > MAX_TASKS_SHOWN_PER_CSS)
				goto overflow;
			seq_printf(seq, "  task %d\n", task_pid_vnr(task));
		}
		continue;
	overflow:
		seq_puts(seq, "  ...\n");
	}
	spin_unlock_bh(&css_set_lock);
	return 0;
}

static u64 releasable_read(struct cgroup_subsys_state *css, struct cftype *cft)
{
	return (!cgroup_is_populated(css->cgroup) &&
		!css_has_online_children(&css->cgroup->self));
}

static struct cftype debug_files[] =  {
	{
		.name = "taskcount",
		.read_u64 = debug_taskcount_read,
	},

	{
		.name = "current_css_set",
		.read_u64 = current_css_set_read,
	},

	{
		.name = "current_css_set_refcount",
		.read_u64 = current_css_set_refcount_read,
	},

	{
		.name = "current_css_set_cg_links",
		.seq_show = current_css_set_cg_links_read,
	},

	{
		.name = "cgroup_css_links",
		.seq_show = cgroup_css_links_read,
	},

	{
		.name = "releasable",
		.read_u64 = releasable_read,
	},

	{ }	/* terminate */
};

struct cgroup_subsys debug_cgrp_subsys = {
	.css_alloc = debug_css_alloc,
	.css_free = debug_css_free,
	.legacy_cftypes = debug_files,
};
#endif /* CONFIG_CGROUP_DEBUG */