/*
 *  Generic process-grouping system.
 *
 *  Based originally on the cpuset system, extracted by Paul Menage
 *  Copyright (C) 2006 Google, Inc
 *
 *  Notifications support
 *  Copyright (C) 2009 Nokia Corporation
 *  Author: Kirill A. Shutemov
 *
 *  Copyright notices from the original cpuset code:
 *  --------------------------------------------------
 *  Copyright (C) 2003 BULL SA.
 *  Copyright (C) 2004-2006 Silicon Graphics, Inc.
 *
 *  Portions derived from Patrick Mochel's sysfs code.
 *  sysfs is Copyright (c) 2001-3 Patrick Mochel
 *
 *  2003-10-10 Written by Simon Derr.
 *  2003-10-22 Updates by Stephen Hemminger.
 *  2004 May-July Rework by Paul Jackson.
 *  ---------------------------------------------------
 *
 *  This file is subject to the terms and conditions of the GNU General Public
 *  License.  See the file COPYING in the main directory of the Linux
 *  distribution for more details.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/cgroup.h>
#include <linux/cred.h>
#include <linux/ctype.h>
#include <linux/errno.h>
#include <linux/init_task.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/magic.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/mount.h>
#include <linux/pagemap.h>
#include <linux/proc_fs.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/percpu-rwsem.h>
#include <linux/string.h>
#include <linux/sort.h>
#include <linux/kmod.h>
#include <linux/delayacct.h>
#include <linux/cgroupstats.h>
#include <linux/hashtable.h>
#include <linux/pid_namespace.h>
#include <linux/idr.h>
#include <linux/vmalloc.h> /* TODO: replace with more sophisticated array */
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/atomic.h>
#include <linux/cpuset.h>
#include <net/sock.h>

/*
 * pidlists linger the following amount before being destroyed.  The goal
 * is avoiding frequent destruction in the middle of consecutive read calls.
 * Expiring in the middle is a performance problem, not a correctness one.
 * 1 sec should be enough.
 */
#define CGROUP_PIDLIST_DESTROY_DELAY	HZ

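/*
 * Buffer size for cgroup_file_name(): "<subsys-name>.<cftype-name>";
 * the +2 covers the '.' separator and the terminating NUL.
 */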
#define CGROUP_FILE_NAME_MAX		(MAX_CGROUP_TYPE_NAMELEN +	\
					 MAX_CFTYPE_NAME + 2)

/*
 * cgroup_mutex is the master lock.  Any modification to cgroup or its
 * hierarchy must be performed while holding it.
 *
 * css_set_lock protects task->cgroups pointer, the list of css_set
 * objects, and the chain of tasks off each css_set.
 *
 * These locks are exported if CONFIG_PROVE_RCU so that accessors in
 * cgroup.h can use them for lockdep annotations.
 */
#ifdef CONFIG_PROVE_RCU
DEFINE_MUTEX(cgroup_mutex);
DEFINE_SPINLOCK(css_set_lock);
EXPORT_SYMBOL_GPL(cgroup_mutex);
EXPORT_SYMBOL_GPL(css_set_lock);
#else
static DEFINE_MUTEX(cgroup_mutex);
static DEFINE_SPINLOCK(css_set_lock);
#endif

/*
 * Protects cgroup_idr and css_idr so that IDs can be released without
 * grabbing cgroup_mutex.
 */
static DEFINE_SPINLOCK(cgroup_idr_lock);

/*
 * Protects cgroup_file->kn for !self csses.  It synchronizes notifications
 * against file removal/re-creation across css hiding.
 */
static DEFINE_SPINLOCK(cgroup_file_kn_lock);

/*
 * Protects cgroup_root->release_agent_path.  Modifying it also requires
 * cgroup_mutex.  Reading requires either cgroup_mutex or this spinlock.
 */
static DEFINE_SPINLOCK(release_agent_path_lock);

struct percpu_rw_semaphore cgroup_threadgroup_rwsem;

#define cgroup_assert_mutex_or_rcu_locked()				\
	RCU_LOCKDEP_WARN(!rcu_read_lock_held() &&			\
			   !lockdep_is_held(&cgroup_mutex),		\
			   "cgroup_mutex or RCU read lock required");

/*
 * cgroup destruction makes heavy use of work items and there can be a lot
 * of concurrent destructions.  Use a separate workqueue so that cgroup
 * destruction work items don't end up filling up max_active of system_wq
 * which may lead to deadlock.
 */
static struct workqueue_struct *cgroup_destroy_wq;

/*
 * pidlist destructions need to be flushed on cgroup destruction.  Use a
 * separate workqueue as flush domain.
 */
static struct workqueue_struct *cgroup_pidlist_destroy_wq;

/* generate an array of cgroup subsystem pointers */
#define SUBSYS(_x) [_x ## _cgrp_id] = &_x ## _cgrp_subsys,
static struct cgroup_subsys *cgroup_subsys[] = {
#include <linux/cgroup_subsys.h>
};
#undef SUBSYS

/* array of cgroup subsystem names */
#define SUBSYS(_x) [_x ## _cgrp_id] = #_x,
static const char *cgroup_subsys_name[] = {
#include <linux/cgroup_subsys.h>
};
#undef SUBSYS

/* array of static_keys for cgroup_subsys_enabled() and cgroup_subsys_on_dfl() */
#define SUBSYS(_x)								\
	DEFINE_STATIC_KEY_TRUE(_x ## _cgrp_subsys_enabled_key);			\
	DEFINE_STATIC_KEY_TRUE(_x ## _cgrp_subsys_on_dfl_key);			\
	EXPORT_SYMBOL_GPL(_x ## _cgrp_subsys_enabled_key);			\
	EXPORT_SYMBOL_GPL(_x ## _cgrp_subsys_on_dfl_key);
#include <linux/cgroup_subsys.h>
#undef SUBSYS

#define SUBSYS(_x) [_x ## _cgrp_id] = &_x ## _cgrp_subsys_enabled_key,
static struct static_key_true *cgroup_subsys_enabled_key[] = {
#include <linux/cgroup_subsys.h>
};
#undef SUBSYS

#define SUBSYS(_x) [_x ## _cgrp_id] = &_x ## _cgrp_subsys_on_dfl_key,
static struct static_key_true *cgroup_subsys_on_dfl_key[] = {
#include <linux/cgroup_subsys.h>
};
#undef SUBSYS

/*
 * The default hierarchy, reserved for the subsystems that are otherwise
 * unattached - it never has more than a single cgroup, and all tasks are
 * part of that cgroup.
 */
struct cgroup_root cgrp_dfl_root;
EXPORT_SYMBOL_GPL(cgrp_dfl_root);

/*
 * The default hierarchy always exists but is hidden until mounted for the
 * first time.  This is for backward compatibility.
 */
static bool cgrp_dfl_root_visible;

/* Controllers blocked by the commandline in v1 */
static unsigned long cgroup_no_v1_mask;

/* some controllers are not supported in the default hierarchy */
static unsigned long cgrp_dfl_root_inhibit_ss_mask;

/* The list of hierarchy roots */

static LIST_HEAD(cgroup_roots);
static int cgroup_root_count;

/* hierarchy ID allocation and mapping, protected by cgroup_mutex */
static DEFINE_IDR(cgroup_hierarchy_idr);

/*
 * Assign a monotonically increasing serial number to csses.  It guarantees
 * cgroups with bigger numbers are newer than those with smaller numbers.
 * Also, as csses are always appended to the parent's ->children list, it
 * guarantees that sibling csses are always sorted in the ascending serial
 * number order on the list.  Protected by cgroup_mutex.
 */
static u64 css_serial_nr_next = 1;

/*
 * These bitmask flags indicate whether tasks in the fork and exit paths have
 * fork/exit handlers to call. This avoids us having to do extra work in the
 * fork/exit path to check which subsystems have fork/exit callbacks.
 */
static unsigned long have_fork_callback __read_mostly;
static unsigned long have_exit_callback __read_mostly;
static unsigned long have_free_callback __read_mostly;

/* Ditto for the can_fork callback. */
static unsigned long have_canfork_callback __read_mostly;

static struct file_system_type cgroup2_fs_type;
static struct cftype cgroup_dfl_base_files[];
static struct cftype cgroup_legacy_base_files[];

static int rebind_subsystems(struct cgroup_root *dst_root,
			     unsigned long ss_mask);
static void css_task_iter_advance(struct css_task_iter *it);
static int cgroup_destroy_locked(struct cgroup *cgrp);
static int create_css(struct cgroup *cgrp, struct cgroup_subsys *ss,
		      bool visible);
static void css_release(struct percpu_ref *ref);
static void kill_css(struct cgroup_subsys_state *css);
static int cgroup_addrm_files(struct cgroup_subsys_state *css,
			      struct cgroup *cgrp, struct cftype cfts[],
			      bool is_add);

/**
 * cgroup_ssid_enabled - cgroup subsys enabled test by subsys ID
 * @ssid: subsys ID of interest
 *
 * cgroup_subsys_enabled() can only be used with literal subsys names which
 * is fine for individual subsystems but unsuitable for cgroup core.  This
 * is a slower static_key_enabled() based test indexed by @ssid.
 */
static bool cgroup_ssid_enabled(int ssid)
{
	return static_key_enabled(cgroup_subsys_enabled_key[ssid]);
}

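/*
 * cgroup_ssid_no_v1 - test whether a subsystem is blocked on v1 hierarchies
 * @ssid: subsys ID of interest
 *
 * Returns %true if @ssid was blocked from the v1 hierarchies on the
 * kernel commandline (see cgroup_no_v1_mask above).
 */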
static bool cgroup_ssid_no_v1(int ssid)
{
	return cgroup_no_v1_mask & (1 << ssid);
}

/**
 * cgroup_on_dfl - test whether a cgroup is on the default hierarchy
 * @cgrp: the cgroup of interest
 *
 * The default hierarchy is the v2 interface of cgroup and this function
 * can be used to test whether a cgroup is on the default hierarchy for
 * cases where a subsystem should behave differently depending on the
 * interface version.
 *
 * The set of behaviors which change on the default hierarchy are still
 * being determined and the mount option is prefixed with __DEVEL__.
 *
 * List of changed behaviors:
 *
 * - Mount options "noprefix", "xattr", "clone_children", "release_agent"
 *   and "name" are disallowed.
 *
 * - When mounting an existing superblock, mount options should match.
 *
 * - Remount is disallowed.
 *
 * - rename(2) is disallowed.
 *
 * - "tasks" is removed.  Everything should be at process granularity.  Use
 *   "cgroup.procs" instead.
 *
 * - "cgroup.procs" is not sorted.  pids will be unique unless they got
 *   recycled in between reads.
 *
 * - "release_agent" and "notify_on_release" are removed.  Replacement
 *   notification mechanism will be implemented.
 *
 * - "cgroup.clone_children" is removed.
 *
 * - "cgroup.subtree_populated" is available.  Its value is 0 if the cgroup
 *   and its descendants contain no task; otherwise, 1.  The file also
 *   generates kernfs notification which can be monitored through poll and
 *   [di]notify when the value of the file changes.
 *
 * - cpuset: tasks will be kept in empty cpusets when hotplug happens and
 *   take masks of ancestors with non-empty cpus/mems, instead of being
 *   moved to an ancestor.
 *
 * - cpuset: a task can be moved into an empty cpuset, and again it takes
 *   masks of ancestors.
 *
 * - memcg: use_hierarchy is on by default and the cgroup file for the flag
 *   is not created.
 *
 * - blkcg: blk-throttle becomes properly hierarchical.
 *
 * - debug: disallowed on the default hierarchy.
 */
static bool cgroup_on_dfl(const struct cgroup *cgrp)
{
	return cgrp->root == &cgrp_dfl_root;
}

/* IDR wrappers which synchronize using cgroup_idr_lock */
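/*
 * cgroup_idr_lock is grabbed with bottom halves disabled and must not be
 * held across anything that may sleep.  idr_preload() pre-allocates the
 * memory outside the lock and __GFP_DIRECT_RECLAIM is masked off so that
 * idr_alloc() itself never blocks while the lock is held.
 */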
static int cgroup_idr_alloc(struct idr *idr, void *ptr, int start, int end,
			    gfp_t gfp_mask)
{
	int ret;

	idr_preload(gfp_mask);
	spin_lock_bh(&cgroup_idr_lock);
	ret = idr_alloc(idr, ptr, start, end, gfp_mask & ~__GFP_DIRECT_RECLAIM);
	spin_unlock_bh(&cgroup_idr_lock);
	idr_preload_end();
	return ret;
}

static void *cgroup_idr_replace(struct idr *idr, void *ptr, int id)
{
	void *ret;

	spin_lock_bh(&cgroup_idr_lock);
	ret = idr_replace(idr, ptr, id);
	spin_unlock_bh(&cgroup_idr_lock);
	return ret;
}

static void cgroup_idr_remove(struct idr *idr, int id)
{
	spin_lock_bh(&cgroup_idr_lock);
	idr_remove(idr, id);
	spin_unlock_bh(&cgroup_idr_lock);
}

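/* return the parent cgroup of @cgrp, or NULL if @cgrp is a root cgroup */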
static struct cgroup *cgroup_parent(struct cgroup *cgrp)
{
	struct cgroup_subsys_state *parent_css = cgrp->self.parent;

	if (parent_css)
		return container_of(parent_css, struct cgroup, self);
	return NULL;
}

/**
 * cgroup_css - obtain a cgroup's css for the specified subsystem
 * @cgrp: the cgroup of interest
 * @ss: the subsystem of interest (%NULL returns @cgrp->self)
 *
 * Return @cgrp's css (cgroup_subsys_state) associated with @ss.  This
 * function must be called either under cgroup_mutex or rcu_read_lock() and
 * the caller is responsible for pinning the returned css if it wants to
 * keep accessing it outside the said locks.  This function may return
 * %NULL if @cgrp doesn't have @subsys_id enabled.
 */
static struct cgroup_subsys_state *cgroup_css(struct cgroup *cgrp,
					      struct cgroup_subsys *ss)
{
	if (ss)
		return rcu_dereference_check(cgrp->subsys[ss->id],
					lockdep_is_held(&cgroup_mutex));
	else
		return &cgrp->self;
}

/**
 * cgroup_e_css - obtain a cgroup's effective css for the specified subsystem
 * @cgrp: the cgroup of interest
 * @ss: the subsystem of interest (%NULL returns @cgrp->self)
 *
 * Similar to cgroup_css() but returns the effective css, which is defined
 * as the matching css of the nearest ancestor including self which has @ss
 * enabled.  If @ss is associated with the hierarchy @cgrp is on, this
 * function is guaranteed to return non-NULL css.
 */
static struct cgroup_subsys_state *cgroup_e_css(struct cgroup *cgrp,
						struct cgroup_subsys *ss)
{
	lockdep_assert_held(&cgroup_mutex);

	if (!ss)
		return &cgrp->self;

	if (!(cgrp->root->subsys_mask & (1 << ss->id)))
		return NULL;

	/*
	 * This function is used while updating css associations and thus
	 * can't test the csses directly.  Use ->subtree_ss_mask.
	 */
	while (cgroup_parent(cgrp) &&
	       !(cgroup_parent(cgrp)->subtree_ss_mask & (1 << ss->id)))
		cgrp = cgroup_parent(cgrp);

	return cgroup_css(cgrp, ss);
}

/**
 * cgroup_get_e_css - get a cgroup's effective css for the specified subsystem
 * @cgrp: the cgroup of interest
 * @ss: the subsystem of interest
 *
 * Find and get the effective css of @cgrp for @ss.  The effective css is
 * defined as the matching css of the nearest ancestor including self which
 * has @ss enabled.  If @ss is not mounted on the hierarchy @cgrp is on,
 * the root css is returned, so this function always returns a valid css.
 * The returned css must be put using css_put().
 */
struct cgroup_subsys_state *cgroup_get_e_css(struct cgroup *cgrp,
					     struct cgroup_subsys *ss)
{
	struct cgroup_subsys_state *css;

	rcu_read_lock();

	do {
		css = cgroup_css(cgrp, ss);

		if (css && css_tryget_online(css))
			goto out_unlock;
		cgrp = cgroup_parent(cgrp);
	} while (cgrp);

	css = init_css_set.subsys[ss->id];
	css_get(css);
out_unlock:
	rcu_read_unlock();
	return css;
}

/* convenient tests for these bits */
static inline bool cgroup_is_dead(const struct cgroup *cgrp)
{
	return !(cgrp->self.flags & CSS_ONLINE);
}

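/*
 * cgroup reference counting piggybacks on the base reference of the
 * cgroup's self css; getting and trying to get a cgroup are simply
 * css_get() and css_tryget() on &cgrp->self.
 */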
static void cgroup_get(struct cgroup *cgrp)
{
	WARN_ON_ONCE(cgroup_is_dead(cgrp));
	css_get(&cgrp->self);
}

static bool cgroup_tryget(struct cgroup *cgrp)
{
	return css_tryget(&cgrp->self);
}

struct cgroup_subsys_state *of_css(struct kernfs_open_file *of)
{
	struct cgroup *cgrp = of->kn->parent->priv;
	struct cftype *cft = of_cft(of);

	/*
	 * This is open and unprotected implementation of cgroup_css().
	 * seq_css() is only called from a kernfs file operation which has
	 * an active reference on the file.  Because all the subsystem
	 * files are drained before a css is disassociated with a cgroup,
	 * the matching css from the cgroup's subsys table is guaranteed to
	 * be and stay valid until the enclosing operation is complete.
	 */
	if (cft->ss)
		return rcu_dereference_raw(cgrp->subsys[cft->ss->id]);
	else
		return &cgrp->self;
}
EXPORT_SYMBOL_GPL(of_css);

static int notify_on_release(const struct cgroup *cgrp)
{
	return test_bit(CGRP_NOTIFY_ON_RELEASE, &cgrp->flags);
}

/**
 * for_each_css - iterate all css's of a cgroup
 * @css: the iteration cursor
 * @ssid: the index of the subsystem, CGROUP_SUBSYS_COUNT after reaching the end
 * @cgrp: the target cgroup to iterate css's of
 *
 * Should be called under cgroup_[tree_]mutex.
 */
#define for_each_css(css, ssid, cgrp)					\
	for ((ssid) = 0; (ssid) < CGROUP_SUBSYS_COUNT; (ssid)++)	\
		if (!((css) = rcu_dereference_check(			\
				(cgrp)->subsys[(ssid)],			\
				lockdep_is_held(&cgroup_mutex)))) { }	\
		else

/**
 * for_each_e_css - iterate all effective css's of a cgroup
 * @css: the iteration cursor
 * @ssid: the index of the subsystem, CGROUP_SUBSYS_COUNT after reaching the end
 * @cgrp: the target cgroup to iterate css's of
 *
 * Should be called under cgroup_[tree_]mutex.
 */
#define for_each_e_css(css, ssid, cgrp)					\
	for ((ssid) = 0; (ssid) < CGROUP_SUBSYS_COUNT; (ssid)++)	\
		if (!((css) = cgroup_e_css(cgrp, cgroup_subsys[(ssid)]))) \
			;						\
		else

/**
 * for_each_subsys - iterate all enabled cgroup subsystems
 * @ss: the iteration cursor
 * @ssid: the index of @ss, CGROUP_SUBSYS_COUNT after reaching the end
 */
#define for_each_subsys(ss, ssid)					\
	for ((ssid) = 0; (ssid) < CGROUP_SUBSYS_COUNT &&		\
	     (((ss) = cgroup_subsys[ssid]) || true); (ssid)++)

/**
 * do_each_subsys_mask - filter for_each_subsys with a bitmask
 * @ss: the iteration cursor
 * @ssid: the index of @ss, CGROUP_SUBSYS_COUNT after reaching the end
 * @ss_mask: the bitmask
 *
 * The block will only run for cases where the ssid-th bit (1 << ssid) of
 * @ss_mask is set.
 */
#define do_each_subsys_mask(ss, ssid, ss_mask) do {			\
	unsigned long __ss_mask = (ss_mask);				\
	if (!CGROUP_SUBSYS_COUNT) { /* to avoid spurious gcc warning */	\
		(ssid) = 0;						\
		break;							\
	}								\
	for_each_set_bit(ssid, &__ss_mask, CGROUP_SUBSYS_COUNT) {	\
		(ss) = cgroup_subsys[ssid];				\
		{

#define while_each_subsys_mask()					\
		}							\
	}								\
} while (false)
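
/*
 * Example pairing of the above iteration macros, visiting only the
 * subsystems whose bit is set in @ss_mask:
 *
 *	do_each_subsys_mask(ss, ssid, ss_mask) {
 *		pr_debug("%s (id %d) is in the mask\n", ss->name, ssid);
 *	} while_each_subsys_mask();
 */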

/* iterate across the hierarchies */
#define for_each_root(root)						\
	list_for_each_entry((root), &cgroup_roots, root_list)

/* iterate over child cgrps, lock should be held throughout iteration */
#define cgroup_for_each_live_child(child, cgrp)				\
	list_for_each_entry((child), &(cgrp)->self.children, self.sibling) \
		if (({ lockdep_assert_held(&cgroup_mutex);		\
		       cgroup_is_dead(child); }))			\
			;						\
		else

static void cgroup_release_agent(struct work_struct *work);
static void check_for_release(struct cgroup *cgrp);

/*
 * A cgroup can be associated with multiple css_sets as different tasks may
 * belong to different cgroups on different hierarchies.  In the other
 * direction, a css_set is naturally associated with multiple cgroups.
 * This M:N relationship is represented by the following link structure
 * which exists for each association and allows traversing the associations
 * from both sides.
 */
struct cgrp_cset_link {
	/* the cgroup and css_set this link associates */
	struct cgroup		*cgrp;
	struct css_set		*cset;

	/* list of cgrp_cset_links anchored at cgrp->cset_links */
	struct list_head	cset_link;

	/* list of cgrp_cset_links anchored at css_set->cgrp_links */
	struct list_head	cgrp_link;
};

/*
 * The default css_set - used by init and its children prior to any
 * hierarchies being mounted. It contains a pointer to the root state
 * for each subsystem. Also used to anchor the list of css_sets. Not
 * reference-counted, to improve performance when child cgroups
 * haven't been created.
 */
struct css_set init_css_set = {
	.refcount		= ATOMIC_INIT(1),
	.cgrp_links		= LIST_HEAD_INIT(init_css_set.cgrp_links),
	.tasks			= LIST_HEAD_INIT(init_css_set.tasks),
	.mg_tasks		= LIST_HEAD_INIT(init_css_set.mg_tasks),
	.mg_preload_node	= LIST_HEAD_INIT(init_css_set.mg_preload_node),
	.mg_node		= LIST_HEAD_INIT(init_css_set.mg_node),
	.task_iters		= LIST_HEAD_INIT(init_css_set.task_iters),
};

static int css_set_count	= 1;	/* 1 for init_css_set */

/**
 * css_set_populated - does a css_set contain any tasks?
 * @cset: target css_set
 */
static bool css_set_populated(struct css_set *cset)
{
	lockdep_assert_held(&css_set_lock);

	return !list_empty(&cset->tasks) || !list_empty(&cset->mg_tasks);
}

/**
 * cgroup_update_populated - update the populated count of a cgroup
 * @cgrp: the target cgroup
 * @populated: inc or dec populated count
 *
 * One of the css_sets associated with @cgrp is either getting its first
 * task or losing the last.  Update @cgrp->populated_cnt accordingly.  The
 * count is propagated towards root so that a given cgroup's populated_cnt
 * is zero iff the cgroup and all its descendants don't contain any tasks.
 *
 * @cgrp's interface file "cgroup.populated" is zero if
 * @cgrp->populated_cnt is zero and 1 otherwise.  When @cgrp->populated_cnt
 * changes from or to zero, userland is notified that the content of the
 * interface file has changed.  This can be used to detect when @cgrp and
 * its descendants become populated or empty.
 */
static void cgroup_update_populated(struct cgroup *cgrp, bool populated)
{
	lockdep_assert_held(&css_set_lock);

	do {
		bool trigger;

		if (populated)
			trigger = !cgrp->populated_cnt++;
		else
			trigger = !--cgrp->populated_cnt;

		if (!trigger)
			break;

		check_for_release(cgrp);
		cgroup_file_notify(&cgrp->events_file);

		cgrp = cgroup_parent(cgrp);
	} while (cgrp);
}

/**
 * css_set_update_populated - update populated state of a css_set
 * @cset: target css_set
 * @populated: whether @cset is populated or depopulated
 *
 * @cset is either getting the first task or losing the last.  Update the
 * ->populated_cnt of all associated cgroups accordingly.
 */
static void css_set_update_populated(struct css_set *cset, bool populated)
{
	struct cgrp_cset_link *link;

	lockdep_assert_held(&css_set_lock);

	list_for_each_entry(link, &cset->cgrp_links, cgrp_link)
		cgroup_update_populated(link->cgrp, populated);
}

/**
 * css_set_move_task - move a task from one css_set to another
 * @task: task being moved
 * @from_cset: css_set @task currently belongs to (may be NULL)
 * @to_cset: new css_set @task is being moved to (may be NULL)
 * @use_mg_tasks: move to @to_cset->mg_tasks instead of ->tasks
 *
 * Move @task from @from_cset to @to_cset.  If @task didn't belong to any
 * css_set, @from_cset can be NULL.  If @task is being disassociated
 * instead of moved, @to_cset can be NULL.
 *
 * This function automatically handles populated_cnt updates and
 * css_task_iter adjustments but the caller is responsible for managing
 * @from_cset and @to_cset's reference counts.
 */
static void css_set_move_task(struct task_struct *task,
			      struct css_set *from_cset, struct css_set *to_cset,
			      bool use_mg_tasks)
{
	lockdep_assert_held(&css_set_lock);

	if (from_cset) {
		struct css_task_iter *it, *pos;

		WARN_ON_ONCE(list_empty(&task->cg_list));

		/*
		 * @task is leaving, advance task iterators which are
		 * pointing to it so that they can resume at the next
		 * position.  Advancing an iterator might remove it from
		 * the list, use safe walk.  See css_task_iter_advance*()
		 * for details.
		 */
		list_for_each_entry_safe(it, pos, &from_cset->task_iters,
					 iters_node)
			if (it->task_pos == &task->cg_list)
				css_task_iter_advance(it);

		list_del_init(&task->cg_list);
		if (!css_set_populated(from_cset))
			css_set_update_populated(from_cset, false);
	} else {
		WARN_ON_ONCE(!list_empty(&task->cg_list));
	}

	if (to_cset) {
		/*
		 * We are synchronized through cgroup_threadgroup_rwsem
		 * against PF_EXITING setting such that we can't race
		 * against cgroup_exit() changing the css_set to
		 * init_css_set and dropping the old one.
		 */
		WARN_ON_ONCE(task->flags & PF_EXITING);

		if (!css_set_populated(to_cset))
			css_set_update_populated(to_cset, true);
		rcu_assign_pointer(task->cgroups, to_cset);
		list_add_tail(&task->cg_list, use_mg_tasks ? &to_cset->mg_tasks :
							     &to_cset->tasks);
	}
}

/*
 * hash table for css_sets (a.k.a. cgroup groups).  This speeds up
 * finding an existing css_set.  This hash doesn't (currently) take into
 * account cgroups in empty hierarchies.
 */
#define CSS_SET_HASH_BITS	7
static DEFINE_HASHTABLE(css_set_table, CSS_SET_HASH_BITS);

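/*
 * css_set_hash - compute the hash table key for an array of css pointers
 *
 * Sum the css pointers of all enabled subsystems into a single value and
 * fold the upper bits back in so that nearby pointers still spread across
 * the table.
 */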
static unsigned long css_set_hash(struct cgroup_subsys_state *css[])
{
	unsigned long key = 0UL;
	struct cgroup_subsys *ss;
	int i;

	for_each_subsys(ss, i)
		key += (unsigned long)css[i];
	key = (key >> 16) ^ key;

	return key;
}

static void put_css_set_locked(struct css_set *cset)
{
	struct cgrp_cset_link *link, *tmp_link;
	struct cgroup_subsys *ss;
	int ssid;

	lockdep_assert_held(&css_set_lock);

	if (!atomic_dec_and_test(&cset->refcount))
		return;

	/* This css_set is dead.  Unlink it and release cgroup and css refs */
	for_each_subsys(ss, ssid) {
		list_del(&cset->e_cset_node[ssid]);
		css_put(cset->subsys[ssid]);
	}
	hash_del(&cset->hlist);
	css_set_count--;

	list_for_each_entry_safe(link, tmp_link, &cset->cgrp_links, cgrp_link) {
		list_del(&link->cset_link);
		list_del(&link->cgrp_link);
		if (cgroup_parent(link->cgrp))
			cgroup_put(link->cgrp);
		kfree(link);
	}

	kfree_rcu(cset, rcu_head);
}

static void put_css_set(struct css_set *cset)
{
	/*
	 * Ensure that the refcount doesn't hit zero while any readers
	 * can see it. Similar to atomic_dec_and_lock(), but for an
	 * rwlock
	 */
	if (atomic_add_unless(&cset->refcount, -1, 1))
		return;

	spin_lock_bh(&css_set_lock);
	put_css_set_locked(cset);
	spin_unlock_bh(&css_set_lock);
}

/*
 * refcounted get/put for css_set objects
 */
static inline void get_css_set(struct css_set *cset)
{
	atomic_inc(&cset->refcount);
}

/**
 * compare_css_sets - helper function for find_existing_css_set().
 * @cset: candidate css_set being tested
 * @old_cset: existing css_set for a task
 * @new_cgrp: cgroup that's being entered by the task
 * @template: desired set of css pointers in css_set (pre-calculated)
 *
 * Returns true if "cset" matches "old_cset" except for the hierarchy
 * which "new_cgrp" belongs to, for which it should match "new_cgrp".
 */
static bool compare_css_sets(struct css_set *cset,
			     struct css_set *old_cset,
			     struct cgroup *new_cgrp,
			     struct cgroup_subsys_state *template[])
{
	struct list_head *l1, *l2;

	/*
	 * On the default hierarchy, there can be csets which are
	 * associated with the same set of cgroups but different csses.
	 * Let's first ensure that csses match.
	 */
	if (memcmp(template, cset->subsys, sizeof(cset->subsys)))
		return false;

	/*
	 * Compare cgroup pointers in order to distinguish between
	 * different cgroups in hierarchies.  As different cgroups may
	 * share the same effective css, this comparison is always
	 * necessary.
	 */
	l1 = &cset->cgrp_links;
	l2 = &old_cset->cgrp_links;
	while (1) {
		struct cgrp_cset_link *link1, *link2;
		struct cgroup *cgrp1, *cgrp2;

		l1 = l1->next;
		l2 = l2->next;
		/* See if we reached the end - both lists are equal length. */
		if (l1 == &cset->cgrp_links) {
			BUG_ON(l2 != &old_cset->cgrp_links);
			break;
		} else {
			BUG_ON(l2 == &old_cset->cgrp_links);
		}
		/* Locate the cgroups associated with these links. */
		link1 = list_entry(l1, struct cgrp_cset_link, cgrp_link);
		link2 = list_entry(l2, struct cgrp_cset_link, cgrp_link);
		cgrp1 = link1->cgrp;
		cgrp2 = link2->cgrp;
		/* Hierarchies should be linked in the same order. */
		BUG_ON(cgrp1->root != cgrp2->root);

		/*
		 * If this hierarchy is the hierarchy of the cgroup
		 * that's changing, then we need to check that this
		 * css_set points to the new cgroup; if it's any other
		 * hierarchy, then this css_set should point to the
		 * same cgroup as the old css_set.
		 */
		if (cgrp1->root == new_cgrp->root) {
			if (cgrp1 != new_cgrp)
				return false;
		} else {
			if (cgrp1 != cgrp2)
				return false;
		}
	}
	return true;
}

/**
 * find_existing_css_set - init css array and find the matching css_set
 * @old_cset: the css_set that we're using before the cgroup transition
 * @cgrp: the cgroup that we're moving into
 * @template: out param for the new set of csses, should be clear on entry
 */
static struct css_set *find_existing_css_set(struct css_set *old_cset,
					struct cgroup *cgrp,
					struct cgroup_subsys_state *template[])
{
	struct cgroup_root *root = cgrp->root;
	struct cgroup_subsys *ss;
	struct css_set *cset;
	unsigned long key;
	int i;

	/*
	 * Build the set of subsystem state objects that we want to see in the
	 * new css_set. while subsystems can change globally, the entries here
	 * won't change, so no need for locking.
	 */
	for_each_subsys(ss, i) {
		if (root->subsys_mask & (1UL << i)) {
			/*
			 * @ss is in this hierarchy, so we want the
			 * effective css from @cgrp.
			 */
			template[i] = cgroup_e_css(cgrp, ss);
		} else {
			/*
			 * @ss is not in this hierarchy, so we don't want
			 * to change the css.
			 */
			template[i] = old_cset->subsys[i];
		}
	}

	key = css_set_hash(template);
	hash_for_each_possible(css_set_table, cset, hlist, key) {
		if (!compare_css_sets(cset, old_cset, cgrp, template))
			continue;

		/* This css_set matches what we need */
		return cset;
	}

	/* No existing cgroup group matched */
	return NULL;
}

static void free_cgrp_cset_links(struct list_head *links_to_free)
{
	struct cgrp_cset_link *link, *tmp_link;

	list_for_each_entry_safe(link, tmp_link, links_to_free, cset_link) {
		list_del(&link->cset_link);
		kfree(link);
	}
}

/**
 * allocate_cgrp_cset_links - allocate cgrp_cset_links
 * @count: the number of links to allocate
 * @tmp_links: list_head the allocated links are put on
 *
 * Allocate @count cgrp_cset_link structures and chain them on @tmp_links
 * through ->cset_link.  Returns 0 on success or -errno.
 */
static int allocate_cgrp_cset_links(int count, struct list_head *tmp_links)
{
	struct cgrp_cset_link *link;
	int i;

	INIT_LIST_HEAD(tmp_links);

	for (i = 0; i < count; i++) {
		link = kzalloc(sizeof(*link), GFP_KERNEL);
		if (!link) {
			free_cgrp_cset_links(tmp_links);
			return -ENOMEM;
		}
		list_add(&link->cset_link, tmp_links);
	}
	return 0;
}

/**
 * link_css_set - a helper function to link a css_set to a cgroup
 * @tmp_links: cgrp_cset_link objects allocated by allocate_cgrp_cset_links()
 * @cset: the css_set to be linked
 * @cgrp: the destination cgroup
 */
static void link_css_set(struct list_head *tmp_links, struct css_set *cset,
			 struct cgroup *cgrp)
{
	struct cgrp_cset_link *link;

	BUG_ON(list_empty(tmp_links));

	if (cgroup_on_dfl(cgrp))
		cset->dfl_cgrp = cgrp;

	link = list_first_entry(tmp_links, struct cgrp_cset_link, cset_link);
	link->cset = cset;
	link->cgrp = cgrp;

	/*
	 * Always add links to the tail of the lists so that the lists are
	 * in chronological order.
	 */
	list_move_tail(&link->cset_link, &cgrp->cset_links);
	list_add_tail(&link->cgrp_link, &cset->cgrp_links);

	if (cgroup_parent(cgrp))
		cgroup_get(cgrp);
}

/**
 * find_css_set - return a new css_set with one cgroup updated
 * @old_cset: the baseline css_set
 * @cgrp: the cgroup to be updated
 *
 * Return a new css_set that's equivalent to @old_cset, but with @cgrp
 * substituted into the appropriate hierarchy.
 */
static struct css_set *find_css_set(struct css_set *old_cset,
				    struct cgroup *cgrp)
{
	struct cgroup_subsys_state *template[CGROUP_SUBSYS_COUNT] = { };
	struct css_set *cset;
	struct list_head tmp_links;
	struct cgrp_cset_link *link;
	struct cgroup_subsys *ss;
	unsigned long key;
	int ssid;

	lockdep_assert_held(&cgroup_mutex);

	/* First see if we already have a cgroup group that matches
	 * the desired set */
	spin_lock_bh(&css_set_lock);
	cset = find_existing_css_set(old_cset, cgrp, template);
	if (cset)
		get_css_set(cset);
	spin_unlock_bh(&css_set_lock);

	if (cset)
		return cset;

	cset = kzalloc(sizeof(*cset), GFP_KERNEL);
	if (!cset)
		return NULL;

	/* Allocate all the cgrp_cset_link objects that we'll need */
	if (allocate_cgrp_cset_links(cgroup_root_count, &tmp_links) < 0) {
		kfree(cset);
		return NULL;
	}

	atomic_set(&cset->refcount, 1);
	INIT_LIST_HEAD(&cset->cgrp_links);
	INIT_LIST_HEAD(&cset->tasks);
	INIT_LIST_HEAD(&cset->mg_tasks);
	INIT_LIST_HEAD(&cset->mg_preload_node);
	INIT_LIST_HEAD(&cset->mg_node);
	INIT_LIST_HEAD(&cset->task_iters);
	INIT_HLIST_NODE(&cset->hlist);

	/* Copy the set of subsystem state objects generated in
	 * find_existing_css_set() */
	memcpy(cset->subsys, template, sizeof(cset->subsys));

	spin_lock_bh(&css_set_lock);
	/* Add reference counts and links from the new css_set. */
	list_for_each_entry(link, &old_cset->cgrp_links, cgrp_link) {
		struct cgroup *c = link->cgrp;

		if (c->root == cgrp->root)
			c = cgrp;
		link_css_set(&tmp_links, cset, c);
	}

	BUG_ON(!list_empty(&tmp_links));

	css_set_count++;

	/* Add @cset to the hash table */
	key = css_set_hash(cset->subsys);
	hash_add(css_set_table, &cset->hlist, key);

	for_each_subsys(ss, ssid) {
		struct cgroup_subsys_state *css = cset->subsys[ssid];

		list_add_tail(&cset->e_cset_node[ssid],
			      &css->cgroup->e_csets[ssid]);
		css_get(css);
	}

	spin_unlock_bh(&css_set_lock);

	return cset;
}

static struct cgroup_root *cgroup_root_from_kf(struct kernfs_root *kf_root)
{
	struct cgroup *root_cgrp = kf_root->kn->priv;

	return root_cgrp->root;
}

static int cgroup_init_root_id(struct cgroup_root *root)
{
	int id;

	lockdep_assert_held(&cgroup_mutex);

	id = idr_alloc_cyclic(&cgroup_hierarchy_idr, root, 0, 0, GFP_KERNEL);
	if (id < 0)
		return id;

	root->hierarchy_id = id;
	return 0;
}

static void cgroup_exit_root_id(struct cgroup_root *root)
{
	lockdep_assert_held(&cgroup_mutex);

	if (root->hierarchy_id) {
		idr_remove(&cgroup_hierarchy_idr, root->hierarchy_id);
		root->hierarchy_id = 0;
	}
}

static void cgroup_free_root(struct cgroup_root *root)
{
	if (root) {
		/* hierarchy ID should already have been released */
		WARN_ON_ONCE(root->hierarchy_id);

		idr_destroy(&root->cgroup_idr);
		kfree(root);
	}
}

static void cgroup_destroy_root(struct cgroup_root *root)
{
	struct cgroup *cgrp = &root->cgrp;
	struct cgrp_cset_link *link, *tmp_link;

	mutex_lock(&cgroup_mutex);

	BUG_ON(atomic_read(&root->nr_cgrps));
	BUG_ON(!list_empty(&cgrp->self.children));

	/* Rebind all subsystems back to the default hierarchy */
	rebind_subsystems(&cgrp_dfl_root, root->subsys_mask);

	/*
	 * Release all the links from cset_links to this hierarchy's
	 * root cgroup
	 */
	spin_lock_bh(&css_set_lock);

	list_for_each_entry_safe(link, tmp_link, &cgrp->cset_links, cset_link) {
		list_del(&link->cset_link);
		list_del(&link->cgrp_link);
		kfree(link);
	}

	spin_unlock_bh(&css_set_lock);

	if (!list_empty(&root->root_list)) {
		list_del(&root->root_list);
		cgroup_root_count--;
	}

	cgroup_exit_root_id(root);

	mutex_unlock(&cgroup_mutex);

	kernfs_destroy_root(root->kf_root);
	cgroup_free_root(root);
}

/* look up cgroup associated with given css_set on the specified hierarchy */
static struct cgroup *cset_cgroup_from_root(struct css_set *cset,
					    struct cgroup_root *root)
{
	struct cgroup *res = NULL;

	lockdep_assert_held(&cgroup_mutex);
	lockdep_assert_held(&css_set_lock);

	if (cset == &init_css_set) {
		res = &root->cgrp;
	} else {
		struct cgrp_cset_link *link;

		list_for_each_entry(link, &cset->cgrp_links, cgrp_link) {
			struct cgroup *c = link->cgrp;

			if (c->root == root) {
				res = c;
				break;
			}
		}
	}

	BUG_ON(!res);
	return res;
}

/*
 * Return the cgroup for "task" from the given hierarchy. Must be
 * called with cgroup_mutex and css_set_lock held.
 */
static struct cgroup *task_cgroup_from_root(struct task_struct *task,
					    struct cgroup_root *root)
{
	/*
	 * No need to lock the task - since we hold cgroup_mutex the
	 * task can't change groups, so the only thing that can happen
	 * is that it exits and its css is set back to init_css_set.
	 */
	return cset_cgroup_from_root(task_css_set(task), root);
}

/*
 * A task must hold cgroup_mutex to modify cgroups.
 *
 * Any task can increment and decrement the count field without lock.
 * So in general, code holding cgroup_mutex can't rely on the count
 * field not changing.  However, if the count goes to zero, then only
 * cgroup_attach_task() can increment it again.  Because a count of zero
 * means that no tasks are currently attached, therefore there is no
 * way a task attached to that cgroup can fork (the other way to
 * increment the count).  So code holding cgroup_mutex can safely
 * assume that if the count is zero, it will stay zero. Similarly, if
 * a task holds cgroup_mutex on a cgroup with zero count, it
 * knows that the cgroup won't be removed, as cgroup_rmdir()
 * needs that mutex.
 *
 * A cgroup can only be deleted if both its 'count' of using tasks
 * is zero, and its list of 'children' cgroups is empty.  Since all
 * tasks in the system use _some_ cgroup, and since there is always at
 * least one task in the system (init, pid == 1), therefore, root cgroup
 * always has child cgroups and/or tasks using it.  So we don't
 * need a special hack to ensure that root cgroup cannot be deleted.
 *
 * P.S.  One more locking exception.  RCU is used to guard the
 * update of a task's cgroup pointer by cgroup_attach_task()
 */

static struct kernfs_syscall_ops cgroup_kf_syscall_ops;
static const struct file_operations proc_cgroupstats_operations;

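/*
 * cgroup_file_name - format the name of a cftype's interface file
 *
 * Subsystem files are prefixed with "<subsys-name>." unless the cftype
 * sets CFTYPE_NO_PREFIX or the hierarchy was mounted with "noprefix".
 * The default hierarchy uses ss->name while legacy hierarchies use
 * ss->legacy_name.
 */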
static char *cgroup_file_name(struct cgroup *cgrp, const struct cftype *cft,
			      char *buf)
{
	struct cgroup_subsys *ss = cft->ss;

	if (cft->ss && !(cft->flags & CFTYPE_NO_PREFIX) &&
	    !(cgrp->root->flags & CGRP_ROOT_NOPREFIX))
		snprintf(buf, CGROUP_FILE_NAME_MAX, "%s.%s",
			 cgroup_on_dfl(cgrp) ? ss->name : ss->legacy_name,
			 cft->name);
	else
		strncpy(buf, cft->name, CGROUP_FILE_NAME_MAX);
	return buf;
}

/**
 * cgroup_file_mode - deduce file mode of a control file
 * @cft: the control file in question
 *
 * S_IRUGO for read, S_IWUSR for write.
 */
static umode_t cgroup_file_mode(const struct cftype *cft)
{
	umode_t mode = 0;

	if (cft->read_u64 || cft->read_s64 || cft->seq_show)
		mode |= S_IRUGO;

	if (cft->write_u64 || cft->write_s64 || cft->write) {
		if (cft->flags & CFTYPE_WORLD_WRITABLE)
			mode |= S_IWUGO;
		else
			mode |= S_IWUSR;
	}

	return mode;
}

/**
 * cgroup_calc_subtree_ss_mask - calculate subtree_ss_mask
 * @cgrp: the target cgroup
 * @subtree_control: the new subtree_control mask to consider
 *
 * On the default hierarchy, a subsystem may request other subsystems to be
 * enabled together through its ->depends_on mask.  In such cases, more
 * subsystems than specified in "cgroup.subtree_control" may be enabled.
 *
 * This function calculates which subsystems need to be enabled if
 * @subtree_control is to be applied to @cgrp.  The returned mask is always
 * a superset of @subtree_control and follows the usual hierarchy rules.
 */
static unsigned long cgroup_calc_subtree_ss_mask(struct cgroup *cgrp,
						 unsigned long subtree_control)
{
	struct cgroup *parent = cgroup_parent(cgrp);
	unsigned long cur_ss_mask = subtree_control;
	struct cgroup_subsys *ss;
	int ssid;

	lockdep_assert_held(&cgroup_mutex);

	if (!cgroup_on_dfl(cgrp))
		return cur_ss_mask;

	while (true) {
		unsigned long new_ss_mask = cur_ss_mask;

		do_each_subsys_mask(ss, ssid, cur_ss_mask) {
			new_ss_mask |= ss->depends_on;
		} while_each_subsys_mask();

		/*
		 * Mask out subsystems which aren't available.  This can
		 * happen only if some depended-upon subsystems were bound
		 * to non-default hierarchies.
		 */
		if (parent)
			new_ss_mask &= parent->subtree_ss_mask;
		else
			new_ss_mask &= cgrp->root->subsys_mask;

		if (new_ss_mask == cur_ss_mask)
			break;
		cur_ss_mask = new_ss_mask;
	}

	return cur_ss_mask;
}

/**
 * cgroup_refresh_subtree_ss_mask - update subtree_ss_mask
 * @cgrp: the target cgroup
 *
 * Update @cgrp->subtree_ss_mask according to the current
 * @cgrp->subtree_control using cgroup_calc_subtree_ss_mask().
 */
static void cgroup_refresh_subtree_ss_mask(struct cgroup *cgrp)
{
	cgrp->subtree_ss_mask =
		cgroup_calc_subtree_ss_mask(cgrp, cgrp->subtree_control);
}

/**
 * cgroup_kn_unlock - unlocking helper for cgroup kernfs methods
 * @kn: the kernfs_node being serviced
 *
 * This helper undoes cgroup_kn_lock_live() and should be invoked before
 * the method finishes if locking succeeded.  Note that once this function
 * returns the cgroup returned by cgroup_kn_lock_live() may become
 * inaccessible any time.  If the caller intends to continue to access the
 * cgroup, it should pin it before invoking this function.
 */
static void cgroup_kn_unlock(struct kernfs_node *kn)
{
	struct cgroup *cgrp;

	if (kernfs_type(kn) == KERNFS_DIR)
		cgrp = kn->priv;
	else
		cgrp = kn->parent->priv;

	mutex_unlock(&cgroup_mutex);

	kernfs_unbreak_active_protection(kn);
	cgroup_put(cgrp);
}

/**
 * cgroup_kn_lock_live - locking helper for cgroup kernfs methods
 * @kn: the kernfs_node being serviced
 *
 * This helper is to be used by a cgroup kernfs method currently servicing
 * @kn.  It breaks the active protection, performs cgroup locking and
 * verifies that the associated cgroup is alive.  Returns the cgroup if
 * alive; otherwise, %NULL.  A successful return should be undone by a
 * matching cgroup_kn_unlock() invocation.
 *
 * Any cgroup kernfs method implementation which requires locking the
 * associated cgroup should use this helper.  It avoids nesting cgroup
 * locking under kernfs active protection and allows all kernfs operations
 * including self-removal.
 */
static struct cgroup *cgroup_kn_lock_live(struct kernfs_node *kn)
{
	struct cgroup *cgrp;

	if (kernfs_type(kn) == KERNFS_DIR)
		cgrp = kn->priv;
	else
		cgrp = kn->parent->priv;

	/*
	 * We're going to grab cgroup_mutex which nests outside kernfs
	 * active_ref.  cgroup liveness check alone provides enough
	 * protection against removal.  Ensure @cgrp stays accessible and
	 * break the active_ref protection.
	 */
	if (!cgroup_tryget(cgrp))
		return NULL;
	kernfs_break_active_protection(kn);

	mutex_lock(&cgroup_mutex);

	if (!cgroup_is_dead(cgrp))
		return cgrp;

	cgroup_kn_unlock(kn);
	return NULL;
}

static void cgroup_rm_file(struct cgroup *cgrp, const struct cftype *cft)
{
	char name[CGROUP_FILE_NAME_MAX];

	lockdep_assert_held(&cgroup_mutex);

	if (cft->file_offset) {
		struct cgroup_subsys_state *css = cgroup_css(cgrp, cft->ss);
		struct cgroup_file *cfile = (void *)css + cft->file_offset;

		spin_lock_irq(&cgroup_file_kn_lock);
		cfile->kn = NULL;
		spin_unlock_irq(&cgroup_file_kn_lock);
	}

	kernfs_remove_by_name(cgrp->kn, cgroup_file_name(cgrp, cft, name));
}

/**
 * css_clear_dir - remove subsys files in a cgroup directory
 * @css: target css
 * @cgrp_override: specify if target cgroup is different from css->cgroup
 */
static void css_clear_dir(struct cgroup_subsys_state *css,
			  struct cgroup *cgrp_override)
{
	struct cgroup *cgrp = cgrp_override ?: css->cgroup;
	struct cftype *cfts;

	list_for_each_entry(cfts, &css->ss->cfts, node)
		cgroup_addrm_files(css, cgrp, cfts, false);
}

/**
 * css_populate_dir - create subsys files in a cgroup directory
 * @css: target css
 * @cgrp_override: specify if target cgroup is different from css->cgroup
 *
 * On failure, no file is added.
 */
static int css_populate_dir(struct cgroup_subsys_state *css,
			    struct cgroup *cgrp_override)
{
	struct cgroup *cgrp = cgrp_override ?: css->cgroup;
	struct cftype *cfts, *failed_cfts;
	int ret;

	if (!css->ss) {
		if (cgroup_on_dfl(cgrp))
			cfts = cgroup_dfl_base_files;
		else
			cfts = cgroup_legacy_base_files;

		return cgroup_addrm_files(&cgrp->self, cgrp, cfts, true);
	}

	list_for_each_entry(cfts, &css->ss->cfts, node) {
		ret = cgroup_addrm_files(css, cgrp, cfts, true);
		if (ret < 0) {
			failed_cfts = cfts;
			goto err;
		}
	}
	return 0;
err:
	list_for_each_entry(cfts, &css->ss->cfts, node) {
		if (cfts == failed_cfts)
			break;
		cgroup_addrm_files(css, cgrp, cfts, false);
	}
	return ret;
}

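/*
 * rebind_subsystems - move the subsystems in @ss_mask to @dst_root
 *
 * A subsystem can't be moved while it has non-root csses attached, and
 * every move must have the default root as either source or destination.
 * Once the root files have been created on @dst_root, the css pointers,
 * css_set links and static branch state are transferred with no further
 * failure points.
 */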
static int rebind_subsystems(struct cgroup_root *dst_root,
			     unsigned long ss_mask)
{
	struct cgroup *dcgrp = &dst_root->cgrp;
	struct cgroup_subsys *ss;
	unsigned long tmp_ss_mask;
	int ssid, i, ret;

	lockdep_assert_held(&cgroup_mutex);

	do_each_subsys_mask(ss, ssid, ss_mask) {
		/* if @ss has non-root csses attached to it, can't move */
		if (css_next_child(NULL, cgroup_css(&ss->root->cgrp, ss)))
			return -EBUSY;

		/* can't move between two non-dummy roots either */
		if (ss->root != &cgrp_dfl_root && dst_root != &cgrp_dfl_root)
			return -EBUSY;
	} while_each_subsys_mask();

	/* skip creating root files on dfl_root for inhibited subsystems */
	tmp_ss_mask = ss_mask;
	if (dst_root == &cgrp_dfl_root)
		tmp_ss_mask &= ~cgrp_dfl_root_inhibit_ss_mask;

	do_each_subsys_mask(ss, ssid, tmp_ss_mask) {
		struct cgroup *scgrp = &ss->root->cgrp;
		int tssid;

		ret = css_populate_dir(cgroup_css(scgrp, ss), dcgrp);
		if (!ret)
			continue;

		/*
		 * Rebinding back to the default root is not allowed to
		 * fail.  Using both default and non-default roots should
		 * be rare.  Moving subsystems back and forth even more so.
		 * Just warn about it and continue.
		 */
		if (dst_root == &cgrp_dfl_root) {
			if (cgrp_dfl_root_visible) {
				pr_warn("failed to create files (%d) while rebinding 0x%lx to default root\n",
					ret, ss_mask);
				pr_warn("you may retry by moving them to a different hierarchy and unbinding\n");
			}
			continue;
		}

		do_each_subsys_mask(ss, tssid, tmp_ss_mask) {
			if (tssid == ssid)
				break;
			css_clear_dir(cgroup_css(scgrp, ss), dcgrp);
		} while_each_subsys_mask();
		return ret;
	} while_each_subsys_mask();

	/*
	 * Nothing can fail from this point on.  Remove files for the
	 * removed subsystems and rebind each subsystem.
	 */
	do_each_subsys_mask(ss, ssid, ss_mask) {
		struct cgroup_root *src_root = ss->root;
		struct cgroup *scgrp = &src_root->cgrp;
		struct cgroup_subsys_state *css = cgroup_css(scgrp, ss);
		struct css_set *cset;

		WARN_ON(!css || cgroup_css(dcgrp, ss));

		css_clear_dir(css, NULL);

		RCU_INIT_POINTER(scgrp->subsys[ssid], NULL);
		rcu_assign_pointer(dcgrp->subsys[ssid], css);
		ss->root = dst_root;
		css->cgroup = dcgrp;

		spin_lock_bh(&css_set_lock);
		hash_for_each(css_set_table, i, cset, hlist)
			list_move_tail(&cset->e_cset_node[ss->id],
				       &dcgrp->e_csets[ss->id]);
		spin_unlock_bh(&css_set_lock);

		src_root->subsys_mask &= ~(1 << ssid);
		scgrp->subtree_control &= ~(1 << ssid);
		cgroup_refresh_subtree_ss_mask(scgrp);

		/* default hierarchy doesn't enable controllers by default */
		dst_root->subsys_mask |= 1 << ssid;
		if (dst_root == &cgrp_dfl_root) {
			static_branch_enable(cgroup_subsys_on_dfl_key[ssid]);
		} else {
			dcgrp->subtree_control |= 1 << ssid;
			cgroup_refresh_subtree_ss_mask(dcgrp);
			static_branch_disable(cgroup_subsys_on_dfl_key[ssid]);
		}

		if (ss->bind)
			ss->bind(css);
	} while_each_subsys_mask();

	kernfs_activate(dcgrp->kn);
	return 0;
}

static int cgroup_show_options(struct seq_file *seq,
			       struct kernfs_root *kf_root)
{
	struct cgroup_root *root = cgroup_root_from_kf(kf_root);
	struct cgroup_subsys *ss;
	int ssid;

	if (root != &cgrp_dfl_root)
		for_each_subsys(ss, ssid)
			if (root->subsys_mask & (1 << ssid))
				seq_show_option(seq, ss->legacy_name, NULL);
	if (root->flags & CGRP_ROOT_NOPREFIX)
		seq_puts(seq, ",noprefix");
	if (root->flags & CGRP_ROOT_XATTR)
		seq_puts(seq, ",xattr");

	spin_lock(&release_agent_path_lock);
	if (strlen(root->release_agent_path))
		seq_show_option(seq, "release_agent",
				root->release_agent_path);
	spin_unlock(&release_agent_path_lock);

	if (test_bit(CGRP_CPUSET_CLONE_CHILDREN, &root->cgrp.flags))
		seq_puts(seq, ",clone_children");
	if (strlen(root->name))
		seq_show_option(seq, "name", root->name);
	return 0;
}

struct cgroup_sb_opts {
	unsigned long subsys_mask;
	unsigned int flags;
	char *release_agent;
	bool cpuset_clone_children;
	char *name;
	/* User explicitly requested empty subsystem */
	bool none;
};

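/*
 * parse_cgroupfs_options - parse v1 mount options into @opts
 *
 * Splits the comma-separated @data string and fills in @opts, rejecting
 * invalid combinations such as "all" with an explicit subsystem name,
 * "none" with subsystems, or "noprefix" with anything but cpuset.
 * Returns 0 on success or -errno.
 */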
static int parse_cgroupfs_options(char *data, struct cgroup_sb_opts *opts)
{
	char *token, *o = data;
	bool all_ss = false, one_ss = false;
	unsigned long mask = -1UL;
	struct cgroup_subsys *ss;
	int nr_opts = 0;
	int i;

#ifdef CONFIG_CPUSETS
	mask = ~(1U << cpuset_cgrp_id);
#endif

	memset(opts, 0, sizeof(*opts));

	while ((token = strsep(&o, ",")) != NULL) {
		nr_opts++;

		if (!*token)
			return -EINVAL;
		if (!strcmp(token, "none")) {
			/* Explicitly have no subsystems */
			opts->none = true;
			continue;
		}
		if (!strcmp(token, "all")) {
			/* Mutually exclusive option 'all' + subsystem name */
			if (one_ss)
				return -EINVAL;
			all_ss = true;
			continue;
		}
		if (!strcmp(token, "noprefix")) {
			opts->flags |= CGRP_ROOT_NOPREFIX;
			continue;
		}
		if (!strcmp(token, "clone_children")) {
			opts->cpuset_clone_children = true;
			continue;
		}
		if (!strcmp(token, "xattr")) {
			opts->flags |= CGRP_ROOT_XATTR;
			continue;
		}
		if (!strncmp(token, "release_agent=", 14)) {
			/* Specifying two release agents is forbidden */
			if (opts->release_agent)
				return -EINVAL;
1659
			opts->release_agent =
1660
				kstrndup(token + 14, PATH_MAX - 1, GFP_KERNEL);
1661 1662
			if (!opts->release_agent)
				return -ENOMEM;
1663 1664 1665
			continue;
		}
		if (!strncmp(token, "name=", 5)) {
1666 1667 1668 1669 1670 1671 1672 1673 1674 1675 1676 1677 1678 1679 1680 1681 1682
			const char *name = token + 5;
			/* Can't specify an empty name */
			if (!strlen(name))
				return -EINVAL;
			/* Must match [\w.-]+ */
			for (i = 0; i < strlen(name); i++) {
				char c = name[i];
				if (isalnum(c))
					continue;
				if ((c == '.') || (c == '-') || (c == '_'))
					continue;
				return -EINVAL;
			}
			/* Specifying two names is forbidden */
			if (opts->name)
				return -EINVAL;
			opts->name = kstrndup(name,
1683
					      MAX_CGROUP_ROOT_NAMELEN - 1,
1684 1685 1686
					      GFP_KERNEL);
			if (!opts->name)
				return -ENOMEM;
1687 1688 1689 1690

			continue;
		}

1691
		for_each_subsys(ss, i) {
1692
			if (strcmp(token, ss->legacy_name))
1693
				continue;
1694
			if (!cgroup_ssid_enabled(i))
1695
				continue;
1696 1697
			if (cgroup_ssid_no_v1(i))
				continue;
1698 1699 1700 1701

			/* Mutually exclusive option 'all' + subsystem name */
			if (all_ss)
				return -EINVAL;
1702
			opts->subsys_mask |= (1 << i);
1703 1704 1705 1706 1707 1708 1709 1710
			one_ss = true;

			break;
		}
		if (i == CGROUP_SUBSYS_COUNT)
			return -ENOENT;
	}

	/*
	 * If the 'all' option was specified, select all the subsystems.
	 * Otherwise, if none of 'none', 'name=' or a subsystem name was
	 * specified, default to 'all'.
	 */
	if (all_ss || (!one_ss && !opts->none && !opts->name))
		for_each_subsys(ss, i)
			if (cgroup_ssid_enabled(i) && !cgroup_ssid_no_v1(i))
				opts->subsys_mask |= (1 << i);

	/*
	 * We either have to specify by name or by subsystems. (So all
	 * empty hierarchies must have a name).
	 */
	if (!opts->subsys_mask && !opts->name)
		return -EINVAL;

	/*
	 * Option noprefix was introduced just for backward compatibility
	 * with the old cpuset, so we allow noprefix only if mounting just
	 * the cpuset subsystem.
	 */
	if ((opts->flags & CGRP_ROOT_NOPREFIX) && (opts->subsys_mask & mask))
		return -EINVAL;

	/* Can't specify "none" and some subsystems */
	if (opts->subsys_mask && opts->none)
		return -EINVAL;

	return 0;
}

static int cgroup_remount(struct kernfs_root *kf_root, int *flags, char *data)
{
	int ret = 0;
	struct cgroup_root *root = cgroup_root_from_kf(kf_root);
	struct cgroup_sb_opts opts;
	unsigned long added_mask, removed_mask;

	if (root == &cgrp_dfl_root) {
		pr_err("remount is not allowed\n");
		return -EINVAL;
	}

	mutex_lock(&cgroup_mutex);

	/* See what subsystems are wanted */
	ret = parse_cgroupfs_options(data, &opts);
	if (ret)
		goto out_unlock;

	if (opts.subsys_mask != root->subsys_mask || opts.release_agent)
		pr_warn("option changes via remount are deprecated (pid=%d comm=%s)\n",
			task_tgid_nr(current), current->comm);

	added_mask = opts.subsys_mask & ~root->subsys_mask;
	removed_mask = root->subsys_mask & ~opts.subsys_mask;

	/* Don't allow flags or name to change at remount */
	if ((opts.flags ^ root->flags) ||
	    (opts.name && strcmp(opts.name, root->name))) {
		pr_err("option or name mismatch, new: 0x%x \"%s\", old: 0x%x \"%s\"\n",
		       opts.flags, opts.name ?: "", root->flags, root->name);
		ret = -EINVAL;
		goto out_unlock;
	}

	/* remounting is not allowed for populated hierarchies */
	if (!list_empty(&root->cgrp.self.children)) {
		ret = -EBUSY;
		goto out_unlock;
	}

	ret = rebind_subsystems(root, added_mask);
	if (ret)
		goto out_unlock;

	rebind_subsystems(&cgrp_dfl_root, removed_mask);

	if (opts.release_agent) {
		spin_lock(&release_agent_path_lock);
		strcpy(root->release_agent_path, opts.release_agent);
		spin_unlock(&release_agent_path_lock);
	}
 out_unlock:
	kfree(opts.release_agent);
	kfree(opts.name);
	mutex_unlock(&cgroup_mutex);
	return ret;
}

/*
 * To reduce the fork() overhead for systems that are not actually using
 * their cgroups capability, we don't maintain the lists running through
 * each css_set to its tasks until we see the list actually used - in other
 * words after the first mount.
 */
static bool use_task_css_set_links __read_mostly;

static void cgroup_enable_task_cg_lists(void)
{
	struct task_struct *p, *g;

	spin_lock_bh(&css_set_lock);

	if (use_task_css_set_links)
		goto out_unlock;

	use_task_css_set_links = true;

	/*
	 * We need tasklist_lock because RCU is not safe against
	 * while_each_thread(). Besides, a forking task that has passed
	 * cgroup_post_fork() without seeing use_task_css_set_links = 1
	 * is not guaranteed to have its child immediately visible in the
	 * tasklist if we walk through it with RCU.
	 */
	read_lock(&tasklist_lock);
	do_each_thread(g, p) {
		WARN_ON_ONCE(!list_empty(&p->cg_list) ||
			     task_css_set(p) != &init_css_set);

		/*
		 * We should check if the process is exiting, otherwise
		 * it will race with cgroup_exit() in that the list
		 * entry won't be deleted though the process has exited.
		 * Do it while holding siglock so that we don't end up
		 * racing against cgroup_exit().
		 */
		spin_lock_irq(&p->sighand->siglock);
		if (!(p->flags & PF_EXITING)) {
			struct css_set *cset = task_css_set(p);

			if (!css_set_populated(cset))
				css_set_update_populated(cset, true);
			list_add_tail(&p->cg_list, &cset->tasks);
			get_css_set(cset);
		}
		spin_unlock_irq(&p->sighand->siglock);
	} while_each_thread(g, p);
	read_unlock(&tasklist_lock);
out_unlock:
	spin_unlock_bh(&css_set_lock);
}

static void init_cgroup_housekeeping(struct cgroup *cgrp)
{
	struct cgroup_subsys *ss;
	int ssid;

	INIT_LIST_HEAD(&cgrp->self.sibling);
	INIT_LIST_HEAD(&cgrp->self.children);
	INIT_LIST_HEAD(&cgrp->cset_links);
	INIT_LIST_HEAD(&cgrp->pidlists);
	mutex_init(&cgrp->pidlist_mutex);
	cgrp->self.cgroup = cgrp;
	cgrp->self.flags |= CSS_ONLINE;

	for_each_subsys(ss, ssid)
		INIT_LIST_HEAD(&cgrp->e_csets[ssid]);

	init_waitqueue_head(&cgrp->offline_waitq);
	INIT_WORK(&cgrp->release_agent_work, cgroup_release_agent);
}

static void init_cgroup_root(struct cgroup_root *root,
			     struct cgroup_sb_opts *opts)
{
	struct cgroup *cgrp = &root->cgrp;

	INIT_LIST_HEAD(&root->root_list);
	atomic_set(&root->nr_cgrps, 1);
	cgrp->root = root;
	init_cgroup_housekeeping(cgrp);
	idr_init(&root->cgroup_idr);

	root->flags = opts->flags;
	if (opts->release_agent)
		strcpy(root->release_agent_path, opts->release_agent);
	if (opts->name)
		strcpy(root->name, opts->name);
	if (opts->cpuset_clone_children)
		set_bit(CGRP_CPUSET_CLONE_CHILDREN, &root->cgrp.flags);
}

static int cgroup_setup_root(struct cgroup_root *root, unsigned long ss_mask)
{
	LIST_HEAD(tmp_links);
	struct cgroup *root_cgrp = &root->cgrp;
	struct css_set *cset;
	int i, ret;

	lockdep_assert_held(&cgroup_mutex);

	ret = cgroup_idr_alloc(&root->cgroup_idr, root_cgrp, 1, 2, GFP_KERNEL);
	if (ret < 0)
		goto out;
	root_cgrp->id = ret;
	root_cgrp->ancestor_ids[0] = ret;

	ret = percpu_ref_init(&root_cgrp->self.refcnt, css_release, 0,
			      GFP_KERNEL);
	if (ret)
		goto out;

	/*
	 * We're accessing css_set_count without locking css_set_lock here,
	 * but that's OK - it can only be increased by someone holding
	 * cgroup_lock, and that's us. The worst that can happen is that we
	 * have some link structures left over.
	 */
	ret = allocate_cgrp_cset_links(css_set_count, &tmp_links);
	if (ret)
		goto cancel_ref;

	ret = cgroup_init_root_id(root);
	if (ret)
		goto cancel_ref;

	root->kf_root = kernfs_create_root(&cgroup_kf_syscall_ops,
					   KERNFS_ROOT_CREATE_DEACTIVATED,
					   root_cgrp);
	if (IS_ERR(root->kf_root)) {
		ret = PTR_ERR(root->kf_root);
		goto exit_root_id;
	}
	root_cgrp->kn = root->kf_root->kn;

	ret = css_populate_dir(&root_cgrp->self, NULL);
	if (ret)
		goto destroy_root;

	ret = rebind_subsystems(root, ss_mask);
	if (ret)
		goto destroy_root;

	/*
	 * There must be no failure case after here, since rebinding takes
	 * care of subsystems' refcounts, which are explicitly dropped in
	 * the failure exit path.
	 */
	list_add(&root->root_list, &cgroup_roots);
	cgroup_root_count++;

	/*
	 * Link the root cgroup in this hierarchy into all the css_set
	 * objects.
	 */
	spin_lock_bh(&css_set_lock);
	hash_for_each(css_set_table, i, cset, hlist) {
		link_css_set(&tmp_links, cset, root_cgrp);
		if (css_set_populated(cset))
			cgroup_update_populated(root_cgrp, true);
	}
	spin_unlock_bh(&css_set_lock);

	BUG_ON(!list_empty(&root_cgrp->self.children));
	BUG_ON(atomic_read(&root->nr_cgrps) != 1);

	kernfs_activate(root_cgrp->kn);
	ret = 0;
	goto out;

destroy_root:
	kernfs_destroy_root(root->kf_root);
	root->kf_root = NULL;
exit_root_id:
	cgroup_exit_root_id(root);
cancel_ref:
	percpu_ref_exit(&root_cgrp->self.refcnt);
out:
	free_cgrp_cset_links(&tmp_links);
	return ret;
}

static struct dentry *cgroup_mount(struct file_system_type *fs_type,
			 int flags, const char *unused_dev_name,
			 void *data)
{
	bool is_v2 = fs_type == &cgroup2_fs_type;
	struct super_block *pinned_sb = NULL;
	struct cgroup_subsys *ss;
	struct cgroup_root *root;
	struct cgroup_sb_opts opts;
	struct dentry *dentry;
	int ret;
	int i;
	bool new_sb;

	/*
	 * The first time anyone tries to mount a cgroup, enable the list
	 * linking each css_set to its tasks and fix up all existing tasks.
	 */
	if (!use_task_css_set_links)
		cgroup_enable_task_cg_lists();

	if (is_v2) {
		if (data) {
			pr_err("cgroup2: unknown option \"%s\"\n", (char *)data);
			return ERR_PTR(-EINVAL);
		}
		cgrp_dfl_root_visible = true;
		root = &cgrp_dfl_root;
		cgroup_get(&root->cgrp);
		goto out_mount;
	}

	mutex_lock(&cgroup_mutex);

	/* First find the desired set of subsystems */
	ret = parse_cgroupfs_options(data, &opts);
	if (ret)
		goto out_unlock;

	/*
	 * Destruction of cgroup root is asynchronous, so subsystems may
	 * still be dying after the previous unmount.  Let's drain the
	 * dying subsystems.  We just need to ensure that the ones
	 * unmounted previously finish dying and don't care about new ones
	 * starting.  Testing ref liveliness is good enough.
	 */
	for_each_subsys(ss, i) {
		if (!(opts.subsys_mask & (1 << i)) ||
		    ss->root == &cgrp_dfl_root)
			continue;

		if (!percpu_ref_tryget_live(&ss->root->cgrp.self.refcnt)) {
			mutex_unlock(&cgroup_mutex);
			msleep(10);
			ret = restart_syscall();
			goto out_free;
		}
		cgroup_put(&ss->root->cgrp);
	}

	for_each_root(root) {
		bool name_match = false;

		if (root == &cgrp_dfl_root)
			continue;

		/*
		 * If we asked for a name then it must match.  Also, if
		 * name matches but subsys_mask doesn't, we should fail.
		 * Remember whether name matched.
		 */
		if (opts.name) {
			if (strcmp(opts.name, root->name))
				continue;
			name_match = true;
		}

		/*
		 * If we asked for subsystems (or explicitly for no
		 * subsystems) then they must match.
		 */
		if ((opts.subsys_mask || opts.none) &&
		    (opts.subsys_mask != root->subsys_mask)) {
			if (!name_match)
				continue;
			ret = -EBUSY;
			goto out_unlock;
		}

		if (root->flags ^ opts.flags)
			pr_warn("new mount options do not match the existing superblock, will be ignored\n");

		/*
		 * We want to reuse @root whose lifetime is governed by its
		 * ->cgrp.  Let's check whether @root is alive and keep it
		 * that way.  As cgroup_kill_sb() can happen anytime, we
		 * want to block it by pinning the sb so that @root doesn't
		 * get killed before mount is complete.
		 *
		 * With the sb pinned, tryget_live can reliably indicate
		 * whether @root can be reused.  If it's being killed,
		 * drain it.  We could use a waitqueue for the wait, but
		 * this path is super cold.  Let's just sleep a bit and
		 * retry.
		 */
		pinned_sb = kernfs_pin_sb(root->kf_root, NULL);
		if (IS_ERR(pinned_sb) ||
		    !percpu_ref_tryget_live(&root->cgrp.self.refcnt)) {
			mutex_unlock(&cgroup_mutex);
			if (!IS_ERR_OR_NULL(pinned_sb))
				deactivate_super(pinned_sb);
			msleep(10);
			ret = restart_syscall();
			goto out_free;
		}

		ret = 0;
		goto out_unlock;
	}

	/*
	 * No such thing, create a new one.  name= matching without subsys
	 * specification is allowed for already existing hierarchies but we
	 * can't create a new one without a subsys specification.
	 */
	if (!opts.subsys_mask && !opts.none) {
		ret = -EINVAL;
		goto out_unlock;
	}

	root = kzalloc(sizeof(*root), GFP_KERNEL);
	if (!root) {
		ret = -ENOMEM;
		goto out_unlock;
	}

	init_cgroup_root(root, &opts);

	ret = cgroup_setup_root(root, opts.subsys_mask);
	if (ret)
		cgroup_free_root(root);

out_unlock:
	mutex_unlock(&cgroup_mutex);
out_free:
	kfree(opts.release_agent);
	kfree(opts.name);

	if (ret)
		return ERR_PTR(ret);
out_mount:
	dentry = kernfs_mount(fs_type, flags, root->kf_root,
			      is_v2 ? CGROUP2_SUPER_MAGIC : CGROUP_SUPER_MAGIC,
			      &new_sb);
	if (IS_ERR(dentry) || !new_sb)
		cgroup_put(&root->cgrp);

	/*
	 * If @pinned_sb, we're reusing an existing root and holding an
	 * extra ref on its sb.  Mount is complete.  Put the extra ref.
	 */
	if (pinned_sb) {
		WARN_ON(new_sb);
		deactivate_super(pinned_sb);
	}

	return dentry;
}

static void cgroup_kill_sb(struct super_block *sb)
{
	struct kernfs_root *kf_root = kernfs_root_from_sb(sb);
	struct cgroup_root *root = cgroup_root_from_kf(kf_root);

	/*
	 * If @root doesn't have any mounts or children, start killing it.
	 * This prevents new mounts by disabling percpu_ref_tryget_live().
	 * cgroup_mount() may wait for @root's release.
	 *
	 * And don't kill the default root.
	 */
	if (!list_empty(&root->cgrp.self.children) ||
	    root == &cgrp_dfl_root)
		cgroup_put(&root->cgrp);
	else
		percpu_ref_kill(&root->cgrp.self.refcnt);

	kernfs_kill_sb(sb);
}

static struct file_system_type cgroup_fs_type = {
	.name = "cgroup",
	.mount = cgroup_mount,
	.kill_sb = cgroup_kill_sb,
};

static struct file_system_type cgroup2_fs_type = {
	.name = "cgroup2",
	.mount = cgroup_mount,
	.kill_sb = cgroup_kill_sb,
};

/**
 * task_cgroup_path - cgroup path of a task in the first cgroup hierarchy
 * @task: target task
 * @buf: the buffer to write the path into
 * @buflen: the length of the buffer
 *
 * Determine @task's cgroup on the first (the one with the lowest non-zero
 * hierarchy_id) cgroup hierarchy and copy its path into @buf.  This
 * function grabs cgroup_mutex and shouldn't be used inside locks used by
 * cgroup controller callbacks.
 *
 * Return value is the same as kernfs_path().
 */
char *task_cgroup_path(struct task_struct *task, char *buf, size_t buflen)
{
	struct cgroup_root *root;
	struct cgroup *cgrp;
	int hierarchy_id = 1;
	char *path = NULL;

	mutex_lock(&cgroup_mutex);
	spin_lock_bh(&css_set_lock);

	root = idr_get_next(&cgroup_hierarchy_idr, &hierarchy_id);

	if (root) {
		cgrp = task_cgroup_from_root(task, root);
		path = cgroup_path(cgrp, buf, buflen);
	} else {
		/* if no hierarchy exists, everyone is in "/" */
		if (strlcpy(buf, "/", buflen) < buflen)
			path = buf;
	}

	spin_unlock_bh(&css_set_lock);
	mutex_unlock(&cgroup_mutex);
	return path;
}
EXPORT_SYMBOL_GPL(task_cgroup_path);
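
/*
 * Illustrative usage sketch for task_cgroup_path() (the buffer name is
 * hypothetical):
 *
 *	char buf[PATH_MAX];
 *
 *	if (task_cgroup_path(current, buf, sizeof(buf)))
 *		pr_info("first hierarchy cgroup: %s\n", buf);
 */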

/* used to track tasks and other necessary states during migration */
struct cgroup_taskset {
	/* the src and dst cset list running through cset->mg_node */
	struct list_head	src_csets;
	struct list_head	dst_csets;

	/* the subsys currently being processed */
	int			ssid;

	/*
	 * Fields for cgroup_taskset_*() iteration.
	 *
	 * Before migration is committed, the target migration tasks are on
	 * ->mg_tasks of the csets on ->src_csets.  After, on ->mg_tasks of
	 * the csets on ->dst_csets.  ->csets points to either ->src_csets
	 * or ->dst_csets depending on whether migration is committed.
	 *
	 * ->cur_cset and ->cur_task point to the current task position
	 * during iteration.
	 */
	struct list_head	*csets;
	struct css_set		*cur_cset;
	struct task_struct	*cur_task;
};

#define CGROUP_TASKSET_INIT(tset)	(struct cgroup_taskset){	\
	.src_csets		= LIST_HEAD_INIT(tset.src_csets),	\
	.dst_csets		= LIST_HEAD_INIT(tset.dst_csets),	\
	.csets			= &tset.src_csets,			\
}

/**
 * cgroup_taskset_add - try to add a migration target task to a taskset
 * @task: target task
 * @tset: target taskset
 *
 * Add @task, which is a migration target, to @tset.  This function becomes
 * a noop if @task doesn't need to be migrated.  @task's css_set should have
 * been added as a migration source and @task->cg_list will be moved from
 * the css_set's tasks list to the mg_tasks one.
 */
static void cgroup_taskset_add(struct task_struct *task,
			       struct cgroup_taskset *tset)
{
	struct css_set *cset;

	lockdep_assert_held(&css_set_lock);

	/* @task either already exited or can't exit until the end */
	if (task->flags & PF_EXITING)
		return;

	/* leave @task alone if post_fork() hasn't linked it yet */
	if (list_empty(&task->cg_list))
		return;

	cset = task_css_set(task);
	if (!cset->mg_src_cgrp)
		return;

	list_move_tail(&task->cg_list, &cset->mg_tasks);
	if (list_empty(&cset->mg_node))
		list_add_tail(&cset->mg_node, &tset->src_csets);
	if (list_empty(&cset->mg_dst_cset->mg_node))
		list_move_tail(&cset->mg_dst_cset->mg_node,
			       &tset->dst_csets);
}

/**
 * cgroup_taskset_first - reset taskset and return the first task
 * @tset: taskset of interest
 * @dst_cssp: output variable for the destination css
 *
 * @tset iteration is initialized and the first task is returned.
 */
struct task_struct *cgroup_taskset_first(struct cgroup_taskset *tset,
					 struct cgroup_subsys_state **dst_cssp)
{
	tset->cur_cset = list_first_entry(tset->csets, struct css_set, mg_node);
	tset->cur_task = NULL;

	return cgroup_taskset_next(tset, dst_cssp);
}

/**
 * cgroup_taskset_next - iterate to the next task in taskset
 * @tset: taskset of interest
 * @dst_cssp: output variable for the destination css
 *
 * Return the next task in @tset.  Iteration must have been initialized
 * with cgroup_taskset_first().
 */
struct task_struct *cgroup_taskset_next(struct cgroup_taskset *tset,
					struct cgroup_subsys_state **dst_cssp)
{
	struct css_set *cset = tset->cur_cset;
	struct task_struct *task = tset->cur_task;

	while (&cset->mg_node != tset->csets) {
		if (!task)
			task = list_first_entry(&cset->mg_tasks,
						struct task_struct, cg_list);
		else
			task = list_next_entry(task, cg_list);

		if (&task->cg_list != &cset->mg_tasks) {
			tset->cur_cset = cset;
			tset->cur_task = task;

			/*
			 * This function may be called both before and
			 * after cgroup_taskset_migrate().  The two cases
			 * can be distinguished by looking at whether @cset
			 * has its ->mg_dst_cset set.
			 */
			if (cset->mg_dst_cset)
				*dst_cssp = cset->mg_dst_cset->subsys[tset->ssid];
			else
				*dst_cssp = cset->subsys[tset->ssid];

			return task;
		}

		cset = list_next_entry(cset, mg_node);
		task = NULL;
	}

	return NULL;
}
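
/*
 * Sketch of how a controller callback might walk a taskset with the
 * iterators above, via the cgroup_taskset_for_each() wrapper from
 * cgroup.h (example_attach() and do_something() are hypothetical):
 *
 *	static void example_attach(struct cgroup_taskset *tset)
 *	{
 *		struct cgroup_subsys_state *css;
 *		struct task_struct *task;
 *
 *		cgroup_taskset_for_each(task, css, tset)
 *			do_something(css, task);
 *	}
 */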

/**
 * cgroup_taskset_migrate - migrate a taskset to a cgroup
 * @tset: target taskset
 * @dst_cgrp: destination cgroup
 *
 * Migrate tasks in @tset to @dst_cgrp.  This function fails iff one of the
 * ->can_attach callbacks fails and guarantees that either all or none of
 * the tasks in @tset are migrated.  @tset is consumed regardless of
 * success.
 */
static int cgroup_taskset_migrate(struct cgroup_taskset *tset,
				  struct cgroup *dst_cgrp)
{
	struct cgroup_subsys_state *css, *failed_css = NULL;
	struct task_struct *task, *tmp_task;
	struct css_set *cset, *tmp_cset;
	int i, ret;

	/* methods shouldn't be called if no task is actually migrating */
	if (list_empty(&tset->src_csets))
		return 0;

	/* check that we can legitimately attach to the cgroup */
	for_each_e_css(css, i, dst_cgrp) {
		if (css->ss->can_attach) {
			tset->ssid = i;
			ret = css->ss->can_attach(tset);
			if (ret) {
				failed_css = css;
				goto out_cancel_attach;
			}
		}
	}

	/*
	 * Now that we're guaranteed success, proceed to move all tasks to
	 * the new cgroup.  There are no failure cases after here, so this
	 * is the commit point.
	 */
	spin_lock_bh(&css_set_lock);
	list_for_each_entry(cset, &tset->src_csets, mg_node) {
		list_for_each_entry_safe(task, tmp_task, &cset->mg_tasks, cg_list) {
			struct css_set *from_cset = task_css_set(task);
			struct css_set *to_cset = cset->mg_dst_cset;

			get_css_set(to_cset);
			css_set_move_task(task, from_cset, to_cset, true);
			put_css_set_locked(from_cset);
		}
	}
	spin_unlock_bh(&css_set_lock);

	/*
	 * Migration is committed, all target tasks are now on dst_csets.
	 * Nothing is sensitive to fork() after this point.  Notify
	 * controllers that migration is complete.
	 */
	tset->csets = &tset->dst_csets;

	for_each_e_css(css, i, dst_cgrp) {
		if (css->ss->attach) {
			tset->ssid = i;
			css->ss->attach(tset);
		}
	}

	ret = 0;
	goto out_release_tset;

out_cancel_attach:
	for_each_e_css(css, i, dst_cgrp) {
		if (css == failed_css)
			break;
		if (css->ss->cancel_attach) {
			tset->ssid = i;
			css->ss->cancel_attach(tset);
		}
	}
out_release_tset:
	spin_lock_bh(&css_set_lock);
	list_splice_init(&tset->dst_csets, &tset->src_csets);
	list_for_each_entry_safe(cset, tmp_cset, &tset->src_csets, mg_node) {
		list_splice_tail_init(&cset->mg_tasks, &cset->tasks);
		list_del_init(&cset->mg_node);
	}
	spin_unlock_bh(&css_set_lock);
	return ret;
}

/**
 * cgroup_migrate_finish - cleanup after attach
 * @preloaded_csets: list of preloaded css_sets
 *
 * Undo cgroup_migrate_add_src() and cgroup_migrate_prepare_dst().  See
 * those functions for details.
 */
static void cgroup_migrate_finish(struct list_head *preloaded_csets)
{
	struct css_set *cset, *tmp_cset;

	lockdep_assert_held(&cgroup_mutex);

	spin_lock_bh(&css_set_lock);
	list_for_each_entry_safe(cset, tmp_cset, preloaded_csets, mg_preload_node) {
		cset->mg_src_cgrp = NULL;
		cset->mg_dst_cset = NULL;
		list_del_init(&cset->mg_preload_node);
		put_css_set_locked(cset);
	}
	spin_unlock_bh(&css_set_lock);
}

/**
 * cgroup_migrate_add_src - add a migration source css_set
 * @src_cset: the source css_set to add
 * @dst_cgrp: the destination cgroup
 * @preloaded_csets: list of preloaded css_sets
 *
 * Tasks belonging to @src_cset are about to be migrated to @dst_cgrp.  Pin
 * @src_cset and add it to @preloaded_csets, which should later be cleaned
 * up by cgroup_migrate_finish().
 *
 * This function may be called without holding cgroup_threadgroup_rwsem
 * even if the target is a process.  Threads may be created and destroyed
 * but as long as cgroup_mutex is not dropped, no new css_set can be put
 * into play and the preloaded css_sets are guaranteed to cover all
 * migrations.
 */
static void cgroup_migrate_add_src(struct css_set *src_cset,
				   struct cgroup *dst_cgrp,
				   struct list_head *preloaded_csets)
{
	struct cgroup *src_cgrp;

	lockdep_assert_held(&cgroup_mutex);
	lockdep_assert_held(&css_set_lock);

	src_cgrp = cset_cgroup_from_root(src_cset, dst_cgrp->root);

	if (!list_empty(&src_cset->mg_preload_node))
		return;

	WARN_ON(src_cset->mg_src_cgrp);
	WARN_ON(!list_empty(&src_cset->mg_tasks));
	WARN_ON(!list_empty(&src_cset->mg_node));

	src_cset->mg_src_cgrp = src_cgrp;
	get_css_set(src_cset);
	list_add(&src_cset->mg_preload_node, preloaded_csets);
}

/**
 * cgroup_migrate_prepare_dst - prepare destination css_sets for migration
 * @dst_cgrp: the destination cgroup (may be %NULL)
 * @preloaded_csets: list of preloaded source css_sets
 *
 * Tasks are about to be moved to @dst_cgrp and all the source css_sets
 * have been preloaded to @preloaded_csets.  This function looks up and
 * pins all destination css_sets, links each to its source, and appends
 * them to @preloaded_csets.  If @dst_cgrp is %NULL, the destination of
 * each source css_set is assumed to be its cgroup on the default
 * hierarchy.
 *
 * This function must be called after cgroup_migrate_add_src() has been
 * called on each migration source css_set.  After migration is performed
 * using cgroup_migrate(), cgroup_migrate_finish() must be called on
 * @preloaded_csets.
 */
static int cgroup_migrate_prepare_dst(struct cgroup *dst_cgrp,
				      struct list_head *preloaded_csets)
{
	LIST_HEAD(csets);
	struct css_set *src_cset, *tmp_cset;

	lockdep_assert_held(&cgroup_mutex);

	/*
	 * Except for the root, subtree_ss_mask must be zero for a cgroup
	 * with tasks so that child cgroups don't compete against tasks.
	 */
	if (dst_cgrp && cgroup_on_dfl(dst_cgrp) && cgroup_parent(dst_cgrp) &&
	    dst_cgrp->subtree_ss_mask)
		return -EBUSY;

	/* look up the dst cset for each src cset and link it to src */
	list_for_each_entry_safe(src_cset, tmp_cset, preloaded_csets, mg_preload_node) {
		struct css_set *dst_cset;

		dst_cset = find_css_set(src_cset,
					dst_cgrp ?: src_cset->dfl_cgrp);
		if (!dst_cset)
			goto err;

		WARN_ON_ONCE(src_cset->mg_dst_cset || dst_cset->mg_dst_cset);

		/*
		 * If src cset equals dst, it's noop.  Drop the src.
		 * cgroup_migrate() will skip the cset too.  Note that we
		 * can't handle src == dst as some nodes are used by both.
		 */
		if (src_cset == dst_cset) {
			src_cset->mg_src_cgrp = NULL;
			list_del_init(&src_cset->mg_preload_node);
			put_css_set(src_cset);
			put_css_set(dst_cset);
			continue;
		}

		src_cset->mg_dst_cset = dst_cset;

		if (list_empty(&dst_cset->mg_preload_node))
			list_add(&dst_cset->mg_preload_node, &csets);
		else
			put_css_set(dst_cset);
	}

	list_splice_tail(&csets, preloaded_csets);
	return 0;
err:
	cgroup_migrate_finish(&csets);
	return -ENOMEM;
}

/**
 * cgroup_migrate - migrate a process or task to a cgroup
 * @leader: the leader of the process or the task to migrate
 * @threadgroup: whether @leader points to the whole process or a single task
 * @cgrp: the destination cgroup
 *
 * Migrate a process or task denoted by @leader to @cgrp.  If migrating a
 * process, the caller must be holding cgroup_threadgroup_rwsem.  The
 * caller is also responsible for invoking cgroup_migrate_add_src() and
 * cgroup_migrate_prepare_dst() on the targets before invoking this
 * function and following up with cgroup_migrate_finish().
 *
 * As long as a controller's ->can_attach() doesn't fail, this function is
 * guaranteed to succeed.  This means that, excluding ->can_attach()
 * failure, when migrating multiple targets, the success or failure can be
 * decided for all targets by invoking cgroup_migrate_prepare_dst() before
 * actually starting migrating.
 */
static int cgroup_migrate(struct task_struct *leader, bool threadgroup,
			  struct cgroup *cgrp)
{
	struct cgroup_taskset tset = CGROUP_TASKSET_INIT(tset);
	struct task_struct *task;

	/*
	 * Prevent freeing of tasks while we take a snapshot. Tasks that are
	 * already PF_EXITING could be freed from underneath us unless we
	 * take an rcu_read_lock.
	 */
	spin_lock_bh(&css_set_lock);
	rcu_read_lock();
	task = leader;
	do {
		cgroup_taskset_add(task, &tset);
		if (!threadgroup)
			break;
	} while_each_thread(leader, task);
	rcu_read_unlock();
	spin_unlock_bh(&css_set_lock);

	return cgroup_taskset_migrate(&tset, cgrp);
}

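/*
 * The migration helpers above are always used in the same sequence:
 *
 *	1. cgroup_migrate_add_src() on each source css_set (css_set_lock)
 *	2. cgroup_migrate_prepare_dst()
 *	3. cgroup_migrate()
 *	4. cgroup_migrate_finish()
 *
 * cgroup_attach_task() below is the canonical example of this pattern.
 */
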
/**
 * cgroup_attach_task - attach a task or a whole threadgroup to a cgroup
 * @dst_cgrp: the cgroup to attach to
 * @leader: the task or the leader of the threadgroup to be attached
 * @threadgroup: attach the whole threadgroup?
 *
 * Call holding cgroup_mutex and cgroup_threadgroup_rwsem.
 */
static int cgroup_attach_task(struct cgroup *dst_cgrp,
			      struct task_struct *leader, bool threadgroup)
{
	LIST_HEAD(preloaded_csets);
	struct task_struct *task;
	int ret;

	/* look up all src csets */
	spin_lock_bh(&css_set_lock);
	rcu_read_lock();
	task = leader;
	do {
		cgroup_migrate_add_src(task_css_set(task), dst_cgrp,
				       &preloaded_csets);
		if (!threadgroup)
			break;
	} while_each_thread(leader, task);
	rcu_read_unlock();
	spin_unlock_bh(&css_set_lock);

	/* prepare dst csets and commit */
	ret = cgroup_migrate_prepare_dst(dst_cgrp, &preloaded_csets);
	if (!ret)
		ret = cgroup_migrate(leader, threadgroup, dst_cgrp);

	cgroup_migrate_finish(&preloaded_csets);
	return ret;
}

static int cgroup_procs_write_permission(struct task_struct *task,
					 struct cgroup *dst_cgrp,
					 struct kernfs_open_file *of)
{
	const struct cred *cred = current_cred();
	const struct cred *tcred = get_task_cred(task);
	int ret = 0;

	/*
	 * even if we're attaching all tasks in the thread group, we only
	 * need to check permissions on one of them.
	 */
	if (!uid_eq(cred->euid, GLOBAL_ROOT_UID) &&
	    !uid_eq(cred->euid, tcred->uid) &&
	    !uid_eq(cred->euid, tcred->suid))
		ret = -EACCES;

	if (!ret && cgroup_on_dfl(dst_cgrp)) {
		struct super_block *sb = of->file->f_path.dentry->d_sb;
		struct cgroup *cgrp;
		struct inode *inode;

		spin_lock_bh(&css_set_lock);
		cgrp = task_cgroup_from_root(task, &cgrp_dfl_root);
		spin_unlock_bh(&css_set_lock);

		while (!cgroup_is_descendant(dst_cgrp, cgrp))
			cgrp = cgroup_parent(cgrp);

		ret = -ENOMEM;
		inode = kernfs_get_inode(sb, cgrp->procs_file.kn);
		if (inode) {
			ret = inode_permission(inode, MAY_WRITE);
			iput(inode);
		}
	}

	put_cred(tcred);
	return ret;
}
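
/*
 * Effect of the above on the default hierarchy: moving a task between two
 * cgroups additionally requires write access to cgroup.procs of their
 * common ancestor, which the loop above locates by walking up from the
 * task's current cgroup until it covers @dst_cgrp.
 */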

/*
 * Find the task_struct of the task to attach by vpid and pass it along to the
 * function to attach either it or all tasks in its threadgroup. Will lock
 * cgroup_mutex and threadgroup.
 */
static ssize_t __cgroup_procs_write(struct kernfs_open_file *of, char *buf,
				    size_t nbytes, loff_t off, bool threadgroup)
{
	struct task_struct *tsk;
	struct cgroup *cgrp;
	pid_t pid;
	int ret;

	if (kstrtoint(strstrip(buf), 0, &pid) || pid < 0)
		return -EINVAL;

	cgrp = cgroup_kn_lock_live(of->kn);
	if (!cgrp)
		return -ENODEV;

	percpu_down_write(&cgroup_threadgroup_rwsem);
	rcu_read_lock();
	if (pid) {
		tsk = find_task_by_vpid(pid);
		if (!tsk) {
			ret = -ESRCH;
			goto out_unlock_rcu;
		}
	} else {
		tsk = current;
	}

	if (threadgroup)
		tsk = tsk->group_leader;

	/*
	 * Workqueue threads may acquire PF_NO_SETAFFINITY and become
	 * trapped in a cpuset, or an RT worker may be born in a cgroup
	 * with no rt_runtime allocated.  Just say no.
	 */
	if (tsk == kthreadd_task || (tsk->flags & PF_NO_SETAFFINITY)) {
		ret = -EINVAL;
		goto out_unlock_rcu;
	}

	get_task_struct(tsk);
	rcu_read_unlock();

	ret = cgroup_procs_write_permission(tsk, cgrp, of);
	if (!ret)
		ret = cgroup_attach_task(cgrp, tsk, threadgroup);

	put_task_struct(tsk);
	goto out_unlock_threadgroup;

out_unlock_rcu:
	rcu_read_unlock();
out_unlock_threadgroup:
	percpu_up_write(&cgroup_threadgroup_rwsem);
	cgroup_kn_unlock(of->kn);
	cpuset_post_attach_flush();
	return ret ?: nbytes;
}

/**
 * cgroup_attach_task_all - attach task 'tsk' to all cgroups of task 'from'
 * @from: attach to all cgroups of a given task
 * @tsk: the task to be attached
 */
int cgroup_attach_task_all(struct task_struct *from, struct task_struct *tsk)
{
	struct cgroup_root *root;
	int retval = 0;

	mutex_lock(&cgroup_mutex);
	for_each_root(root) {
		struct cgroup *from_cgrp;

		if (root == &cgrp_dfl_root)
			continue;

		spin_lock_bh(&css_set_lock);
		from_cgrp = task_cgroup_from_root(from, root);
		spin_unlock_bh(&css_set_lock);

		retval = cgroup_attach_task(from_cgrp, tsk, false);
		if (retval)
			break;
	}
	mutex_unlock(&cgroup_mutex);

	return retval;
}
EXPORT_SYMBOL_GPL(cgroup_attach_task_all);

static ssize_t cgroup_tasks_write(struct kernfs_open_file *of,
				  char *buf, size_t nbytes, loff_t off)
{
	return __cgroup_procs_write(of, buf, nbytes, off, false);
}

static ssize_t cgroup_procs_write(struct kernfs_open_file *of,
				  char *buf, size_t nbytes, loff_t off)
{
	return __cgroup_procs_write(of, buf, nbytes, off, true);
}

static ssize_t cgroup_release_agent_write(struct kernfs_open_file *of,
					  char *buf, size_t nbytes, loff_t off)
{
	struct cgroup *cgrp;

	BUILD_BUG_ON(sizeof(cgrp->root->release_agent_path) < PATH_MAX);

	cgrp = cgroup_kn_lock_live(of->kn);
	if (!cgrp)
		return -ENODEV;
	spin_lock(&release_agent_path_lock);
	strlcpy(cgrp->root->release_agent_path, strstrip(buf),
		sizeof(cgrp->root->release_agent_path));
	spin_unlock(&release_agent_path_lock);
	cgroup_kn_unlock(of->kn);
	return nbytes;
}
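
/*
 * Illustrative example (the paths are hypothetical): setting a v1 release
 * agent through the control file handled above:
 *
 *	# echo /sbin/my_release_agent > /sys/fs/cgroup/mygrp/release_agent
 */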

static int cgroup_release_agent_show(struct seq_file *seq, void *v)
{
	struct cgroup *cgrp = seq_css(seq)->cgroup;

	spin_lock(&release_agent_path_lock);
	seq_puts(seq, cgrp->root->release_agent_path);
	spin_unlock(&release_agent_path_lock);
	seq_putc(seq, '\n');
	return 0;
}

static int cgroup_sane_behavior_show(struct seq_file *seq, void *v)
{
	seq_puts(seq, "0\n");
	return 0;
}

static void cgroup_print_ss_mask(struct seq_file *seq, unsigned long ss_mask)
{
	struct cgroup_subsys *ss;
	bool printed = false;
	int ssid;

	do_each_subsys_mask(ss, ssid, ss_mask) {
		if (printed)
			seq_putc(seq, ' ');
		seq_printf(seq, "%s", ss->name);
		printed = true;
	} while_each_subsys_mask();
	if (printed)
		seq_putc(seq, '\n');
}

/* show controllers which are currently attached to the default hierarchy */
static int cgroup_root_controllers_show(struct seq_file *seq, void *v)
{
	struct cgroup *cgrp = seq_css(seq)->cgroup;

	cgroup_print_ss_mask(seq, cgrp->root->subsys_mask &
			     ~cgrp_dfl_root_inhibit_ss_mask);
	return 0;
}

/* show controllers which are enabled from the parent */
static int cgroup_controllers_show(struct seq_file *seq, void *v)
{
	struct cgroup *cgrp = seq_css(seq)->cgroup;

	cgroup_print_ss_mask(seq, cgroup_parent(cgrp)->subtree_control);
	return 0;
}

/* show controllers which are enabled for a given cgroup's children */
static int cgroup_subtree_control_show(struct seq_file *seq, void *v)
{
	struct cgroup *cgrp = seq_css(seq)->cgroup;

	cgroup_print_ss_mask(seq, cgrp->subtree_control);
	return 0;
}

/**
 * cgroup_update_dfl_csses - update css assoc of a subtree in default hierarchy
 * @cgrp: root of the subtree to update csses for
 *
 * @cgrp's subtree_ss_mask has changed and its subtree's (self excluded)
 * css associations need to be updated accordingly.  This function looks up
 * all css_sets which are attached to the subtree, creates the matching
 * updated css_sets and migrates the tasks to the new ones.
 */
static int cgroup_update_dfl_csses(struct cgroup *cgrp)
{
	LIST_HEAD(preloaded_csets);
	struct cgroup_taskset tset = CGROUP_TASKSET_INIT(tset);
	struct cgroup_subsys_state *css;
	struct css_set *src_cset;
	int ret;

	lockdep_assert_held(&cgroup_mutex);

	percpu_down_write(&cgroup_threadgroup_rwsem);

	/* look up all csses currently attached to @cgrp's subtree */
	spin_lock_bh(&css_set_lock);
	css_for_each_descendant_pre(css, cgroup_css(cgrp, NULL)) {
		struct cgrp_cset_link *link;

		/* self is not affected by subtree_ss_mask change */
		if (css->cgroup == cgrp)
			continue;

		list_for_each_entry(link, &css->cgroup->cset_links, cset_link)
			cgroup_migrate_add_src(link->cset, cgrp,
					       &preloaded_csets);
	}
	spin_unlock_bh(&css_set_lock);

	/* NULL dst indicates self on default hierarchy */
	ret = cgroup_migrate_prepare_dst(NULL, &preloaded_csets);
	if (ret)
		goto out_finish;

	spin_lock_bh(&css_set_lock);
	list_for_each_entry(src_cset, &preloaded_csets, mg_preload_node) {
		struct task_struct *task, *ntask;

		/* src_csets precede dst_csets, break on the first dst_cset */
		if (!src_cset->mg_src_cgrp)
			break;

		/* all tasks in src_csets need to be migrated */
		list_for_each_entry_safe(task, ntask, &src_cset->tasks, cg_list)
			cgroup_taskset_add(task, &tset);
	}
	spin_unlock_bh(&css_set_lock);

	ret = cgroup_taskset_migrate(&tset, cgrp);
out_finish:
	cgroup_migrate_finish(&preloaded_csets);
	percpu_up_write(&cgroup_threadgroup_rwsem);
	return ret;
}

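/*
 * Illustrative use of the "cgroup.subtree_control" interface implemented
 * below (available controller names depend on the kernel config):
 *
 *	# echo "+memory -io" > cgroup.subtree_control
 *
 * enables the memory controller and disables the io controller for the
 * children of this cgroup.
 */
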
/* change the enabled child controllers for a cgroup in the default hierarchy */
static ssize_t cgroup_subtree_control_write(struct kernfs_open_file *of,
					    char *buf, size_t nbytes,
					    loff_t off)
{
	unsigned long enable = 0, disable = 0;
	unsigned long css_enable, css_disable, old_sc, new_sc, old_ss, new_ss;
	struct cgroup *cgrp, *child;
	struct cgroup_subsys *ss;
	char *tok;
	int ssid, ret;

	/*
	 * Parse input - space separated list of subsystem names prefixed
	 * with either + or -.
	 */
	buf = strstrip(buf);
	while ((tok = strsep(&buf, " "))) {
		if (tok[0] == '\0')
			continue;
		do_each_subsys_mask(ss, ssid, ~cgrp_dfl_root_inhibit_ss_mask) {
			if (!cgroup_ssid_enabled(ssid) ||
			    strcmp(tok + 1, ss->name))
				continue;

			if (*tok == '+') {
				enable |= 1 << ssid;
				disable &= ~(1 << ssid);
			} else if (*tok == '-') {
				disable |= 1 << ssid;
				enable &= ~(1 << ssid);
			} else {
				return -EINVAL;
			}
			break;
		} while_each_subsys_mask();
		if (ssid == CGROUP_SUBSYS_COUNT)
			return -EINVAL;
	}

	cgrp = cgroup_kn_lock_live(of->kn);
	if (!cgrp)
		return -ENODEV;

	for_each_subsys(ss, ssid) {
		if (enable & (1 << ssid)) {
			if (cgrp->subtree_control & (1 << ssid)) {
				enable &= ~(1 << ssid);
				continue;
			}

			/* unavailable or not enabled on the parent? */
			if (!(cgrp_dfl_root.subsys_mask & (1 << ssid)) ||
			    (cgroup_parent(cgrp) &&
			     !(cgroup_parent(cgrp)->subtree_control & (1 << ssid)))) {
				ret = -ENOENT;
				goto out_unlock;
			}
		} else if (disable & (1 << ssid)) {
			if (!(cgrp->subtree_control & (1 << ssid))) {
				disable &= ~(1 << ssid);
				continue;
			}

			/* a child has it enabled? */
			cgroup_for_each_live_child(child, cgrp) {
				if (child->subtree_control & (1 << ssid)) {
					ret = -EBUSY;
					goto out_unlock;
				}
			}
		}
	}

	if (!enable && !disable) {
		ret = 0;
		goto out_unlock;
	}

	/*
	 * Except for the root, subtree_control must be zero for a cgroup
	 * with tasks so that child cgroups don't compete against tasks.
	 */
	if (enable && cgroup_parent(cgrp) && !list_empty(&cgrp->cset_links)) {
		ret = -EBUSY;
		goto out_unlock;
	}

	/*
	 * Update subsys masks and calculate what needs to be done.  More
	 * subsystems than specified may need to be enabled or disabled
	 * depending on subsystem dependencies.
	 */
	old_sc = cgrp->subtree_control;
	old_ss = cgrp->subtree_ss_mask;
	new_sc = (old_sc | enable) & ~disable;
	new_ss = cgroup_calc_subtree_ss_mask(cgrp, new_sc);

	css_enable = ~old_ss & new_ss;
	css_disable = old_ss & ~new_ss;
	enable |= css_enable;
	disable |= css_disable;

	/*
	 * Because css offlining is asynchronous, userland might try to
	 * re-enable the same controller while the previous instance is
	 * still around.  In such cases, wait till it's gone using
	 * offline_waitq.
	 */
	do_each_subsys_mask(ss, ssid, css_enable) {
		cgroup_for_each_live_child(child, cgrp) {
			DEFINE_WAIT(wait);

			if (!cgroup_css(child, ss))
				continue;

			cgroup_get(child);
			prepare_to_wait(&child->offline_waitq, &wait,
					TASK_UNINTERRUPTIBLE);
			cgroup_kn_unlock(of->kn);
			schedule();
			finish_wait(&child->offline_waitq, &wait);
			cgroup_put(child);

			return restart_syscall();
		}
	} while_each_subsys_mask();

	cgrp->subtree_control = new_sc;
	cgrp->subtree_ss_mask = new_ss;

	/*
	 * Create new csses or make the existing ones visible.  A css is
	 * created invisible if it's being implicitly enabled through
	 * dependency.  An invisible css is made visible when the userland
	 * explicitly enables it.
	 */
	for_each_subsys(ss, ssid) {
		if (!(enable & (1 << ssid)))
			continue;

		cgroup_for_each_live_child(child, cgrp) {
			if (css_enable & (1 << ssid))
				ret = create_css(child, ss,
					cgrp->subtree_control & (1 << ssid));
			else
				ret = css_populate_dir(cgroup_css(child, ss),
						       NULL);
			if (ret)
				goto err_undo_css;
		}
	}

	/*
	 * At this point, cgroup_e_css() results reflect the new csses
	 * making the following cgroup_update_dfl_csses() properly update
	 * css associations of all tasks in the subtree.
	 */
	ret = cgroup_update_dfl_csses(cgrp);
	if (ret)
		goto err_undo_css;

	/*
	 * All tasks are migrated out of disabled csses.  Kill or hide
	 * them.  A css is hidden when the userland requests it to be
	 * disabled while other subsystems are still depending on it.  The
	 * css must not actively control resources and be in the vanilla
	 * state if it's made visible again later.  Controllers which may
	 * be depended upon should provide ->css_reset() for this purpose.
	 */
	for_each_subsys(ss, ssid) {
		if (!(disable & (1 << ssid)))
			continue;

		cgroup_for_each_live_child(child, cgrp) {
			struct cgroup_subsys_state *css = cgroup_css(child, ss);

			if (css_disable & (1 << ssid)) {
				kill_css(css);
			} else {
				css_clear_dir(css, NULL);
				if (ss->css_reset)
					ss->css_reset(css);
			}
		}
	}

	kernfs_activate(cgrp->kn);
	ret = 0;
out_unlock:
	cgroup_kn_unlock(of->kn);
	return ret ?: nbytes;

err_undo_css:
	cgrp->subtree_control = old_sc;
	cgrp->subtree_ss_mask = old_ss;

	for_each_subsys(ss, ssid) {
		if (!(enable & (1 << ssid)))
			continue;

		cgroup_for_each_live_child(child, cgrp) {
			struct cgroup_subsys_state *css = cgroup_css(child, ss);

			if (!css)
				continue;

			if (css_enable & (1 << ssid))
				kill_css(css);
			else
				css_clear_dir(css, NULL);
		}
	}
	goto out_unlock;
}

static int cgroup_events_show(struct seq_file *seq, void *v)
{
	seq_printf(seq, "populated %d\n",
		   cgroup_is_populated(seq_css(seq)->cgroup));
	return 0;
}
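
/*
 * Reading "cgroup.events", which cgroup_events_show() above implements,
 * currently yields a single line, e.g.:
 *
 *	populated 1
 */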

static ssize_t cgroup_file_write(struct kernfs_open_file *of, char *buf,
				 size_t nbytes, loff_t off)
{
	struct cgroup *cgrp = of->kn->parent->priv;
	struct cftype *cft = of->kn->priv;
	struct cgroup_subsys_state *css;
	int ret;

	if (cft->write)
		return cft->write(of, buf, nbytes, off);

	/*
	 * kernfs guarantees that a file isn't deleted with operations in
	 * flight, which means that the matching css is and stays alive and
	 * doesn't need to be pinned.  The RCU locking is not necessary
	 * either.  It's just for the convenience of using cgroup_css().
	 */
	rcu_read_lock();
	css = cgroup_css(cgrp, cft->ss);
	rcu_read_unlock();

	if (cft->write_u64) {
		unsigned long long v;
		ret = kstrtoull(buf, 0, &v);
		if (!ret)
			ret = cft->write_u64(css, cft, v);
	} else if (cft->write_s64) {
		long long v;
		ret = kstrtoll(buf, 0, &v);
		if (!ret)
			ret = cft->write_s64(css, cft, v);
	} else {
		ret = -EINVAL;
	}

	return ret ?: nbytes;
}

static void *cgroup_seqfile_start(struct seq_file *seq, loff_t *ppos)
{
	return seq_cft(seq)->seq_start(seq, ppos);
}

static void *cgroup_seqfile_next(struct seq_file *seq, void *v, loff_t *ppos)
{
	return seq_cft(seq)->seq_next(seq, v, ppos);
}

static void cgroup_seqfile_stop(struct seq_file *seq, void *v)
{
	seq_cft(seq)->seq_stop(seq, v);
}

static int cgroup_seqfile_show(struct seq_file *m, void *arg)
{
	struct cftype *cft = seq_cft(m);
	struct cgroup_subsys_state *css = seq_css(m);

	if (cft->seq_show)
		return cft->seq_show(m, arg);

	if (cft->read_u64)
		seq_printf(m, "%llu\n", cft->read_u64(css, cft));
	else if (cft->read_s64)
		seq_printf(m, "%lld\n", cft->read_s64(css, cft));
	else
		return -EINVAL;
	return 0;
}

static struct kernfs_ops cgroup_kf_single_ops = {
	.atomic_write_len	= PAGE_SIZE,
	.write			= cgroup_file_write,
	.seq_show		= cgroup_seqfile_show,
};

static struct kernfs_ops cgroup_kf_ops = {
	.atomic_write_len	= PAGE_SIZE,
	.write			= cgroup_file_write,
	.seq_start		= cgroup_seqfile_start,
	.seq_next		= cgroup_seqfile_next,
	.seq_stop		= cgroup_seqfile_stop,
	.seq_show		= cgroup_seqfile_show,
};

/*
 * cgroup_rename - Only allow simple rename of directories in place.
 */
static int cgroup_rename(struct kernfs_node *kn, struct kernfs_node *new_parent,
			 const char *new_name_str)
{
	struct cgroup *cgrp = kn->priv;
	int ret;

	if (kernfs_type(kn) != KERNFS_DIR)
		return -ENOTDIR;
	if (kn->parent != new_parent)
		return -EIO;

	/*
	 * This isn't a proper migration and its usefulness is very
	 * limited.  Disallow on the default hierarchy.
	 */
	if (cgroup_on_dfl(cgrp))
		return -EPERM;

	/*
	 * We're gonna grab cgroup_mutex which nests outside kernfs
	 * active_ref.  kernfs_rename() doesn't require active_ref
	 * protection.  Break them before grabbing cgroup_mutex.
	 */
	kernfs_break_active_protection(new_parent);
	kernfs_break_active_protection(kn);

	mutex_lock(&cgroup_mutex);

	ret = kernfs_rename(kn, new_parent, new_name_str);

	mutex_unlock(&cgroup_mutex);

	kernfs_unbreak_active_protection(kn);
	kernfs_unbreak_active_protection(new_parent);
	return ret;
}

/* set uid and gid of cgroup dirs and files to that of the creator */
static int cgroup_kn_set_ugid(struct kernfs_node *kn)
{
	struct iattr iattr = { .ia_valid = ATTR_UID | ATTR_GID,
			       .ia_uid = current_fsuid(),
			       .ia_gid = current_fsgid(), };

	if (uid_eq(iattr.ia_uid, GLOBAL_ROOT_UID) &&
	    gid_eq(iattr.ia_gid, GLOBAL_ROOT_GID))
		return 0;

	return kernfs_setattr(kn, &iattr);
}

static int cgroup_add_file(struct cgroup_subsys_state *css, struct cgroup *cgrp,
			   struct cftype *cft)
{
	char name[CGROUP_FILE_NAME_MAX];
	struct kernfs_node *kn;
	struct lock_class_key *key = NULL;
	int ret;

#ifdef CONFIG_DEBUG_LOCK_ALLOC
	key = &cft->lockdep_key;
#endif
	kn = __kernfs_create_file(cgrp->kn, cgroup_file_name(cgrp, cft, name),
				  cgroup_file_mode(cft), 0, cft->kf_ops, cft,
				  NULL, key);
	if (IS_ERR(kn))
		return PTR_ERR(kn);

	ret = cgroup_kn_set_ugid(kn);
	if (ret) {
		kernfs_remove(kn);
		return ret;
	}

	if (cft->file_offset) {
		struct cgroup_file *cfile = (void *)css + cft->file_offset;

		spin_lock_irq(&cgroup_file_kn_lock);
		cfile->kn = kn;
		spin_unlock_irq(&cgroup_file_kn_lock);
	}

	return 0;
}
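
/*
 * Illustrative (hypothetical) use of ->file_offset handled above: a
 * controller embeds a struct cgroup_file in its css so it can later reach
 * the file's kernfs node, e.g. for cgroup_file_notify():
 *
 *	struct example_css {
 *		struct cgroup_subsys_state	css;
 *		struct cgroup_file		events_file;
 *	};
 *
 *	.file_offset = offsetof(struct example_css, events_file),
 */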

/**
 * cgroup_addrm_files - add or remove files to a cgroup directory
 * @css: the target css
 * @cgrp: the target cgroup (usually css->cgroup)
 * @cfts: array of cftypes to be added
 * @is_add: whether to add or remove
 *
 * Depending on @is_add, add or remove files defined by @cfts on @cgrp.
 * For removals, this function never fails.
 */
static int cgroup_addrm_files(struct cgroup_subsys_state *css,
			      struct cgroup *cgrp, struct cftype cfts[],
			      bool is_add)
{
	struct cftype *cft, *cft_end = NULL;
	int ret = 0;

	lockdep_assert_held(&cgroup_mutex);

restart:
	for (cft = cfts; cft != cft_end && cft->name[0] != '\0'; cft++) {
		/* does cft->flags tell us to skip this file on @cgrp? */
		if ((cft->flags & __CFTYPE_ONLY_ON_DFL) && !cgroup_on_dfl(cgrp))
			continue;
		if ((cft->flags & __CFTYPE_NOT_ON_DFL) && cgroup_on_dfl(cgrp))
			continue;
		if ((cft->flags & CFTYPE_NOT_ON_ROOT) && !cgroup_parent(cgrp))
			continue;
		if ((cft->flags & CFTYPE_ONLY_ON_ROOT) && cgroup_parent(cgrp))
			continue;

		if (is_add) {
			ret = cgroup_add_file(css, cgrp, cft);
			if (ret) {
				pr_warn("%s: failed to add %s, err=%d\n",
					__func__, cft->name, ret);
				cft_end = cft;
				is_add = false;
				goto restart;
			}
		} else {
			cgroup_rm_file(cgrp, cft);
		}
	}
	return ret;
}

static int cgroup_apply_cftypes(struct cftype *cfts, bool is_add)
{
	LIST_HEAD(pending);
	struct cgroup_subsys *ss = cfts[0].ss;
	struct cgroup *root = &ss->root->cgrp;
	struct cgroup_subsys_state *css;
	int ret = 0;

	lockdep_assert_held(&cgroup_mutex);

	/* add/rm files for all cgroups created before */
	css_for_each_descendant_pre(css, cgroup_css(root, ss)) {
		struct cgroup *cgrp = css->cgroup;

		if (cgroup_is_dead(cgrp))
			continue;

		ret = cgroup_addrm_files(css, cgrp, cfts, is_add);
		if (ret)
			break;
	}

	if (is_add && !ret)
		kernfs_activate(root->kn);
	return ret;
}

static void cgroup_exit_cftypes(struct cftype *cfts)
{
	struct cftype *cft;

	for (cft = cfts; cft->name[0] != '\0'; cft++) {
		/* free copy for custom atomic_write_len, see init_cftypes() */
		if (cft->max_write_len && cft->max_write_len != PAGE_SIZE)
			kfree(cft->kf_ops);
		cft->kf_ops = NULL;
		cft->ss = NULL;

		/* revert flags set by cgroup core while adding @cfts */
		cft->flags &= ~(__CFTYPE_ONLY_ON_DFL | __CFTYPE_NOT_ON_DFL);
	}
}

static int cgroup_init_cftypes(struct cgroup_subsys *ss, struct cftype *cfts)
{
	struct cftype *cft;

	for (cft = cfts; cft->name[0] != '\0'; cft++) {
		struct kernfs_ops *kf_ops;

		WARN_ON(cft->ss || cft->kf_ops);

		if (cft->seq_start)
			kf_ops = &cgroup_kf_ops;
		else
			kf_ops = &cgroup_kf_single_ops;

		/*
		 * Ugh... if @cft wants a custom max_write_len, we need to
		 * make a copy of kf_ops to set its atomic_write_len.
		 */
		if (cft->max_write_len && cft->max_write_len != PAGE_SIZE) {
			kf_ops = kmemdup(kf_ops, sizeof(*kf_ops), GFP_KERNEL);
			if (!kf_ops) {
				cgroup_exit_cftypes(cfts);
				return -ENOMEM;
			}
			kf_ops->atomic_write_len = cft->max_write_len;
		}

		cft->kf_ops = kf_ops;
		cft->ss = ss;
	}

	return 0;
}

static int cgroup_rm_cftypes_locked(struct cftype *cfts)
{
	lockdep_assert_held(&cgroup_mutex);

	if (!cfts || !cfts[0].ss)
		return -ENOENT;

	list_del(&cfts->node);
	cgroup_apply_cftypes(cfts, false);
	cgroup_exit_cftypes(cfts);
	return 0;
}

/**
 * cgroup_rm_cftypes - remove an array of cftypes from a subsystem
 * @cfts: zero-length name terminated array of cftypes
 *
 * Unregister @cfts.  Files described by @cfts are removed from all
 * existing cgroups and all future cgroups won't have them either.  This
 * function can be called anytime whether @cfts' subsys is attached or not.
 *
 * Returns 0 on successful unregistration, -ENOENT if @cfts is not
 * registered.
 */
int cgroup_rm_cftypes(struct cftype *cfts)
{
	int ret;

	mutex_lock(&cgroup_mutex);
	ret = cgroup_rm_cftypes_locked(cfts);
	mutex_unlock(&cgroup_mutex);
	return ret;
}

/**
 * cgroup_add_cftypes - add an array of cftypes to a subsystem
 * @ss: target cgroup subsystem
 * @cfts: zero-length name terminated array of cftypes
 *
 * Register @cfts to @ss.  Files described by @cfts are created for all
 * existing cgroups to which @ss is attached and all future cgroups will
 * have them too.  This function can be called anytime whether @ss is
 * attached or not.
 *
 * Returns 0 on successful registration, -errno on failure.  Note that this
 * function currently returns 0 as long as @cfts registration is successful
 * even if some file creation attempts on existing cgroups fail.
 */
static int cgroup_add_cftypes(struct cgroup_subsys *ss, struct cftype *cfts)
{
	int ret;

	if (!cgroup_ssid_enabled(ss->id))
		return 0;

	if (!cfts || cfts[0].name[0] == '\0')
		return 0;

	ret = cgroup_init_cftypes(ss, cfts);
	if (ret)
		return ret;

	mutex_lock(&cgroup_mutex);

	list_add_tail(&cfts->node, &ss->cfts);
	ret = cgroup_apply_cftypes(cfts, true);
	if (ret)
		cgroup_rm_cftypes_locked(cfts);

	mutex_unlock(&cgroup_mutex);
	return ret;
}

/**
 * cgroup_add_dfl_cftypes - add an array of cftypes for default hierarchy
 * @ss: target cgroup subsystem
 * @cfts: zero-length name terminated array of cftypes
 *
 * Similar to cgroup_add_cftypes() but the added files are only used for
 * the default hierarchy.
 */
int cgroup_add_dfl_cftypes(struct cgroup_subsys *ss, struct cftype *cfts)
{
	struct cftype *cft;

	for (cft = cfts; cft && cft->name[0] != '\0'; cft++)
		cft->flags |= __CFTYPE_ONLY_ON_DFL;
	return cgroup_add_cftypes(ss, cfts);
}

/**
 * cgroup_add_legacy_cftypes - add an array of cftypes for legacy hierarchies
 * @ss: target cgroup subsystem
 * @cfts: zero-length name terminated array of cftypes
 *
 * Similar to cgroup_add_cftypes() but the added files are only used for
 * the legacy hierarchies.
 */
int cgroup_add_legacy_cftypes(struct cgroup_subsys *ss, struct cftype *cfts)
{
	struct cftype *cft;

	for (cft = cfts; cft && cft->name[0] != '\0'; cft++)
		cft->flags |= __CFTYPE_NOT_ON_DFL;
	return cgroup_add_cftypes(ss, cfts);
}

/**
 * cgroup_file_notify - generate a file modified event for a cgroup_file
 * @cfile: target cgroup_file
 *
 * @cfile must have been obtained by setting cftype->file_offset.
 */
void cgroup_file_notify(struct cgroup_file *cfile)
{
	unsigned long flags;

	spin_lock_irqsave(&cgroup_file_kn_lock, flags);
	if (cfile->kn)
		kernfs_notify(cfile->kn);
	spin_unlock_irqrestore(&cgroup_file_kn_lock, flags);
}

/**
 * cgroup_task_count - count the number of tasks in a cgroup.
 * @cgrp: the cgroup in question
 *
 * Return the number of tasks in the cgroup.
 */
static int cgroup_task_count(const struct cgroup *cgrp)
{
	int count = 0;
	struct cgrp_cset_link *link;

	spin_lock_bh(&css_set_lock);
	list_for_each_entry(link, &cgrp->cset_links, cset_link)
		count += atomic_read(&link->cset->refcount);
	spin_unlock_bh(&css_set_lock);
	return count;
}

/**
 * css_next_child - find the next child of a given css
 * @pos: the current position (%NULL to initiate traversal)
 * @parent: css whose children to walk
 *
 * This function returns the next child of @parent and should be called
 * under either cgroup_mutex or RCU read lock.  The only requirement is
 * that @parent and @pos are accessible.  The next sibling is guaranteed to
 * be returned regardless of their states.
 *
 * If a subsystem synchronizes ->css_online() and the start of iteration, a
 * css which finished ->css_online() is guaranteed to be visible in the
 * future iterations and will stay visible until the last reference is put.
 * A css which hasn't finished ->css_online() or already finished
 * ->css_offline() may show up during traversal.  It's each subsystem's
 * responsibility to synchronize against on/offlining.
 */
struct cgroup_subsys_state *css_next_child(struct cgroup_subsys_state *pos,
					   struct cgroup_subsys_state *parent)
{
	struct cgroup_subsys_state *next;

	cgroup_assert_mutex_or_rcu_locked();

	/*
	 * @pos could already have been unlinked from the sibling list.
	 * Once a cgroup is removed, its ->sibling.next is no longer
	 * updated when its next sibling changes.  CSS_RELEASED is set when
	 * @pos is taken off list, at which time its next pointer is valid,
	 * and, as releases are serialized, the one pointed to by the next
	 * pointer is guaranteed to not have started release yet.  This
	 * implies that if we observe !CSS_RELEASED on @pos in this RCU
	 * critical section, the one pointed to by its next pointer is
	 * guaranteed to not have finished its RCU grace period even if we
	 * have dropped rcu_read_lock() in between iterations.
	 *
	 * If @pos has CSS_RELEASED set, its next pointer can't be
	 * dereferenced; however, as each css is given a monotonically
	 * increasing unique serial number and always appended to the
	 * sibling list, the next one can be found by walking the parent's
	 * children until the first css with higher serial number than
	 * @pos's.  While this path can be slower, it happens iff iteration
	 * races against release and the race window is very small.
	 */
	if (!pos) {
		next = list_entry_rcu(parent->children.next, struct cgroup_subsys_state, sibling);
	} else if (likely(!(pos->flags & CSS_RELEASED))) {
		next = list_entry_rcu(pos->sibling.next, struct cgroup_subsys_state, sibling);
	} else {
		list_for_each_entry_rcu(next, &parent->children, sibling)
			if (next->serial_nr > pos->serial_nr)
				break;
	}

	/*
	 * @next, if not pointing to the head, can be dereferenced and is
	 * the next sibling.
	 */
	if (&next->sibling != &parent->children)
		return next;
	return NULL;
}
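
/*
 * Usage sketch (editor's addition): counting a css's children under RCU
 * with the css_for_each_child() wrapper built on css_next_child().  The
 * helper name is hypothetical.
 *
 *	static int my_count_children(struct cgroup_subsys_state *parent)
 *	{
 *		struct cgroup_subsys_state *child;
 *		int nr = 0;
 *
 *		rcu_read_lock();
 *		css_for_each_child(child, parent)
 *			nr++;
 *		rcu_read_unlock();
 *		return nr;
 *	}
 */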

/**
 * css_next_descendant_pre - find the next descendant for pre-order walk
 * @pos: the current position (%NULL to initiate traversal)
 * @root: css whose descendants to walk
 *
 * To be used by css_for_each_descendant_pre().  Find the next descendant
 * to visit for pre-order traversal of @root's descendants.  @root is
 * included in the iteration and the first node to be visited.
 *
 * While this function requires cgroup_mutex or RCU read locking, it
 * doesn't require the whole traversal to be contained in a single critical
 * section.  This function will return the correct next descendant as long
 * as both @pos and @root are accessible and @pos is a descendant of @root.
 *
 * If a subsystem synchronizes ->css_online() and the start of iteration, a
 * css which finished ->css_online() is guaranteed to be visible in the
 * future iterations and will stay visible until the last reference is put.
 * A css which hasn't finished ->css_online() or already finished
 * ->css_offline() may show up during traversal.  It's each subsystem's
 * responsibility to synchronize against on/offlining.
 */
struct cgroup_subsys_state *
css_next_descendant_pre(struct cgroup_subsys_state *pos,
			struct cgroup_subsys_state *root)
{
	struct cgroup_subsys_state *next;

	cgroup_assert_mutex_or_rcu_locked();

	/* if first iteration, visit @root */
	if (!pos)
		return root;

	/* visit the first child if exists */
	next = css_next_child(NULL, pos);
	if (next)
		return next;

	/* no child, visit my or the closest ancestor's next sibling */
	while (pos != root) {
		next = css_next_child(pos, pos->parent);
		if (next)
			return next;
		pos = pos->parent;
	}

	return NULL;
}
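
/*
 * Usage sketch (editor's addition): a pre-order subtree walk via
 * css_for_each_descendant_pre(), the same pattern cgroup_apply_cftypes()
 * above uses.  @root is visited first; do_something() is hypothetical.
 *
 *	rcu_read_lock();
 *	css_for_each_descendant_pre(pos, root)
 *		if (pos->flags & CSS_ONLINE)
 *			do_something(pos);
 *	rcu_read_unlock();
 */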

/**
 * css_rightmost_descendant - return the rightmost descendant of a css
 * @pos: css of interest
 *
 * Return the rightmost descendant of @pos.  If there's no descendant, @pos
 * is returned.  This can be used during pre-order traversal to skip
 * subtree of @pos.
 *
 * While this function requires cgroup_mutex or RCU read locking, it
 * doesn't require the whole traversal to be contained in a single critical
 * section.  This function will return the correct rightmost descendant as
 * long as @pos is accessible.
 */
struct cgroup_subsys_state *
css_rightmost_descendant(struct cgroup_subsys_state *pos)
{
	struct cgroup_subsys_state *last, *tmp;

	cgroup_assert_mutex_or_rcu_locked();

	do {
		last = pos;
		/* ->prev isn't RCU safe, walk ->next till the end */
		pos = NULL;
		css_for_each_child(tmp, last)
			pos = tmp;
	} while (pos);

	return last;
}

static struct cgroup_subsys_state *
css_leftmost_descendant(struct cgroup_subsys_state *pos)
{
	struct cgroup_subsys_state *last;

	do {
		last = pos;
		pos = css_next_child(NULL, pos);
	} while (pos);

	return last;
}
/**
 * css_next_descendant_post - find the next descendant for post-order walk
 * @pos: the current position (%NULL to initiate traversal)
 * @root: css whose descendants to walk
 *
 * To be used by css_for_each_descendant_post().  Find the next descendant
 * to visit for post-order traversal of @root's descendants.  @root is
 * included in the iteration and the last node to be visited.
 *
 * While this function requires cgroup_mutex or RCU read locking, it
 * doesn't require the whole traversal to be contained in a single critical
 * section.  This function will return the correct next descendant as long
 * as both @pos and @root are accessible and @pos is a descendant of @root.
 *
 * If a subsystem synchronizes ->css_online() and the start of iteration, a
 * css which finished ->css_online() is guaranteed to be visible in the
 * future iterations and will stay visible until the last reference is put.
 * A css which hasn't finished ->css_online() or already finished
 * ->css_offline() may show up during traversal.  It's each subsystem's
 * responsibility to synchronize against on/offlining.
 */
struct cgroup_subsys_state *
css_next_descendant_post(struct cgroup_subsys_state *pos,
			 struct cgroup_subsys_state *root)
{
	struct cgroup_subsys_state *next;

	cgroup_assert_mutex_or_rcu_locked();

	/* if first iteration, visit leftmost descendant which may be @root */
	if (!pos)
		return css_leftmost_descendant(root);

	/* if we visited @root, we're done */
	if (pos == root)
		return NULL;

	/* if there's an unvisited sibling, visit its leftmost descendant */
	next = css_next_child(pos, pos->parent);
	if (next)
		return css_leftmost_descendant(next);

	/* no sibling left, visit parent */
	return pos->parent;
}

/**
 * css_has_online_children - does a css have online children
 * @css: the target css
 *
 * Returns %true if @css has any online children; otherwise, %false.  This
 * function can be called from any context but the caller is responsible
 * for synchronizing against on/offlining as necessary.
 */
bool css_has_online_children(struct cgroup_subsys_state *css)
{
	struct cgroup_subsys_state *child;
	bool ret = false;

	rcu_read_lock();
	css_for_each_child(child, css) {
		if (child->flags & CSS_ONLINE) {
			ret = true;
			break;
		}
	}
	rcu_read_unlock();
	return ret;
}

/**
 * css_task_iter_advance_css_set - advance a task iterator to the next css_set
 * @it: the iterator to advance
 *
 * Advance @it to the next css_set to walk.
 */
static void css_task_iter_advance_css_set(struct css_task_iter *it)
{
	struct list_head *l = it->cset_pos;
	struct cgrp_cset_link *link;
	struct css_set *cset;

	lockdep_assert_held(&css_set_lock);

	/* Advance to the next non-empty css_set */
	do {
		l = l->next;
		if (l == it->cset_head) {
			it->cset_pos = NULL;
			it->task_pos = NULL;
			return;
		}

		if (it->ss) {
			cset = container_of(l, struct css_set,
					    e_cset_node[it->ss->id]);
		} else {
			link = list_entry(l, struct cgrp_cset_link, cset_link);
			cset = link->cset;
		}
	} while (!css_set_populated(cset));

	it->cset_pos = l;

	if (!list_empty(&cset->tasks))
		it->task_pos = cset->tasks.next;
	else
		it->task_pos = cset->mg_tasks.next;

	it->tasks_head = &cset->tasks;
	it->mg_tasks_head = &cset->mg_tasks;

	/*
	 * We don't keep css_sets locked across iteration steps and thus
	 * need to take steps to ensure that iteration can be resumed after
	 * the lock is re-acquired.  Iteration is performed at two levels -
	 * css_sets and tasks in them.
	 *
	 * Once created, a css_set never leaves its cgroup lists, so a
	 * pinned css_set is guaranteed to stay put and we can resume
	 * iteration afterwards.
	 *
	 * Tasks may leave @cset across iteration steps.  This is resolved
	 * by registering each iterator with the css_set currently being
	 * walked and making css_set_move_task() advance iterators whose
	 * next task is leaving.
	 */
	if (it->cur_cset) {
		list_del(&it->iters_node);
		put_css_set_locked(it->cur_cset);
	}
	get_css_set(cset);
	it->cur_cset = cset;
	list_add(&it->iters_node, &cset->task_iters);
}

static void css_task_iter_advance(struct css_task_iter *it)
{
	struct list_head *l = it->task_pos;

	lockdep_assert_held(&css_set_lock);
	WARN_ON_ONCE(!l);

	/*
	 * Advance iterator to find next entry.  cset->tasks is consumed
	 * first and then ->mg_tasks.  After ->mg_tasks, we move onto the
	 * next cset.
	 */
	l = l->next;

	if (l == it->tasks_head)
		l = it->mg_tasks_head->next;

	if (l == it->mg_tasks_head)
		css_task_iter_advance_css_set(it);
	else
		it->task_pos = l;
}

/**
 * css_task_iter_start - initiate task iteration
 * @css: the css to walk tasks of
 * @it: the task iterator to use
 *
 * Initiate iteration through the tasks of @css.  The caller can call
 * css_task_iter_next() to walk through the tasks until the function
 * returns NULL.  On completion of iteration, css_task_iter_end() must be
 * called.
 */
void css_task_iter_start(struct cgroup_subsys_state *css,
			 struct css_task_iter *it)
{
	/* no one should try to iterate before mounting cgroups */
	WARN_ON_ONCE(!use_task_css_set_links);

	memset(it, 0, sizeof(*it));

	spin_lock_bh(&css_set_lock);

	it->ss = css->ss;

	if (it->ss)
		it->cset_pos = &css->cgroup->e_csets[css->ss->id];
	else
		it->cset_pos = &css->cgroup->cset_links;

	it->cset_head = it->cset_pos;

	css_task_iter_advance_css_set(it);

	spin_unlock_bh(&css_set_lock);
}

/**
 * css_task_iter_next - return the next task for the iterator
 * @it: the task iterator being iterated
 *
 * The "next" function for task iteration.  @it should have been
 * initialized via css_task_iter_start().  Returns NULL when the iteration
 * reaches the end.
 */
struct task_struct *css_task_iter_next(struct css_task_iter *it)
{
	if (it->cur_task) {
		put_task_struct(it->cur_task);
		it->cur_task = NULL;
	}

	spin_lock_bh(&css_set_lock);

	if (it->task_pos) {
		it->cur_task = list_entry(it->task_pos, struct task_struct,
					  cg_list);
		get_task_struct(it->cur_task);
		css_task_iter_advance(it);
	}

	spin_unlock_bh(&css_set_lock);

	return it->cur_task;
}

/**
 * css_task_iter_end - finish task iteration
 * @it: the task iterator to finish
 *
 * Finish task iteration started by css_task_iter_start().
 */
void css_task_iter_end(struct css_task_iter *it)
{
	if (it->cur_cset) {
		spin_lock_bh(&css_set_lock);
		list_del(&it->iters_node);
		put_css_set_locked(it->cur_cset);
		spin_unlock_bh(&css_set_lock);
	}

	if (it->cur_task)
		put_task_struct(it->cur_task);
}
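
/*
 * Usage sketch (editor's addition): the canonical start/next/end pattern,
 * as used by pidlist_array_load() and cgroupstats_build() below.  Each
 * task returned by css_task_iter_next() stays pinned until the next
 * css_task_iter_next() or css_task_iter_end() call.
 *
 *	struct css_task_iter it;
 *	struct task_struct *task;
 *
 *	css_task_iter_start(css, &it);
 *	while ((task = css_task_iter_next(&it)))
 *		inspect(task);	// hypothetical per-task work
 *	css_task_iter_end(&it);
 */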

/**
 * cgroup_transfer_tasks - move tasks from one cgroup to another
 * @to: cgroup to which the tasks will be moved
 * @from: cgroup in which the tasks currently reside
 *
 * Locking rules between cgroup_post_fork() and the migration path
 * guarantee that, if a task is forking while being migrated, the new child
 * is guaranteed to be either visible in the source cgroup after the
 * parent's migration is complete or put into the target cgroup.  No task
 * can slip out of migration through forking.
 */
int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from)
{
	LIST_HEAD(preloaded_csets);
	struct cgrp_cset_link *link;
	struct css_task_iter it;
	struct task_struct *task;
	int ret;

	mutex_lock(&cgroup_mutex);

	/* all tasks in @from are being moved, all csets are source */
	spin_lock_bh(&css_set_lock);
	list_for_each_entry(link, &from->cset_links, cset_link)
		cgroup_migrate_add_src(link->cset, to, &preloaded_csets);
	spin_unlock_bh(&css_set_lock);

	ret = cgroup_migrate_prepare_dst(to, &preloaded_csets);
	if (ret)
		goto out_err;

	/*
	 * Migrate tasks one-by-one until @from is empty.  This fails iff
	 * ->can_attach() fails.
	 */
	do {
		css_task_iter_start(&from->self, &it);
		task = css_task_iter_next(&it);
		if (task)
			get_task_struct(task);
		css_task_iter_end(&it);

		if (task) {
			ret = cgroup_migrate(task, false, to);
			put_task_struct(task);
		}
	} while (task && !ret);
out_err:
	cgroup_migrate_finish(&preloaded_csets);
	mutex_unlock(&cgroup_mutex);
	return ret;
}
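
/*
 * Usage sketch (editor's addition): draining an emptied cgroup into its
 * parent, roughly how the cpuset hotplug path uses this helper when a
 * cpuset loses all its CPUs or memory nodes:
 *
 *	ret = cgroup_transfer_tasks(cgroup_parent(cgrp), cgrp);
 */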

/*
 * Stuff for reading the 'tasks'/'procs' files.
 *
 * Reading this file can return large amounts of data if a cgroup has
 * *lots* of attached tasks. So it may need several calls to read(),
 * but we cannot guarantee that the information we produce is correct
 * unless we produce it entirely atomically.
 *
 */

/* which pidlist file are we talking about? */
enum cgroup_filetype {
	CGROUP_FILE_PROCS,
	CGROUP_FILE_TASKS,
};

/*
 * A pidlist is a list of pids that virtually represents the contents of one
 * of the cgroup files ("procs" or "tasks"). We keep a list of such pidlists,
 * a pair (one each for procs, tasks) for each pid namespace that's relevant
 * to the cgroup.
 */
struct cgroup_pidlist {
	/*
	 * used to find which pidlist is wanted. doesn't change as long as
	 * this particular list stays in the list.
	 */
	struct { enum cgroup_filetype type; struct pid_namespace *ns; } key;
	/* array of xids */
	pid_t *list;
	/* how many elements the above list has */
	int length;
	/* each of these stored in a list by its cgroup */
	struct list_head links;
	/* pointer to the cgroup we belong to, for list removal purposes */
	struct cgroup *owner;
	/* for delayed destruction */
	struct delayed_work destroy_dwork;
};

/*
 * The following two functions "fix" the issue where there are more pids
 * than kmalloc will give memory for; in such cases, we use vmalloc/vfree.
 * TODO: replace with a kernel-wide solution to this problem
 */
#define PIDLIST_TOO_LARGE(c) ((c) * sizeof(pid_t) > (PAGE_SIZE * 2))
static void *pidlist_allocate(int count)
{
	if (PIDLIST_TOO_LARGE(count))
		return vmalloc(count * sizeof(pid_t));
	else
		return kmalloc(count * sizeof(pid_t), GFP_KERNEL);
}

static void pidlist_free(void *p)
{
	kvfree(p);
}

/*
 * Used to destroy all pidlists lingering waiting for destroy timer.  None
 * should be left afterwards.
 */
static void cgroup_pidlist_destroy_all(struct cgroup *cgrp)
{
	struct cgroup_pidlist *l, *tmp_l;

	mutex_lock(&cgrp->pidlist_mutex);
	list_for_each_entry_safe(l, tmp_l, &cgrp->pidlists, links)
		mod_delayed_work(cgroup_pidlist_destroy_wq, &l->destroy_dwork, 0);
	mutex_unlock(&cgrp->pidlist_mutex);

	flush_workqueue(cgroup_pidlist_destroy_wq);
	BUG_ON(!list_empty(&cgrp->pidlists));
}

static void cgroup_pidlist_destroy_work_fn(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct cgroup_pidlist *l = container_of(dwork, struct cgroup_pidlist,
						destroy_dwork);
	struct cgroup_pidlist *tofree = NULL;

	mutex_lock(&l->owner->pidlist_mutex);

	/*
	 * Destroy iff we didn't get queued again.  The state won't change
	 * as destroy_dwork can only be queued while locked.
	 */
	if (!delayed_work_pending(dwork)) {
		list_del(&l->links);
		pidlist_free(l->list);
		put_pid_ns(l->key.ns);
		tofree = l;
	}

	mutex_unlock(&l->owner->pidlist_mutex);
	kfree(tofree);
}

/*
 * pidlist_uniq - given a kmalloc()ed list, strip out all duplicate entries
 * Returns the number of unique elements.
 */
static int pidlist_uniq(pid_t *list, int length)
{
	int src, dest = 1;

	/*
	 * we presume the 0th element is unique, so src starts at 1. trivial
	 * edge cases first; no work needs to be done for either
	 */
	if (length == 0 || length == 1)
		return length;
	/* src and dest walk down the list; dest counts unique elements */
	for (src = 1; src < length; src++) {
		/* find next unique element */
		while (list[src] == list[src-1]) {
			src++;
			if (src == length)
				goto after;
		}
		/* dest always points to where the next unique element goes */
		list[dest] = list[src];
		dest++;
	}
after:
	return dest;
}
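
/*
 * Worked example (editor's note): on the sorted input {3, 3, 5, 7, 7}
 * with length 5, pidlist_uniq() compacts the array in place so that the
 * first three entries are {3, 5, 7} and returns 3; entries past the
 * returned count are stale and must be ignored.
 */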

/*
 * The two pid files - tasks and cgroup.procs - guaranteed that the result
 * is sorted, which forced this whole pidlist fiasco.  As pid order is
 * different per namespace, each namespace needs differently sorted list,
 * making it impossible to use, for example, single rbtree of member tasks
 * sorted by task pointer.  As pidlists can be fairly large, allocating one
 * per open file is dangerous, so cgroup had to implement shared pool of
 * pidlists keyed by cgroup and namespace.
 *
 * All this extra complexity was caused by the original implementation
 * committing to an entirely unnecessary property.  In the long term, we
 * want to do away with it.  Explicitly scramble sort order if on the
 * default hierarchy so that no such expectation exists in the new
 * interface.
 *
 * Scrambling is done by swapping every two consecutive bits, which is
 * non-identity one-to-one mapping which disturbs sort order sufficiently.
 */
static pid_t pid_fry(pid_t pid)
{
	unsigned a = pid & 0x55555555;
	unsigned b = pid & 0xAAAAAAAA;

	return (a << 1) | (b >> 1);
}

static pid_t cgroup_pid_fry(struct cgroup *cgrp, pid_t pid)
{
	if (cgroup_on_dfl(cgrp))
		return pid_fry(pid);
	else
		return pid;
}
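
/*
 * Worked example (editor's note): pid_fry() swaps each adjacent bit pair,
 * so 1 (0b01) maps to 2 (0b10), 2 maps to 1 and 3 maps to 3.  Applying it
 * twice restores the input, i.e. pid_fry(pid_fry(pid)) == pid.
 */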

static int cmppid(const void *a, const void *b)
{
	return *(pid_t *)a - *(pid_t *)b;
}

static int fried_cmppid(const void *a, const void *b)
{
	return pid_fry(*(pid_t *)a) - pid_fry(*(pid_t *)b);
}

static struct cgroup_pidlist *cgroup_pidlist_find(struct cgroup *cgrp,
						  enum cgroup_filetype type)
{
	struct cgroup_pidlist *l;
	/* don't need task_nsproxy() if we're looking at ourself */
	struct pid_namespace *ns = task_active_pid_ns(current);

	lockdep_assert_held(&cgrp->pidlist_mutex);

	list_for_each_entry(l, &cgrp->pidlists, links)
		if (l->key.type == type && l->key.ns == ns)
			return l;
	return NULL;
}

/*
 * find the appropriate pidlist for our purpose (given procs vs tasks)
 * returns with the lock on that pidlist already held, and takes care
 * of the use count, or returns NULL with no locks held if we're out of
 * memory.
 */
static struct cgroup_pidlist *cgroup_pidlist_find_create(struct cgroup *cgrp,
						enum cgroup_filetype type)
{
	struct cgroup_pidlist *l;

	lockdep_assert_held(&cgrp->pidlist_mutex);

	l = cgroup_pidlist_find(cgrp, type);
	if (l)
		return l;

	/* entry not found; create a new one */
	l = kzalloc(sizeof(struct cgroup_pidlist), GFP_KERNEL);
	if (!l)
		return l;

	INIT_DELAYED_WORK(&l->destroy_dwork, cgroup_pidlist_destroy_work_fn);
	l->key.type = type;
	/* don't need task_nsproxy() if we're looking at ourself */
	l->key.ns = get_pid_ns(task_active_pid_ns(current));
	l->owner = cgrp;
	list_add(&l->links, &cgrp->pidlists);
	return l;
}

/*
 * Load a cgroup's pidarray with either procs' tgids or tasks' pids
 */
static int pidlist_array_load(struct cgroup *cgrp, enum cgroup_filetype type,
			      struct cgroup_pidlist **lp)
{
	pid_t *array;
	int length;
	int pid, n = 0; /* used for populating the array */
	struct css_task_iter it;
	struct task_struct *tsk;
	struct cgroup_pidlist *l;

	lockdep_assert_held(&cgrp->pidlist_mutex);

	/*
	 * If cgroup gets more users after we read count, we won't have
	 * enough space - tough.  This race is indistinguishable to the
	 * caller from the case that the additional cgroup users didn't
	 * show up until sometime later on.
	 */
	length = cgroup_task_count(cgrp);
	array = pidlist_allocate(length);
	if (!array)
		return -ENOMEM;
	/* now, populate the array */
	css_task_iter_start(&cgrp->self, &it);
	while ((tsk = css_task_iter_next(&it))) {
		if (unlikely(n == length))
			break;
		/* get tgid or pid for procs or tasks file respectively */
		if (type == CGROUP_FILE_PROCS)
			pid = task_tgid_vnr(tsk);
		else
			pid = task_pid_vnr(tsk);
		if (pid > 0) /* make sure to only use valid results */
			array[n++] = pid;
	}
	css_task_iter_end(&it);
	length = n;
	/* now sort & (if procs) strip out duplicates */
	if (cgroup_on_dfl(cgrp))
		sort(array, length, sizeof(pid_t), fried_cmppid, NULL);
	else
		sort(array, length, sizeof(pid_t), cmppid, NULL);
	if (type == CGROUP_FILE_PROCS)
		length = pidlist_uniq(array, length);

	l = cgroup_pidlist_find_create(cgrp, type);
	if (!l) {
		pidlist_free(array);
		return -ENOMEM;
	}

	/* store array, freeing old if necessary */
	pidlist_free(l->list);
	l->list = array;
	l->length = length;
	*lp = l;
	return 0;
}

/**
 * cgroupstats_build - build and fill cgroupstats
 * @stats: cgroupstats to fill information into
 * @dentry: A dentry entry belonging to the cgroup for which stats have
 * been requested.
 *
 * Build and fill cgroupstats so that taskstats can export it to user
 * space.
 */
int cgroupstats_build(struct cgroupstats *stats, struct dentry *dentry)
{
	struct kernfs_node *kn = kernfs_node_from_dentry(dentry);
	struct cgroup *cgrp;
	struct css_task_iter it;
	struct task_struct *tsk;

	/* it should be kernfs_node belonging to cgroupfs and is a directory */
	if (dentry->d_sb->s_type != &cgroup_fs_type || !kn ||
	    kernfs_type(kn) != KERNFS_DIR)
		return -EINVAL;

	mutex_lock(&cgroup_mutex);

	/*
	 * We aren't being called from kernfs and there's no guarantee on
	 * @kn->priv's validity.  For this and css_tryget_online_from_dir(),
	 * @kn->priv is RCU safe.  Let's do the RCU dancing.
	 */
	rcu_read_lock();
	cgrp = rcu_dereference(kn->priv);
	if (!cgrp || cgroup_is_dead(cgrp)) {
		rcu_read_unlock();
		mutex_unlock(&cgroup_mutex);
		return -ENOENT;
	}
	rcu_read_unlock();

	css_task_iter_start(&cgrp->self, &it);
	while ((tsk = css_task_iter_next(&it))) {
		switch (tsk->state) {
		case TASK_RUNNING:
			stats->nr_running++;
			break;
		case TASK_INTERRUPTIBLE:
			stats->nr_sleeping++;
			break;
		case TASK_UNINTERRUPTIBLE:
			stats->nr_uninterruptible++;
			break;
		case TASK_STOPPED:
			stats->nr_stopped++;
			break;
		default:
			if (delayacct_is_task_waiting_on_io(tsk))
				stats->nr_io_wait++;
			break;
		}
	}
	css_task_iter_end(&it);

	mutex_unlock(&cgroup_mutex);
	return 0;
}


/*
 * seq_file methods for the tasks/procs files. The seq_file position is the
 * next pid to display; the seq_file iterator is a pointer to the pid
 * in the cgroup->l->list array.
 */

static void *cgroup_pidlist_start(struct seq_file *s, loff_t *pos)
{
	/*
	 * Initially we receive a position value that corresponds to
	 * one more than the last pid shown (or 0 on the first call or
	 * after a seek to the start). Use a binary-search to find the
	 * next pid to display, if any
	 */
	struct kernfs_open_file *of = s->private;
	struct cgroup *cgrp = seq_css(s)->cgroup;
	struct cgroup_pidlist *l;
	enum cgroup_filetype type = seq_cft(s)->private;
	int index = 0, pid = *pos;
	int *iter, ret;

	mutex_lock(&cgrp->pidlist_mutex);

	/*
	 * !NULL @of->priv indicates that this isn't the first start()
	 * after open.  If the matching pidlist is around, we can use that.
	 * Look for it.  Note that @of->priv can't be used directly.  It
	 * could already have been destroyed.
	 */
	if (of->priv)
		of->priv = cgroup_pidlist_find(cgrp, type);

	/*
	 * Either this is the first start() after open or the matching
	 * pidlist has been destroyed in between.  Create a new one.
	 */
	if (!of->priv) {
		ret = pidlist_array_load(cgrp, type,
					 (struct cgroup_pidlist **)&of->priv);
		if (ret)
			return ERR_PTR(ret);
	}
	l = of->priv;

	if (pid) {
		int end = l->length;

		while (index < end) {
			int mid = (index + end) / 2;
			if (cgroup_pid_fry(cgrp, l->list[mid]) == pid) {
				index = mid;
				break;
			} else if (cgroup_pid_fry(cgrp, l->list[mid]) <= pid)
				index = mid + 1;
			else
				end = mid;
		}
	}
	/* If we're off the end of the array, we're done */
	if (index >= l->length)
		return NULL;
	/* Update the abstract position to be the actual pid that we found */
	iter = l->list + index;
	*pos = cgroup_pid_fry(cgrp, *iter);
	return iter;
}

static void cgroup_pidlist_stop(struct seq_file *s, void *v)
{
	struct kernfs_open_file *of = s->private;
	struct cgroup_pidlist *l = of->priv;

	if (l)
		mod_delayed_work(cgroup_pidlist_destroy_wq, &l->destroy_dwork,
				 CGROUP_PIDLIST_DESTROY_DELAY);
	mutex_unlock(&seq_css(s)->cgroup->pidlist_mutex);
}

static void *cgroup_pidlist_next(struct seq_file *s, void *v, loff_t *pos)
{
	struct kernfs_open_file *of = s->private;
	struct cgroup_pidlist *l = of->priv;
	pid_t *p = v;
	pid_t *end = l->list + l->length;
	/*
	 * Advance to the next pid in the array. If this goes off the
	 * end, we're done
	 */
	p++;
	if (p >= end) {
		return NULL;
	} else {
		*pos = cgroup_pid_fry(seq_css(s)->cgroup, *p);
		return p;
	}
}

static int cgroup_pidlist_show(struct seq_file *s, void *v)
{
	seq_printf(s, "%d\n", *(int *)v);

	return 0;
}

static u64 cgroup_read_notify_on_release(struct cgroup_subsys_state *css,
					 struct cftype *cft)
{
	return notify_on_release(css->cgroup);
}

static int cgroup_write_notify_on_release(struct cgroup_subsys_state *css,
					  struct cftype *cft, u64 val)
{
	if (val)
		set_bit(CGRP_NOTIFY_ON_RELEASE, &css->cgroup->flags);
	else
		clear_bit(CGRP_NOTIFY_ON_RELEASE, &css->cgroup->flags);
	return 0;
}

static u64 cgroup_clone_children_read(struct cgroup_subsys_state *css,
				      struct cftype *cft)
{
	return test_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags);
}

static int cgroup_clone_children_write(struct cgroup_subsys_state *css,
				       struct cftype *cft, u64 val)
{
	if (val)
		set_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags);
	else
		clear_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags);
	return 0;
}

/* cgroup core interface files for the default hierarchy */
static struct cftype cgroup_dfl_base_files[] = {
	{
		.name = "cgroup.procs",
		.file_offset = offsetof(struct cgroup, procs_file),
		.seq_start = cgroup_pidlist_start,
		.seq_next = cgroup_pidlist_next,
		.seq_stop = cgroup_pidlist_stop,
		.seq_show = cgroup_pidlist_show,
		.private = CGROUP_FILE_PROCS,
		.write = cgroup_procs_write,
	},
	{
		.name = "cgroup.controllers",
		.flags = CFTYPE_ONLY_ON_ROOT,
		.seq_show = cgroup_root_controllers_show,
	},
	{
		.name = "cgroup.controllers",
		.flags = CFTYPE_NOT_ON_ROOT,
		.seq_show = cgroup_controllers_show,
	},
	{
		.name = "cgroup.subtree_control",
		.seq_show = cgroup_subtree_control_show,
		.write = cgroup_subtree_control_write,
	},
	{
		.name = "cgroup.events",
		.flags = CFTYPE_NOT_ON_ROOT,
		.file_offset = offsetof(struct cgroup, events_file),
		.seq_show = cgroup_events_show,
	},
	{ }	/* terminate */
};

/* cgroup core interface files for the legacy hierarchies */
static struct cftype cgroup_legacy_base_files[] = {
	{
		.name = "cgroup.procs",
		.seq_start = cgroup_pidlist_start,
		.seq_next = cgroup_pidlist_next,
		.seq_stop = cgroup_pidlist_stop,
		.seq_show = cgroup_pidlist_show,
		.private = CGROUP_FILE_PROCS,
		.write = cgroup_procs_write,
	},
	{
		.name = "cgroup.clone_children",
		.read_u64 = cgroup_clone_children_read,
		.write_u64 = cgroup_clone_children_write,
	},
	{
		.name = "cgroup.sane_behavior",
		.flags = CFTYPE_ONLY_ON_ROOT,
		.seq_show = cgroup_sane_behavior_show,
	},
	{
		.name = "tasks",
		.seq_start = cgroup_pidlist_start,
		.seq_next = cgroup_pidlist_next,
		.seq_stop = cgroup_pidlist_stop,
		.seq_show = cgroup_pidlist_show,
		.private = CGROUP_FILE_TASKS,
		.write = cgroup_tasks_write,
	},
	{
		.name = "notify_on_release",
		.read_u64 = cgroup_read_notify_on_release,
		.write_u64 = cgroup_write_notify_on_release,
	},
	{
		.name = "release_agent",
		.flags = CFTYPE_ONLY_ON_ROOT,
		.seq_show = cgroup_release_agent_show,
		.write = cgroup_release_agent_write,
		.max_write_len = PATH_MAX - 1,
	},
	{ }	/* terminate */
};

/*
 * css destruction is a four-stage process.
 *
 * 1. Destruction starts.  Killing of the percpu_ref is initiated.
 *    Implemented in kill_css().
 *
 * 2. When the percpu_ref is confirmed to be visible as killed on all CPUs
 *    and thus css_tryget_online() is guaranteed to fail, the css can be
 *    offlined by invoking offline_css().  After offlining, the base ref is
 *    put.  Implemented in css_killed_work_fn().
 *
 * 3. When the percpu_ref reaches zero, the only possible remaining
 *    accessors are inside RCU read sections.  css_release() schedules the
 *    RCU callback.
 *
 * 4. After the grace period, the css can be freed.  Implemented in
 *    css_free_work_fn().
 *
 * It is actually hairier because both steps 2 and 4 require process context
 * and thus involve punting to css->destroy_work adding two additional
 * steps to the already complex sequence.
 */
static void css_free_work_fn(struct work_struct *work)
{
	struct cgroup_subsys_state *css =
		container_of(work, struct cgroup_subsys_state, destroy_work);
	struct cgroup_subsys *ss = css->ss;
	struct cgroup *cgrp = css->cgroup;

	percpu_ref_exit(&css->refcnt);

	if (ss) {
		/* css free path */
		struct cgroup_subsys_state *parent = css->parent;
		int id = css->id;

		ss->css_free(css);
		cgroup_idr_remove(&ss->css_idr, id);
		cgroup_put(cgrp);

		if (parent)
			css_put(parent);
	} else {
		/* cgroup free path */
		atomic_dec(&cgrp->root->nr_cgrps);
		cgroup_pidlist_destroy_all(cgrp);
		cancel_work_sync(&cgrp->release_agent_work);

		if (cgroup_parent(cgrp)) {
			/*
			 * We get a ref to the parent, and put the ref when
			 * this cgroup is being freed, so it's guaranteed
			 * that the parent won't be destroyed before its
			 * children.
			 */
			cgroup_put(cgroup_parent(cgrp));
			kernfs_put(cgrp->kn);
			kfree(cgrp);
		} else {
			/*
			 * This is root cgroup's refcnt reaching zero,
			 * which indicates that the root should be
			 * released.
			 */
			cgroup_destroy_root(cgrp->root);
		}
	}
}

static void css_free_rcu_fn(struct rcu_head *rcu_head)
{
	struct cgroup_subsys_state *css =
		container_of(rcu_head, struct cgroup_subsys_state, rcu_head);

	INIT_WORK(&css->destroy_work, css_free_work_fn);
	queue_work(cgroup_destroy_wq, &css->destroy_work);
}

static void css_release_work_fn(struct work_struct *work)
{
	struct cgroup_subsys_state *css =
		container_of(work, struct cgroup_subsys_state, destroy_work);
	struct cgroup_subsys *ss = css->ss;
	struct cgroup *cgrp = css->cgroup;

	mutex_lock(&cgroup_mutex);

	css->flags |= CSS_RELEASED;
	list_del_rcu(&css->sibling);

	if (ss) {
		/* css release path */
		cgroup_idr_replace(&ss->css_idr, NULL, css->id);
		if (ss->css_released)
			ss->css_released(css);
	} else {
		/* cgroup release path */
		cgroup_idr_remove(&cgrp->root->cgroup_idr, cgrp->id);
		cgrp->id = -1;

		/*
		 * There are two control paths which try to determine
		 * cgroup from dentry without going through kernfs -
		 * cgroupstats_build() and css_tryget_online_from_dir().
		 * Those are supported by RCU protecting clearing of
		 * cgrp->kn->priv backpointer.
		 */
		RCU_INIT_POINTER(*(void __rcu __force **)&cgrp->kn->priv, NULL);
	}

	mutex_unlock(&cgroup_mutex);

	call_rcu(&css->rcu_head, css_free_rcu_fn);
}

static void css_release(struct percpu_ref *ref)
{
	struct cgroup_subsys_state *css =
		container_of(ref, struct cgroup_subsys_state, refcnt);

	INIT_WORK(&css->destroy_work, css_release_work_fn);
	queue_work(cgroup_destroy_wq, &css->destroy_work);
}

static void init_and_link_css(struct cgroup_subsys_state *css,
			      struct cgroup_subsys *ss, struct cgroup *cgrp)
{
	lockdep_assert_held(&cgroup_mutex);

	cgroup_get(cgrp);

	memset(css, 0, sizeof(*css));
	css->cgroup = cgrp;
	css->ss = ss;
	INIT_LIST_HEAD(&css->sibling);
	INIT_LIST_HEAD(&css->children);
	css->serial_nr = css_serial_nr_next++;
	atomic_set(&css->online_cnt, 0);

	if (cgroup_parent(cgrp)) {
		css->parent = cgroup_css(cgroup_parent(cgrp), ss);
		css_get(css->parent);
	}

	BUG_ON(cgroup_css(cgrp, ss));
}

/* invoke ->css_online() on a new CSS and mark it online if successful */
static int online_css(struct cgroup_subsys_state *css)
{
	struct cgroup_subsys *ss = css->ss;
	int ret = 0;

	lockdep_assert_held(&cgroup_mutex);

	if (ss->css_online)
		ret = ss->css_online(css);
	if (!ret) {
		css->flags |= CSS_ONLINE;
		rcu_assign_pointer(css->cgroup->subsys[ss->id], css);

		atomic_inc(&css->online_cnt);
		if (css->parent)
			atomic_inc(&css->parent->online_cnt);
	}
	return ret;
}

/* if the CSS is online, invoke ->css_offline() on it and mark it offline */
static void offline_css(struct cgroup_subsys_state *css)
{
	struct cgroup_subsys *ss = css->ss;

	lockdep_assert_held(&cgroup_mutex);

	if (!(css->flags & CSS_ONLINE))
		return;

	if (ss->css_offline)
		ss->css_offline(css);

	css->flags &= ~CSS_ONLINE;
	RCU_INIT_POINTER(css->cgroup->subsys[ss->id], NULL);

	wake_up_all(&css->cgroup->offline_waitq);
}

/**
 * create_css - create a cgroup_subsys_state
 * @cgrp: the cgroup new css will be associated with
 * @ss: the subsys of new css
 * @visible: whether to create control knobs for the new css or not
 *
 * Create a new css associated with @cgrp - @ss pair.  On success, the new
 * css is online and installed in @cgrp with all interface files created if
 * @visible.  Returns 0 on success, -errno on failure.
 */
static int create_css(struct cgroup *cgrp, struct cgroup_subsys *ss,
		      bool visible)
{
	struct cgroup *parent = cgroup_parent(cgrp);
	struct cgroup_subsys_state *parent_css = cgroup_css(parent, ss);
	struct cgroup_subsys_state *css;
	int err;

	lockdep_assert_held(&cgroup_mutex);

	css = ss->css_alloc(parent_css);
	if (IS_ERR(css))
		return PTR_ERR(css);

	init_and_link_css(css, ss, cgrp);

	err = percpu_ref_init(&css->refcnt, css_release, 0, GFP_KERNEL);
	if (err)
		goto err_free_css;

	err = cgroup_idr_alloc(&ss->css_idr, NULL, 2, 0, GFP_KERNEL);
	if (err < 0)
		goto err_free_percpu_ref;
	css->id = err;

	if (visible) {
		err = css_populate_dir(css, NULL);
		if (err)
			goto err_free_id;
	}

	/* @css is ready to be brought online now, make it visible */
	list_add_tail_rcu(&css->sibling, &parent_css->children);
	cgroup_idr_replace(&ss->css_idr, css, css->id);

	err = online_css(css);
	if (err)
		goto err_list_del;

	if (ss->broken_hierarchy && !ss->warned_broken_hierarchy &&
	    cgroup_parent(parent)) {
		pr_warn("%s (%d) created nested cgroup for controller \"%s\" which has incomplete hierarchy support. Nested cgroups may change behavior in the future.\n",
			current->comm, current->pid, ss->name);
		if (!strcmp(ss->name, "memory"))
			pr_warn("\"memory\" requires setting use_hierarchy to 1 on the root\n");
		ss->warned_broken_hierarchy = true;
	}

	return 0;

err_list_del:
	list_del_rcu(&css->sibling);
	css_clear_dir(css, NULL);
err_free_id:
	cgroup_idr_remove(&ss->css_idr, css->id);
err_free_percpu_ref:
	percpu_ref_exit(&css->refcnt);
err_free_css:
	call_rcu(&css->rcu_head, css_free_rcu_fn);
	return err;
}

static int cgroup_mkdir(struct kernfs_node *parent_kn, const char *name,
			umode_t mode)
{
	struct cgroup *parent, *cgrp, *tcgrp;
	struct cgroup_root *root;
	struct cgroup_subsys *ss;
	struct kernfs_node *kn;
	int level, ssid, ret;

	/* Do not accept '\n' to prevent making /proc/<pid>/cgroup unparsable.
	 */
	if (strchr(name, '\n'))
		return -EINVAL;

	parent = cgroup_kn_lock_live(parent_kn);
	if (!parent)
		return -ENODEV;
	root = parent->root;
	level = parent->level + 1;

	/* allocate the cgroup and its ID, 0 is reserved for the root */
	cgrp = kzalloc(sizeof(*cgrp) +
		       sizeof(cgrp->ancestor_ids[0]) * (level + 1), GFP_KERNEL);
	if (!cgrp) {
		ret = -ENOMEM;
		goto out_unlock;
	}

	ret = percpu_ref_init(&cgrp->self.refcnt, css_release, 0, GFP_KERNEL);
	if (ret)
		goto out_free_cgrp;

	/*
	 * Temporarily set the pointer to NULL, so idr_find() won't return
	 * a half-baked cgroup.
	 */
	cgrp->id = cgroup_idr_alloc(&root->cgroup_idr, NULL, 2, 0, GFP_KERNEL);
	if (cgrp->id < 0) {
		ret = -ENOMEM;
		goto out_cancel_ref;
	}

	init_cgroup_housekeeping(cgrp);

	cgrp->self.parent = &parent->self;
	cgrp->root = root;
	cgrp->level = level;

	for (tcgrp = cgrp; tcgrp; tcgrp = cgroup_parent(tcgrp))
		cgrp->ancestor_ids[tcgrp->level] = tcgrp->id;

	if (notify_on_release(parent))
		set_bit(CGRP_NOTIFY_ON_RELEASE, &cgrp->flags);

	if (test_bit(CGRP_CPUSET_CLONE_CHILDREN, &parent->flags))
		set_bit(CGRP_CPUSET_CLONE_CHILDREN, &cgrp->flags);

	/* create the directory */
	kn = kernfs_create_dir(parent->kn, name, mode, cgrp);
	if (IS_ERR(kn)) {
		ret = PTR_ERR(kn);
		goto out_free_id;
	}
	cgrp->kn = kn;

	/*
	 * This extra ref will be put in cgroup_free_fn() and guarantees
	 * that @cgrp->kn is always accessible.
	 */
	kernfs_get(kn);

	cgrp->self.serial_nr = css_serial_nr_next++;

	/* allocation complete, commit to creation */
	list_add_tail_rcu(&cgrp->self.sibling, &cgroup_parent(cgrp)->self.children);
	atomic_inc(&root->nr_cgrps);
	cgroup_get(parent);

	/*
	 * @cgrp is now fully operational.  If something fails after this
	 * point, it'll be released via the normal destruction path.
	 */
	cgroup_idr_replace(&root->cgroup_idr, cgrp, cgrp->id);

	ret = cgroup_kn_set_ugid(kn);
	if (ret)
		goto out_destroy;

	ret = css_populate_dir(&cgrp->self, NULL);
	if (ret)
		goto out_destroy;

	/* let's create and online css's */
	for_each_subsys(ss, ssid) {
		if (parent->subtree_ss_mask & (1 << ssid)) {
			ret = create_css(cgrp, ss,
					 parent->subtree_control & (1 << ssid));
			if (ret)
				goto out_destroy;
		}
	}

	/*
	 * On the default hierarchy, a child doesn't automatically inherit
	 * subtree_control from the parent.  Each is configured manually.
	 */
	if (!cgroup_on_dfl(cgrp)) {
		cgrp->subtree_control = parent->subtree_control;
		cgroup_refresh_subtree_ss_mask(cgrp);
	}

	kernfs_activate(kn);

	ret = 0;
	goto out_unlock;

out_free_id:
	cgroup_idr_remove(&root->cgroup_idr, cgrp->id);
out_cancel_ref:
	percpu_ref_exit(&cgrp->self.refcnt);
out_free_cgrp:
	kfree(cgrp);
out_unlock:
	cgroup_kn_unlock(parent_kn);
	return ret;

out_destroy:
	cgroup_destroy_locked(cgrp);
	goto out_unlock;
}

/*
 * This is called when the refcnt of a css is confirmed to be killed.
 * css_tryget_online() is now guaranteed to fail.  Tell the subsystem to
 * initiate destruction and put the css ref from kill_css().
 */
static void css_killed_work_fn(struct work_struct *work)
{
	struct cgroup_subsys_state *css =
		container_of(work, struct cgroup_subsys_state, destroy_work);

	mutex_lock(&cgroup_mutex);

	do {
		offline_css(css);
		css_put(css);
		/* @css can't go away while we're holding cgroup_mutex */
		css = css->parent;
	} while (css && atomic_dec_and_test(&css->online_cnt));

	mutex_unlock(&cgroup_mutex);
}

/* css kill confirmation processing requires process context, bounce */
static void css_killed_ref_fn(struct percpu_ref *ref)
{
	struct cgroup_subsys_state *css =
		container_of(ref, struct cgroup_subsys_state, refcnt);

	if (atomic_dec_and_test(&css->online_cnt)) {
		INIT_WORK(&css->destroy_work, css_killed_work_fn);
		queue_work(cgroup_destroy_wq, &css->destroy_work);
	}
}

/**
 * kill_css - destroy a css
 * @css: css to destroy
 *
 * This function initiates destruction of @css by removing cgroup interface
 * files and putting its base reference.  ->css_offline() will be invoked
 * asynchronously once css_tryget_online() is guaranteed to fail and when
 * the reference count reaches zero, @css will be released.
 */
static void kill_css(struct cgroup_subsys_state *css)
{
	lockdep_assert_held(&cgroup_mutex);

	/*
	 * This must happen before css is disassociated with its cgroup.
	 * See seq_css() for details.
	 */
	css_clear_dir(css, NULL);

	/*
	 * Killing would put the base ref, but we need to keep it alive
	 * until after ->css_offline().
	 */
	css_get(css);

	/*
	 * cgroup core guarantees that, by the time ->css_offline() is
	 * invoked, no new css reference will be given out via
	 * css_tryget_online().  We can't simply call percpu_ref_kill() and
	 * proceed to offlining css's because percpu_ref_kill() doesn't
	 * guarantee that the ref is seen as killed on all CPUs on return.
	 *
	 * Use percpu_ref_kill_and_confirm() to get notifications as each
	 * css is confirmed to be seen as killed on all CPUs.
	 */
	percpu_ref_kill_and_confirm(&css->refcnt, css_killed_ref_fn);
}

/**
 * cgroup_destroy_locked - the first stage of cgroup destruction
 * @cgrp: cgroup to be destroyed
 *
 * css's make use of percpu refcnts whose killing latency shouldn't be
 * exposed to userland and are RCU protected.  Also, cgroup core needs to
 * guarantee that css_tryget_online() won't succeed by the time
 * ->css_offline() is invoked.  To satisfy all the requirements,
 * destruction is implemented in the following two steps.
 *
 * s1. Verify @cgrp can be destroyed and mark it dying.  Remove all
 *     userland visible parts and start killing the percpu refcnts of
 *     css's.  Set up so that the next stage will be kicked off once all
 *     the percpu refcnts are confirmed to be killed.
 *
 * s2. Invoke ->css_offline(), mark the cgroup dead and proceed with the
 *     rest of destruction.  Once all cgroup references are gone, the
 *     cgroup is RCU-freed.
 *
 * This function implements s1.  After this step, @cgrp is gone as far as
 * the userland is concerned and a new cgroup with the same name may be
 * created.  As cgroup doesn't care about the names internally, this
 * doesn't cause any problem.
 */
static int cgroup_destroy_locked(struct cgroup *cgrp)
	__releases(&cgroup_mutex) __acquires(&cgroup_mutex)
{
	struct cgroup_subsys_state *css;
	int ssid;

	lockdep_assert_held(&cgroup_mutex);

	/*
	 * Only migration can raise populated from zero and we're already
	 * holding cgroup_mutex.
	 */
	if (cgroup_is_populated(cgrp))
		return -EBUSY;

	/*
	 * Make sure there are no live children.  We can't test emptiness of
	 * ->self.children as dead children linger on it while being
	 * drained; otherwise, "rmdir parent/child parent" may fail.
	 */
	if (css_has_online_children(&cgrp->self))
		return -EBUSY;

	/*
	 * Mark @cgrp dead.  This prevents further task migration and child
	 * creation by disabling cgroup_lock_live_group().
	 */
	cgrp->self.flags &= ~CSS_ONLINE;

	/* initiate massacre of all css's */
	for_each_css(css, ssid, cgrp)
		kill_css(css);

	/*
	 * Remove @cgrp directory along with the base files.  @cgrp has an
	 * extra ref on its kn.
	 */
	kernfs_remove(cgrp->kn);

	check_for_release(cgroup_parent(cgrp));

	/* put the base reference */
	percpu_ref_kill(&cgrp->self.refcnt);

	return 0;
};

static int cgroup_rmdir(struct kernfs_node *kn)
{
	struct cgroup *cgrp;
	int ret = 0;

	cgrp = cgroup_kn_lock_live(kn);
	if (!cgrp)
		return 0;

	ret = cgroup_destroy_locked(cgrp);

	cgroup_kn_unlock(kn);
	return ret;
}

static struct kernfs_syscall_ops cgroup_kf_syscall_ops = {
	.remount_fs		= cgroup_remount,
	.show_options		= cgroup_show_options,
	.mkdir			= cgroup_mkdir,
	.rmdir			= cgroup_rmdir,
	.rename			= cgroup_rename,
};

static void __init cgroup_init_subsys(struct cgroup_subsys *ss, bool early)
{
	struct cgroup_subsys_state *css;

	pr_debug("Initializing cgroup subsys %s\n", ss->name);

	mutex_lock(&cgroup_mutex);

	idr_init(&ss->css_idr);
	INIT_LIST_HEAD(&ss->cfts);

	/* Create the root cgroup state for this subsystem */
	ss->root = &cgrp_dfl_root;
	css = ss->css_alloc(cgroup_css(&cgrp_dfl_root.cgrp, ss));
	/* We don't handle early failures gracefully */
	BUG_ON(IS_ERR(css));
	init_and_link_css(css, ss, &cgrp_dfl_root.cgrp);

	/*
	 * Root csses are never destroyed and we can't initialize
	 * percpu_ref during early init.  Disable refcnting.
	 */
	css->flags |= CSS_NO_REF;

	if (early) {
		/* allocation can't be done safely during early init */
		css->id = 1;
	} else {
		css->id = cgroup_idr_alloc(&ss->css_idr, css, 1, 2, GFP_KERNEL);
		BUG_ON(css->id < 0);
	}

	/*
	 * Update the init_css_set to contain a subsys pointer to this
	 * state - since the subsystem is newly registered, all tasks and
	 * hence the init_css_set is in the subsystem's root cgroup.
	 */
	init_css_set.subsys[ss->id] = css;

	have_fork_callback |= (bool)ss->fork << ss->id;
	have_exit_callback |= (bool)ss->exit << ss->id;
	have_free_callback |= (bool)ss->free << ss->id;
	have_canfork_callback |= (bool)ss->can_fork << ss->id;

	/*
	 * At system boot, before all subsystems have been registered, no
	 * tasks have been forked, so we don't need to invoke fork
	 * callbacks here.
	 */
	BUG_ON(!list_empty(&init_task.tasks));

	BUG_ON(online_css(css));

	mutex_unlock(&cgroup_mutex);
}

/**
 * cgroup_init_early - cgroup initialization at system boot
 *
 * Initialize cgroups at system boot, and initialize any
 * subsystems that request early init.
 */
int __init cgroup_init_early(void)
{
	static struct cgroup_sb_opts __initdata opts;
	struct cgroup_subsys *ss;
	int i;

	init_cgroup_root(&cgrp_dfl_root, &opts);
	cgrp_dfl_root.cgrp.self.flags |= CSS_NO_REF;

	RCU_INIT_POINTER(init_task.cgroups, &init_css_set);

	for_each_subsys(ss, i) {
		WARN(!ss->css_alloc || !ss->css_free || ss->name || ss->id,
		     "invalid cgroup_subsys %d:%s css_alloc=%p css_free=%p name:id=%d:%s\n",
		     i, cgroup_subsys_name[i], ss->css_alloc, ss->css_free,
		     ss->id, ss->name);
		WARN(strlen(cgroup_subsys_name[i]) > MAX_CGROUP_TYPE_NAMELEN,
		     "cgroup_subsys_name %s too long\n", cgroup_subsys_name[i]);

		ss->id = i;
		ss->name = cgroup_subsys_name[i];
		if (!ss->legacy_name)
			ss->legacy_name = cgroup_subsys_name[i];

		if (ss->early_init)
			cgroup_init_subsys(ss, true);
	}
	return 0;
}

static unsigned long cgroup_disable_mask __initdata;

/**
 * cgroup_init - cgroup initialization
 *
 * Register cgroup filesystem and /proc file, and initialize
 * any subsystems that didn't request early init.
 */
int __init cgroup_init(void)
{
	struct cgroup_subsys *ss;
	unsigned long key;
	int ssid;

	BUG_ON(percpu_init_rwsem(&cgroup_threadgroup_rwsem));
	BUG_ON(cgroup_init_cftypes(NULL, cgroup_dfl_base_files));
	BUG_ON(cgroup_init_cftypes(NULL, cgroup_legacy_base_files));

	mutex_lock(&cgroup_mutex);

	/* Add init_css_set to the hash table */
	key = css_set_hash(init_css_set.subsys);
	hash_add(css_set_table, &init_css_set.hlist, key);

	BUG_ON(cgroup_setup_root(&cgrp_dfl_root, 0));

	mutex_unlock(&cgroup_mutex);

	for_each_subsys(ss, ssid) {
		if (ss->early_init) {
			struct cgroup_subsys_state *css =
				init_css_set.subsys[ss->id];

			css->id = cgroup_idr_alloc(&ss->css_idr, css, 1, 2,
						   GFP_KERNEL);
			BUG_ON(css->id < 0);
		} else {
			cgroup_init_subsys(ss, false);
		}

		list_add_tail(&init_css_set.e_cset_node[ssid],
			      &cgrp_dfl_root.cgrp.e_csets[ssid]);

		/*
		 * Setting dfl_root subsys_mask needs to consider the
		 * disabled flag and cftype registration needs kmalloc,
		 * both of which aren't available during early_init.
		 */
		if (cgroup_disable_mask & (1 << ssid)) {
			static_branch_disable(cgroup_subsys_enabled_key[ssid]);
			printk(KERN_INFO "Disabling %s control group subsystem\n",
			       ss->name);
			continue;
		}

		if (cgroup_ssid_no_v1(ssid))
			printk(KERN_INFO "Disabling %s control group subsystem in v1 mounts\n",
			       ss->name);

		cgrp_dfl_root.subsys_mask |= 1 << ss->id;

		if (!ss->dfl_cftypes)
			cgrp_dfl_root_inhibit_ss_mask |= 1 << ss->id;

		if (ss->dfl_cftypes == ss->legacy_cftypes) {
			WARN_ON(cgroup_add_cftypes(ss, ss->dfl_cftypes));
		} else {
			WARN_ON(cgroup_add_dfl_cftypes(ss, ss->dfl_cftypes));
			WARN_ON(cgroup_add_legacy_cftypes(ss, ss->legacy_cftypes));
		}

		if (ss->bind)
			ss->bind(init_css_set.subsys[ssid]);
	}

	WARN_ON(sysfs_create_mount_point(fs_kobj, "cgroup"));
	WARN_ON(register_filesystem(&cgroup_fs_type));
	WARN_ON(register_filesystem(&cgroup2_fs_type));
	WARN_ON(!proc_create("cgroups", 0, NULL, &proc_cgroupstats_operations));

	return 0;
}

static int __init cgroup_wq_init(void)
{
	/*
	 * There isn't much point in executing the destruction path in
	 * parallel.  A good chunk of it is serialized with cgroup_mutex
	 * anyway.  Use 1 for @max_active.
	 *
	 * We would prefer to do this in cgroup_init() above, but that
	 * is called before init_workqueues(): so leave this until after.
	 */
	cgroup_destroy_wq = alloc_workqueue("cgroup_destroy", 0, 1);
	BUG_ON(!cgroup_destroy_wq);

	/*
	 * Used to destroy pidlists and separate to serve as flush domain.
	 * Cap @max_active to 1 too.
	 */
	cgroup_pidlist_destroy_wq = alloc_workqueue("cgroup_pidlist_destroy",
						    0, 1);
	BUG_ON(!cgroup_pidlist_destroy_wq);

	return 0;
}
core_initcall(cgroup_wq_init);

/*
 * proc_cgroup_show()
 *  - Print task's cgroup paths into seq_file, one line for each hierarchy
 *  - Used for /proc/<pid>/cgroup.
 */
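/*
 * For illustration (values hypothetical): a v1 hierarchy with id 4
 * carrying the cpu and cpuacct controllers yields a line like
 *
 *	4:cpu,cpuacct:/user.slice
 *
 * i.e. "hierarchy-id:comma-separated-controllers:path".
 */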
int proc_cgroup_show(struct seq_file *m, struct pid_namespace *ns,
		     struct pid *pid, struct task_struct *tsk)
{
	char *buf, *path;
	int retval;
	struct cgroup_root *root;

	retval = -ENOMEM;
	buf = kmalloc(PATH_MAX, GFP_KERNEL);
	if (!buf)
		goto out;

	mutex_lock(&cgroup_mutex);
	spin_lock_bh(&css_set_lock);

	for_each_root(root) {
		struct cgroup_subsys *ss;
		struct cgroup *cgrp;
		int ssid, count = 0;

		if (root == &cgrp_dfl_root && !cgrp_dfl_root_visible)
			continue;

		seq_printf(m, "%d:", root->hierarchy_id);
		if (root != &cgrp_dfl_root)
			for_each_subsys(ss, ssid)
				if (root->subsys_mask & (1 << ssid))
					seq_printf(m, "%s%s", count++ ? "," : "",
						   ss->legacy_name);
		if (strlen(root->name))
			seq_printf(m, "%sname=%s", count ? "," : "",
				   root->name);
		seq_putc(m, ':');

		cgrp = task_cgroup_from_root(tsk, root);

		/*
		 * On traditional hierarchies, all zombie tasks show up as
		 * belonging to the root cgroup.  On the default hierarchy,
		 * while a zombie doesn't show up in "cgroup.procs" and
		 * thus can't be migrated, its /proc/PID/cgroup keeps
		 * reporting the cgroup it belonged to before exiting.  If
		 * the cgroup is removed before the zombie is reaped,
		 * " (deleted)" is appended to the cgroup path.
		 */
		if (cgroup_on_dfl(cgrp) || !(tsk->flags & PF_EXITING)) {
			path = cgroup_path(cgrp, buf, PATH_MAX);
			if (!path) {
				retval = -ENAMETOOLONG;
				goto out_unlock;
			}
		} else {
			path = "/";
		}

		seq_puts(m, path);

		if (cgroup_on_dfl(cgrp) && cgroup_is_dead(cgrp))
			seq_puts(m, " (deleted)\n");
		else
			seq_putc(m, '\n');
	}

	retval = 0;
out_unlock:
	spin_unlock_bh(&css_set_lock);
	mutex_unlock(&cgroup_mutex);
	kfree(buf);
out:
	return retval;
}

/* Display information about each subsystem and each hierarchy */
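/*
 * A sample /proc/cgroups line (fields are tab-separated, values
 * hypothetical):
 *
 *	cpuset	4	12	1
 */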
static int proc_cgroupstats_show(struct seq_file *m, void *v)
{
	struct cgroup_subsys *ss;
	int i;

	seq_puts(m, "#subsys_name\thierarchy\tnum_cgroups\tenabled\n");
	/*
	 * ideally we don't want subsystems moving around while we do this.
	 * cgroup_mutex is also necessary to guarantee an atomic snapshot of
	 * subsys/hierarchy state.
	 */
	mutex_lock(&cgroup_mutex);

	for_each_subsys(ss, i)
		seq_printf(m, "%s\t%d\t%d\t%d\n",
			   ss->legacy_name, ss->root->hierarchy_id,
			   atomic_read(&ss->root->nr_cgrps),
			   cgroup_ssid_enabled(i));

	mutex_unlock(&cgroup_mutex);
	return 0;
}

static int cgroupstats_open(struct inode *inode, struct file *file)
{
	return single_open(file, proc_cgroupstats_show, NULL);
}

static const struct file_operations proc_cgroupstats_operations = {
	.open = cgroupstats_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

/**
 * cgroup_fork - initialize cgroup related fields during copy_process()
 * @child: pointer to task_struct of the child process.
 *
 * A task is associated with the init_css_set until cgroup_post_fork()
 * attaches it to the parent's css_set.  An empty cg_list indicates that
 * @child isn't holding a reference to its css_set.
 */
void cgroup_fork(struct task_struct *child)
{
	RCU_INIT_POINTER(child->cgroups, &init_css_set);
	INIT_LIST_HEAD(&child->cg_list);
}
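/*
 * Sketch of how copy_process() drives the hooks below (simplified;
 * unrelated setup and error handling elided):
 *
 *	cgroup_fork(child);
 *	ret = cgroup_can_fork(child);	- subsystems may veto the fork
 *	...
 *	cgroup_post_fork(child);	- on success, once the child is
 *					  visible on the task list
 *	cgroup_cancel_fork(child);	- instead, if a later fork step failed
 */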

/**
 * cgroup_can_fork - called on a new task before the process is exposed
 * @child: the task in question.
 *
 * This calls the subsystem can_fork() callbacks. If the can_fork() callback
 * returns an error, the fork aborts with that error code. This allows for
 * a cgroup subsystem to conditionally allow or deny new forks.
 */
int cgroup_can_fork(struct task_struct *child)
{
	struct cgroup_subsys *ss;
	int i, j, ret;

	do_each_subsys_mask(ss, i, have_canfork_callback) {
		ret = ss->can_fork(child);
		if (ret)
			goto out_revert;
	} while_each_subsys_mask();

	return 0;

out_revert:
	for_each_subsys(ss, j) {
		if (j >= i)
			break;
		if (ss->cancel_fork)
			ss->cancel_fork(child);
	}

	return ret;
}

/**
 * cgroup_cancel_fork - called if a fork failed after cgroup_can_fork()
 * @child: the task in question
 *
 * This calls the cancel_fork() callbacks if a fork failed *after*
 * cgroup_can_fork() succeeded.
 */
void cgroup_cancel_fork(struct task_struct *child)
{
	struct cgroup_subsys *ss;
	int i;

	for_each_subsys(ss, i)
		if (ss->cancel_fork)
			ss->cancel_fork(child);
}

/**
 * cgroup_post_fork - called on a new task after adding it to the task list
 * @child: the task in question
 *
 * Adds the task to the list running through its css_set if necessary and
 * calls the subsystem fork() callbacks.  Has to be after the task is
 * visible on the task list in case we race with the first call to
 * css_task_iter_start() - to guarantee that the new task ends up on its
 * list.
 */
void cgroup_post_fork(struct task_struct *child)
{
	struct cgroup_subsys *ss;
	int i;

	/*
	 * This may race against cgroup_enable_task_cg_lists().  As that
	 * function sets use_task_css_set_links before grabbing
	 * tasklist_lock and we just went through tasklist_lock to add
	 * @child, it's guaranteed that either we see the set
	 * use_task_css_set_links or cgroup_enable_task_cg_lists() sees
	 * @child during its iteration.
	 *
	 * If we won the race, @child is associated with %current's
	 * css_set.  Grabbing css_set_lock guarantees both that the
	 * association is stable, and, on completion of the parent's
	 * migration, @child is visible in the source of migration or
	 * already in the destination cgroup.  This guarantee is necessary
	 * when implementing operations which need to migrate all tasks of
	 * a cgroup to another.
	 *
	 * Note that if we lose to cgroup_enable_task_cg_lists(), @child
	 * will remain in init_css_set.  This is safe because all tasks are
	 * in the init_css_set before cg_links is enabled and there's no
	 * operation which transfers all tasks out of init_css_set.
	 */
	if (use_task_css_set_links) {
		struct css_set *cset;

		spin_lock_bh(&css_set_lock);
		cset = task_css_set(current);
		if (list_empty(&child->cg_list)) {
			get_css_set(cset);
			css_set_move_task(child, NULL, cset, false);
		}
		spin_unlock_bh(&css_set_lock);
	}

	/*
	 * Call ss->fork().  This must happen after @child is linked on
	 * css_set; otherwise, @child might change state between ->fork()
	 * and addition to css_set.
	 */
	do_each_subsys_mask(ss, i, have_fork_callback) {
		ss->fork(child);
	} while_each_subsys_mask();
}

/**
 * cgroup_exit - detach cgroup from exiting task
 * @tsk: pointer to task_struct of exiting process
 *
 * Description: Detach cgroup from @tsk and release it.
 *
 * Note that cgroups marked notify_on_release force every task in
 * them to take the global cgroup_mutex mutex when exiting.
 * This could impact scaling on very large systems.  Be reluctant to
 * use notify_on_release cgroups where very high task exit scaling
 * is required on large systems.
 *
 * We set the exiting task's cgroup to the root cgroup (top_cgroup).  We
 * call cgroup_exit() while the task is still competent to handle
 * notify_on_release(), then leave the task attached to the root cgroup in
 * each hierarchy for the remainder of its exit.  No need to bother with
 * init_css_set refcnting.  init_css_set never goes away and we can't race
 * with the migration path - PF_EXITING is visible to the migration path.
 */
void cgroup_exit(struct task_struct *tsk)
{
	struct cgroup_subsys *ss;
	struct css_set *cset;
	int i;

	/*
	 * Unlink @tsk from its css_set.  As the migration path can't race
	 * with us, we can check css_set and cg_list without synchronization.
	 */
	cset = task_css_set(tsk);

	if (!list_empty(&tsk->cg_list)) {
		spin_lock_bh(&css_set_lock);
		css_set_move_task(tsk, cset, NULL, false);
		spin_unlock_bh(&css_set_lock);
	} else {
		get_css_set(cset);
	}

	/* see cgroup_post_fork() for details */
	do_each_subsys_mask(ss, i, have_exit_callback) {
		ss->exit(tsk);
	} while_each_subsys_mask();
}

void cgroup_free(struct task_struct *task)
{
	struct css_set *cset = task_css_set(task);
	struct cgroup_subsys *ss;
	int ssid;

	do_each_subsys_mask(ss, ssid, have_free_callback) {
		ss->free(task);
	} while_each_subsys_mask();

	put_css_set(cset);
}

static void check_for_release(struct cgroup *cgrp)
{
	if (notify_on_release(cgrp) && !cgroup_is_populated(cgrp) &&
	    !css_has_online_children(&cgrp->self) && !cgroup_is_dead(cgrp))
		schedule_work(&cgrp->release_agent_work);
}

/*
 * Notify userspace when a cgroup is released, by running the
 * configured release agent with the name of the cgroup (path
 * relative to the root of cgroup file system) as the argument.
 *
 * Most likely, this user command will try to rmdir this cgroup.
 *
 * This races with the possibility that some other task will be
 * attached to this cgroup before it is removed, or that some other
 * user task will 'mkdir' a child cgroup of this cgroup.  That's ok.
 * The presumed 'rmdir' will fail quietly if this cgroup is no longer
 * unused, and this cgroup will be reprieved from its death sentence,
 * to continue to serve a useful existence.  Next time it's released,
 * we will get notified again, if it still has 'notify_on_release' set.
 *
 * The final arg to call_usermodehelper() is UMH_WAIT_EXEC, which
 * means only wait until the task is successfully execve()'d.  The
 * separate release agent task is forked by call_usermodehelper(),
 * then control in this thread returns here, without waiting for the
 * release agent task.  We don't bother to wait because the caller of
 * this routine has no use for the exit status of the release agent
 * task, so no sense holding our caller up for that.
 */
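/*
 * For example, with release_agent set to "/sbin/cgroup_release" (an
 * arbitrary path) and cgroup "/foo" becoming empty, this work item
 * effectively runs:
 *
 *	/sbin/cgroup_release /foo
 */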
static void cgroup_release_agent(struct work_struct *work)
{
	struct cgroup *cgrp =
		container_of(work, struct cgroup, release_agent_work);
	char *pathbuf = NULL, *agentbuf = NULL, *path;
	char *argv[3], *envp[3];

	mutex_lock(&cgroup_mutex);

	pathbuf = kmalloc(PATH_MAX, GFP_KERNEL);
	agentbuf = kstrdup(cgrp->root->release_agent_path, GFP_KERNEL);
	if (!pathbuf || !agentbuf)
		goto out;

	path = cgroup_path(cgrp, pathbuf, PATH_MAX);
	if (!path)
		goto out;

	argv[0] = agentbuf;
	argv[1] = path;
	argv[2] = NULL;

	/* minimal command environment */
	envp[0] = "HOME=/";
	envp[1] = "PATH=/sbin:/bin:/usr/sbin:/usr/bin";
	envp[2] = NULL;

	mutex_unlock(&cgroup_mutex);
	call_usermodehelper(argv[0], argv, envp, UMH_WAIT_EXEC);
	goto out_free;
out:
	mutex_unlock(&cgroup_mutex);
out_free:
	kfree(agentbuf);
	kfree(pathbuf);
}

static int __init cgroup_disable(char *str)
{
	struct cgroup_subsys *ss;
	char *token;
	int i;

	while ((token = strsep(&str, ",")) != NULL) {
		if (!*token)
			continue;

		for_each_subsys(ss, i) {
			if (strcmp(token, ss->name) &&
			    strcmp(token, ss->legacy_name))
				continue;
			cgroup_disable_mask |= 1 << i;
		}
	}
	return 1;
}
__setup("cgroup_disable=", cgroup_disable);

static int __init cgroup_no_v1(char *str)
{
	struct cgroup_subsys *ss;
	char *token;
	int i;

	while ((token = strsep(&str, ",")) != NULL) {
		if (!*token)
			continue;

		if (!strcmp(token, "all")) {
			cgroup_no_v1_mask = ~0UL;
			break;
		}

		for_each_subsys(ss, i) {
			if (strcmp(token, ss->name) &&
			    strcmp(token, ss->legacy_name))
				continue;

			cgroup_no_v1_mask |= 1 << i;
		}
	}
	return 1;
}
__setup("cgroup_no_v1=", cgroup_no_v1);

/**
 * css_tryget_online_from_dir - get corresponding css from a cgroup dentry
 * @dentry: directory dentry of interest
 * @ss: subsystem of interest
 *
 * If @dentry is a directory for a cgroup which has @ss enabled on it, try
 * to get the corresponding css and return it.  If such css doesn't exist
 * or can't be pinned, an ERR_PTR value is returned.
 */
struct cgroup_subsys_state *css_tryget_online_from_dir(struct dentry *dentry,
						       struct cgroup_subsys *ss)
{
	struct kernfs_node *kn = kernfs_node_from_dentry(dentry);
	struct cgroup_subsys_state *css = NULL;
	struct cgroup *cgrp;

	/* is @dentry a cgroup dir? */
	if (dentry->d_sb->s_type != &cgroup_fs_type || !kn ||
	    kernfs_type(kn) != KERNFS_DIR)
		return ERR_PTR(-EBADF);

	rcu_read_lock();

	/*
	 * This path doesn't originate from kernfs and @kn could already
	 * have been or be removed at any point.  @kn->priv is RCU
	 * protected for this access.  See css_release_work_fn() for details.
	 */
	cgrp = rcu_dereference(kn->priv);
	if (cgrp)
		css = cgroup_css(cgrp, ss);

	if (!css || !css_tryget_online(css))
		css = ERR_PTR(-ENOENT);

	rcu_read_unlock();
	return css;
}

/**
 * css_from_id - lookup css by id
 * @id: the cgroup id
 * @ss: cgroup subsys to be looked into
 *
 * Returns the css if there's a valid one with @id, otherwise returns NULL.
 * Should be called under rcu_read_lock().
 */
struct cgroup_subsys_state *css_from_id(int id, struct cgroup_subsys *ss)
{
	WARN_ON_ONCE(!rcu_read_lock_held());
	return id > 0 ? idr_find(&ss->css_idr, id) : NULL;
}

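/*
 * Typical lookup pattern (sketch; @id previously obtained and
 * memory_cgrp_subsys standing in for any subsystem):
 *
 *	rcu_read_lock();
 *	css = css_from_id(id, &memory_cgrp_subsys);
 *	if (css && css_tryget_online(css))
 *		use(css);	- pinned, usable past the RCU section
 *	rcu_read_unlock();
 */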
/**
 * cgroup_get_from_path - lookup and get a cgroup from its default hierarchy path
 * @path: path on the default hierarchy
 *
 * Find the cgroup at @path on the default hierarchy, increment its
 * reference count and return it.  Returns pointer to the found cgroup on
 * success, ERR_PTR(-ENOENT) if @path doens't exist and ERR_PTR(-ENOTDIR)
 * if @path points to a non-directory.
 */
struct cgroup *cgroup_get_from_path(const char *path)
{
	struct kernfs_node *kn;
	struct cgroup *cgrp;

	mutex_lock(&cgroup_mutex);

	kn = kernfs_walk_and_get(cgrp_dfl_root.cgrp.kn, path);
	if (kn) {
		if (kernfs_type(kn) == KERNFS_DIR) {
			cgrp = kn->priv;
			cgroup_get(cgrp);
		} else {
			cgrp = ERR_PTR(-ENOTDIR);
		}
		kernfs_put(kn);
	} else {
		cgrp = ERR_PTR(-ENOENT);
	}

	mutex_unlock(&cgroup_mutex);
	return cgrp;
}
EXPORT_SYMBOL_GPL(cgroup_get_from_path);
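/*
 * Usage sketch ("/foo" is a hypothetical path on a mounted default
 * hierarchy):
 *
 *	struct cgroup *cgrp = cgroup_get_from_path("/foo");
 *
 *	if (!IS_ERR(cgrp)) {
 *		...
 *		cgroup_put(cgrp);
 *	}
 */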

/*
 * sock->sk_cgrp_data handling.  For more info, see sock_cgroup_data
 * definition in cgroup-defs.h.
 */
#ifdef CONFIG_SOCK_CGROUP_DATA

#if defined(CONFIG_CGROUP_NET_PRIO) || defined(CONFIG_CGROUP_NET_CLASSID)

DEFINE_SPINLOCK(cgroup_sk_update_lock);
static bool cgroup_sk_alloc_disabled __read_mostly;

void cgroup_sk_alloc_disable(void)
{
	if (cgroup_sk_alloc_disabled)
		return;
	pr_info("cgroup: disabling cgroup2 socket matching due to net_prio or net_cls activation\n");
	cgroup_sk_alloc_disabled = true;
}

#else

#define cgroup_sk_alloc_disabled	false

#endif

void cgroup_sk_alloc(struct sock_cgroup_data *skcd)
{
	if (cgroup_sk_alloc_disabled)
		return;

	rcu_read_lock();

	while (true) {
		struct css_set *cset;

		cset = task_css_set(current);
		if (likely(cgroup_tryget(cset->dfl_cgrp))) {
			skcd->val = (unsigned long)cset->dfl_cgrp;
			break;
		}
		cpu_relax();
	}

	rcu_read_unlock();
}

void cgroup_sk_free(struct sock_cgroup_data *skcd)
{
	cgroup_put(sock_cgroup_ptr(skcd));
}

#endif	/* CONFIG_SOCK_CGROUP_DATA */

#ifdef CONFIG_CGROUP_DEBUG
static struct cgroup_subsys_state *
debug_css_alloc(struct cgroup_subsys_state *parent_css)
{
	struct cgroup_subsys_state *css = kzalloc(sizeof(*css), GFP_KERNEL);

	if (!css)
		return ERR_PTR(-ENOMEM);

	return css;
}

static void debug_css_free(struct cgroup_subsys_state *css)
{
	kfree(css);
}

static u64 debug_taskcount_read(struct cgroup_subsys_state *css,
				struct cftype *cft)
{
	return cgroup_task_count(css->cgroup);
}

static u64 current_css_set_read(struct cgroup_subsys_state *css,
				struct cftype *cft)
{
	return (u64)(unsigned long)current->cgroups;
}

static u64 current_css_set_refcount_read(struct cgroup_subsys_state *css,
					 struct cftype *cft)
{
	u64 count;

	rcu_read_lock();
	count = atomic_read(&task_css_set(current)->refcount);
	rcu_read_unlock();
	return count;
}

static int current_css_set_cg_links_read(struct seq_file *seq, void *v)
{
	struct cgrp_cset_link *link;
	struct css_set *cset;
	char *name_buf;

	name_buf = kmalloc(NAME_MAX + 1, GFP_KERNEL);
	if (!name_buf)
		return -ENOMEM;

	spin_lock_bh(&css_set_lock);
	rcu_read_lock();
	cset = rcu_dereference(current->cgroups);
	list_for_each_entry(link, &cset->cgrp_links, cgrp_link) {
		struct cgroup *c = link->cgrp;

		cgroup_name(c, name_buf, NAME_MAX + 1);
		seq_printf(seq, "Root %d group %s\n",
			   c->root->hierarchy_id, name_buf);
	}
	rcu_read_unlock();
	spin_unlock_bh(&css_set_lock);
	kfree(name_buf);
	return 0;
}

#define MAX_TASKS_SHOWN_PER_CSS 25
static int cgroup_css_links_read(struct seq_file *seq, void *v)
{
	struct cgroup_subsys_state *css = seq_css(seq);
	struct cgrp_cset_link *link;

	spin_lock_bh(&css_set_lock);
	list_for_each_entry(link, &css->cgroup->cset_links, cset_link) {
		struct css_set *cset = link->cset;
		struct task_struct *task;
		int count = 0;

		seq_printf(seq, "css_set %p\n", cset);

		list_for_each_entry(task, &cset->tasks, cg_list) {
			if (count++ > MAX_TASKS_SHOWN_PER_CSS)
				goto overflow;
			seq_printf(seq, "  task %d\n", task_pid_vnr(task));
		}

		list_for_each_entry(task, &cset->mg_tasks, cg_list) {
			if (count++ > MAX_TASKS_SHOWN_PER_CSS)
				goto overflow;
			seq_printf(seq, "  task %d\n", task_pid_vnr(task));
		}
		continue;
	overflow:
		seq_puts(seq, "  ...\n");
	}
	spin_unlock_bh(&css_set_lock);
	return 0;
}

static u64 releasable_read(struct cgroup_subsys_state *css, struct cftype *cft)
{
	return (!cgroup_is_populated(css->cgroup) &&
		!css_has_online_children(&css->cgroup->self));
}

static struct cftype debug_files[] = {
	{
		.name = "taskcount",
		.read_u64 = debug_taskcount_read,
	},

	{
		.name = "current_css_set",
		.read_u64 = current_css_set_read,
	},

	{
		.name = "current_css_set_refcount",
		.read_u64 = current_css_set_refcount_read,
	},

	{
		.name = "current_css_set_cg_links",
		.seq_show = current_css_set_cg_links_read,
	},

	{
		.name = "cgroup_css_links",
		.seq_show = cgroup_css_links_read,
	},

	{
		.name = "releasable",
		.read_u64 = releasable_read,
	},

	{ }	/* terminate */
};

struct cgroup_subsys debug_cgrp_subsys = {
	.css_alloc = debug_css_alloc,
	.css_free = debug_css_free,
	.legacy_cftypes = debug_files,
};
#endif /* CONFIG_CGROUP_DEBUG */