/*
 *  Generic process-grouping system.
 *
 *  Based originally on the cpuset system, extracted by Paul Menage
 *  Copyright (C) 2006 Google, Inc
 *
 *  Notifications support
 *  Copyright (C) 2009 Nokia Corporation
 *  Author: Kirill A. Shutemov
 *
 *  Copyright notices from the original cpuset code:
 *  --------------------------------------------------
 *  Copyright (C) 2003 BULL SA.
 *  Copyright (C) 2004-2006 Silicon Graphics, Inc.
 *
 *  Portions derived from Patrick Mochel's sysfs code.
 *  sysfs is Copyright (c) 2001-3 Patrick Mochel
 *
 *  2003-10-10 Written by Simon Derr.
 *  2003-10-22 Updates by Stephen Hemminger.
 *  2004 May-July Rework by Paul Jackson.
 *  ---------------------------------------------------
 *
 *  This file is subject to the terms and conditions of the GNU General Public
 *  License.  See the file COPYING in the main directory of the Linux
 *  distribution for more details.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/cgroup.h>
#include <linux/cred.h>
#include <linux/ctype.h>
#include <linux/errno.h>
#include <linux/init_task.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/magic.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/mount.h>
#include <linux/pagemap.h>
#include <linux/proc_fs.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/rwsem.h>
#include <linux/percpu-rwsem.h>
#include <linux/string.h>
#include <linux/sort.h>
#include <linux/kmod.h>
#include <linux/delayacct.h>
#include <linux/cgroupstats.h>
#include <linux/hashtable.h>
#include <linux/pid_namespace.h>
#include <linux/idr.h>
#include <linux/vmalloc.h> /* TODO: replace with more sophisticated array */
#include <linux/kthread.h>
#include <linux/delay.h>

#include <linux/atomic.h>

/*
 * pidlists linger the following amount before being destroyed.  The goal
 * is to avoid frequent destruction in the middle of consecutive read
 * calls.  Expiring in the middle is a performance problem, not a
 * correctness one.
 * 1 sec should be enough.
 */
#define CGROUP_PIDLIST_DESTROY_DELAY	HZ

#define CGROUP_FILE_NAME_MAX		(MAX_CGROUP_TYPE_NAMELEN +	\
					 MAX_CFTYPE_NAME + 2)

/*
 * cgroup_mutex is the master lock.  Any modification to cgroup or its
 * hierarchy must be performed while holding it.
 *
 * css_set_rwsem protects task->cgroups pointer, the list of css_set
 * objects, and the chain of tasks off each css_set.
 *
 * These locks are exported if CONFIG_PROVE_RCU so that accessors in
 * cgroup.h can use them for lockdep annotations.
 */
#ifdef CONFIG_PROVE_RCU
DEFINE_MUTEX(cgroup_mutex);
DECLARE_RWSEM(css_set_rwsem);
EXPORT_SYMBOL_GPL(cgroup_mutex);
EXPORT_SYMBOL_GPL(css_set_rwsem);
#else
static DEFINE_MUTEX(cgroup_mutex);
static DECLARE_RWSEM(css_set_rwsem);
#endif

/*
 * Protects cgroup_idr and css_idr so that IDs can be released without
 * grabbing cgroup_mutex.
 */
static DEFINE_SPINLOCK(cgroup_idr_lock);

/*
 * Protects cgroup_subsys->release_agent_path.  Modifying it also requires
 * cgroup_mutex.  Reading requires either cgroup_mutex or this spinlock.
 */
static DEFINE_SPINLOCK(release_agent_path_lock);

struct percpu_rw_semaphore cgroup_threadgroup_rwsem;

#define cgroup_assert_mutex_or_rcu_locked()				\
	RCU_LOCKDEP_WARN(!rcu_read_lock_held() &&			\
			   !lockdep_is_held(&cgroup_mutex),		\
			   "cgroup_mutex or RCU read lock required");

/*
 * cgroup destruction makes heavy use of work items and there can be a lot
 * of concurrent destructions.  Use a separate workqueue so that cgroup
 * destruction work items don't end up filling up max_active of system_wq
 * which may lead to deadlock.
 */
static struct workqueue_struct *cgroup_destroy_wq;

/*
 * pidlist destructions need to be flushed on cgroup destruction.  Use a
 * separate workqueue as flush domain.
 */
static struct workqueue_struct *cgroup_pidlist_destroy_wq;

/* generate an array of cgroup subsystem pointers */
#define SUBSYS(_x) [_x ## _cgrp_id] = &_x ## _cgrp_subsys,
static struct cgroup_subsys *cgroup_subsys[] = {
#include <linux/cgroup_subsys.h>
};
#undef SUBSYS

/* array of cgroup subsystem names */
#define SUBSYS(_x) [_x ## _cgrp_id] = #_x,
static const char *cgroup_subsys_name[] = {
#include <linux/cgroup_subsys.h>
};
#undef SUBSYS

/*
 * The default hierarchy, reserved for the subsystems that are otherwise
 * unattached - it never has more than a single cgroup, and all tasks are
 * part of that cgroup.
 */
struct cgroup_root cgrp_dfl_root;
EXPORT_SYMBOL_GPL(cgrp_dfl_root);

/*
 * The default hierarchy always exists but is hidden until mounted for the
 * first time.  This is for backward compatibility.
 */
static bool cgrp_dfl_root_visible;

/*
 * Set by the boot param of the same name and makes subsystems with NULL
 * ->dfl_files to use ->legacy_files on the default hierarchy.
 */
static bool cgroup_legacy_files_on_dfl;

/* some controllers are not supported in the default hierarchy */
static unsigned long cgrp_dfl_root_inhibit_ss_mask;

/* The list of hierarchy roots */

static LIST_HEAD(cgroup_roots);
static int cgroup_root_count;

/* hierarchy ID allocation and mapping, protected by cgroup_mutex */
static DEFINE_IDR(cgroup_hierarchy_idr);

/*
 * Assign a monotonically increasing serial number to csses.  It guarantees
 * cgroups with bigger numbers are newer than those with smaller numbers.
 * Also, as csses are always appended to the parent's ->children list, it
 * guarantees that sibling csses are always sorted in the ascending serial
 * number order on the list.  Protected by cgroup_mutex.
 */
static u64 css_serial_nr_next = 1;

/*
 * These bitmask flags indicate whether tasks in the fork and exit paths have
 * fork/exit handlers to call. This avoids us having to do extra work in the
 * fork/exit path to check which subsystems have fork/exit callbacks.
 */
static unsigned long have_fork_callback __read_mostly;
static unsigned long have_exit_callback __read_mostly;

/* Ditto for the can_fork callback. */
static unsigned long have_canfork_callback __read_mostly;

static struct cftype cgroup_dfl_base_files[];
static struct cftype cgroup_legacy_base_files[];

static int rebind_subsystems(struct cgroup_root *dst_root,
			     unsigned long ss_mask);
static int cgroup_destroy_locked(struct cgroup *cgrp);
static int create_css(struct cgroup *cgrp, struct cgroup_subsys *ss,
		      bool visible);
static void css_release(struct percpu_ref *ref);
static void kill_css(struct cgroup_subsys_state *css);
static int cgroup_addrm_files(struct cgroup *cgrp, struct cftype cfts[],
			      bool is_add);

/* IDR wrappers which synchronize using cgroup_idr_lock */
static int cgroup_idr_alloc(struct idr *idr, void *ptr, int start, int end,
			    gfp_t gfp_mask)
{
	int ret;

	idr_preload(gfp_mask);
	spin_lock_bh(&cgroup_idr_lock);
	ret = idr_alloc(idr, ptr, start, end, gfp_mask & ~__GFP_WAIT);
	spin_unlock_bh(&cgroup_idr_lock);
	idr_preload_end();
	return ret;
}

static void *cgroup_idr_replace(struct idr *idr, void *ptr, int id)
{
	void *ret;

	spin_lock_bh(&cgroup_idr_lock);
	ret = idr_replace(idr, ptr, id);
	spin_unlock_bh(&cgroup_idr_lock);
	return ret;
}

static void cgroup_idr_remove(struct idr *idr, int id)
{
	spin_lock_bh(&cgroup_idr_lock);
	idr_remove(idr, id);
	spin_unlock_bh(&cgroup_idr_lock);
}

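/* return the parent cgroup of @cgrp, or NULL if @cgrp is a root cgroup */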
static struct cgroup *cgroup_parent(struct cgroup *cgrp)
{
	struct cgroup_subsys_state *parent_css = cgrp->self.parent;

	if (parent_css)
		return container_of(parent_css, struct cgroup, self);
	return NULL;
}

/**
 * cgroup_css - obtain a cgroup's css for the specified subsystem
 * @cgrp: the cgroup of interest
 * @ss: the subsystem of interest (%NULL returns @cgrp->self)
 *
 * Return @cgrp's css (cgroup_subsys_state) associated with @ss.  This
 * function must be called either under cgroup_mutex or rcu_read_lock() and
 * the caller is responsible for pinning the returned css if it wants to
 * keep accessing it outside the said locks.  This function may return
 * %NULL if @cgrp doesn't have @subsys_id enabled.
 */
static struct cgroup_subsys_state *cgroup_css(struct cgroup *cgrp,
					      struct cgroup_subsys *ss)
{
	if (ss)
		return rcu_dereference_check(cgrp->subsys[ss->id],
					lockdep_is_held(&cgroup_mutex));
	else
		return &cgrp->self;
}

/**
 * cgroup_e_css - obtain a cgroup's effective css for the specified subsystem
 * @cgrp: the cgroup of interest
 * @ss: the subsystem of interest (%NULL returns @cgrp->self)
 *
 * Similar to cgroup_css() but returns the effective css, which is defined
 * as the matching css of the nearest ancestor including self which has @ss
 * enabled.  If @ss is associated with the hierarchy @cgrp is on, this
 * function is guaranteed to return non-NULL css.
 */
static struct cgroup_subsys_state *cgroup_e_css(struct cgroup *cgrp,
						struct cgroup_subsys *ss)
{
	lockdep_assert_held(&cgroup_mutex);

	if (!ss)
		return &cgrp->self;

	if (!(cgrp->root->subsys_mask & (1 << ss->id)))
		return NULL;

	/*
	 * This function is used while updating css associations and thus
	 * can't test the csses directly.  Use ->child_subsys_mask.
	 */
	while (cgroup_parent(cgrp) &&
	       !(cgroup_parent(cgrp)->child_subsys_mask & (1 << ss->id)))
		cgrp = cgroup_parent(cgrp);

	return cgroup_css(cgrp, ss);
}

/**
 * cgroup_get_e_css - get a cgroup's effective css for the specified subsystem
 * @cgrp: the cgroup of interest
 * @ss: the subsystem of interest
 *
 * Find and get the effective css of @cgrp for @ss.  The effective css is
 * defined as the matching css of the nearest ancestor including self which
 * has @ss enabled.  If @ss is not mounted on the hierarchy @cgrp is on,
 * the root css is returned, so this function always returns a valid css.
 * The returned css must be put using css_put().
 */
struct cgroup_subsys_state *cgroup_get_e_css(struct cgroup *cgrp,
					     struct cgroup_subsys *ss)
{
	struct cgroup_subsys_state *css;

	rcu_read_lock();

	do {
		css = cgroup_css(cgrp, ss);

		if (css && css_tryget_online(css))
			goto out_unlock;
		cgrp = cgroup_parent(cgrp);
	} while (cgrp);

	css = init_css_set.subsys[ss->id];
	css_get(css);
out_unlock:
	rcu_read_unlock();
	return css;
}

/* convenient tests for these bits */
static inline bool cgroup_is_dead(const struct cgroup *cgrp)
{
	return !(cgrp->self.flags & CSS_ONLINE);
}

struct cgroup_subsys_state *of_css(struct kernfs_open_file *of)
{
	struct cgroup *cgrp = of->kn->parent->priv;
	struct cftype *cft = of_cft(of);

	/*
	 * This is open and unprotected implementation of cgroup_css().
	 * seq_css() is only called from a kernfs file operation which has
	 * an active reference on the file.  Because all the subsystem
	 * files are drained before a css is disassociated with a cgroup,
	 * the matching css from the cgroup's subsys table is guaranteed to
	 * be and stay valid until the enclosing operation is complete.
	 */
	if (cft->ss)
		return rcu_dereference_raw(cgrp->subsys[cft->ss->id]);
	else
		return &cgrp->self;
}
EXPORT_SYMBOL_GPL(of_css);

/**
 * cgroup_is_descendant - test ancestry
 * @cgrp: the cgroup to be tested
 * @ancestor: possible ancestor of @cgrp
 *
 * Test whether @cgrp is a descendant of @ancestor.  It also returns %true
 * if @cgrp == @ancestor.  This function is safe to call as long as @cgrp
 * and @ancestor are accessible.
 */
bool cgroup_is_descendant(struct cgroup *cgrp, struct cgroup *ancestor)
{
	while (cgrp) {
		if (cgrp == ancestor)
			return true;
		cgrp = cgroup_parent(cgrp);
	}
	return false;
}

static int notify_on_release(const struct cgroup *cgrp)
{
	return test_bit(CGRP_NOTIFY_ON_RELEASE, &cgrp->flags);
}

/**
 * for_each_css - iterate all css's of a cgroup
 * @css: the iteration cursor
 * @ssid: the index of the subsystem, CGROUP_SUBSYS_COUNT after reaching the end
 * @cgrp: the target cgroup to iterate css's of
 *
 * Should be called under cgroup_[tree_]mutex.
 */
#define for_each_css(css, ssid, cgrp)					\
	for ((ssid) = 0; (ssid) < CGROUP_SUBSYS_COUNT; (ssid)++)	\
		if (!((css) = rcu_dereference_check(			\
				(cgrp)->subsys[(ssid)],			\
				lockdep_is_held(&cgroup_mutex)))) { }	\
		else

/**
 * for_each_e_css - iterate all effective css's of a cgroup
 * @css: the iteration cursor
 * @ssid: the index of the subsystem, CGROUP_SUBSYS_COUNT after reaching the end
 * @cgrp: the target cgroup to iterate css's of
 *
 * Should be called under cgroup_[tree_]mutex.
 */
#define for_each_e_css(css, ssid, cgrp)					\
	for ((ssid) = 0; (ssid) < CGROUP_SUBSYS_COUNT; (ssid)++)	\
		if (!((css) = cgroup_e_css(cgrp, cgroup_subsys[(ssid)]))) \
			;						\
		else

/**
 * for_each_subsys - iterate all enabled cgroup subsystems
 * @ss: the iteration cursor
 * @ssid: the index of @ss, CGROUP_SUBSYS_COUNT after reaching the end
 */
#define for_each_subsys(ss, ssid)					\
	for ((ssid) = 0; (ssid) < CGROUP_SUBSYS_COUNT &&		\
	     (((ss) = cgroup_subsys[ssid]) || true); (ssid)++)

/**
 * for_each_subsys_which - filter for_each_subsys with a bitmask
 * @ss: the iteration cursor
 * @ssid: the index of @ss, CGROUP_SUBSYS_COUNT after reaching the end
 * @ss_maskp: a pointer to the bitmask
 *
 * The block will only run for cases where the ssid-th bit (1 << ssid) of
 * mask is set to 1.
 */
#define for_each_subsys_which(ss, ssid, ss_maskp)			\
	if (!CGROUP_SUBSYS_COUNT) /* to avoid spurious gcc warning */	\
		(ssid) = 0;						\
	else								\
		for_each_set_bit(ssid, ss_maskp, CGROUP_SUBSYS_COUNT)	\
			if (((ss) = cgroup_subsys[ssid]) && false)	\
				break;					\
			else
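
/*
 * Example usage (illustrative only):
 *
 *	unsigned long ss_mask = cgrp->root->subsys_mask;
 *	struct cgroup_subsys *ss;
 *	int ssid;
 *
 *	for_each_subsys_which(ss, ssid, &ss_mask)
 *		pr_debug("bound subsystem: %s\n", ss->name);
 */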

/* iterate across the hierarchies */
#define for_each_root(root)						\
	list_for_each_entry((root), &cgroup_roots, root_list)

/* iterate over child cgrps, lock should be held throughout iteration */
#define cgroup_for_each_live_child(child, cgrp)				\
	list_for_each_entry((child), &(cgrp)->self.children, self.sibling) \
		if (({ lockdep_assert_held(&cgroup_mutex);		\
		       cgroup_is_dead(child); }))			\
			;						\
		else

static void cgroup_release_agent(struct work_struct *work);
static void check_for_release(struct cgroup *cgrp);

/*
 * A cgroup can be associated with multiple css_sets as different tasks may
 * belong to different cgroups on different hierarchies.  In the other
 * direction, a css_set is naturally associated with multiple cgroups.
 * This M:N relationship is represented by the following link structure
 * which exists for each association and allows traversing the associations
 * from both sides.
 */
struct cgrp_cset_link {
	/* the cgroup and css_set this link associates */
	struct cgroup		*cgrp;
	struct css_set		*cset;

	/* list of cgrp_cset_links anchored at cgrp->cset_links */
	struct list_head	cset_link;

	/* list of cgrp_cset_links anchored at css_set->cgrp_links */
	struct list_head	cgrp_link;
};

/*
 * The default css_set - used by init and its children prior to any
 * hierarchies being mounted. It contains a pointer to the root state
 * for each subsystem. Also used to anchor the list of css_sets. Not
 * reference-counted, to improve performance when child cgroups
 * haven't been created.
 */
struct css_set init_css_set = {
	.refcount		= ATOMIC_INIT(1),
	.cgrp_links		= LIST_HEAD_INIT(init_css_set.cgrp_links),
	.tasks			= LIST_HEAD_INIT(init_css_set.tasks),
	.mg_tasks		= LIST_HEAD_INIT(init_css_set.mg_tasks),
	.mg_preload_node	= LIST_HEAD_INIT(init_css_set.mg_preload_node),
	.mg_node		= LIST_HEAD_INIT(init_css_set.mg_node),
};

static int css_set_count	= 1;	/* 1 for init_css_set */

/**
 * cgroup_update_populated - update the populated count of a cgroup
 * @cgrp: the target cgroup
 * @populated: inc or dec populated count
 *
 * @cgrp is either getting the first task (css_set) or losing the last.
 * Update @cgrp->populated_cnt accordingly.  The count is propagated
 * towards root so that a given cgroup's populated_cnt is zero iff the
 * cgroup and all its descendants are empty.
 *
 * @cgrp's interface file "cgroup.populated" is zero if
 * @cgrp->populated_cnt is zero and 1 otherwise.  When @cgrp->populated_cnt
 * changes from or to zero, userland is notified that the content of the
 * interface file has changed.  This can be used to detect when @cgrp and
 * its descendants become populated or empty.
 */
static void cgroup_update_populated(struct cgroup *cgrp, bool populated)
{
	lockdep_assert_held(&css_set_rwsem);

	do {
		bool trigger;

		if (populated)
			trigger = !cgrp->populated_cnt++;
		else
			trigger = !--cgrp->populated_cnt;

		if (!trigger)
			break;

		if (cgrp->populated_kn)
			kernfs_notify(cgrp->populated_kn);
		cgrp = cgroup_parent(cgrp);
	} while (cgrp);
}

/*
 * hash table for cgroup groups. This improves the performance of finding
 * an existing css_set. This hash doesn't (currently) take into account
 * cgroups in empty hierarchies.
 */
#define CSS_SET_HASH_BITS	7
static DEFINE_HASHTABLE(css_set_table, CSS_SET_HASH_BITS);

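/* fold a css_set's css pointer array into a single css_set_table hash key */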
static unsigned long css_set_hash(struct cgroup_subsys_state *css[])
{
	unsigned long key = 0UL;
	struct cgroup_subsys *ss;
	int i;

	for_each_subsys(ss, i)
		key += (unsigned long)css[i];
	key = (key >> 16) ^ key;

	return key;
}

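/*
 * Drop a css_set reference and, if it hits zero, unlink the css_set and
 * release its cgroup links.  The caller must hold css_set_rwsem for
 * writing.
 */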
static void put_css_set_locked(struct css_set *cset)
{
	struct cgrp_cset_link *link, *tmp_link;
	struct cgroup_subsys *ss;
	int ssid;

	lockdep_assert_held(&css_set_rwsem);

	if (!atomic_dec_and_test(&cset->refcount))
		return;

	/* This css_set is dead. unlink it and release cgroup refcounts */
	for_each_subsys(ss, ssid)
		list_del(&cset->e_cset_node[ssid]);
	hash_del(&cset->hlist);
	css_set_count--;

	list_for_each_entry_safe(link, tmp_link, &cset->cgrp_links, cgrp_link) {
		struct cgroup *cgrp = link->cgrp;

		list_del(&link->cset_link);
		list_del(&link->cgrp_link);

		/* @cgrp can't go away while we're holding css_set_rwsem */
		if (list_empty(&cgrp->cset_links)) {
			cgroup_update_populated(cgrp, false);
			check_for_release(cgrp);
		}

		kfree(link);
	}

	kfree_rcu(cset, rcu_head);
}

static void put_css_set(struct css_set *cset)
{
	/*
	 * Ensure that the refcount doesn't hit zero while any readers
	 * can see it. Similar to atomic_dec_and_lock(), but for an
	 * rwlock
	 */
	if (atomic_add_unless(&cset->refcount, -1, 1))
		return;

	down_write(&css_set_rwsem);
	put_css_set_locked(cset);
	up_write(&css_set_rwsem);
}

/*
 * refcounted get/put for css_set objects
 */
static inline void get_css_set(struct css_set *cset)
{
	atomic_inc(&cset->refcount);
}

/**
 * compare_css_sets - helper function for find_existing_css_set().
 * @cset: candidate css_set being tested
 * @old_cset: existing css_set for a task
 * @new_cgrp: cgroup that's being entered by the task
 * @template: desired set of css pointers in css_set (pre-calculated)
 *
 * Returns true if "cset" matches "old_cset" except for the hierarchy
 * which "new_cgrp" belongs to, for which it should match "new_cgrp".
 */
static bool compare_css_sets(struct css_set *cset,
			     struct css_set *old_cset,
			     struct cgroup *new_cgrp,
			     struct cgroup_subsys_state *template[])
{
	struct list_head *l1, *l2;

	/*
	 * On the default hierarchy, there can be csets which are
	 * associated with the same set of cgroups but different csses.
	 * Let's first ensure that csses match.
	 */
	if (memcmp(template, cset->subsys, sizeof(cset->subsys)))
		return false;

	/*
	 * Compare cgroup pointers in order to distinguish between
	 * different cgroups in hierarchies.  As different cgroups may
	 * share the same effective css, this comparison is always
	 * necessary.
	 */
	l1 = &cset->cgrp_links;
	l2 = &old_cset->cgrp_links;
	while (1) {
		struct cgrp_cset_link *link1, *link2;
		struct cgroup *cgrp1, *cgrp2;

		l1 = l1->next;
		l2 = l2->next;
		/* See if we reached the end - both lists are equal length. */
		if (l1 == &cset->cgrp_links) {
			BUG_ON(l2 != &old_cset->cgrp_links);
			break;
		} else {
			BUG_ON(l2 == &old_cset->cgrp_links);
		}
		/* Locate the cgroups associated with these links. */
		link1 = list_entry(l1, struct cgrp_cset_link, cgrp_link);
		link2 = list_entry(l2, struct cgrp_cset_link, cgrp_link);
		cgrp1 = link1->cgrp;
		cgrp2 = link2->cgrp;
		/* Hierarchies should be linked in the same order. */
		BUG_ON(cgrp1->root != cgrp2->root);

		/*
		 * If this hierarchy is the hierarchy of the cgroup
		 * that's changing, then we need to check that this
		 * css_set points to the new cgroup; if it's any other
		 * hierarchy, then this css_set should point to the
		 * same cgroup as the old css_set.
		 */
		if (cgrp1->root == new_cgrp->root) {
			if (cgrp1 != new_cgrp)
				return false;
		} else {
			if (cgrp1 != cgrp2)
				return false;
		}
	}
	return true;
}

/**
 * find_existing_css_set - init css array and find the matching css_set
 * @old_cset: the css_set that we're using before the cgroup transition
 * @cgrp: the cgroup that we're moving into
 * @template: out param for the new set of csses, should be clear on entry
 */
static struct css_set *find_existing_css_set(struct css_set *old_cset,
					struct cgroup *cgrp,
					struct cgroup_subsys_state *template[])
{
	struct cgroup_root *root = cgrp->root;
	struct cgroup_subsys *ss;
	struct css_set *cset;
	unsigned long key;
	int i;

	/*
	 * Build the set of subsystem state objects that we want to see in the
	 * new css_set. while subsystems can change globally, the entries here
	 * won't change, so no need for locking.
	 */
	for_each_subsys(ss, i) {
		if (root->subsys_mask & (1UL << i)) {
			/*
			 * @ss is in this hierarchy, so we want the
			 * effective css from @cgrp.
			 */
			template[i] = cgroup_e_css(cgrp, ss);
		} else {
			/*
			 * @ss is not in this hierarchy, so we don't want
			 * to change the css.
			 */
			template[i] = old_cset->subsys[i];
		}
	}

	key = css_set_hash(template);
	hash_for_each_possible(css_set_table, cset, hlist, key) {
		if (!compare_css_sets(cset, old_cset, cgrp, template))
			continue;

		/* This css_set matches what we need */
		return cset;
	}

	/* No existing cgroup group matched */
	return NULL;
}

static void free_cgrp_cset_links(struct list_head *links_to_free)
{
	struct cgrp_cset_link *link, *tmp_link;

	list_for_each_entry_safe(link, tmp_link, links_to_free, cset_link) {
		list_del(&link->cset_link);
		kfree(link);
	}
}

/**
 * allocate_cgrp_cset_links - allocate cgrp_cset_links
 * @count: the number of links to allocate
 * @tmp_links: list_head the allocated links are put on
 *
 * Allocate @count cgrp_cset_link structures and chain them on @tmp_links
 * through ->cset_link.  Returns 0 on success or -errno.
 */
static int allocate_cgrp_cset_links(int count, struct list_head *tmp_links)
{
	struct cgrp_cset_link *link;
	int i;

	INIT_LIST_HEAD(tmp_links);

	for (i = 0; i < count; i++) {
		link = kzalloc(sizeof(*link), GFP_KERNEL);
		if (!link) {
			free_cgrp_cset_links(tmp_links);
			return -ENOMEM;
		}
		list_add(&link->cset_link, tmp_links);
	}
	return 0;
}

/**
 * link_css_set - a helper function to link a css_set to a cgroup
 * @tmp_links: cgrp_cset_link objects allocated by allocate_cgrp_cset_links()
 * @cset: the css_set to be linked
 * @cgrp: the destination cgroup
 */
static void link_css_set(struct list_head *tmp_links, struct css_set *cset,
			 struct cgroup *cgrp)
{
	struct cgrp_cset_link *link;

	BUG_ON(list_empty(tmp_links));

	if (cgroup_on_dfl(cgrp))
		cset->dfl_cgrp = cgrp;

	link = list_first_entry(tmp_links, struct cgrp_cset_link, cset_link);
	link->cset = cset;
	link->cgrp = cgrp;

	if (list_empty(&cgrp->cset_links))
		cgroup_update_populated(cgrp, true);
	list_move(&link->cset_link, &cgrp->cset_links);

	/*
	 * Always add links to the tail of the list so that the list
	 * is sorted by order of hierarchy creation
	 */
	list_add_tail(&link->cgrp_link, &cset->cgrp_links);
}

/**
 * find_css_set - return a new css_set with one cgroup updated
 * @old_cset: the baseline css_set
 * @cgrp: the cgroup to be updated
 *
 * Return a new css_set that's equivalent to @old_cset, but with @cgrp
 * substituted into the appropriate hierarchy.
 */
static struct css_set *find_css_set(struct css_set *old_cset,
				    struct cgroup *cgrp)
{
	struct cgroup_subsys_state *template[CGROUP_SUBSYS_COUNT] = { };
	struct css_set *cset;
	struct list_head tmp_links;
	struct cgrp_cset_link *link;
	struct cgroup_subsys *ss;
	unsigned long key;
	int ssid;

	lockdep_assert_held(&cgroup_mutex);

	/* First see if we already have a cgroup group that matches
	 * the desired set */
	down_read(&css_set_rwsem);
	cset = find_existing_css_set(old_cset, cgrp, template);
	if (cset)
		get_css_set(cset);
	up_read(&css_set_rwsem);

	if (cset)
		return cset;

	cset = kzalloc(sizeof(*cset), GFP_KERNEL);
	if (!cset)
		return NULL;

	/* Allocate all the cgrp_cset_link objects that we'll need */
	if (allocate_cgrp_cset_links(cgroup_root_count, &tmp_links) < 0) {
		kfree(cset);
		return NULL;
	}

	atomic_set(&cset->refcount, 1);
	INIT_LIST_HEAD(&cset->cgrp_links);
	INIT_LIST_HEAD(&cset->tasks);
	INIT_LIST_HEAD(&cset->mg_tasks);
	INIT_LIST_HEAD(&cset->mg_preload_node);
	INIT_LIST_HEAD(&cset->mg_node);
	INIT_HLIST_NODE(&cset->hlist);

	/* Copy the set of subsystem state objects generated in
	 * find_existing_css_set() */
	memcpy(cset->subsys, template, sizeof(cset->subsys));

	down_write(&css_set_rwsem);
	/* Add reference counts and links from the new css_set. */
	list_for_each_entry(link, &old_cset->cgrp_links, cgrp_link) {
		struct cgroup *c = link->cgrp;

		if (c->root == cgrp->root)
			c = cgrp;
		link_css_set(&tmp_links, cset, c);
	}

	BUG_ON(!list_empty(&tmp_links));

	css_set_count++;

	/* Add @cset to the hash table */
	key = css_set_hash(cset->subsys);
	hash_add(css_set_table, &cset->hlist, key);

	for_each_subsys(ss, ssid)
		list_add_tail(&cset->e_cset_node[ssid],
			      &cset->subsys[ssid]->cgroup->e_csets[ssid]);

	up_write(&css_set_rwsem);

	return cset;
}

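/* map a kernfs_root back to the cgroup_root it belongs to */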
static struct cgroup_root *cgroup_root_from_kf(struct kernfs_root *kf_root)
{
	struct cgroup *root_cgrp = kf_root->kn->priv;

	return root_cgrp->root;
}

static int cgroup_init_root_id(struct cgroup_root *root)
{
	int id;

	lockdep_assert_held(&cgroup_mutex);

	id = idr_alloc_cyclic(&cgroup_hierarchy_idr, root, 0, 0, GFP_KERNEL);
	if (id < 0)
		return id;

	root->hierarchy_id = id;
	return 0;
}

static void cgroup_exit_root_id(struct cgroup_root *root)
{
	lockdep_assert_held(&cgroup_mutex);

	if (root->hierarchy_id) {
		idr_remove(&cgroup_hierarchy_idr, root->hierarchy_id);
		root->hierarchy_id = 0;
	}
}

static void cgroup_free_root(struct cgroup_root *root)
{
	if (root) {
		/* hierarchy ID should already have been released */
		WARN_ON_ONCE(root->hierarchy_id);

		idr_destroy(&root->cgroup_idr);
		kfree(root);
	}
}

static void cgroup_destroy_root(struct cgroup_root *root)
{
	struct cgroup *cgrp = &root->cgrp;
	struct cgrp_cset_link *link, *tmp_link;

	mutex_lock(&cgroup_mutex);

	BUG_ON(atomic_read(&root->nr_cgrps));
	BUG_ON(!list_empty(&cgrp->self.children));

	/* Rebind all subsystems back to the default hierarchy */
	rebind_subsystems(&cgrp_dfl_root, root->subsys_mask);

	/*
	 * Release all the links from cset_links to this hierarchy's
	 * root cgroup
	 */
	down_write(&css_set_rwsem);

	list_for_each_entry_safe(link, tmp_link, &cgrp->cset_links, cset_link) {
		list_del(&link->cset_link);
		list_del(&link->cgrp_link);
		kfree(link);
	}
	up_write(&css_set_rwsem);

	if (!list_empty(&root->root_list)) {
		list_del(&root->root_list);
		cgroup_root_count--;
	}

	cgroup_exit_root_id(root);

	mutex_unlock(&cgroup_mutex);

	kernfs_destroy_root(root->kf_root);
	cgroup_free_root(root);
}

/* look up cgroup associated with given css_set on the specified hierarchy */
static struct cgroup *cset_cgroup_from_root(struct css_set *cset,
					    struct cgroup_root *root)
{
	struct cgroup *res = NULL;

	lockdep_assert_held(&cgroup_mutex);
	lockdep_assert_held(&css_set_rwsem);

	if (cset == &init_css_set) {
		res = &root->cgrp;
	} else {
		struct cgrp_cset_link *link;

		list_for_each_entry(link, &cset->cgrp_links, cgrp_link) {
			struct cgroup *c = link->cgrp;

			if (c->root == root) {
				res = c;
				break;
			}
		}
	}

	BUG_ON(!res);
	return res;
}

/*
 * Return the cgroup for "task" from the given hierarchy. Must be
 * called with cgroup_mutex and css_set_rwsem held.
 */
static struct cgroup *task_cgroup_from_root(struct task_struct *task,
					    struct cgroup_root *root)
{
	/*
	 * No need to lock the task - since we hold cgroup_mutex the
	 * task can't change groups, so the only thing that can happen
	 * is that it exits and its css is set back to init_css_set.
	 */
	return cset_cgroup_from_root(task_css_set(task), root);
}

/*
 * A task must hold cgroup_mutex to modify cgroups.
 *
 * Any task can increment and decrement the count field without lock.
 * So in general, code holding cgroup_mutex can't rely on the count
 * field not changing.  However, if the count goes to zero, then only
 * cgroup_attach_task() can increment it again.  Because a count of zero
 * means that no tasks are currently attached, therefore there is no
 * way a task attached to that cgroup can fork (the other way to
 * increment the count).  So code holding cgroup_mutex can safely
 * assume that if the count is zero, it will stay zero. Similarly, if
 * a task holds cgroup_mutex on a cgroup with zero count, it
 * knows that the cgroup won't be removed, as cgroup_rmdir()
 * needs that mutex.
 *
 * A cgroup can only be deleted if both its 'count' of using tasks
 * is zero, and its list of 'children' cgroups is empty.  Since all
 * tasks in the system use _some_ cgroup, and since there is always at
 * least one task in the system (init, pid == 1), therefore, root cgroup
 * always has either children cgroups and/or using tasks.  So we don't
 * need a special hack to ensure that root cgroup cannot be deleted.
 *
 * P.S.  One more locking exception.  RCU is used to guard the
 * update of a tasks cgroup pointer by cgroup_attach_task()
 */

static int cgroup_populate_dir(struct cgroup *cgrp, unsigned long subsys_mask);
static struct kernfs_syscall_ops cgroup_kf_syscall_ops;
static const struct file_operations proc_cgroupstats_operations;

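/*
 * Compose the interface file name for @cft into @buf, e.g.
 * "memory.limit_in_bytes"; the "<subsys>." prefix is dropped for
 * CFTYPE_NO_PREFIX files and on CGRP_ROOT_NOPREFIX hierarchies.
 */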
static char *cgroup_file_name(struct cgroup *cgrp, const struct cftype *cft,
			      char *buf)
{
	struct cgroup_subsys *ss = cft->ss;

	if (cft->ss && !(cft->flags & CFTYPE_NO_PREFIX) &&
	    !(cgrp->root->flags & CGRP_ROOT_NOPREFIX))
		snprintf(buf, CGROUP_FILE_NAME_MAX, "%s.%s",
			 cgroup_on_dfl(cgrp) ? ss->name : ss->legacy_name,
			 cft->name);
	else
		strncpy(buf, cft->name, CGROUP_FILE_NAME_MAX);
	return buf;
}

/**
 * cgroup_file_mode - deduce file mode of a control file
 * @cft: the control file in question
 *
 * returns cft->mode if ->mode is not 0
 * returns S_IRUGO|S_IWUSR if it has both a read and a write handler
 * returns S_IRUGO if it has only a read handler
 * returns S_IWUSR if it has only a write handler
 */
static umode_t cgroup_file_mode(const struct cftype *cft)
{
	umode_t mode = 0;

	if (cft->mode)
		return cft->mode;

	if (cft->read_u64 || cft->read_s64 || cft->seq_show)
		mode |= S_IRUGO;

	if (cft->write_u64 || cft->write_s64 || cft->write)
		mode |= S_IWUSR;

	return mode;
}

static void cgroup_get(struct cgroup *cgrp)
{
	WARN_ON_ONCE(cgroup_is_dead(cgrp));
	css_get(&cgrp->self);
}

static bool cgroup_tryget(struct cgroup *cgrp)
{
	return css_tryget(&cgrp->self);
}

static void cgroup_put(struct cgroup *cgrp)
{
	css_put(&cgrp->self);
}

/**
 * cgroup_calc_child_subsys_mask - calculate child_subsys_mask
 * @cgrp: the target cgroup
 * @subtree_control: the new subtree_control mask to consider
 *
 * On the default hierarchy, a subsystem may request other subsystems to be
 * enabled together through its ->depends_on mask.  In such cases, more
 * subsystems than specified in "cgroup.subtree_control" may be enabled.
 *
 * This function calculates which subsystems need to be enabled if
 * @subtree_control is to be applied to @cgrp.  The returned mask is always
 * a superset of @subtree_control and follows the usual hierarchy rules.
 */
static unsigned long cgroup_calc_child_subsys_mask(struct cgroup *cgrp,
						  unsigned long subtree_control)
{
	struct cgroup *parent = cgroup_parent(cgrp);
	unsigned long cur_ss_mask = subtree_control;
	struct cgroup_subsys *ss;
	int ssid;

	lockdep_assert_held(&cgroup_mutex);

	if (!cgroup_on_dfl(cgrp))
		return cur_ss_mask;

	while (true) {
		unsigned long new_ss_mask = cur_ss_mask;

		for_each_subsys_which(ss, ssid, &cur_ss_mask)
			new_ss_mask |= ss->depends_on;

		/*
		 * Mask out subsystems which aren't available.  This can
		 * happen only if some depended-upon subsystems were bound
		 * to non-default hierarchies.
		 */
		if (parent)
			new_ss_mask &= parent->child_subsys_mask;
		else
			new_ss_mask &= cgrp->root->subsys_mask;

		if (new_ss_mask == cur_ss_mask)
			break;
		cur_ss_mask = new_ss_mask;
	}

	return cur_ss_mask;
}

/**
 * cgroup_refresh_child_subsys_mask - update child_subsys_mask
 * @cgrp: the target cgroup
 *
 * Update @cgrp->child_subsys_mask according to the current
 * @cgrp->subtree_control using cgroup_calc_child_subsys_mask().
 */
static void cgroup_refresh_child_subsys_mask(struct cgroup *cgrp)
{
	cgrp->child_subsys_mask =
		cgroup_calc_child_subsys_mask(cgrp, cgrp->subtree_control);
}

/**
 * cgroup_kn_unlock - unlocking helper for cgroup kernfs methods
 * @kn: the kernfs_node being serviced
 *
 * This helper undoes cgroup_kn_lock_live() and should be invoked before
 * the method finishes if locking succeeded.  Note that once this function
 * returns the cgroup returned by cgroup_kn_lock_live() may become
 * inaccessible any time.  If the caller intends to continue to access the
 * cgroup, it should pin it before invoking this function.
 */
static void cgroup_kn_unlock(struct kernfs_node *kn)
{
	struct cgroup *cgrp;

	if (kernfs_type(kn) == KERNFS_DIR)
		cgrp = kn->priv;
	else
		cgrp = kn->parent->priv;

	mutex_unlock(&cgroup_mutex);

	kernfs_unbreak_active_protection(kn);
	cgroup_put(cgrp);

1175 1176 1177 1178 1179 1180 1181 1182 1183 1184 1185 1186 1187 1188 1189 1190
/**
 * cgroup_kn_lock_live - locking helper for cgroup kernfs methods
 * @kn: the kernfs_node being serviced
 *
 * This helper is to be used by a cgroup kernfs method currently servicing
 * @kn.  It breaks the active protection, performs cgroup locking and
 * verifies that the associated cgroup is alive.  Returns the cgroup if
 * alive; otherwise, %NULL.  A successful return should be undone by a
 * matching cgroup_kn_unlock() invocation.
 *
 * Any cgroup kernfs method implementation which requires locking the
 * associated cgroup should use this helper.  It avoids nesting cgroup
 * locking under kernfs active protection and allows all kernfs operations
 * including self-removal.
 */
static struct cgroup *cgroup_kn_lock_live(struct kernfs_node *kn)
T
Tejun Heo 已提交
1191
{
1192 1193 1194 1195 1196 1197
	struct cgroup *cgrp;

	if (kernfs_type(kn) == KERNFS_DIR)
		cgrp = kn->priv;
	else
		cgrp = kn->parent->priv;
T
Tejun Heo 已提交
1198

1199
	/*
1200
	 * We're gonna grab cgroup_mutex which nests outside kernfs
1201 1202 1203
	 * active_ref.  cgroup liveliness check alone provides enough
	 * protection against removal.  Ensure @cgrp stays accessible and
	 * break the active_ref protection.
1204
	 */
1205 1206
	if (!cgroup_tryget(cgrp))
		return NULL;
1207 1208
	kernfs_break_active_protection(kn);

T
Tejun Heo 已提交
1209
	mutex_lock(&cgroup_mutex);
T
Tejun Heo 已提交
1210

1211 1212 1213 1214 1215
	if (!cgroup_is_dead(cgrp))
		return cgrp;

	cgroup_kn_unlock(kn);
	return NULL;
1216
}
T

static void cgroup_rm_file(struct cgroup *cgrp, const struct cftype *cft)
{
	char name[CGROUP_FILE_NAME_MAX];

	lockdep_assert_held(&cgroup_mutex);
	kernfs_remove_by_name(cgrp->kn, cgroup_file_name(cgrp, cft, name));

1226
/**
1227
 * cgroup_clear_dir - remove subsys files in a cgroup directory
1228
 * @cgrp: target cgroup
1229 1230
 * @subsys_mask: mask of the subsystem ids whose files should be removed
 */
1231
static void cgroup_clear_dir(struct cgroup *cgrp, unsigned long subsys_mask)
T
Tejun Heo 已提交
1232
{
1233
	struct cgroup_subsys *ss;
1234
	int i;
T
Tejun Heo 已提交
1235

1236
	for_each_subsys(ss, i) {
T
Tejun Heo 已提交
1237
		struct cftype *cfts;
1238

1239
		if (!(subsys_mask & (1 << i)))
1240
			continue;
T
Tejun Heo 已提交
1241 1242
		list_for_each_entry(cfts, &ss->cfts, node)
			cgroup_addrm_files(cgrp, cfts, false);
1243
	}
1244 1245
}

1246 1247
static int rebind_subsystems(struct cgroup_root *dst_root,
			     unsigned long ss_mask)
1248
{
1249
	struct cgroup_subsys *ss;
1250
	unsigned long tmp_ss_mask;
T
Tejun Heo 已提交
1251
	int ssid, i, ret;
1252

T
Tejun Heo 已提交
1253
	lockdep_assert_held(&cgroup_mutex);
1254

1255
	for_each_subsys_which(ss, ssid, &ss_mask) {
1256 1257
		/* if @ss has non-root csses attached to it, can't move */
		if (css_next_child(NULL, cgroup_css(&ss->root->cgrp, ss)))
T
Tejun Heo 已提交
1258
			return -EBUSY;
1259

1260
		/* can't move between two non-dummy roots either */
1261
		if (ss->root != &cgrp_dfl_root && dst_root != &cgrp_dfl_root)
1262
			return -EBUSY;
1263 1264
	}

1265 1266 1267 1268 1269 1270
	/* skip creating root files on dfl_root for inhibited subsystems */
	tmp_ss_mask = ss_mask;
	if (dst_root == &cgrp_dfl_root)
		tmp_ss_mask &= ~cgrp_dfl_root_inhibit_ss_mask;

	ret = cgroup_populate_dir(&dst_root->cgrp, tmp_ss_mask);
T
Tejun Heo 已提交
1271 1272
	if (ret) {
		if (dst_root != &cgrp_dfl_root)
1273
			return ret;
1274

T
Tejun Heo 已提交
1275 1276 1277 1278 1279 1280 1281
		/*
		 * Rebinding back to the default root is not allowed to
		 * fail.  Using both default and non-default roots should
		 * be rare.  Moving subsystems back and forth even more so.
		 * Just warn about it and continue.
		 */
		if (cgrp_dfl_root_visible) {
1282
			pr_warn("failed to create files (%d) while rebinding 0x%lx to default root\n",
1283
				ret, ss_mask);
1284
			pr_warn("you may retry by moving them to a different hierarchy and unbinding\n");
T
Tejun Heo 已提交
1285
		}
1286
	}
1287 1288 1289 1290 1291

	/*
	 * Nothing can fail from this point on.  Remove files for the
	 * removed subsystems and rebind each subsystem.
	 */
1292 1293
	for_each_subsys_which(ss, ssid, &ss_mask)
		cgroup_clear_dir(&ss->root->cgrp, 1 << ssid);
1294

1295
	for_each_subsys_which(ss, ssid, &ss_mask) {
1296
		struct cgroup_root *src_root;
1297
		struct cgroup_subsys_state *css;
T
Tejun Heo 已提交
1298
		struct css_set *cset;
1299

1300
		src_root = ss->root;
1301
		css = cgroup_css(&src_root->cgrp, ss);
1302

1303
		WARN_ON(!css || cgroup_css(&dst_root->cgrp, ss));
1304

1305 1306
		RCU_INIT_POINTER(src_root->cgrp.subsys[ssid], NULL);
		rcu_assign_pointer(dst_root->cgrp.subsys[ssid], css);
1307
		ss->root = dst_root;
1308
		css->cgroup = &dst_root->cgrp;
1309

T
Tejun Heo 已提交
1310 1311 1312 1313 1314 1315
		down_write(&css_set_rwsem);
		hash_for_each(css_set_table, i, cset, hlist)
			list_move_tail(&cset->e_cset_node[ss->id],
				       &dst_root->cgrp.e_csets[ss->id]);
		up_write(&css_set_rwsem);

1316
		src_root->subsys_mask &= ~(1 << ssid);
1317 1318
		src_root->cgrp.subtree_control &= ~(1 << ssid);
		cgroup_refresh_child_subsys_mask(&src_root->cgrp);
1319

1320
		/* default hierarchy doesn't enable controllers by default */
1321
		dst_root->subsys_mask |= 1 << ssid;
1322 1323 1324 1325
		if (dst_root != &cgrp_dfl_root) {
			dst_root->cgrp.subtree_control |= 1 << ssid;
			cgroup_refresh_child_subsys_mask(&dst_root->cgrp);
		}
1326

1327 1328
		if (ss->bind)
			ss->bind(css);
1329 1330
	}

T
Tejun Heo 已提交
1331
	kernfs_activate(dst_root->cgrp.kn);
1332 1333 1334
	return 0;
}

static int cgroup_show_options(struct seq_file *seq,
			       struct kernfs_root *kf_root)
{
	struct cgroup_root *root = cgroup_root_from_kf(kf_root);
	struct cgroup_subsys *ss;
	int ssid;

	if (root != &cgrp_dfl_root)
		for_each_subsys(ss, ssid)
			if (root->subsys_mask & (1 << ssid))
				seq_show_option(seq, ss->legacy_name, NULL);
	if (root->flags & CGRP_ROOT_NOPREFIX)
		seq_puts(seq, ",noprefix");
	if (root->flags & CGRP_ROOT_XATTR)
		seq_puts(seq, ",xattr");

	spin_lock(&release_agent_path_lock);
	if (strlen(root->release_agent_path))
		seq_show_option(seq, "release_agent",
				root->release_agent_path);
	spin_unlock(&release_agent_path_lock);

	if (test_bit(CGRP_CPUSET_CLONE_CHILDREN, &root->cgrp.flags))
		seq_puts(seq, ",clone_children");
	if (strlen(root->name))
		seq_show_option(seq, "name", root->name);
	return 0;
}

struct cgroup_sb_opts {
	unsigned long subsys_mask;
	unsigned int flags;
	char *release_agent;
	bool cpuset_clone_children;
	char *name;
	/* User explicitly requested empty subsystem */
	bool none;
};

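/*
 * Parse a comma-separated mount option string, e.g.
 * "cpu,cpuacct,name=mygrp,release_agent=/sbin/agent" (illustrative).
 */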
static int parse_cgroupfs_options(char *data, struct cgroup_sb_opts *opts)
{
	char *token, *o = data;
	bool all_ss = false, one_ss = false;
	unsigned long mask = -1UL;
	struct cgroup_subsys *ss;
	int nr_opts = 0;
	int i;

#ifdef CONFIG_CPUSETS
	mask = ~(1U << cpuset_cgrp_id);
#endif

	memset(opts, 0, sizeof(*opts));

	while ((token = strsep(&o, ",")) != NULL) {
		nr_opts++;

		if (!*token)
			return -EINVAL;
		if (!strcmp(token, "none")) {
			/* Explicitly have no subsystems */
			opts->none = true;
			continue;
		}
		if (!strcmp(token, "all")) {
			/* Mutually exclusive option 'all' + subsystem name */
			if (one_ss)
				return -EINVAL;
			all_ss = true;
			continue;
		}
		if (!strcmp(token, "__DEVEL__sane_behavior")) {
			opts->flags |= CGRP_ROOT_SANE_BEHAVIOR;
			continue;
		}
		if (!strcmp(token, "noprefix")) {
			opts->flags |= CGRP_ROOT_NOPREFIX;
			continue;
		}
		if (!strcmp(token, "clone_children")) {
			opts->cpuset_clone_children = true;
			continue;
		}
		if (!strcmp(token, "xattr")) {
			opts->flags |= CGRP_ROOT_XATTR;
			continue;
		}
		if (!strncmp(token, "release_agent=", 14)) {
			/* Specifying two release agents is forbidden */
			if (opts->release_agent)
				return -EINVAL;
			opts->release_agent =
				kstrndup(token + 14, PATH_MAX - 1, GFP_KERNEL);
			if (!opts->release_agent)
				return -ENOMEM;
			continue;
		}
		if (!strncmp(token, "name=", 5)) {
			const char *name = token + 5;
			/* Can't specify an empty name */
			if (!strlen(name))
				return -EINVAL;
			/* Must match [\w.-]+ */
			for (i = 0; i < strlen(name); i++) {
				char c = name[i];
				if (isalnum(c))
					continue;
				if ((c == '.') || (c == '-') || (c == '_'))
					continue;
				return -EINVAL;
			}
			/* Specifying two names is forbidden */
			if (opts->name)
				return -EINVAL;
			opts->name = kstrndup(name,
					      MAX_CGROUP_ROOT_NAMELEN - 1,
					      GFP_KERNEL);
			if (!opts->name)
				return -ENOMEM;

			continue;
		}

		for_each_subsys(ss, i) {
			if (strcmp(token, ss->legacy_name))
				continue;
			if (ss->disabled)
				continue;

			/* Mutually exclusive option 'all' + subsystem name */
			if (all_ss)
				return -EINVAL;
			opts->subsys_mask |= (1 << i);
			one_ss = true;

			break;
		}
		if (i == CGROUP_SUBSYS_COUNT)
			return -ENOENT;
	}

	if (opts->flags & CGRP_ROOT_SANE_BEHAVIOR) {
		pr_warn("sane_behavior: this is still under development and its behaviors will change, proceed at your own risk\n");
		if (nr_opts != 1) {
			pr_err("sane_behavior: no other mount options allowed\n");
			return -EINVAL;
		}
		return 0;
	}

	/*
	 * If the 'all' option was specified select all the subsystems,
	 * otherwise if 'none', 'name=' and a subsystem name options were
	 * not specified, let's default to 'all'
	 */
	if (all_ss || (!one_ss && !opts->none && !opts->name))
		for_each_subsys(ss, i)
			if (!ss->disabled)
				opts->subsys_mask |= (1 << i);

	/*
	 * We either have to specify by name or by subsystems. (So all
	 * empty hierarchies must have a name).
	 */
	if (!opts->subsys_mask && !opts->name)
		return -EINVAL;

	/*
	 * Option noprefix was introduced just for backward compatibility
	 * with the old cpuset, so we allow noprefix only if mounting just
	 * the cpuset subsystem.
	 */
	if ((opts->flags & CGRP_ROOT_NOPREFIX) && (opts->subsys_mask & mask))
		return -EINVAL;

	/* Can't specify "none" and some subsystems */
	if (opts->subsys_mask && opts->none)
		return -EINVAL;

	return 0;
}

static int cgroup_remount(struct kernfs_root *kf_root, int *flags, char *data)
{
	int ret = 0;
	struct cgroup_root *root = cgroup_root_from_kf(kf_root);
	struct cgroup_sb_opts opts;
	unsigned long added_mask, removed_mask;

	if (root == &cgrp_dfl_root) {
		pr_err("remount is not allowed\n");
		return -EINVAL;
	}

	mutex_lock(&cgroup_mutex);

	/* See what subsystems are wanted */
	ret = parse_cgroupfs_options(data, &opts);
	if (ret)
		goto out_unlock;

	if (opts.subsys_mask != root->subsys_mask || opts.release_agent)
		pr_warn("option changes via remount are deprecated (pid=%d comm=%s)\n",
			task_tgid_nr(current), current->comm);

	added_mask = opts.subsys_mask & ~root->subsys_mask;
	removed_mask = root->subsys_mask & ~opts.subsys_mask;

	/* Don't allow flags or name to change at remount */
	if ((opts.flags ^ root->flags) ||
	    (opts.name && strcmp(opts.name, root->name))) {
		pr_err("option or name mismatch, new: 0x%x \"%s\", old: 0x%x \"%s\"\n",
		       opts.flags, opts.name ?: "", root->flags, root->name);
		ret = -EINVAL;
		goto out_unlock;
	}

	/* remounting is not allowed for populated hierarchies */
	if (!list_empty(&root->cgrp.self.children)) {
		ret = -EBUSY;
		goto out_unlock;
	}

	ret = rebind_subsystems(root, added_mask);
	if (ret)
		goto out_unlock;

	rebind_subsystems(&cgrp_dfl_root, removed_mask);

	if (opts.release_agent) {
		spin_lock(&release_agent_path_lock);
		strcpy(root->release_agent_path, opts.release_agent);
		spin_unlock(&release_agent_path_lock);
	}
 out_unlock:
	kfree(opts.release_agent);
	kfree(opts.name);
	mutex_unlock(&cgroup_mutex);
	return ret;
}

/*
 * To reduce the fork() overhead for systems that are not actually using
 * their cgroups capability, we don't maintain the lists running through
 * each css_set to its tasks until we see the list actually used - in other
 * words after the first mount.
 */
static bool use_task_css_set_links __read_mostly;

static void cgroup_enable_task_cg_lists(void)
{
	struct task_struct *p, *g;

	down_write(&css_set_rwsem);

	if (use_task_css_set_links)
		goto out_unlock;

	use_task_css_set_links = true;

	/*
	 * We need tasklist_lock because RCU is not safe against
	 * while_each_thread(). Besides, a forking task that has passed
	 * cgroup_post_fork() without seeing use_task_css_set_links = 1
	 * is not guaranteed to have its child immediately visible in the
	 * tasklist if we walk through it with RCU.
	 */
	read_lock(&tasklist_lock);
	do_each_thread(g, p) {
		WARN_ON_ONCE(!list_empty(&p->cg_list) ||
			     task_css_set(p) != &init_css_set);

		/*
		 * We should check if the process is exiting, otherwise
		 * it will race with cgroup_exit() in that the list
		 * entry won't be deleted though the process has exited.
		 * Do it while holding siglock so that we don't end up
		 * racing against cgroup_exit().
		 */
		spin_lock_irq(&p->sighand->siglock);
		if (!(p->flags & PF_EXITING)) {
			struct css_set *cset = task_css_set(p);

			list_add(&p->cg_list, &cset->tasks);
			get_css_set(cset);
		}
		spin_unlock_irq(&p->sighand->siglock);
	} while_each_thread(g, p);
	read_unlock(&tasklist_lock);
out_unlock:
	up_write(&css_set_rwsem);
}

static void init_cgroup_housekeeping(struct cgroup *cgrp)
{
	struct cgroup_subsys *ss;
	int ssid;

	INIT_LIST_HEAD(&cgrp->self.sibling);
	INIT_LIST_HEAD(&cgrp->self.children);
	INIT_LIST_HEAD(&cgrp->cset_links);
	INIT_LIST_HEAD(&cgrp->pidlists);
	mutex_init(&cgrp->pidlist_mutex);
	cgrp->self.cgroup = cgrp;
	cgrp->self.flags |= CSS_ONLINE;

	for_each_subsys(ss, ssid)
		INIT_LIST_HEAD(&cgrp->e_csets[ssid]);

	init_waitqueue_head(&cgrp->offline_waitq);
	INIT_WORK(&cgrp->release_agent_work, cgroup_release_agent);
}

static void init_cgroup_root(struct cgroup_root *root,
			     struct cgroup_sb_opts *opts)
{
	struct cgroup *cgrp = &root->cgrp;

	INIT_LIST_HEAD(&root->root_list);
	atomic_set(&root->nr_cgrps, 1);
	cgrp->root = root;
	init_cgroup_housekeeping(cgrp);
	idr_init(&root->cgroup_idr);

	root->flags = opts->flags;
	if (opts->release_agent)
		strcpy(root->release_agent_path, opts->release_agent);
	if (opts->name)
		strcpy(root->name, opts->name);
	if (opts->cpuset_clone_children)
		set_bit(CGRP_CPUSET_CLONE_CHILDREN, &root->cgrp.flags);
}

static int cgroup_setup_root(struct cgroup_root *root, unsigned long ss_mask)
{
	LIST_HEAD(tmp_links);
	struct cgroup *root_cgrp = &root->cgrp;
	struct cftype *base_files;
	struct css_set *cset;
	int i, ret;

	lockdep_assert_held(&cgroup_mutex);

	ret = cgroup_idr_alloc(&root->cgroup_idr, root_cgrp, 1, 2, GFP_KERNEL);
	if (ret < 0)
		goto out;
	root_cgrp->id = ret;

	ret = percpu_ref_init(&root_cgrp->self.refcnt, css_release, 0,
			      GFP_KERNEL);
	if (ret)
		goto out;

	/*
	 * We're accessing css_set_count without locking css_set_rwsem here,
	 * but that's OK - it can only be increased by someone holding
	 * cgroup_lock, and that's us. The worst that can happen is that we
	 * have some link structures left over
	 */
	ret = allocate_cgrp_cset_links(css_set_count, &tmp_links);
	if (ret)
		goto cancel_ref;

	ret = cgroup_init_root_id(root);
	if (ret)
		goto cancel_ref;

	root->kf_root = kernfs_create_root(&cgroup_kf_syscall_ops,
					   KERNFS_ROOT_CREATE_DEACTIVATED,
					   root_cgrp);
	if (IS_ERR(root->kf_root)) {
		ret = PTR_ERR(root->kf_root);
		goto exit_root_id;
	}
	root_cgrp->kn = root->kf_root->kn;

	if (root == &cgrp_dfl_root)
		base_files = cgroup_dfl_base_files;
	else
		base_files = cgroup_legacy_base_files;

	ret = cgroup_addrm_files(root_cgrp, base_files, true);
	if (ret)
		goto destroy_root;

	ret = rebind_subsystems(root, ss_mask);
	if (ret)
		goto destroy_root;

	/*
	 * There must be no failure case after here, since rebinding takes
	 * care of subsystems' refcounts, which are explicitly dropped in
	 * the failure exit path.
	 */
	list_add(&root->root_list, &cgroup_roots);
	cgroup_root_count++;

	/*
	 * Link the root cgroup in this hierarchy into all the css_set
	 * objects.
	 */
	down_write(&css_set_rwsem);
	hash_for_each(css_set_table, i, cset, hlist)
		link_css_set(&tmp_links, cset, root_cgrp);
	up_write(&css_set_rwsem);

	BUG_ON(!list_empty(&root_cgrp->self.children));
	BUG_ON(atomic_read(&root->nr_cgrps) != 1);

	kernfs_activate(root_cgrp->kn);
	ret = 0;
	goto out;

destroy_root:
	kernfs_destroy_root(root->kf_root);
	root->kf_root = NULL;
exit_root_id:
	cgroup_exit_root_id(root);
cancel_ref:
	percpu_ref_exit(&root_cgrp->self.refcnt);
out:
	free_cgrp_cset_links(&tmp_links);
	return ret;
}

static struct dentry *cgroup_mount(struct file_system_type *fs_type,
			 int flags, const char *unused_dev_name,
			 void *data)
{
	struct super_block *pinned_sb = NULL;
	struct cgroup_subsys *ss;
	struct cgroup_root *root;
	struct cgroup_sb_opts opts;
	struct dentry *dentry;
	int ret;
	int i;
	bool new_sb;

	/*
	 * The first time anyone tries to mount a cgroup, enable the list
	 * linking each css_set to its tasks and fix up all existing tasks.
	 */
	if (!use_task_css_set_links)
		cgroup_enable_task_cg_lists();

	mutex_lock(&cgroup_mutex);

	/* First find the desired set of subsystems */
	ret = parse_cgroupfs_options(data, &opts);
	if (ret)
		goto out_unlock;

	/* look for a matching existing root */
	if (opts.flags & CGRP_ROOT_SANE_BEHAVIOR) {
		cgrp_dfl_root_visible = true;
		root = &cgrp_dfl_root;
		cgroup_get(&root->cgrp);
		ret = 0;
		goto out_unlock;
	}

	/*
	 * Destruction of cgroup root is asynchronous, so subsystems may
	 * still be dying after the previous unmount.  Let's drain the
	 * dying subsystems.  We just need to ensure that the ones
	 * unmounted previously finish dying and don't care about new ones
	 * starting.  Testing ref liveliness is good enough.
	 */
	for_each_subsys(ss, i) {
		if (!(opts.subsys_mask & (1 << i)) ||
		    ss->root == &cgrp_dfl_root)
			continue;

		if (!percpu_ref_tryget_live(&ss->root->cgrp.self.refcnt)) {
			mutex_unlock(&cgroup_mutex);
			msleep(10);
			ret = restart_syscall();
			goto out_free;
		}
		cgroup_put(&ss->root->cgrp);
	}

	for_each_root(root) {
		bool name_match = false;

		if (root == &cgrp_dfl_root)
			continue;

		/*
		 * If we asked for a name then it must match.  Also, if
		 * name matches but subsys_mask doesn't, we should fail.
		 * Remember whether name matched.
		 */
		if (opts.name) {
			if (strcmp(opts.name, root->name))
				continue;
			name_match = true;
		}

		/*
		 * If we asked for subsystems (or explicitly for no
		 * subsystems) then they must match.
		 */
		if ((opts.subsys_mask || opts.none) &&
		    (opts.subsys_mask != root->subsys_mask)) {
			if (!name_match)
				continue;
			ret = -EBUSY;
			goto out_unlock;
		}

		if (root->flags ^ opts.flags)
			pr_warn("new mount options do not match the existing superblock, will be ignored\n");

		/*
		 * We want to reuse @root whose lifetime is governed by its
		 * ->cgrp.  Let's check whether @root is alive and keep it
		 * that way.  As cgroup_kill_sb() can happen anytime, we
		 * want to block it by pinning the sb so that @root doesn't
		 * get killed before mount is complete.
		 *
		 * With the sb pinned, tryget_live can reliably indicate
		 * whether @root can be reused.  If it's being killed,
		 * drain it.  We can use wait_queue for the wait but this
		 * path is super cold.  Let's just sleep a bit and retry.
		 */
		pinned_sb = kernfs_pin_sb(root->kf_root, NULL);
		if (IS_ERR(pinned_sb) ||
		    !percpu_ref_tryget_live(&root->cgrp.self.refcnt)) {
			mutex_unlock(&cgroup_mutex);
			if (!IS_ERR_OR_NULL(pinned_sb))
				deactivate_super(pinned_sb);
			msleep(10);
			ret = restart_syscall();
			goto out_free;
		}

		ret = 0;
		goto out_unlock;
	}

	/*
	 * No such thing, create a new one.  name= matching without subsys
	 * specification is allowed for already existing hierarchies but we
	 * can't create new one without subsys specification.
	 */
	if (!opts.subsys_mask && !opts.none) {
		ret = -EINVAL;
		goto out_unlock;
	}

	root = kzalloc(sizeof(*root), GFP_KERNEL);
	if (!root) {
		ret = -ENOMEM;
		goto out_unlock;
	}

	init_cgroup_root(root, &opts);

	ret = cgroup_setup_root(root, opts.subsys_mask);
	if (ret)
		cgroup_free_root(root);

out_unlock:
	mutex_unlock(&cgroup_mutex);
out_free:
	kfree(opts.release_agent);
	kfree(opts.name);

	if (ret)
		return ERR_PTR(ret);

	dentry = kernfs_mount(fs_type, flags, root->kf_root,
				CGROUP_SUPER_MAGIC, &new_sb);
	if (IS_ERR(dentry) || !new_sb)
		cgroup_put(&root->cgrp);

	/*
	 * If @pinned_sb, we're reusing an existing root and holding an
	 * extra ref on its sb.  Mount is complete.  Put the extra ref.
	 */
	if (pinned_sb) {
		WARN_ON(new_sb);
		deactivate_super(pinned_sb);
	}

	return dentry;
}

static void cgroup_kill_sb(struct super_block *sb)
{
	struct kernfs_root *kf_root = kernfs_root_from_sb(sb);
	struct cgroup_root *root = cgroup_root_from_kf(kf_root);

	/*
	 * If @root doesn't have any mounts or children, start killing it.
	 * This prevents new mounts by disabling percpu_ref_tryget_live().
	 * cgroup_mount() may wait for @root's release.
	 *
	 * And don't kill the default root.
	 */
	if (!list_empty(&root->cgrp.self.children) ||
	    root == &cgrp_dfl_root)
		cgroup_put(&root->cgrp);
	else
		percpu_ref_kill(&root->cgrp.self.refcnt);

	kernfs_kill_sb(sb);
}

static struct file_system_type cgroup_fs_type = {
	.name = "cgroup",
	.mount = cgroup_mount,
	.kill_sb = cgroup_kill_sb,
};

/**
 * task_cgroup_path - cgroup path of a task in the first cgroup hierarchy
 * @task: target task
 * @buf: the buffer to write the path into
 * @buflen: the length of the buffer
 *
 * Determine @task's cgroup on the first (the one with the lowest non-zero
 * hierarchy_id) cgroup hierarchy and copy its path into @buf.  This
 * function grabs cgroup_mutex and shouldn't be used inside locks used by
 * cgroup controller callbacks.
 *
 * Return value is the same as kernfs_path().
 */
char *task_cgroup_path(struct task_struct *task, char *buf, size_t buflen)
{
	struct cgroup_root *root;
	struct cgroup *cgrp;
	int hierarchy_id = 1;
	char *path = NULL;

	mutex_lock(&cgroup_mutex);
	down_read(&css_set_rwsem);

	root = idr_get_next(&cgroup_hierarchy_idr, &hierarchy_id);

	if (root) {
		cgrp = task_cgroup_from_root(task, root);
		path = cgroup_path(cgrp, buf, buflen);
	} else {
		/* if no hierarchy exists, everyone is in "/" */
		if (strlcpy(buf, "/", buflen) < buflen)
			path = buf;
	}

	up_read(&css_set_rwsem);
	mutex_unlock(&cgroup_mutex);
	return path;
}
EXPORT_SYMBOL_GPL(task_cgroup_path);

/* used to track tasks and other necessary states during migration */
struct cgroup_taskset {
	/* the src and dst cset list running through cset->mg_node */
	struct list_head	src_csets;
	struct list_head	dst_csets;

	/*
	 * Fields for cgroup_taskset_*() iteration.
	 *
	 * Before migration is committed, the target migration tasks are on
	 * ->mg_tasks of the csets on ->src_csets.  After, on ->mg_tasks of
	 * the csets on ->dst_csets.  ->csets points to either ->src_csets
	 * or ->dst_csets depending on whether migration is committed.
	 *
	 * ->cur_cset and ->cur_task point to the current task position
	 * during iteration.
	 */
	struct list_head	*csets;
	struct css_set		*cur_cset;
	struct task_struct	*cur_task;
};

/**
 * cgroup_taskset_first - reset taskset and return the first task
 * @tset: taskset of interest
 *
 * @tset iteration is initialized and the first task is returned.
 */
struct task_struct *cgroup_taskset_first(struct cgroup_taskset *tset)
{
	tset->cur_cset = list_first_entry(tset->csets, struct css_set, mg_node);
	tset->cur_task = NULL;

	return cgroup_taskset_next(tset);
}

/**
 * cgroup_taskset_next - iterate to the next task in taskset
 * @tset: taskset of interest
 *
 * Return the next task in @tset.  Iteration must have been initialized
 * with cgroup_taskset_first().
 */
struct task_struct *cgroup_taskset_next(struct cgroup_taskset *tset)
{
	struct css_set *cset = tset->cur_cset;
	struct task_struct *task = tset->cur_task;

	while (&cset->mg_node != tset->csets) {
		if (!task)
			task = list_first_entry(&cset->mg_tasks,
						struct task_struct, cg_list);
		else
			task = list_next_entry(task, cg_list);

		if (&task->cg_list != &cset->mg_tasks) {
			tset->cur_cset = cset;
			tset->cur_task = task;
			return task;
		}

		cset = list_next_entry(cset, mg_node);
		task = NULL;
	}

	return NULL;
}

/**
 * cgroup_task_migrate - move a task from one cgroup to another.
 * @old_cgrp: the cgroup @tsk is being migrated from
 * @tsk: the task being migrated
 * @new_cset: the new css_set @tsk is being attached to
 *
 * Must be called with cgroup_mutex, threadgroup and css_set_rwsem locked.
 */
static void cgroup_task_migrate(struct cgroup *old_cgrp,
				struct task_struct *tsk,
				struct css_set *new_cset)
{
	struct css_set *old_cset;

	lockdep_assert_held(&cgroup_mutex);
	lockdep_assert_held(&css_set_rwsem);

	/*
	 * We are synchronized through cgroup_threadgroup_rwsem against
	 * PF_EXITING setting such that we can't race against cgroup_exit()
	 * changing the css_set to init_css_set and dropping the old one.
	 */
	WARN_ON_ONCE(tsk->flags & PF_EXITING);
	old_cset = task_css_set(tsk);

	get_css_set(new_cset);
	rcu_assign_pointer(tsk->cgroups, new_cset);

	/*
	 * Use move_tail so that cgroup_taskset_first() still returns the
	 * leader after migration.  This works because cgroup_migrate()
	 * ensures that the dst_cset of the leader is the first on the
	 * tset's dst_csets list.
	 */
	list_move_tail(&tsk->cg_list, &new_cset->mg_tasks);

	/*
	 * We just gained a reference on old_cset by taking it from the
	 * task. As trading it for new_cset is protected by cgroup_mutex,
	 * we're safe to drop it here; it will be freed under RCU.
	 */
	put_css_set_locked(old_cset);
}

/**
 * cgroup_migrate_finish - cleanup after attach
 * @preloaded_csets: list of preloaded css_sets
 *
 * Undo cgroup_migrate_add_src() and cgroup_migrate_prepare_dst().  See
 * those functions for details.
 */
static void cgroup_migrate_finish(struct list_head *preloaded_csets)
{
	struct css_set *cset, *tmp_cset;

	lockdep_assert_held(&cgroup_mutex);

	down_write(&css_set_rwsem);
	list_for_each_entry_safe(cset, tmp_cset, preloaded_csets, mg_preload_node) {
		cset->mg_src_cgrp = NULL;
		cset->mg_dst_cset = NULL;
		list_del_init(&cset->mg_preload_node);
		put_css_set_locked(cset);
	}
	up_write(&css_set_rwsem);
}

/**
 * cgroup_migrate_add_src - add a migration source css_set
 * @src_cset: the source css_set to add
 * @dst_cgrp: the destination cgroup
 * @preloaded_csets: list of preloaded css_sets
 *
 * Tasks belonging to @src_cset are about to be migrated to @dst_cgrp.  Pin
 * @src_cset and add it to @preloaded_csets, which should later be cleaned
 * up by cgroup_migrate_finish().
 *
 * This function may be called without holding cgroup_threadgroup_rwsem
 * even if the target is a process.  Threads may be created and destroyed
 * but as long as cgroup_mutex is not dropped, no new css_set can be put
 * into play and the preloaded css_sets are guaranteed to cover all
 * migrations.
 */
static void cgroup_migrate_add_src(struct css_set *src_cset,
				   struct cgroup *dst_cgrp,
				   struct list_head *preloaded_csets)
{
	struct cgroup *src_cgrp;

	lockdep_assert_held(&cgroup_mutex);
	lockdep_assert_held(&css_set_rwsem);

	src_cgrp = cset_cgroup_from_root(src_cset, dst_cgrp->root);

	if (!list_empty(&src_cset->mg_preload_node))
		return;

	WARN_ON(src_cset->mg_src_cgrp);
	WARN_ON(!list_empty(&src_cset->mg_tasks));
	WARN_ON(!list_empty(&src_cset->mg_node));

	src_cset->mg_src_cgrp = src_cgrp;
	get_css_set(src_cset);
	list_add(&src_cset->mg_preload_node, preloaded_csets);
}

/**
 * cgroup_migrate_prepare_dst - prepare destination css_sets for migration
 * @dst_cgrp: the destination cgroup (may be %NULL)
 * @preloaded_csets: list of preloaded source css_sets
 *
 * Tasks are about to be moved to @dst_cgrp and all the source css_sets
 * have been preloaded to @preloaded_csets.  This function looks up and
 * pins all destination css_sets, links each to its source, and appends
 * them to @preloaded_csets.  If @dst_cgrp is %NULL, the destination of
 * each source css_set is assumed to be its cgroup on the default
 * hierarchy.
 *
 * This function must be called after cgroup_migrate_add_src() has been
 * called on each migration source css_set.  After migration is performed
 * using cgroup_migrate(), cgroup_migrate_finish() must be called on
 * @preloaded_csets.
 */
static int cgroup_migrate_prepare_dst(struct cgroup *dst_cgrp,
				      struct list_head *preloaded_csets)
{
	LIST_HEAD(csets);
	struct css_set *src_cset, *tmp_cset;

	lockdep_assert_held(&cgroup_mutex);

	/*
	 * Except for the root, child_subsys_mask must be zero for a cgroup
	 * with tasks so that child cgroups don't compete against tasks.
	 */
	if (dst_cgrp && cgroup_on_dfl(dst_cgrp) && cgroup_parent(dst_cgrp) &&
	    dst_cgrp->child_subsys_mask)
		return -EBUSY;

	/* look up the dst cset for each src cset and link it to src */
	list_for_each_entry_safe(src_cset, tmp_cset, preloaded_csets, mg_preload_node) {
		struct css_set *dst_cset;

		dst_cset = find_css_set(src_cset,
					dst_cgrp ?: src_cset->dfl_cgrp);
		if (!dst_cset)
			goto err;

		WARN_ON_ONCE(src_cset->mg_dst_cset || dst_cset->mg_dst_cset);

		/*
		 * If src cset equals dst, it's noop.  Drop the src.
		 * cgroup_migrate() will skip the cset too.  Note that we
		 * can't handle src == dst as some nodes are used by both.
		 */
		if (src_cset == dst_cset) {
			src_cset->mg_src_cgrp = NULL;
			list_del_init(&src_cset->mg_preload_node);
			put_css_set(src_cset);
			put_css_set(dst_cset);
			continue;
		}

		src_cset->mg_dst_cset = dst_cset;

		if (list_empty(&dst_cset->mg_preload_node))
			list_add(&dst_cset->mg_preload_node, &csets);
		else
			put_css_set(dst_cset);
	}

	list_splice_tail(&csets, preloaded_csets);
	return 0;
err:
	cgroup_migrate_finish(&csets);
	return -ENOMEM;
}

/**
 * cgroup_migrate - migrate a process or task to a cgroup
 * @cgrp: the destination cgroup
 * @leader: the leader of the process or the task to migrate
 * @threadgroup: whether @leader points to the whole process or a single task
 *
 * Migrate a process or task denoted by @leader to @cgrp.  If migrating a
 * process, the caller must be holding cgroup_threadgroup_rwsem.  The
 * caller is also responsible for invoking cgroup_migrate_add_src() and
 * cgroup_migrate_prepare_dst() on the targets before invoking this
 * function and following up with cgroup_migrate_finish().
 *
 * As long as a controller's ->can_attach() doesn't fail, this function is
 * guaranteed to succeed.  This means that, excluding ->can_attach()
 * failure, when migrating multiple targets, the success or failure can be
 * decided for all targets by invoking cgroup_migrate_prepare_dst() before
 * actually starting migrating.
 */
static int cgroup_migrate(struct cgroup *cgrp, struct task_struct *leader,
			  bool threadgroup)
{
	struct cgroup_taskset tset = {
		.src_csets	= LIST_HEAD_INIT(tset.src_csets),
		.dst_csets	= LIST_HEAD_INIT(tset.dst_csets),
		.csets		= &tset.src_csets,
	};
	struct cgroup_subsys_state *css, *failed_css = NULL;
	struct css_set *cset, *tmp_cset;
	struct task_struct *task, *tmp_task;
	int i, ret;

	/*
	 * Prevent freeing of tasks while we take a snapshot. Tasks that are
	 * already PF_EXITING could be freed from underneath us unless we
	 * take an rcu_read_lock.
	 */
	down_write(&css_set_rwsem);
	rcu_read_lock();
	task = leader;
	do {
		/* @task either already exited or can't exit until the end */
		if (task->flags & PF_EXITING)
			goto next;

		/* leave @task alone if post_fork() hasn't linked it yet */
		if (list_empty(&task->cg_list))
			goto next;

		cset = task_css_set(task);
		if (!cset->mg_src_cgrp)
			goto next;

		/*
		 * cgroup_taskset_first() must always return the leader.
		 * Take care to avoid disturbing the ordering.
		 */
		list_move_tail(&task->cg_list, &cset->mg_tasks);
		if (list_empty(&cset->mg_node))
			list_add_tail(&cset->mg_node, &tset.src_csets);
		if (list_empty(&cset->mg_dst_cset->mg_node))
			list_move_tail(&cset->mg_dst_cset->mg_node,
				       &tset.dst_csets);
	next:
		if (!threadgroup)
			break;
	} while_each_thread(leader, task);
	rcu_read_unlock();
	up_write(&css_set_rwsem);

	/* methods shouldn't be called if no task is actually migrating */
	if (list_empty(&tset.src_csets))
		return 0;

	/* check that we can legitimately attach to the cgroup */
	for_each_e_css(css, i, cgrp) {
		if (css->ss->can_attach) {
			ret = css->ss->can_attach(css, &tset);
			if (ret) {
				failed_css = css;
				goto out_cancel_attach;
			}
		}
	}

	/*
	 * Now that we're guaranteed success, proceed to move all tasks to
	 * the new cgroup.  There are no failure cases after here, so this
	 * is the commit point.
	 */
	down_write(&css_set_rwsem);
	list_for_each_entry(cset, &tset.src_csets, mg_node) {
		list_for_each_entry_safe(task, tmp_task, &cset->mg_tasks, cg_list)
			cgroup_task_migrate(cset->mg_src_cgrp, task,
					    cset->mg_dst_cset);
	}
	up_write(&css_set_rwsem);

	/*
	 * Migration is committed, all target tasks are now on dst_csets.
	 * Nothing is sensitive to fork() after this point.  Notify
	 * controllers that migration is complete.
	 */
	tset.csets = &tset.dst_csets;

	for_each_e_css(css, i, cgrp)
		if (css->ss->attach)
			css->ss->attach(css, &tset);

	ret = 0;
	goto out_release_tset;

out_cancel_attach:
	for_each_e_css(css, i, cgrp) {
		if (css == failed_css)
			break;
		if (css->ss->cancel_attach)
			css->ss->cancel_attach(css, &tset);
	}
out_release_tset:
	down_write(&css_set_rwsem);
	list_splice_init(&tset.dst_csets, &tset.src_csets);
	list_for_each_entry_safe(cset, tmp_cset, &tset.src_csets, mg_node) {
		list_splice_tail_init(&cset->mg_tasks, &cset->tasks);
		list_del_init(&cset->mg_node);
	}
	up_write(&css_set_rwsem);
	return ret;
}

/**
 * cgroup_attach_task - attach a task or a whole threadgroup to a cgroup
 * @dst_cgrp: the cgroup to attach to
 * @leader: the task or the leader of the threadgroup to be attached
 * @threadgroup: attach the whole threadgroup?
 *
 * Call holding cgroup_mutex and cgroup_threadgroup_rwsem.
 */
static int cgroup_attach_task(struct cgroup *dst_cgrp,
			      struct task_struct *leader, bool threadgroup)
{
	LIST_HEAD(preloaded_csets);
	struct task_struct *task;
	int ret;

	/* look up all src csets */
	down_read(&css_set_rwsem);
	rcu_read_lock();
	task = leader;
	do {
		cgroup_migrate_add_src(task_css_set(task), dst_cgrp,
				       &preloaded_csets);
		if (!threadgroup)
			break;
	} while_each_thread(leader, task);
	rcu_read_unlock();
	up_read(&css_set_rwsem);

	/* prepare dst csets and commit */
	ret = cgroup_migrate_prepare_dst(dst_cgrp, &preloaded_csets);
	if (!ret)
		ret = cgroup_migrate(dst_cgrp, leader, threadgroup);

	cgroup_migrate_finish(&preloaded_csets);
	return ret;
}

static int cgroup_procs_write_permission(struct task_struct *task,
					 struct cgroup *dst_cgrp,
					 struct kernfs_open_file *of)
{
	const struct cred *cred = current_cred();
	const struct cred *tcred = get_task_cred(task);
	int ret = 0;

	/*
	 * even if we're attaching all tasks in the thread group, we only
	 * need to check permissions on one of them.
	 */
	if (!uid_eq(cred->euid, GLOBAL_ROOT_UID) &&
	    !uid_eq(cred->euid, tcred->uid) &&
	    !uid_eq(cred->euid, tcred->suid))
		ret = -EACCES;

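	/*
	 * On the default hierarchy, migrating additionally requires write
	 * access to the cgroup.procs file of the common ancestor of the
	 * source and destination cgroups, which is what the loop below
	 * walks up to before the inode permission check.
	 */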
	if (!ret && cgroup_on_dfl(dst_cgrp)) {
		struct super_block *sb = of->file->f_path.dentry->d_sb;
		struct cgroup *cgrp;
		struct inode *inode;

		down_read(&css_set_rwsem);
		cgrp = task_cgroup_from_root(task, &cgrp_dfl_root);
		up_read(&css_set_rwsem);

		while (!cgroup_is_descendant(dst_cgrp, cgrp))
			cgrp = cgroup_parent(cgrp);

		ret = -ENOMEM;
		inode = kernfs_get_inode(sb, cgrp->procs_kn);
		if (inode) {
			ret = inode_permission(inode, MAY_WRITE);
			iput(inode);
		}
	}

	put_cred(tcred);
	return ret;
}

/*
 * Find the task_struct of the task to attach by vpid and pass it along to the
 * function to attach either it or all tasks in its threadgroup. Will lock
 * cgroup_mutex and threadgroup.
 */
static ssize_t __cgroup_procs_write(struct kernfs_open_file *of, char *buf,
				    size_t nbytes, loff_t off, bool threadgroup)
{
	struct task_struct *tsk;
	struct cgroup *cgrp;
	pid_t pid;
	int ret;

	if (kstrtoint(strstrip(buf), 0, &pid) || pid < 0)
		return -EINVAL;

	cgrp = cgroup_kn_lock_live(of->kn);
	if (!cgrp)
		return -ENODEV;

	percpu_down_write(&cgroup_threadgroup_rwsem);
	rcu_read_lock();
	if (pid) {
		tsk = find_task_by_vpid(pid);
		if (!tsk) {
			ret = -ESRCH;
			goto out_unlock_rcu;
		}
	} else {
		tsk = current;
	}

	if (threadgroup)
		tsk = tsk->group_leader;

	/*
	 * Workqueue threads may acquire PF_NO_SETAFFINITY and become
	 * trapped in a cpuset, or RT worker may be born in a cgroup
	 * with no rt_runtime allocated.  Just say no.
	 */
	if (tsk == kthreadd_task || (tsk->flags & PF_NO_SETAFFINITY)) {
		ret = -EINVAL;
		goto out_unlock_rcu;
	}

	get_task_struct(tsk);
	rcu_read_unlock();

	ret = cgroup_procs_write_permission(tsk, cgrp, of);
	if (!ret)
		ret = cgroup_attach_task(cgrp, tsk, threadgroup);

	put_task_struct(tsk);
	goto out_unlock_threadgroup;

out_unlock_rcu:
	rcu_read_unlock();
out_unlock_threadgroup:
	percpu_up_write(&cgroup_threadgroup_rwsem);
	cgroup_kn_unlock(of->kn);
	return ret ?: nbytes;
}

/**
 * cgroup_attach_task_all - attach task 'tsk' to all cgroups of task 'from'
 * @from: attach to all cgroups of a given task
 * @tsk: the task to be attached
 */
int cgroup_attach_task_all(struct task_struct *from, struct task_struct *tsk)
{
	struct cgroup_root *root;
	int retval = 0;

	mutex_lock(&cgroup_mutex);
	for_each_root(root) {
		struct cgroup *from_cgrp;

		if (root == &cgrp_dfl_root)
			continue;

		down_read(&css_set_rwsem);
		from_cgrp = task_cgroup_from_root(from, root);
		up_read(&css_set_rwsem);

		retval = cgroup_attach_task(from_cgrp, tsk, false);
		if (retval)
			break;
	}
	mutex_unlock(&cgroup_mutex);

	return retval;
}
EXPORT_SYMBOL_GPL(cgroup_attach_task_all);

static ssize_t cgroup_tasks_write(struct kernfs_open_file *of,
				  char *buf, size_t nbytes, loff_t off)
{
	return __cgroup_procs_write(of, buf, nbytes, off, false);
}

static ssize_t cgroup_procs_write(struct kernfs_open_file *of,
				  char *buf, size_t nbytes, loff_t off)
{
	return __cgroup_procs_write(of, buf, nbytes, off, true);
}

static ssize_t cgroup_release_agent_write(struct kernfs_open_file *of,
					  char *buf, size_t nbytes, loff_t off)
{
	struct cgroup *cgrp;

	BUILD_BUG_ON(sizeof(cgrp->root->release_agent_path) < PATH_MAX);

	cgrp = cgroup_kn_lock_live(of->kn);
	if (!cgrp)
		return -ENODEV;
	spin_lock(&release_agent_path_lock);
	strlcpy(cgrp->root->release_agent_path, strstrip(buf),
		sizeof(cgrp->root->release_agent_path));
	spin_unlock(&release_agent_path_lock);
	cgroup_kn_unlock(of->kn);
	return nbytes;
}

static int cgroup_release_agent_show(struct seq_file *seq, void *v)
{
	struct cgroup *cgrp = seq_css(seq)->cgroup;

	spin_lock(&release_agent_path_lock);
	seq_puts(seq, cgrp->root->release_agent_path);
	spin_unlock(&release_agent_path_lock);
	seq_putc(seq, '\n');
	return 0;
}

static int cgroup_sane_behavior_show(struct seq_file *seq, void *v)
{
	seq_puts(seq, "0\n");
	return 0;
}

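/* print the space-separated names of the subsystems enabled in @ss_mask */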
static void cgroup_print_ss_mask(struct seq_file *seq, unsigned long ss_mask)
{
	struct cgroup_subsys *ss;
	bool printed = false;
	int ssid;

	for_each_subsys_which(ss, ssid, &ss_mask) {
		if (printed)
			seq_putc(seq, ' ');
		seq_printf(seq, "%s", ss->name);
		printed = true;
	}
	if (printed)
		seq_putc(seq, '\n');
}

/* show controllers which are currently attached to the default hierarchy */
static int cgroup_root_controllers_show(struct seq_file *seq, void *v)
{
	struct cgroup *cgrp = seq_css(seq)->cgroup;

	cgroup_print_ss_mask(seq, cgrp->root->subsys_mask &
			     ~cgrp_dfl_root_inhibit_ss_mask);
	return 0;
}

/* show controllers which are enabled from the parent */
static int cgroup_controllers_show(struct seq_file *seq, void *v)
{
	struct cgroup *cgrp = seq_css(seq)->cgroup;

	cgroup_print_ss_mask(seq, cgroup_parent(cgrp)->subtree_control);
	return 0;
}

/* show controllers which are enabled for a given cgroup's children */
static int cgroup_subtree_control_show(struct seq_file *seq, void *v)
{
	struct cgroup *cgrp = seq_css(seq)->cgroup;

	cgroup_print_ss_mask(seq, cgrp->subtree_control);
	return 0;
}

/**
 * cgroup_update_dfl_csses - update css assoc of a subtree in default hierarchy
 * @cgrp: root of the subtree to update csses for
 *
 * @cgrp's child_subsys_mask has changed and its subtree's (self excluded)
 * css associations need to be updated accordingly.  This function looks up
 * all css_sets which are attached to the subtree, creates the matching
 * updated css_sets and migrates the tasks to the new ones.
 */
static int cgroup_update_dfl_csses(struct cgroup *cgrp)
{
	LIST_HEAD(preloaded_csets);
	struct cgroup_subsys_state *css;
	struct css_set *src_cset;
	int ret;

	lockdep_assert_held(&cgroup_mutex);

	percpu_down_write(&cgroup_threadgroup_rwsem);

	/* look up all csses currently attached to @cgrp's subtree */
	down_read(&css_set_rwsem);
	css_for_each_descendant_pre(css, cgroup_css(cgrp, NULL)) {
		struct cgrp_cset_link *link;

		/* self is not affected by child_subsys_mask change */
		if (css->cgroup == cgrp)
			continue;

		list_for_each_entry(link, &css->cgroup->cset_links, cset_link)
			cgroup_migrate_add_src(link->cset, cgrp,
					       &preloaded_csets);
	}
	up_read(&css_set_rwsem);

	/* NULL dst indicates self on default hierarchy */
	ret = cgroup_migrate_prepare_dst(NULL, &preloaded_csets);
	if (ret)
		goto out_finish;

	list_for_each_entry(src_cset, &preloaded_csets, mg_preload_node) {
		struct task_struct *last_task = NULL, *task;

		/* src_csets precede dst_csets, break on the first dst_cset */
		if (!src_cset->mg_src_cgrp)
			break;

		/*
		 * All tasks in src_cset need to be migrated to the
		 * matching dst_cset.  Empty it process by process.  We
		 * walk tasks but migrate processes.  The leader might even
		 * belong to a different cset but such src_cset would also
		 * be among the target src_csets because the default
		 * hierarchy enforces per-process membership.
		 */
		while (true) {
			down_read(&css_set_rwsem);
			task = list_first_entry_or_null(&src_cset->tasks,
						struct task_struct, cg_list);
			if (task) {
				task = task->group_leader;
				WARN_ON_ONCE(!task_css_set(task)->mg_src_cgrp);
				get_task_struct(task);
			}
			up_read(&css_set_rwsem);

			if (!task)
				break;

			/* guard against possible infinite loop */
			if (WARN(last_task == task,
				 "cgroup: update_dfl_csses failed to make progress, aborting in inconsistent state\n"))
				goto out_finish;
			last_task = task;

			ret = cgroup_migrate(src_cset->dfl_cgrp, task, true);

			put_task_struct(task);

			if (WARN(ret, "cgroup: failed to update controllers for the default hierarchy (%d), further operations may crash or hang\n", ret))
				goto out_finish;
		}
	}

out_finish:
	cgroup_migrate_finish(&preloaded_csets);
	percpu_up_write(&cgroup_threadgroup_rwsem);
	return ret;
}

/* change the enabled child controllers for a cgroup in the default hierarchy */
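/*
 * For example, assuming the default hierarchy is mounted at /cgrp, writing
 * "+memory" to /cgrp/parent/cgroup.subtree_control enables the memory
 * controller for /cgrp/parent's children and "-memory" disables it again.
 */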
static ssize_t cgroup_subtree_control_write(struct kernfs_open_file *of,
					    char *buf, size_t nbytes,
					    loff_t off)
{
	unsigned long enable = 0, disable = 0;
	unsigned long css_enable, css_disable, old_sc, new_sc, old_ss, new_ss;
	struct cgroup *cgrp, *child;
	struct cgroup_subsys *ss;
	char *tok;
	int ssid, ret;

	/*
	 * Parse input - space separated list of subsystem names prefixed
	 * with either + or -.
	 */
	buf = strstrip(buf);
	while ((tok = strsep(&buf, " "))) {
		unsigned long tmp_ss_mask = ~cgrp_dfl_root_inhibit_ss_mask;

		if (tok[0] == '\0')
			continue;
		for_each_subsys_which(ss, ssid, &tmp_ss_mask) {
			if (ss->disabled || strcmp(tok + 1, ss->name))
				continue;

			if (*tok == '+') {
				enable |= 1 << ssid;
				disable &= ~(1 << ssid);
			} else if (*tok == '-') {
				disable |= 1 << ssid;
				enable &= ~(1 << ssid);
			} else {
				return -EINVAL;
			}
			break;
		}
		if (ssid == CGROUP_SUBSYS_COUNT)
			return -EINVAL;
	}

	cgrp = cgroup_kn_lock_live(of->kn);
	if (!cgrp)
		return -ENODEV;

	for_each_subsys(ss, ssid) {
		if (enable & (1 << ssid)) {
			if (cgrp->subtree_control & (1 << ssid)) {
				enable &= ~(1 << ssid);
				continue;
			}

			/* unavailable or not enabled on the parent? */
			if (!(cgrp_dfl_root.subsys_mask & (1 << ssid)) ||
			    (cgroup_parent(cgrp) &&
			     !(cgroup_parent(cgrp)->subtree_control & (1 << ssid)))) {
				ret = -ENOENT;
				goto out_unlock;
			}
		} else if (disable & (1 << ssid)) {
			if (!(cgrp->subtree_control & (1 << ssid))) {
				disable &= ~(1 << ssid);
				continue;
			}

			/* a child has it enabled? */
			cgroup_for_each_live_child(child, cgrp) {
				if (child->subtree_control & (1 << ssid)) {
					ret = -EBUSY;
					goto out_unlock;
				}
			}
		}
	}

	if (!enable && !disable) {
		ret = 0;
		goto out_unlock;
	}

	/*
	 * Except for the root, subtree_control must be zero for a cgroup
	 * with tasks so that child cgroups don't compete against tasks.
	 */
	if (enable && cgroup_parent(cgrp) && !list_empty(&cgrp->cset_links)) {
		ret = -EBUSY;
		goto out_unlock;
	}

	/*
	 * Update subsys masks and calculate what needs to be done.  More
	 * subsystems than specified may need to be enabled or disabled
	 * depending on subsystem dependencies.
	 */
	old_sc = cgrp->subtree_control;
	old_ss = cgrp->child_subsys_mask;
	new_sc = (old_sc | enable) & ~disable;
	new_ss = cgroup_calc_child_subsys_mask(cgrp, new_sc);

	css_enable = ~old_ss & new_ss;
	css_disable = old_ss & ~new_ss;
	enable |= css_enable;
	disable |= css_disable;

	/*
	 * Because css offlining is asynchronous, userland might try to
	 * re-enable the same controller while the previous instance is
	 * still around.  In such cases, wait till it's gone using
	 * offline_waitq.
	 */
	for_each_subsys_which(ss, ssid, &css_enable) {
		cgroup_for_each_live_child(child, cgrp) {
			DEFINE_WAIT(wait);

			if (!cgroup_css(child, ss))
				continue;

			cgroup_get(child);
			prepare_to_wait(&child->offline_waitq, &wait,
					TASK_UNINTERRUPTIBLE);
			cgroup_kn_unlock(of->kn);
			schedule();
			finish_wait(&child->offline_waitq, &wait);
			cgroup_put(child);

			return restart_syscall();
		}
	}

	cgrp->subtree_control = new_sc;
	cgrp->child_subsys_mask = new_ss;

	/*
	 * Create new csses or make the existing ones visible.  A css is
	 * created invisible if it's being implicitly enabled through
	 * dependency.  An invisible css is made visible when the userland
	 * explicitly enables it.
	 */
	for_each_subsys(ss, ssid) {
		if (!(enable & (1 << ssid)))
			continue;

		cgroup_for_each_live_child(child, cgrp) {
			if (css_enable & (1 << ssid))
				ret = create_css(child, ss,
					cgrp->subtree_control & (1 << ssid));
			else
				ret = cgroup_populate_dir(child, 1 << ssid);
			if (ret)
				goto err_undo_css;
		}
	}

	/*
	 * At this point, cgroup_e_css() results reflect the new csses
	 * making the following cgroup_update_dfl_csses() properly update
	 * css associations of all tasks in the subtree.
	 */
	ret = cgroup_update_dfl_csses(cgrp);
	if (ret)
		goto err_undo_css;

	/*
	 * All tasks are migrated out of disabled csses.  Kill or hide
	 * them.  A css is hidden when the userland requests it to be
	 * disabled while other subsystems are still depending on it.  The
	 * css must not actively control resources and be in the vanilla
	 * state if it's made visible again later.  Controllers which may
	 * be depended upon should provide ->css_reset() for this purpose.
	 */
	for_each_subsys(ss, ssid) {
		if (!(disable & (1 << ssid)))
			continue;

		cgroup_for_each_live_child(child, cgrp) {
			struct cgroup_subsys_state *css = cgroup_css(child, ss);

			if (css_disable & (1 << ssid)) {
				kill_css(css);
			} else {
				cgroup_clear_dir(child, 1 << ssid);
				if (ss->css_reset)
					ss->css_reset(css);
			}
		}
	}

	/*
	 * The effective csses of all the descendants (excluding @cgrp) may
	 * have changed.  Subsystems can optionally subscribe to this event
	 * by implementing ->css_e_css_changed() which is invoked if any of
	 * the effective csses seen from the css's cgroup may have changed.
	 */
	for_each_subsys(ss, ssid) {
		struct cgroup_subsys_state *this_css = cgroup_css(cgrp, ss);
		struct cgroup_subsys_state *css;

		if (!ss->css_e_css_changed || !this_css)
			continue;

		css_for_each_descendant_pre(css, this_css)
			if (css != this_css)
				ss->css_e_css_changed(css);
	}

	kernfs_activate(cgrp->kn);
	ret = 0;
out_unlock:
	cgroup_kn_unlock(of->kn);
	return ret ?: nbytes;

err_undo_css:
	cgrp->subtree_control = old_sc;
	cgrp->child_subsys_mask = old_ss;

	for_each_subsys(ss, ssid) {
		if (!(enable & (1 << ssid)))
			continue;

		cgroup_for_each_live_child(child, cgrp) {
			struct cgroup_subsys_state *css = cgroup_css(child, ss);

			if (!css)
				continue;

			if (css_enable & (1 << ssid))
				kill_css(css);
			else
				cgroup_clear_dir(child, 1 << ssid);
		}
	}
	goto out_unlock;
}

static int cgroup_populated_show(struct seq_file *seq, void *v)
{
	seq_printf(seq, "%d\n", (bool)seq_css(seq)->cgroup->populated_cnt);
	return 0;
}

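/*
 * kernfs write callback for cgroup files: dispatch to the cftype's
 * ->write() if present; otherwise parse the buffer as an integer for the
 * ->write_u64()/->write_s64() handlers.
 */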
static ssize_t cgroup_file_write(struct kernfs_open_file *of, char *buf,
				 size_t nbytes, loff_t off)
{
	struct cgroup *cgrp = of->kn->parent->priv;
	struct cftype *cft = of->kn->priv;
	struct cgroup_subsys_state *css;
	int ret;

	if (cft->write)
		return cft->write(of, buf, nbytes, off);

	/*
	 * kernfs guarantees that a file isn't deleted with operations in
	 * flight, which means that the matching css is and stays alive and
	 * doesn't need to be pinned.  The RCU locking is not necessary
	 * either.  It's just for the convenience of using cgroup_css().
	 */
	rcu_read_lock();
	css = cgroup_css(cgrp, cft->ss);
	rcu_read_unlock();

	if (cft->write_u64) {
		unsigned long long v;
		ret = kstrtoull(buf, 0, &v);
		if (!ret)
			ret = cft->write_u64(css, cft, v);
	} else if (cft->write_s64) {
		long long v;
		ret = kstrtoll(buf, 0, &v);
		if (!ret)
			ret = cft->write_s64(css, cft, v);
	} else {
		ret = -EINVAL;
	}

	return ret ?: nbytes;
}

static void *cgroup_seqfile_start(struct seq_file *seq, loff_t *ppos)
{
	return seq_cft(seq)->seq_start(seq, ppos);
}

static void *cgroup_seqfile_next(struct seq_file *seq, void *v, loff_t *ppos)
{
	return seq_cft(seq)->seq_next(seq, v, ppos);
}

static void cgroup_seqfile_stop(struct seq_file *seq, void *v)
{
	seq_cft(seq)->seq_stop(seq, v);
}

static int cgroup_seqfile_show(struct seq_file *m, void *arg)
{
	struct cftype *cft = seq_cft(m);
	struct cgroup_subsys_state *css = seq_css(m);

	if (cft->seq_show)
		return cft->seq_show(m, arg);

	if (cft->read_u64)
		seq_printf(m, "%llu\n", cft->read_u64(css, cft));
	else if (cft->read_s64)
		seq_printf(m, "%lld\n", cft->read_s64(css, cft));
	else
		return -EINVAL;
	return 0;
}

static struct kernfs_ops cgroup_kf_single_ops = {
	.atomic_write_len	= PAGE_SIZE,
	.write			= cgroup_file_write,
	.seq_show		= cgroup_seqfile_show,
};

static struct kernfs_ops cgroup_kf_ops = {
	.atomic_write_len	= PAGE_SIZE,
	.write			= cgroup_file_write,
	.seq_start		= cgroup_seqfile_start,
	.seq_next		= cgroup_seqfile_next,
	.seq_stop		= cgroup_seqfile_stop,
	.seq_show		= cgroup_seqfile_show,
};

/*
 * cgroup_rename - Only allow simple rename of directories in place.
 */
static int cgroup_rename(struct kernfs_node *kn, struct kernfs_node *new_parent,
			 const char *new_name_str)
{
	struct cgroup *cgrp = kn->priv;
	int ret;

	if (kernfs_type(kn) != KERNFS_DIR)
		return -ENOTDIR;
	if (kn->parent != new_parent)
		return -EIO;

	/*
	 * This isn't a proper migration and its usefulness is very
	 * limited.  Disallow on the default hierarchy.
	 */
	if (cgroup_on_dfl(cgrp))
		return -EPERM;

	/*
	 * We're gonna grab cgroup_mutex which nests outside kernfs
	 * active_ref.  kernfs_rename() doesn't require active_ref
	 * protection.  Break them before grabbing cgroup_mutex.
	 */
	kernfs_break_active_protection(new_parent);
	kernfs_break_active_protection(kn);

	mutex_lock(&cgroup_mutex);

	ret = kernfs_rename(kn, new_parent, new_name_str);

	mutex_unlock(&cgroup_mutex);

	kernfs_unbreak_active_protection(kn);
	kernfs_unbreak_active_protection(new_parent);
	return ret;
}

/* set uid and gid of cgroup dirs and files to that of the creator */
static int cgroup_kn_set_ugid(struct kernfs_node *kn)
{
	struct iattr iattr = { .ia_valid = ATTR_UID | ATTR_GID,
			       .ia_uid = current_fsuid(),
			       .ia_gid = current_fsgid(), };

	if (uid_eq(iattr.ia_uid, GLOBAL_ROOT_UID) &&
	    gid_eq(iattr.ia_gid, GLOBAL_ROOT_GID))
		return 0;

	return kernfs_setattr(kn, &iattr);
}

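/* create the kernfs file for @cft under @cgrp and remember special files */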
static int cgroup_add_file(struct cgroup *cgrp, struct cftype *cft)
{
	char name[CGROUP_FILE_NAME_MAX];
	struct kernfs_node *kn;
	struct lock_class_key *key = NULL;
	int ret;

#ifdef CONFIG_DEBUG_LOCK_ALLOC
	key = &cft->lockdep_key;
#endif
	kn = __kernfs_create_file(cgrp->kn, cgroup_file_name(cgrp, cft, name),
				  cgroup_file_mode(cft), 0, cft->kf_ops, cft,
				  NULL, key);
	if (IS_ERR(kn))
		return PTR_ERR(kn);

	ret = cgroup_kn_set_ugid(kn);
	if (ret) {
		kernfs_remove(kn);
		return ret;
	}

	if (cft->write == cgroup_procs_write)
		cgrp->procs_kn = kn;
	else if (cft->seq_show == cgroup_populated_show)
		cgrp->populated_kn = kn;
	return 0;
}

/**
 * cgroup_addrm_files - add or remove files to a cgroup directory
 * @cgrp: the target cgroup
 * @cfts: array of cftypes to be added
 * @is_add: whether to add or remove
 *
 * Depending on @is_add, add or remove files defined by @cfts on @cgrp.
 * For removals, this function never fails.  If addition fails, this
 * function doesn't remove files already added.  The caller is responsible
 * for cleaning up.
 */
static int cgroup_addrm_files(struct cgroup *cgrp, struct cftype cfts[],
			      bool is_add)
{
	struct cftype *cft;
	int ret;

	lockdep_assert_held(&cgroup_mutex);

	for (cft = cfts; cft->name[0] != '\0'; cft++) {
		/* does cft->flags tell us to skip this file on @cgrp? */
		if ((cft->flags & __CFTYPE_ONLY_ON_DFL) && !cgroup_on_dfl(cgrp))
			continue;
		if ((cft->flags & __CFTYPE_NOT_ON_DFL) && cgroup_on_dfl(cgrp))
			continue;
		if ((cft->flags & CFTYPE_NOT_ON_ROOT) && !cgroup_parent(cgrp))
			continue;
		if ((cft->flags & CFTYPE_ONLY_ON_ROOT) && cgroup_parent(cgrp))
			continue;

		if (is_add) {
			ret = cgroup_add_file(cgrp, cft);
			if (ret) {
				pr_warn("%s: failed to add %s, err=%d\n",
					__func__, cft->name, ret);
				return ret;
			}
		} else {
			cgroup_rm_file(cgrp, cft);
		}
	}
	return 0;
}

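/* add or remove @cfts' files in every existing cgroup of @cfts' hierarchy */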
static int cgroup_apply_cftypes(struct cftype *cfts, bool is_add)
{
	LIST_HEAD(pending);
	struct cgroup_subsys *ss = cfts[0].ss;
	struct cgroup *root = &ss->root->cgrp;
	struct cgroup_subsys_state *css;
	int ret = 0;

	lockdep_assert_held(&cgroup_mutex);

	/* add/rm files for all cgroups created before */
	css_for_each_descendant_pre(css, cgroup_css(root, ss)) {
		struct cgroup *cgrp = css->cgroup;

		if (cgroup_is_dead(cgrp))
			continue;

		ret = cgroup_addrm_files(cgrp, cfts, is_add);
		if (ret)
			break;
	}

	if (is_add && !ret)
		kernfs_activate(root->kn);
	return ret;
}

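/* undo cgroup_init_cftypes(): free copied kf_ops and clear the back-pointers */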
static void cgroup_exit_cftypes(struct cftype *cfts)
{
	struct cftype *cft;

	for (cft = cfts; cft->name[0] != '\0'; cft++) {
		/* free copy for custom atomic_write_len, see init_cftypes() */
		if (cft->max_write_len && cft->max_write_len != PAGE_SIZE)
			kfree(cft->kf_ops);
		cft->kf_ops = NULL;
		cft->ss = NULL;

		/* revert flags set by cgroup core while adding @cfts */
		cft->flags &= ~(__CFTYPE_ONLY_ON_DFL | __CFTYPE_NOT_ON_DFL);
	}
}

static int cgroup_init_cftypes(struct cgroup_subsys *ss, struct cftype *cfts)
{
	struct cftype *cft;

	for (cft = cfts; cft->name[0] != '\0'; cft++) {
		struct kernfs_ops *kf_ops;

		WARN_ON(cft->ss || cft->kf_ops);

		if (cft->seq_start)
			kf_ops = &cgroup_kf_ops;
		else
			kf_ops = &cgroup_kf_single_ops;

		/*
		 * Ugh... if @cft wants a custom max_write_len, we need to
		 * make a copy of kf_ops to set its atomic_write_len.
		 */
		if (cft->max_write_len && cft->max_write_len != PAGE_SIZE) {
			kf_ops = kmemdup(kf_ops, sizeof(*kf_ops), GFP_KERNEL);
			if (!kf_ops) {
				cgroup_exit_cftypes(cfts);
				return -ENOMEM;
			}
			kf_ops->atomic_write_len = cft->max_write_len;
		}

		cft->kf_ops = kf_ops;
		cft->ss = ss;
	}

	return 0;
}

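/* like cgroup_rm_cftypes() but the caller must already hold cgroup_mutex */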
static int cgroup_rm_cftypes_locked(struct cftype *cfts)
{
	lockdep_assert_held(&cgroup_mutex);

	if (!cfts || !cfts[0].ss)
		return -ENOENT;

	list_del(&cfts->node);
	cgroup_apply_cftypes(cfts, false);
	cgroup_exit_cftypes(cfts);
	return 0;
}

/**
 * cgroup_rm_cftypes - remove an array of cftypes from a subsystem
 * @cfts: zero-length name terminated array of cftypes
 *
 * Unregister @cfts.  Files described by @cfts are removed from all
 * existing cgroups and all future cgroups won't have them either.  This
 * function can be called anytime whether @cfts' subsys is attached or not.
 *
 * Returns 0 on successful unregistration, -ENOENT if @cfts is not
 * registered.
 */
int cgroup_rm_cftypes(struct cftype *cfts)
{
	int ret;

	mutex_lock(&cgroup_mutex);
	ret = cgroup_rm_cftypes_locked(cfts);
	mutex_unlock(&cgroup_mutex);
	return ret;
}

/**
 * cgroup_add_cftypes - add an array of cftypes to a subsystem
 * @ss: target cgroup subsystem
 * @cfts: zero-length name terminated array of cftypes
 *
 * Register @cfts to @ss.  Files described by @cfts are created for all
 * existing cgroups to which @ss is attached and all future cgroups will
 * have them too.  This function can be called anytime whether @ss is
 * attached or not.
 *
 * Returns 0 on successful registration, -errno on failure.  Note that this
 * function currently returns 0 as long as @cfts registration is successful
 * even if some file creation attempts on existing cgroups fail.
 */
static int cgroup_add_cftypes(struct cgroup_subsys *ss, struct cftype *cfts)
{
	int ret;

	if (ss->disabled)
		return 0;

	if (!cfts || cfts[0].name[0] == '\0')
		return 0;

	ret = cgroup_init_cftypes(ss, cfts);
	if (ret)
		return ret;

	mutex_lock(&cgroup_mutex);

	list_add_tail(&cfts->node, &ss->cfts);
	ret = cgroup_apply_cftypes(cfts, true);
	if (ret)
		cgroup_rm_cftypes_locked(cfts);

	mutex_unlock(&cgroup_mutex);
	return ret;
}

/**
 * cgroup_add_dfl_cftypes - add an array of cftypes for default hierarchy
 * @ss: target cgroup subsystem
 * @cfts: zero-length name terminated array of cftypes
 *
 * Similar to cgroup_add_cftypes() but the added files are only used for
 * the default hierarchy.
 */
int cgroup_add_dfl_cftypes(struct cgroup_subsys *ss, struct cftype *cfts)
{
	struct cftype *cft;

	for (cft = cfts; cft && cft->name[0] != '\0'; cft++)
		cft->flags |= __CFTYPE_ONLY_ON_DFL;
	return cgroup_add_cftypes(ss, cfts);
}

/**
 * cgroup_add_legacy_cftypes - add an array of cftypes for legacy hierarchies
 * @ss: target cgroup subsystem
 * @cfts: zero-length name terminated array of cftypes
 *
 * Similar to cgroup_add_cftypes() but the added files are only used for
 * the legacy hierarchies.
 */
int cgroup_add_legacy_cftypes(struct cgroup_subsys *ss, struct cftype *cfts)
{
	struct cftype *cft;

	/*
	 * If legacy_files_on_dfl, we want to show the legacy files on the
	 * dfl hierarchy but iff the target subsystem hasn't been updated
	 * for the dfl hierarchy yet.
	 */
	if (!cgroup_legacy_files_on_dfl ||
	    ss->dfl_cftypes != ss->legacy_cftypes) {
		for (cft = cfts; cft && cft->name[0] != '\0'; cft++)
			cft->flags |= __CFTYPE_NOT_ON_DFL;
	}

	return cgroup_add_cftypes(ss, cfts);
}

/**
 * cgroup_task_count - count the number of tasks in a cgroup.
 * @cgrp: the cgroup in question
 *
 * Return the number of tasks in the cgroup.
 */
static int cgroup_task_count(const struct cgroup *cgrp)
{
	int count = 0;
	struct cgrp_cset_link *link;

	down_read(&css_set_rwsem);
	list_for_each_entry(link, &cgrp->cset_links, cset_link)
		count += atomic_read(&link->cset->refcount);
	up_read(&css_set_rwsem);
	return count;
}

/**
 * css_next_child - find the next child of a given css
 * @pos: the current position (%NULL to initiate traversal)
 * @parent: css whose children to walk
 *
 * This function returns the next child of @parent and should be called
 * under either cgroup_mutex or RCU read lock.  The only requirement is
 * that @parent and @pos are accessible.  The next sibling is guaranteed to
 * be returned regardless of their states.
 *
 * If a subsystem synchronizes ->css_online() and the start of iteration, a
 * css which finished ->css_online() is guaranteed to be visible in the
 * future iterations and will stay visible until the last reference is put.
 * A css which hasn't finished ->css_online() or already finished
 * ->css_offline() may show up during traversal.  It's each subsystem's
 * responsibility to synchronize against on/offlining.
 */
struct cgroup_subsys_state *css_next_child(struct cgroup_subsys_state *pos,
					   struct cgroup_subsys_state *parent)
{
	struct cgroup_subsys_state *next;

	cgroup_assert_mutex_or_rcu_locked();

	/*
	 * @pos could already have been unlinked from the sibling list.
	 * Once a cgroup is removed, its ->sibling.next is no longer
	 * updated when its next sibling changes.  CSS_RELEASED is set when
	 * @pos is taken off list, at which time its next pointer is valid,
	 * and, as releases are serialized, the one pointed to by the next
	 * pointer is guaranteed to not have started release yet.  This
	 * implies that if we observe !CSS_RELEASED on @pos in this RCU
	 * critical section, the one pointed to by its next pointer is
	 * guaranteed to not have finished its RCU grace period even if we
	 * have dropped rcu_read_lock() inbetween iterations.
	 *
	 * If @pos has CSS_RELEASED set, its next pointer can't be
	 * dereferenced; however, as each css is given a monotonically
	 * increasing unique serial number and always appended to the
	 * sibling list, the next one can be found by walking the parent's
	 * children until the first css with higher serial number than
	 * @pos's.  While this path can be slower, it happens iff iteration
	 * races against release and the race window is very small.
	 */
	if (!pos) {
		next = list_entry_rcu(parent->children.next, struct cgroup_subsys_state, sibling);
	} else if (likely(!(pos->flags & CSS_RELEASED))) {
		next = list_entry_rcu(pos->sibling.next, struct cgroup_subsys_state, sibling);
	} else {
		list_for_each_entry_rcu(next, &parent->children, sibling)
			if (next->serial_nr > pos->serial_nr)
				break;
	}

	/*
	 * @next, if not pointing to the head, can be dereferenced and is
	 * the next sibling.
	 */
	if (&next->sibling != &parent->children)
		return next;
	return NULL;
}
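
/*
 * Illustrative only (not part of the original file): a typical caller
 * walks the children through the css_for_each_child() wrapper, which is
 * built on css_next_child().  process_child() is a hypothetical callback:
 *
 *	struct cgroup_subsys_state *child;
 *
 *	rcu_read_lock();
 *	css_for_each_child(child, parent)
 *		process_child(child);
 *	rcu_read_unlock();
 */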

/**
 * css_next_descendant_pre - find the next descendant for pre-order walk
 * @pos: the current position (%NULL to initiate traversal)
 * @root: css whose descendants to walk
 *
 * To be used by css_for_each_descendant_pre().  Find the next descendant
 * to visit for pre-order traversal of @root's descendants.  @root is
 * included in the iteration and the first node to be visited.
 *
 * While this function requires cgroup_mutex or RCU read locking, it
 * doesn't require the whole traversal to be contained in a single critical
 * section.  This function will return the correct next descendant as long
 * as both @pos and @root are accessible and @pos is a descendant of @root.
 *
 * If a subsystem synchronizes ->css_online() and the start of iteration, a
 * css which finished ->css_online() is guaranteed to be visible in the
 * future iterations and will stay visible until the last reference is put.
 * A css which hasn't finished ->css_online() or already finished
 * ->css_offline() may show up during traversal.  It's each subsystem's
 * responsibility to synchronize against on/offlining.
 */
struct cgroup_subsys_state *
css_next_descendant_pre(struct cgroup_subsys_state *pos,
			struct cgroup_subsys_state *root)
{
	struct cgroup_subsys_state *next;

	cgroup_assert_mutex_or_rcu_locked();

	/* if first iteration, visit @root */
	if (!pos)
		return root;

	/* visit the first child if exists */
	next = css_next_child(NULL, pos);
	if (next)
		return next;

	/* no child, visit my or the closest ancestor's next sibling */
	while (pos != root) {
		next = css_next_child(pos, pos->parent);
		if (next)
			return next;
		pos = pos->parent;
	}

	return NULL;
}
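
/*
 * Illustrative only: a pre-order walk over @root's subtree via
 * css_for_each_descendant_pre(), using css_rightmost_descendant() (below)
 * to skip the current subtree, where should_skip() is a hypothetical
 * predicate:
 *
 *	struct cgroup_subsys_state *pos;
 *
 *	rcu_read_lock();
 *	css_for_each_descendant_pre(pos, root)
 *		if (should_skip(pos))
 *			pos = css_rightmost_descendant(pos);
 *	rcu_read_unlock();
 */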

/**
 * css_rightmost_descendant - return the rightmost descendant of a css
 * @pos: css of interest
 *
 * Return the rightmost descendant of @pos.  If there's no descendant, @pos
 * is returned.  This can be used during pre-order traversal to skip
 * subtree of @pos.
 *
 * While this function requires cgroup_mutex or RCU read locking, it
 * doesn't require the whole traversal to be contained in a single critical
 * section.  This function will return the correct rightmost descendant as
 * long as @pos is accessible.
 */
struct cgroup_subsys_state *
css_rightmost_descendant(struct cgroup_subsys_state *pos)
{
	struct cgroup_subsys_state *last, *tmp;

	cgroup_assert_mutex_or_rcu_locked();

	do {
		last = pos;
		/* ->prev isn't RCU safe, walk ->next till the end */
		pos = NULL;
		css_for_each_child(tmp, last)
			pos = tmp;
	} while (pos);

	return last;
}

static struct cgroup_subsys_state *
css_leftmost_descendant(struct cgroup_subsys_state *pos)
{
	struct cgroup_subsys_state *last;

	do {
		last = pos;
		pos = css_next_child(NULL, pos);
	} while (pos);

	return last;
}

/**
 * css_next_descendant_post - find the next descendant for post-order walk
 * @pos: the current position (%NULL to initiate traversal)
 * @root: css whose descendants to walk
 *
 * To be used by css_for_each_descendant_post().  Find the next descendant
 * to visit for post-order traversal of @root's descendants.  @root is
 * included in the iteration and the last node to be visited.
 *
 * While this function requires cgroup_mutex or RCU read locking, it
 * doesn't require the whole traversal to be contained in a single critical
 * section.  This function will return the correct next descendant as long
 * as both @pos and @root are accessible and @pos is a descendant of
 * @root.
 *
 * If a subsystem synchronizes ->css_online() and the start of iteration, a
 * css which finished ->css_online() is guaranteed to be visible in the
 * future iterations and will stay visible until the last reference is put.
 * A css which hasn't finished ->css_online() or already finished
 * ->css_offline() may show up during traversal.  It's each subsystem's
 * responsibility to synchronize against on/offlining.
 */
struct cgroup_subsys_state *
css_next_descendant_post(struct cgroup_subsys_state *pos,
			 struct cgroup_subsys_state *root)
{
	struct cgroup_subsys_state *next;

	cgroup_assert_mutex_or_rcu_locked();

	/* if first iteration, visit leftmost descendant which may be @root */
	if (!pos)
		return css_leftmost_descendant(root);

	/* if we visited @root, we're done */
	if (pos == root)
		return NULL;

	/* if there's an unvisited sibling, visit its leftmost descendant */
	next = css_next_child(pos, pos->parent);
	if (next)
		return css_leftmost_descendant(next);

	/* no sibling left, visit parent */
	return pos->parent;
}
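
/*
 * Illustrative only: a post-order walk visits every child before its
 * parent, which suits bottom-up teardown.  With a hypothetical visit()
 * callback:
 *
 *	struct cgroup_subsys_state *pos;
 *
 *	rcu_read_lock();
 *	css_for_each_descendant_post(pos, root)
 *		visit(pos);
 *	rcu_read_unlock();
 */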

/**
 * css_has_online_children - does a css have online children
 * @css: the target css
 *
 * Returns %true if @css has any online children; otherwise, %false.  This
 * function can be called from any context but the caller is responsible
 * for synchronizing against on/offlining as necessary.
 */
bool css_has_online_children(struct cgroup_subsys_state *css)
{
	struct cgroup_subsys_state *child;
	bool ret = false;

	rcu_read_lock();
	css_for_each_child(child, css) {
		if (child->flags & CSS_ONLINE) {
			ret = true;
			break;
		}
	}
	rcu_read_unlock();
	return ret;
}

/**
 * css_advance_task_iter - advance a task iterator to the next css_set
 * @it: the iterator to advance
 *
 * Advance @it to the next css_set to walk.
 */
static void css_advance_task_iter(struct css_task_iter *it)
{
	struct list_head *l = it->cset_pos;
	struct cgrp_cset_link *link;
	struct css_set *cset;

	/* Advance to the next non-empty css_set */
	do {
		l = l->next;
		if (l == it->cset_head) {
			it->cset_pos = NULL;
			return;
		}

		if (it->ss) {
			cset = container_of(l, struct css_set,
					    e_cset_node[it->ss->id]);
		} else {
			link = list_entry(l, struct cgrp_cset_link, cset_link);
			cset = link->cset;
		}
	} while (list_empty(&cset->tasks) && list_empty(&cset->mg_tasks));

	it->cset_pos = l;

	if (!list_empty(&cset->tasks))
		it->task_pos = cset->tasks.next;
	else
		it->task_pos = cset->mg_tasks.next;

	it->tasks_head = &cset->tasks;
	it->mg_tasks_head = &cset->mg_tasks;
}

/**
 * css_task_iter_start - initiate task iteration
 * @css: the css to walk tasks of
 * @it: the task iterator to use
 *
 * Initiate iteration through the tasks of @css.  The caller can call
 * css_task_iter_next() to walk through the tasks until the function
 * returns NULL.  On completion of iteration, css_task_iter_end() must be
 * called.
 *
 * Note that this function acquires a lock which is released when the
 * iteration finishes.  The caller can't sleep while iteration is in
 * progress.
 */
void css_task_iter_start(struct cgroup_subsys_state *css,
			 struct css_task_iter *it)
	__acquires(css_set_rwsem)
{
	/* no one should try to iterate before mounting cgroups */
	WARN_ON_ONCE(!use_task_css_set_links);

	down_read(&css_set_rwsem);

	it->ss = css->ss;

	if (it->ss)
		it->cset_pos = &css->cgroup->e_csets[css->ss->id];
	else
		it->cset_pos = &css->cgroup->cset_links;

	it->cset_head = it->cset_pos;

	css_advance_task_iter(it);
}

/**
 * css_task_iter_next - return the next task for the iterator
 * @it: the task iterator being iterated
 *
 * The "next" function for task iteration.  @it should have been
 * initialized via css_task_iter_start().  Returns NULL when the iteration
 * reaches the end.
 */
struct task_struct *css_task_iter_next(struct css_task_iter *it)
{
	struct task_struct *res;
	struct list_head *l = it->task_pos;

	/* If the iterator's cset position is NULL, we have no tasks */
	if (!it->cset_pos)
		return NULL;
	res = list_entry(l, struct task_struct, cg_list);

	/*
	 * Advance iterator to find next entry.  cset->tasks is consumed
	 * first and then ->mg_tasks.  After ->mg_tasks, we move onto the
	 * next cset.
	 */
	l = l->next;

	if (l == it->tasks_head)
		l = it->mg_tasks_head->next;

	if (l == it->mg_tasks_head)
		css_advance_task_iter(it);
	else
		it->task_pos = l;

	return res;
}

/**
 * css_task_iter_end - finish task iteration
 * @it: the task iterator to finish
 *
 * Finish task iteration started by css_task_iter_start().
 */
void css_task_iter_end(struct css_task_iter *it)
	__releases(css_set_rwsem)
{
	up_read(&css_set_rwsem);
}
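
/*
 * Illustrative only: the three functions above are used together as
 * below (cgroup_transfer_tasks() and pidlist_array_load() in this file
 * are real callers).  css_set_rwsem is read-held for the whole walk, so
 * the loop body must not sleep:
 *
 *	struct css_task_iter it;
 *	struct task_struct *task;
 *	int nr_tasks = 0;
 *
 *	css_task_iter_start(css, &it);
 *	while ((task = css_task_iter_next(&it)))
 *		nr_tasks++;
 *	css_task_iter_end(&it);
 */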

/**
 * cgroup_transfer_tasks - move tasks from one cgroup to another
 * @to: cgroup to which the tasks will be moved
 * @from: cgroup in which the tasks currently reside
 *
 * Locking rules between cgroup_post_fork() and the migration path
 * guarantee that, if a task is forking while being migrated, the new child
 * is guaranteed to be either visible in the source cgroup after the
 * parent's migration is complete or put into the target cgroup.  No task
 * can slip out of migration through forking.
 */
int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from)
{
	LIST_HEAD(preloaded_csets);
	struct cgrp_cset_link *link;
	struct css_task_iter it;
	struct task_struct *task;
	int ret;

	mutex_lock(&cgroup_mutex);

	/* all tasks in @from are being moved, all csets are source */
	down_read(&css_set_rwsem);
	list_for_each_entry(link, &from->cset_links, cset_link)
		cgroup_migrate_add_src(link->cset, to, &preloaded_csets);
	up_read(&css_set_rwsem);

	ret = cgroup_migrate_prepare_dst(to, &preloaded_csets);
	if (ret)
		goto out_err;

	/*
	 * Migrate tasks one-by-one until @from is empty.  This fails iff
	 * ->can_attach() fails.
	 */
	do {
		css_task_iter_start(&from->self, &it);
		task = css_task_iter_next(&it);
		if (task)
			get_task_struct(task);
		css_task_iter_end(&it);

		if (task) {
			ret = cgroup_migrate(to, task, false);
			put_task_struct(task);
		}
	} while (task && !ret);
out_err:
	cgroup_migrate_finish(&preloaded_csets);
	mutex_unlock(&cgroup_mutex);
	return ret;
}

/*
 * Stuff for reading the 'tasks'/'procs' files.
 *
 * Reading this file can return large amounts of data if a cgroup has
 * *lots* of attached tasks. So it may need several calls to read(),
 * but we cannot guarantee that the information we produce is correct
 * unless we produce it entirely atomically.
 *
 */

/* which pidlist file are we talking about? */
enum cgroup_filetype {
	CGROUP_FILE_PROCS,
	CGROUP_FILE_TASKS,
};

/*
 * A pidlist is a list of pids that virtually represents the contents of one
 * of the cgroup files ("procs" or "tasks"). We keep a list of such pidlists,
 * a pair (one each for procs, tasks) for each pid namespace that's relevant
 * to the cgroup.
 */
struct cgroup_pidlist {
	/*
	 * used to find which pidlist is wanted. doesn't change as long as
	 * this particular list stays in the list.
	 */
	struct { enum cgroup_filetype type; struct pid_namespace *ns; } key;
	/* array of xids */
	pid_t *list;
	/* how many elements the above list has */
	int length;
	/* each of these stored in a list by its cgroup */
	struct list_head links;
	/* pointer to the cgroup we belong to, for list removal purposes */
	struct cgroup *owner;
	/* for delayed destruction */
	struct delayed_work destroy_dwork;
};

/*
 * The following two functions "fix" the issue where there are more pids
 * than kmalloc will give memory for; in such cases, we use vmalloc/vfree.
 * TODO: replace with a kernel-wide solution to this problem
 */
#define PIDLIST_TOO_LARGE(c) ((c) * sizeof(pid_t) > (PAGE_SIZE * 2))
static void *pidlist_allocate(int count)
{
	if (PIDLIST_TOO_LARGE(count))
		return vmalloc(count * sizeof(pid_t));
	else
		return kmalloc(count * sizeof(pid_t), GFP_KERNEL);
}

static void pidlist_free(void *p)
{
	kvfree(p);
}

/*
 * Used to destroy all lingering pidlists waiting for the destroy timer.  None
 * should be left afterwards.
 */
static void cgroup_pidlist_destroy_all(struct cgroup *cgrp)
{
	struct cgroup_pidlist *l, *tmp_l;

	mutex_lock(&cgrp->pidlist_mutex);
	list_for_each_entry_safe(l, tmp_l, &cgrp->pidlists, links)
		mod_delayed_work(cgroup_pidlist_destroy_wq, &l->destroy_dwork, 0);
	mutex_unlock(&cgrp->pidlist_mutex);

	flush_workqueue(cgroup_pidlist_destroy_wq);
	BUG_ON(!list_empty(&cgrp->pidlists));
}

static void cgroup_pidlist_destroy_work_fn(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct cgroup_pidlist *l = container_of(dwork, struct cgroup_pidlist,
						destroy_dwork);
	struct cgroup_pidlist *tofree = NULL;

	mutex_lock(&l->owner->pidlist_mutex);

	/*
	 * Destroy iff we didn't get queued again.  The state won't change
	 * as destroy_dwork can only be queued while locked.
	 */
	if (!delayed_work_pending(dwork)) {
		list_del(&l->links);
		pidlist_free(l->list);
		put_pid_ns(l->key.ns);
		tofree = l;
	}

	mutex_unlock(&l->owner->pidlist_mutex);
	kfree(tofree);
}

/*
 * pidlist_uniq - given a kmalloc()ed list, strip out all duplicate entries
 * Returns the number of unique elements.
 */
static int pidlist_uniq(pid_t *list, int length)
{
	int src, dest = 1;

	/*
	 * we presume the 0th element is unique, so src starts at 1. trivial
	 * edge cases first; no work needs to be done for either
	 */
	if (length == 0 || length == 1)
		return length;
	/* src and dest walk down the list; dest counts unique elements */
	for (src = 1; src < length; src++) {
		/* find next unique element */
		while (list[src] == list[src-1]) {
			src++;
			if (src == length)
				goto after;
		}
		/* dest always points to where the next unique element goes */
		list[dest] = list[src];
		dest++;
	}
after:
	return dest;
}
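
/*
 * Illustrative only: pidlist_uniq() compacts a pre-sorted array in
 * place.  Given list = {3, 3, 5, 7, 7} and length = 5, it rewrites the
 * array to start with {3, 5, 7} and returns 3; only the first three
 * entries are meaningful afterwards.
 */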

/*
 * The two pid files - tasks and cgroup.procs - guaranteed that the result
 * is sorted, which forced this whole pidlist fiasco.  As pid order is
 * different per namespace, each namespace needs a differently sorted list,
 * making it impossible to use, for example, a single rbtree of member tasks
 * sorted by task pointer.  As pidlists can be fairly large, allocating one
 * per open file is dangerous, so cgroup had to implement a shared pool of
 * pidlists keyed by cgroup and namespace.
 *
 * All this extra complexity was caused by the original implementation
 * committing to an entirely unnecessary property.  In the long term, we
 * want to do away with it.  Explicitly scramble sort order if on the
 * default hierarchy so that no such expectation exists in the new
 * interface.
 *
 * Scrambling is done by swapping every two consecutive bits, which is a
 * non-identity one-to-one mapping which disturbs sort order sufficiently.
 */
static pid_t pid_fry(pid_t pid)
{
	unsigned a = pid & 0x55555555;
	unsigned b = pid & 0xAAAAAAAA;

	return (a << 1) | (b >> 1);
}
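
/*
 * Illustrative only: pid_fry() swaps each pair of adjacent bits, so
 * pid_fry(1) == 2, pid_fry(2) == 1 and pid_fry(3) == 3.  The mapping is
 * its own inverse: pid_fry(pid_fry(pid)) == pid.
 */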

static pid_t cgroup_pid_fry(struct cgroup *cgrp, pid_t pid)
{
	if (cgroup_on_dfl(cgrp))
		return pid_fry(pid);
	else
		return pid;
}

static int cmppid(const void *a, const void *b)
{
	return *(pid_t *)a - *(pid_t *)b;
}

static int fried_cmppid(const void *a, const void *b)
{
	return pid_fry(*(pid_t *)a) - pid_fry(*(pid_t *)b);
}
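
/*
 * Illustrative only: fried_cmppid() lets sort() order pids by their
 * scrambled value, so pidlists on the default hierarchy end up sorted
 * by pid_fry(pid) rather than by raw pid (see pidlist_array_load()
 * below).
 */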

static struct cgroup_pidlist *cgroup_pidlist_find(struct cgroup *cgrp,
						  enum cgroup_filetype type)
{
	struct cgroup_pidlist *l;
	/* don't need task_nsproxy() if we're looking at ourself */
	struct pid_namespace *ns = task_active_pid_ns(current);

	lockdep_assert_held(&cgrp->pidlist_mutex);

	list_for_each_entry(l, &cgrp->pidlists, links)
		if (l->key.type == type && l->key.ns == ns)
			return l;
	return NULL;
}

/*
 * Find the appropriate pidlist for our purpose (given procs vs tasks),
 * creating a new one if none is found.  The caller must already hold
 * cgrp->pidlist_mutex; returns NULL if we're out of memory.
 */
static struct cgroup_pidlist *cgroup_pidlist_find_create(struct cgroup *cgrp,
						enum cgroup_filetype type)
{
	struct cgroup_pidlist *l;

	lockdep_assert_held(&cgrp->pidlist_mutex);

	l = cgroup_pidlist_find(cgrp, type);
	if (l)
		return l;

	/* entry not found; create a new one */
	l = kzalloc(sizeof(struct cgroup_pidlist), GFP_KERNEL);
	if (!l)
		return l;

	INIT_DELAYED_WORK(&l->destroy_dwork, cgroup_pidlist_destroy_work_fn);
	l->key.type = type;
	/* don't need task_nsproxy() if we're looking at ourself */
	l->key.ns = get_pid_ns(task_active_pid_ns(current));
	l->owner = cgrp;
	list_add(&l->links, &cgrp->pidlists);
	return l;
}

/*
 * Load a cgroup's pidarray with either procs' tgids or tasks' pids
 */
static int pidlist_array_load(struct cgroup *cgrp, enum cgroup_filetype type,
			      struct cgroup_pidlist **lp)
{
	pid_t *array;
	int length;
	int pid, n = 0; /* used for populating the array */
	struct css_task_iter it;
	struct task_struct *tsk;
	struct cgroup_pidlist *l;

	lockdep_assert_held(&cgrp->pidlist_mutex);

	/*
	 * If cgroup gets more users after we read count, we won't have
	 * enough space - tough.  This race is indistinguishable to the
	 * caller from the case that the additional cgroup users didn't
	 * show up until sometime later on.
	 */
	length = cgroup_task_count(cgrp);
	array = pidlist_allocate(length);
	if (!array)
		return -ENOMEM;
	/* now, populate the array */
	css_task_iter_start(&cgrp->self, &it);
	while ((tsk = css_task_iter_next(&it))) {
		if (unlikely(n == length))
			break;
		/* get tgid or pid for procs or tasks file respectively */
		if (type == CGROUP_FILE_PROCS)
			pid = task_tgid_vnr(tsk);
		else
			pid = task_pid_vnr(tsk);
		if (pid > 0) /* make sure to only use valid results */
			array[n++] = pid;
	}
	css_task_iter_end(&it);
	length = n;
	/* now sort & (if procs) strip out duplicates */
	if (cgroup_on_dfl(cgrp))
		sort(array, length, sizeof(pid_t), fried_cmppid, NULL);
	else
		sort(array, length, sizeof(pid_t), cmppid, NULL);
	if (type == CGROUP_FILE_PROCS)
		length = pidlist_uniq(array, length);

	l = cgroup_pidlist_find_create(cgrp, type);
	if (!l) {
		pidlist_free(array);
		return -ENOMEM;
	}

	/* store array, freeing old if necessary */
	pidlist_free(l->list);
	l->list = array;
	l->length = length;
	*lp = l;
	return 0;
}

/**
 * cgroupstats_build - build and fill cgroupstats
 * @stats: cgroupstats to fill information into
 * @dentry: A dentry entry belonging to the cgroup for which stats have
 * been requested.
 *
 * Build and fill cgroupstats so that taskstats can export it to user
 * space.
 */
int cgroupstats_build(struct cgroupstats *stats, struct dentry *dentry)
{
	struct kernfs_node *kn = kernfs_node_from_dentry(dentry);
	struct cgroup *cgrp;
	struct css_task_iter it;
	struct task_struct *tsk;

	/* it should be kernfs_node belonging to cgroupfs and is a directory */
	if (dentry->d_sb->s_type != &cgroup_fs_type || !kn ||
	    kernfs_type(kn) != KERNFS_DIR)
		return -EINVAL;

	mutex_lock(&cgroup_mutex);

	/*
	 * We aren't being called from kernfs and there's no guarantee on
	 * @kn->priv's validity.  For this and css_tryget_online_from_dir(),
	 * @kn->priv is RCU safe.  Let's do the RCU dancing.
	 */
	rcu_read_lock();
	cgrp = rcu_dereference(kn->priv);
	if (!cgrp || cgroup_is_dead(cgrp)) {
		rcu_read_unlock();
		mutex_unlock(&cgroup_mutex);
		return -ENOENT;
	}
	rcu_read_unlock();

	css_task_iter_start(&cgrp->self, &it);
	while ((tsk = css_task_iter_next(&it))) {
		switch (tsk->state) {
		case TASK_RUNNING:
			stats->nr_running++;
			break;
		case TASK_INTERRUPTIBLE:
			stats->nr_sleeping++;
			break;
		case TASK_UNINTERRUPTIBLE:
			stats->nr_uninterruptible++;
			break;
		case TASK_STOPPED:
			stats->nr_stopped++;
			break;
		default:
			if (delayacct_is_task_waiting_on_io(tsk))
				stats->nr_io_wait++;
			break;
		}
	}
	css_task_iter_end(&it);

	mutex_unlock(&cgroup_mutex);
	return 0;
}

/*
 * seq_file methods for the tasks/procs files. The seq_file position is the
 * next pid to display; the seq_file iterator is a pointer to the pid
 * in the cgroup->l->list array.
 */

static void *cgroup_pidlist_start(struct seq_file *s, loff_t *pos)
{
	/*
	 * Initially we receive a position value that corresponds to
	 * one more than the last pid shown (or 0 on the first call or
	 * after a seek to the start). Use a binary-search to find the
	 * next pid to display, if any
	 */
	struct kernfs_open_file *of = s->private;
	struct cgroup *cgrp = seq_css(s)->cgroup;
	struct cgroup_pidlist *l;
	enum cgroup_filetype type = seq_cft(s)->private;
	int index = 0, pid = *pos;
	int *iter, ret;

	mutex_lock(&cgrp->pidlist_mutex);

	/*
	 * !NULL @of->priv indicates that this isn't the first start()
	 * after open.  If the matching pidlist is around, we can use that.
	 * Look for it.  Note that @of->priv can't be used directly.  It
	 * could already have been destroyed.
	 */
	if (of->priv)
		of->priv = cgroup_pidlist_find(cgrp, type);

	/*
	 * Either this is the first start() after open or the matching
	 * pidlist has been destroyed in between.  Create a new one.
	 */
	if (!of->priv) {
		ret = pidlist_array_load(cgrp, type,
					 (struct cgroup_pidlist **)&of->priv);
		if (ret)
			return ERR_PTR(ret);
	}
	l = of->priv;

	if (pid) {
		int end = l->length;

		while (index < end) {
			int mid = (index + end) / 2;
			if (cgroup_pid_fry(cgrp, l->list[mid]) == pid) {
				index = mid;
				break;
			} else if (cgroup_pid_fry(cgrp, l->list[mid]) <= pid)
				index = mid + 1;
			else
				end = mid;
		}
	}
	/* If we're off the end of the array, we're done */
	if (index >= l->length)
		return NULL;
	/* Update the abstract position to be the actual pid that we found */
	iter = l->list + index;
	*pos = cgroup_pid_fry(cgrp, *iter);
	return iter;
}

static void cgroup_pidlist_stop(struct seq_file *s, void *v)
{
	struct kernfs_open_file *of = s->private;
	struct cgroup_pidlist *l = of->priv;

	if (l)
		mod_delayed_work(cgroup_pidlist_destroy_wq, &l->destroy_dwork,
				 CGROUP_PIDLIST_DESTROY_DELAY);
	mutex_unlock(&seq_css(s)->cgroup->pidlist_mutex);
}

static void *cgroup_pidlist_next(struct seq_file *s, void *v, loff_t *pos)
{
	struct kernfs_open_file *of = s->private;
	struct cgroup_pidlist *l = of->priv;
	pid_t *p = v;
	pid_t *end = l->list + l->length;
	/*
	 * Advance to the next pid in the array. If this goes off the
	 * end, we're done
	 */
	p++;
	if (p >= end) {
		return NULL;
	} else {
		*pos = cgroup_pid_fry(seq_css(s)->cgroup, *p);
		return p;
	}
}

static int cgroup_pidlist_show(struct seq_file *s, void *v)
{
	seq_printf(s, "%d\n", *(int *)v);

	return 0;
}

static u64 cgroup_read_notify_on_release(struct cgroup_subsys_state *css,
					 struct cftype *cft)
{
	return notify_on_release(css->cgroup);
}

static int cgroup_write_notify_on_release(struct cgroup_subsys_state *css,
					  struct cftype *cft, u64 val)
{
	if (val)
		set_bit(CGRP_NOTIFY_ON_RELEASE, &css->cgroup->flags);
	else
		clear_bit(CGRP_NOTIFY_ON_RELEASE, &css->cgroup->flags);
	return 0;
}

static u64 cgroup_clone_children_read(struct cgroup_subsys_state *css,
				      struct cftype *cft)
{
	return test_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags);
}

static int cgroup_clone_children_write(struct cgroup_subsys_state *css,
				       struct cftype *cft, u64 val)
{
	if (val)
		set_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags);
	else
		clear_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags);
	return 0;
}

/* cgroup core interface files for the default hierarchy */
static struct cftype cgroup_dfl_base_files[] = {
	{
		.name = "cgroup.procs",
		.seq_start = cgroup_pidlist_start,
		.seq_next = cgroup_pidlist_next,
		.seq_stop = cgroup_pidlist_stop,
		.seq_show = cgroup_pidlist_show,
		.private = CGROUP_FILE_PROCS,
		.write = cgroup_procs_write,
		.mode = S_IRUGO | S_IWUSR,
	},
	{
		.name = "cgroup.controllers",
		.flags = CFTYPE_ONLY_ON_ROOT,
		.seq_show = cgroup_root_controllers_show,
	},
	{
		.name = "cgroup.controllers",
		.flags = CFTYPE_NOT_ON_ROOT,
		.seq_show = cgroup_controllers_show,
	},
	{
		.name = "cgroup.subtree_control",
		.seq_show = cgroup_subtree_control_show,
		.write = cgroup_subtree_control_write,
	},
	{
		.name = "cgroup.populated",
		.flags = CFTYPE_NOT_ON_ROOT,
		.seq_show = cgroup_populated_show,
	},
	{ }	/* terminate */
};

/* cgroup core interface files for the legacy hierarchies */
static struct cftype cgroup_legacy_base_files[] = {
	{
		.name = "cgroup.procs",
		.seq_start = cgroup_pidlist_start,
		.seq_next = cgroup_pidlist_next,
		.seq_stop = cgroup_pidlist_stop,
		.seq_show = cgroup_pidlist_show,
		.private = CGROUP_FILE_PROCS,
		.write = cgroup_procs_write,
		.mode = S_IRUGO | S_IWUSR,
	},
	{
		.name = "cgroup.clone_children",
		.read_u64 = cgroup_clone_children_read,
		.write_u64 = cgroup_clone_children_write,
	},
	{
		.name = "cgroup.sane_behavior",
		.flags = CFTYPE_ONLY_ON_ROOT,
		.seq_show = cgroup_sane_behavior_show,
	},
	{
		.name = "tasks",
		.seq_start = cgroup_pidlist_start,
		.seq_next = cgroup_pidlist_next,
		.seq_stop = cgroup_pidlist_stop,
		.seq_show = cgroup_pidlist_show,
		.private = CGROUP_FILE_TASKS,
		.write = cgroup_tasks_write,
		.mode = S_IRUGO | S_IWUSR,
	},
	{
		.name = "notify_on_release",
		.read_u64 = cgroup_read_notify_on_release,
		.write_u64 = cgroup_write_notify_on_release,
	},
	{
		.name = "release_agent",
		.flags = CFTYPE_ONLY_ON_ROOT,
		.seq_show = cgroup_release_agent_show,
		.write = cgroup_release_agent_write,
		.max_write_len = PATH_MAX - 1,
	},
	{ }	/* terminate */
};

/**
 * cgroup_populate_dir - create subsys files in a cgroup directory
 * @cgrp: target cgroup
 * @subsys_mask: mask of the subsystem ids whose files should be added
 *
 * On failure, no file is added.
 */
static int cgroup_populate_dir(struct cgroup *cgrp, unsigned long subsys_mask)
{
	struct cgroup_subsys *ss;
	int i, ret = 0;

	/* process cftsets of each subsystem */
	for_each_subsys(ss, i) {
		struct cftype *cfts;

		if (!(subsys_mask & (1 << i)))
			continue;

		list_for_each_entry(cfts, &ss->cfts, node) {
			ret = cgroup_addrm_files(cgrp, cfts, true);
			if (ret < 0)
				goto err;
		}
	}
	return 0;
err:
	cgroup_clear_dir(cgrp, subsys_mask);
	return ret;
}

/*
 * css destruction is a four-stage process.
 *
 * 1. Destruction starts.  Killing of the percpu_ref is initiated.
 *    Implemented in kill_css().
 *
 * 2. When the percpu_ref is confirmed to be visible as killed on all CPUs
 *    and thus css_tryget_online() is guaranteed to fail, the css can be
 *    offlined by invoking offline_css().  After offlining, the base ref is
 *    put.  Implemented in css_killed_work_fn().
 *
 * 3. When the percpu_ref reaches zero, the only possible remaining
 *    accessors are inside RCU read sections.  css_release() schedules the
 *    RCU callback.
 *
 * 4. After the grace period, the css can be freed.  Implemented in
 *    css_free_work_fn().
 *
 * It is actually hairier because both steps 2 and 4 require process context
 * and thus involve punting to css->destroy_work, adding two additional
 * steps to the already complex sequence.
 */
static void css_free_work_fn(struct work_struct *work)
{
	struct cgroup_subsys_state *css =
		container_of(work, struct cgroup_subsys_state, destroy_work);
	struct cgroup_subsys *ss = css->ss;
	struct cgroup *cgrp = css->cgroup;

	percpu_ref_exit(&css->refcnt);

	if (ss) {
		/* css free path */
		int id = css->id;

		if (css->parent)
			css_put(css->parent);

		ss->css_free(css);
		cgroup_idr_remove(&ss->css_idr, id);
		cgroup_put(cgrp);
	} else {
		/* cgroup free path */
		atomic_dec(&cgrp->root->nr_cgrps);
		cgroup_pidlist_destroy_all(cgrp);
		cancel_work_sync(&cgrp->release_agent_work);

		if (cgroup_parent(cgrp)) {
			/*
			 * We get a ref to the parent, and put the ref when
			 * this cgroup is being freed, so it's guaranteed
			 * that the parent won't be destroyed before its
			 * children.
			 */
			cgroup_put(cgroup_parent(cgrp));
			kernfs_put(cgrp->kn);
			kfree(cgrp);
		} else {
			/*
			 * This is root cgroup's refcnt reaching zero,
			 * which indicates that the root should be
			 * released.
			 */
			cgroup_destroy_root(cgrp->root);
		}
	}
}

static void css_free_rcu_fn(struct rcu_head *rcu_head)
{
	struct cgroup_subsys_state *css =
		container_of(rcu_head, struct cgroup_subsys_state, rcu_head);

	INIT_WORK(&css->destroy_work, css_free_work_fn);
	queue_work(cgroup_destroy_wq, &css->destroy_work);
}

static void css_release_work_fn(struct work_struct *work)
{
	struct cgroup_subsys_state *css =
		container_of(work, struct cgroup_subsys_state, destroy_work);
	struct cgroup_subsys *ss = css->ss;
	struct cgroup *cgrp = css->cgroup;

	mutex_lock(&cgroup_mutex);

	css->flags |= CSS_RELEASED;
	list_del_rcu(&css->sibling);

	if (ss) {
		/* css release path */
		cgroup_idr_replace(&ss->css_idr, NULL, css->id);
		if (ss->css_released)
			ss->css_released(css);
	} else {
		/* cgroup release path */
		cgroup_idr_remove(&cgrp->root->cgroup_idr, cgrp->id);
		cgrp->id = -1;

		/*
		 * There are two control paths which try to determine
		 * cgroup from dentry without going through kernfs -
		 * cgroupstats_build() and css_tryget_online_from_dir().
		 * Those are supported by RCU protecting clearing of
		 * cgrp->kn->priv backpointer.
		 */
		RCU_INIT_POINTER(*(void __rcu __force **)&cgrp->kn->priv, NULL);
	}

	mutex_unlock(&cgroup_mutex);

	call_rcu(&css->rcu_head, css_free_rcu_fn);
}

static void css_release(struct percpu_ref *ref)
{
	struct cgroup_subsys_state *css =
		container_of(ref, struct cgroup_subsys_state, refcnt);

	INIT_WORK(&css->destroy_work, css_release_work_fn);
	queue_work(cgroup_destroy_wq, &css->destroy_work);
}

static void init_and_link_css(struct cgroup_subsys_state *css,
			      struct cgroup_subsys *ss, struct cgroup *cgrp)
{
	lockdep_assert_held(&cgroup_mutex);

	cgroup_get(cgrp);

	memset(css, 0, sizeof(*css));
	css->cgroup = cgrp;
	css->ss = ss;
	INIT_LIST_HEAD(&css->sibling);
	INIT_LIST_HEAD(&css->children);
	css->serial_nr = css_serial_nr_next++;

	if (cgroup_parent(cgrp)) {
		css->parent = cgroup_css(cgroup_parent(cgrp), ss);
		css_get(css->parent);
	}

	BUG_ON(cgroup_css(cgrp, ss));
}

/* invoke ->css_online() on a new CSS and mark it online if successful */
static int online_css(struct cgroup_subsys_state *css)
{
	struct cgroup_subsys *ss = css->ss;
	int ret = 0;

	lockdep_assert_held(&cgroup_mutex);

	if (ss->css_online)
		ret = ss->css_online(css);
	if (!ret) {
		css->flags |= CSS_ONLINE;
		rcu_assign_pointer(css->cgroup->subsys[ss->id], css);
	}
	return ret;
}

/* if the CSS is online, invoke ->css_offline() on it and mark it offline */
static void offline_css(struct cgroup_subsys_state *css)
{
	struct cgroup_subsys *ss = css->ss;

	lockdep_assert_held(&cgroup_mutex);

	if (!(css->flags & CSS_ONLINE))
		return;

	if (ss->css_offline)
		ss->css_offline(css);

	css->flags &= ~CSS_ONLINE;
	RCU_INIT_POINTER(css->cgroup->subsys[ss->id], NULL);

	wake_up_all(&css->cgroup->offline_waitq);
}

/**
 * create_css - create a cgroup_subsys_state
 * @cgrp: the cgroup new css will be associated with
 * @ss: the subsys of new css
 * @visible: whether to create control knobs for the new css or not
 *
 * Create a new css associated with @cgrp - @ss pair.  On success, the new
 * css is online and installed in @cgrp with all interface files created if
 * @visible.  Returns 0 on success, -errno on failure.
 */
static int create_css(struct cgroup *cgrp, struct cgroup_subsys *ss,
		      bool visible)
{
	struct cgroup *parent = cgroup_parent(cgrp);
	struct cgroup_subsys_state *parent_css = cgroup_css(parent, ss);
	struct cgroup_subsys_state *css;
	int err;

	lockdep_assert_held(&cgroup_mutex);

	css = ss->css_alloc(parent_css);
	if (IS_ERR(css))
		return PTR_ERR(css);

	init_and_link_css(css, ss, cgrp);

	err = percpu_ref_init(&css->refcnt, css_release, 0, GFP_KERNEL);
	if (err)
		goto err_free_css;

	err = cgroup_idr_alloc(&ss->css_idr, NULL, 2, 0, GFP_KERNEL);
	if (err < 0)
		goto err_free_percpu_ref;
	css->id = err;

	if (visible) {
		err = cgroup_populate_dir(cgrp, 1 << ss->id);
		if (err)
			goto err_free_id;
	}

	/* @css is ready to be brought online now, make it visible */
	list_add_tail_rcu(&css->sibling, &parent_css->children);
	cgroup_idr_replace(&ss->css_idr, css, css->id);

	err = online_css(css);
	if (err)
		goto err_list_del;

	if (ss->broken_hierarchy && !ss->warned_broken_hierarchy &&
	    cgroup_parent(parent)) {
		pr_warn("%s (%d) created nested cgroup for controller \"%s\" which has incomplete hierarchy support. Nested cgroups may change behavior in the future.\n",
			current->comm, current->pid, ss->name);
		if (!strcmp(ss->name, "memory"))
			pr_warn("\"memory\" requires setting use_hierarchy to 1 on the root\n");
		ss->warned_broken_hierarchy = true;
	}

	return 0;

err_list_del:
	list_del_rcu(&css->sibling);
	cgroup_clear_dir(css->cgroup, 1 << css->ss->id);
err_free_id:
	cgroup_idr_remove(&ss->css_idr, css->id);
err_free_percpu_ref:
	percpu_ref_exit(&css->refcnt);
err_free_css:
	call_rcu(&css->rcu_head, css_free_rcu_fn);
	return err;
}

static int cgroup_mkdir(struct kernfs_node *parent_kn, const char *name,
			umode_t mode)
{
	struct cgroup *parent, *cgrp;
	struct cgroup_root *root;
	struct cgroup_subsys *ss;
	struct kernfs_node *kn;
	struct cftype *base_files;
	int ssid, ret;

	/* Do not accept '\n' to prevent making /proc/<pid>/cgroup unparsable */
	if (strchr(name, '\n'))
		return -EINVAL;

	parent = cgroup_kn_lock_live(parent_kn);
	if (!parent)
		return -ENODEV;
	root = parent->root;

	/* allocate the cgroup and its ID, 0 is reserved for the root */
	cgrp = kzalloc(sizeof(*cgrp), GFP_KERNEL);
	if (!cgrp) {
		ret = -ENOMEM;
		goto out_unlock;
	}

	ret = percpu_ref_init(&cgrp->self.refcnt, css_release, 0, GFP_KERNEL);
	if (ret)
		goto out_free_cgrp;

	/*
	 * Temporarily set the pointer to NULL, so idr_find() won't return
	 * a half-baked cgroup.
	 */
	cgrp->id = cgroup_idr_alloc(&root->cgroup_idr, NULL, 2, 0, GFP_KERNEL);
	if (cgrp->id < 0) {
		ret = -ENOMEM;
		goto out_cancel_ref;
	}

	init_cgroup_housekeeping(cgrp);

	cgrp->self.parent = &parent->self;
	cgrp->root = root;

	if (notify_on_release(parent))
		set_bit(CGRP_NOTIFY_ON_RELEASE, &cgrp->flags);

	if (test_bit(CGRP_CPUSET_CLONE_CHILDREN, &parent->flags))
		set_bit(CGRP_CPUSET_CLONE_CHILDREN, &cgrp->flags);

	/* create the directory */
	kn = kernfs_create_dir(parent->kn, name, mode, cgrp);
	if (IS_ERR(kn)) {
		ret = PTR_ERR(kn);
		goto out_free_id;
	}
	cgrp->kn = kn;

	/*
	 * This extra ref will be put in cgroup_free_fn() and guarantees
	 * that @cgrp->kn is always accessible.
	 */
	kernfs_get(kn);

	cgrp->self.serial_nr = css_serial_nr_next++;

	/* allocation complete, commit to creation */
	list_add_tail_rcu(&cgrp->self.sibling, &cgroup_parent(cgrp)->self.children);
	atomic_inc(&root->nr_cgrps);
	cgroup_get(parent);

	/*
	 * @cgrp is now fully operational.  If something fails after this
	 * point, it'll be released via the normal destruction path.
	 */
	cgroup_idr_replace(&root->cgroup_idr, cgrp, cgrp->id);

	ret = cgroup_kn_set_ugid(kn);
	if (ret)
		goto out_destroy;

	if (cgroup_on_dfl(cgrp))
		base_files = cgroup_dfl_base_files;
	else
		base_files = cgroup_legacy_base_files;

	ret = cgroup_addrm_files(cgrp, base_files, true);
	if (ret)
		goto out_destroy;

	/* let's create and online css's */
	for_each_subsys(ss, ssid) {
		if (parent->child_subsys_mask & (1 << ssid)) {
			ret = create_css(cgrp, ss,
					 parent->subtree_control & (1 << ssid));
			if (ret)
				goto out_destroy;
		}
	}

	/*
	 * On the default hierarchy, a child doesn't automatically inherit
	 * subtree_control from the parent.  Each is configured manually.
	 */
	if (!cgroup_on_dfl(cgrp)) {
		cgrp->subtree_control = parent->subtree_control;
		cgroup_refresh_child_subsys_mask(cgrp);
	}

	kernfs_activate(kn);

	ret = 0;
	goto out_unlock;

out_free_id:
	cgroup_idr_remove(&root->cgroup_idr, cgrp->id);
out_cancel_ref:
	percpu_ref_exit(&cgrp->self.refcnt);
out_free_cgrp:
	kfree(cgrp);
out_unlock:
	cgroup_kn_unlock(parent_kn);
	return ret;

out_destroy:
	cgroup_destroy_locked(cgrp);
	goto out_unlock;
}

/*
 * This is called when the refcnt of a css is confirmed to be killed.
 * css_tryget_online() is now guaranteed to fail.  Tell the subsystem to
 * initiate destruction and put the css ref from kill_css().
 */
static void css_killed_work_fn(struct work_struct *work)
{
	struct cgroup_subsys_state *css =
		container_of(work, struct cgroup_subsys_state, destroy_work);

	mutex_lock(&cgroup_mutex);
	offline_css(css);
	mutex_unlock(&cgroup_mutex);

	css_put(css);
}

/* css kill confirmation processing requires process context, bounce */
static void css_killed_ref_fn(struct percpu_ref *ref)
{
	struct cgroup_subsys_state *css =
		container_of(ref, struct cgroup_subsys_state, refcnt);

	INIT_WORK(&css->destroy_work, css_killed_work_fn);
	queue_work(cgroup_destroy_wq, &css->destroy_work);
}

/**
 * kill_css - destroy a css
 * @css: css to destroy
 *
 * This function initiates destruction of @css by removing cgroup interface
 * files and putting its base reference.  ->css_offline() will be invoked
 * asynchronously once css_tryget_online() is guaranteed to fail and when
 * the reference count reaches zero, @css will be released.
 */
static void kill_css(struct cgroup_subsys_state *css)
{
	lockdep_assert_held(&cgroup_mutex);

	/*
	 * This must happen before css is disassociated with its cgroup.
	 * See seq_css() for details.
	 */
	cgroup_clear_dir(css->cgroup, 1 << css->ss->id);

	/*
	 * Killing would put the base ref, but we need to keep it alive
	 * until after ->css_offline().
	 */
	css_get(css);

	/*
	 * cgroup core guarantees that, by the time ->css_offline() is
	 * invoked, no new css reference will be given out via
	 * css_tryget_online().  We can't simply call percpu_ref_kill() and
	 * proceed to offlining css's because percpu_ref_kill() doesn't
	 * guarantee that the ref is seen as killed on all CPUs on return.
	 *
	 * Use percpu_ref_kill_and_confirm() to get notifications as each
	 * css is confirmed to be seen as killed on all CPUs.
	 */
	percpu_ref_kill_and_confirm(&css->refcnt, css_killed_ref_fn);
}

/**
 * cgroup_destroy_locked - the first stage of cgroup destruction
 * @cgrp: cgroup to be destroyed
 *
 * css's make use of percpu refcnts whose killing latency shouldn't be
 * exposed to userland and are RCU protected.  Also, cgroup core needs to
 * guarantee that css_tryget_online() won't succeed by the time
 * ->css_offline() is invoked.  To satisfy all the requirements,
 * destruction is implemented in the following two steps.
 *
 * s1. Verify @cgrp can be destroyed and mark it dying.  Remove all
 *     userland visible parts and start killing the percpu refcnts of
 *     css's.  Set up so that the next stage will be kicked off once all
 *     the percpu refcnts are confirmed to be killed.
 *
 * s2. Invoke ->css_offline(), mark the cgroup dead and proceed with the
 *     rest of destruction.  Once all cgroup references are gone, the
 *     cgroup is RCU-freed.
 *
 * This function implements s1.  After this step, @cgrp is gone as far as
 * the userland is concerned and a new cgroup with the same name may be
 * created.  As cgroup doesn't care about the names internally, this
 * doesn't cause any problem.
 */
static int cgroup_destroy_locked(struct cgroup *cgrp)
	__releases(&cgroup_mutex) __acquires(&cgroup_mutex)
{
	struct cgroup_subsys_state *css;
	bool empty;
	int ssid;

	lockdep_assert_held(&cgroup_mutex);

	/*
	 * css_set_rwsem synchronizes access to ->cset_links and prevents
	 * @cgrp from being removed while put_css_set() is in progress.
	 */
	down_read(&css_set_rwsem);
	empty = list_empty(&cgrp->cset_links);
	up_read(&css_set_rwsem);
	if (!empty)
		return -EBUSY;

	/*
	 * Make sure there are no live children.  We can't test emptiness of
	 * ->self.children as dead children linger on it while being
	 * drained; otherwise, "rmdir parent/child parent" may fail.
	 */
	if (css_has_online_children(&cgrp->self))
		return -EBUSY;

	/*
	 * Mark @cgrp dead.  This prevents further task migration and child
	 * creation by disabling cgroup_lock_live_group().
	 */
	cgrp->self.flags &= ~CSS_ONLINE;

	/* initiate massacre of all css's */
	for_each_css(css, ssid, cgrp)
		kill_css(css);

	/*
	 * Remove @cgrp directory along with the base files.  @cgrp has an
	 * extra ref on its kn.
	 */
	kernfs_remove(cgrp->kn);

	check_for_release(cgroup_parent(cgrp));

	/* put the base reference */
	percpu_ref_kill(&cgrp->self.refcnt);

	return 0;
}

static int cgroup_rmdir(struct kernfs_node *kn)
{
	struct cgroup *cgrp;
	int ret = 0;

	cgrp = cgroup_kn_lock_live(kn);
	if (!cgrp)
		return 0;

	ret = cgroup_destroy_locked(cgrp);

	cgroup_kn_unlock(kn);
	return ret;
}

static struct kernfs_syscall_ops cgroup_kf_syscall_ops = {
	.remount_fs		= cgroup_remount,
	.show_options		= cgroup_show_options,
	.mkdir			= cgroup_mkdir,
	.rmdir			= cgroup_rmdir,
	.rename			= cgroup_rename,
};

static void __init cgroup_init_subsys(struct cgroup_subsys *ss, bool early)
{
	struct cgroup_subsys_state *css;

	printk(KERN_INFO "Initializing cgroup subsys %s\n", ss->name);

	mutex_lock(&cgroup_mutex);

	idr_init(&ss->css_idr);
	INIT_LIST_HEAD(&ss->cfts);

	/* Create the root cgroup state for this subsystem */
	ss->root = &cgrp_dfl_root;
	css = ss->css_alloc(cgroup_css(&cgrp_dfl_root.cgrp, ss));
	/* We don't handle early failures gracefully */
	BUG_ON(IS_ERR(css));
	init_and_link_css(css, ss, &cgrp_dfl_root.cgrp);

	/*
	 * Root csses are never destroyed and we can't initialize
	 * percpu_ref during early init.  Disable refcnting.
	 */
	css->flags |= CSS_NO_REF;

	if (early) {
		/* allocation can't be done safely during early init */
		css->id = 1;
	} else {
		css->id = cgroup_idr_alloc(&ss->css_idr, css, 1, 2, GFP_KERNEL);
		BUG_ON(css->id < 0);
	}

	/* Update the init_css_set to contain a subsys
	 * pointer to this state - since the subsystem is
	 * newly registered, all tasks and hence the
	 * init_css_set is in the subsystem's root cgroup. */
	init_css_set.subsys[ss->id] = css;

	have_fork_callback |= (bool)ss->fork << ss->id;
	have_exit_callback |= (bool)ss->exit << ss->id;
	have_canfork_callback |= (bool)ss->can_fork << ss->id;

	/* At system boot, before all subsystems have been
	 * registered, no tasks have been forked, so we don't
	 * need to invoke fork callbacks here. */
	BUG_ON(!list_empty(&init_task.tasks));

	BUG_ON(online_css(css));

	mutex_unlock(&cgroup_mutex);
}

/**
 * cgroup_init_early - cgroup initialization at system boot
 *
 * Initialize cgroups at system boot, and initialize any
 * subsystems that request early init.
 */
int __init cgroup_init_early(void)
{
	static struct cgroup_sb_opts __initdata opts;
	struct cgroup_subsys *ss;
	int i;

	init_cgroup_root(&cgrp_dfl_root, &opts);
	cgrp_dfl_root.cgrp.self.flags |= CSS_NO_REF;

	RCU_INIT_POINTER(init_task.cgroups, &init_css_set);

	for_each_subsys(ss, i) {
		WARN(!ss->css_alloc || !ss->css_free || ss->name || ss->id,
		     "invalid cgroup_subsys %d:%s css_alloc=%p css_free=%p name:id=%d:%s\n",
		     i, cgroup_subsys_name[i], ss->css_alloc, ss->css_free,
		     ss->id, ss->name);
		WARN(strlen(cgroup_subsys_name[i]) > MAX_CGROUP_TYPE_NAMELEN,
		     "cgroup_subsys_name %s too long\n", cgroup_subsys_name[i]);

		ss->id = i;
		ss->name = cgroup_subsys_name[i];
		if (!ss->legacy_name)
			ss->legacy_name = cgroup_subsys_name[i];

		if (ss->early_init)
			cgroup_init_subsys(ss, true);
	}
	return 0;
}

/**
 * cgroup_init - cgroup initialization
 *
 * Register cgroup filesystem and /proc file, and initialize
 * any subsystems that didn't request early init.
 */
int __init cgroup_init(void)
{
	struct cgroup_subsys *ss;
	unsigned long key;
	int ssid, err;

	BUG_ON(percpu_init_rwsem(&cgroup_threadgroup_rwsem));
	BUG_ON(cgroup_init_cftypes(NULL, cgroup_dfl_base_files));
	BUG_ON(cgroup_init_cftypes(NULL, cgroup_legacy_base_files));

	mutex_lock(&cgroup_mutex);

	/* Add init_css_set to the hash table */
	key = css_set_hash(init_css_set.subsys);
	hash_add(css_set_table, &init_css_set.hlist, key);

	BUG_ON(cgroup_setup_root(&cgrp_dfl_root, 0));

	mutex_unlock(&cgroup_mutex);

	for_each_subsys(ss, ssid) {
		if (ss->early_init) {
			struct cgroup_subsys_state *css =
				init_css_set.subsys[ss->id];

			css->id = cgroup_idr_alloc(&ss->css_idr, css, 1, 2,
						   GFP_KERNEL);
			BUG_ON(css->id < 0);
		} else {
			cgroup_init_subsys(ss, false);
		}

		list_add_tail(&init_css_set.e_cset_node[ssid],
			      &cgrp_dfl_root.cgrp.e_csets[ssid]);

		/*
		 * Setting dfl_root subsys_mask needs to consider the
		 * disabled flag and cftype registration needs kmalloc,
		 * both of which aren't available during early_init.
		 */
		if (ss->disabled)
			continue;

		cgrp_dfl_root.subsys_mask |= 1 << ss->id;

		if (cgroup_legacy_files_on_dfl && !ss->dfl_cftypes)
			ss->dfl_cftypes = ss->legacy_cftypes;

		if (!ss->dfl_cftypes)
			cgrp_dfl_root_inhibit_ss_mask |= 1 << ss->id;

		if (ss->dfl_cftypes == ss->legacy_cftypes) {
			WARN_ON(cgroup_add_cftypes(ss, ss->dfl_cftypes));
		} else {
			WARN_ON(cgroup_add_dfl_cftypes(ss, ss->dfl_cftypes));
			WARN_ON(cgroup_add_legacy_cftypes(ss, ss->legacy_cftypes));
		}

		if (ss->bind)
			ss->bind(init_css_set.subsys[ssid]);
	}

	err = sysfs_create_mount_point(fs_kobj, "cgroup");
	if (err)
		return err;

	err = register_filesystem(&cgroup_fs_type);
	if (err < 0) {
		sysfs_remove_mount_point(fs_kobj, "cgroup");
		return err;
	}

	proc_create("cgroups", 0, NULL, &proc_cgroupstats_operations);
	return 0;
}
5096
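
/*
 * After cgroup_init(), userspace can mount hierarchies itself.  An
 * illustrative shell session (not kernel code; the mount points are
 * conventional, not mandated):
 *
 *	mount -t tmpfs cgroup_root /sys/fs/cgroup
 *	mkdir /sys/fs/cgroup/cpu
 *	mount -t cgroup -o cpu,cpuacct none /sys/fs/cgroup/cpu
 */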

static int __init cgroup_wq_init(void)
{
	/*
	 * There isn't much point in executing the destruction path in
	 * parallel.  A good chunk of it is serialized with cgroup_mutex
	 * anyway.  Use 1 for @max_active.
	 *
	 * We would prefer to do this in cgroup_init() above, but that
	 * is called before init_workqueues(); so leave this until after.
	 */
	cgroup_destroy_wq = alloc_workqueue("cgroup_destroy", 0, 1);
	BUG_ON(!cgroup_destroy_wq);

	/*
	 * Used to destroy pidlists and separate to serve as flush domain.
	 * Cap @max_active to 1 too.
	 */
	cgroup_pidlist_destroy_wq = alloc_workqueue("cgroup_pidlist_destroy",
						    0, 1);
	BUG_ON(!cgroup_pidlist_destroy_wq);

	return 0;
}
core_initcall(cgroup_wq_init);

/*
 * proc_cgroup_show()
 *  - Print task's cgroup paths into seq_file, one line for each hierarchy
 *  - Used for /proc/<pid>/cgroup.
 */
int proc_cgroup_show(struct seq_file *m, struct pid_namespace *ns,
		     struct pid *pid, struct task_struct *tsk)
{
	char *buf, *path;
	int retval;
	struct cgroup_root *root;

	retval = -ENOMEM;
	buf = kmalloc(PATH_MAX, GFP_KERNEL);
	if (!buf)
		goto out;

	mutex_lock(&cgroup_mutex);
	down_read(&css_set_rwsem);

	for_each_root(root) {
		struct cgroup_subsys *ss;
		struct cgroup *cgrp;
		int ssid, count = 0;

		if (root == &cgrp_dfl_root && !cgrp_dfl_root_visible)
			continue;

		seq_printf(m, "%d:", root->hierarchy_id);
		if (root != &cgrp_dfl_root)
			for_each_subsys(ss, ssid)
				if (root->subsys_mask & (1 << ssid))
					seq_printf(m, "%s%s", count++ ? "," : "",
						   ss->legacy_name);
		if (strlen(root->name))
			seq_printf(m, "%sname=%s", count ? "," : "",
				   root->name);
		seq_putc(m, ':');
		cgrp = task_cgroup_from_root(tsk, root);
		path = cgroup_path(cgrp, buf, PATH_MAX);
		if (!path) {
			retval = -ENAMETOOLONG;
			goto out_unlock;
		}
		seq_puts(m, path);
		seq_putc(m, '\n');
	}

	retval = 0;
out_unlock:
	up_read(&css_set_rwsem);
	mutex_unlock(&cgroup_mutex);
	kfree(buf);
out:
	return retval;
}
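
/*
 * Illustrative output only: each line emitted above has the form
 * "hierarchy-id:controller-list:path", so reading /proc/self/cgroup
 * might yield something like
 *
 *	4:cpu,cpuacct:/user/1000
 *	2:memory:/user/1000
 *	1:name=systemd:/user.slice/user-1000.slice/session-1.scope
 *
 * where "name=" identifies a named hierarchy without bound controllers.
 */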

/* Display information about each subsystem and each hierarchy */
static int proc_cgroupstats_show(struct seq_file *m, void *v)
{
	struct cgroup_subsys *ss;
	int i;

	seq_puts(m, "#subsys_name\thierarchy\tnum_cgroups\tenabled\n");
	/*
	 * ideally we don't want subsystems moving around while we do this.
	 * cgroup_mutex is also necessary to guarantee an atomic snapshot of
	 * subsys/hierarchy state.
	 */
	mutex_lock(&cgroup_mutex);

	for_each_subsys(ss, i)
		seq_printf(m, "%s\t%d\t%d\t%d\n",
			   ss->legacy_name, ss->root->hierarchy_id,
			   atomic_read(&ss->root->nr_cgrps), !ss->disabled);

	mutex_unlock(&cgroup_mutex);
	return 0;
}
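
/*
 * An illustrative /proc/cgroups snapshot (exact values depend on the
 * kernel config and on which hierarchies are currently mounted):
 *
 *	#subsys_name	hierarchy	num_cgroups	enabled
 *	cpuset		2		1		1
 *	cpu		3		8		1
 *	memory		0		1		1
 */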

static int cgroupstats_open(struct inode *inode, struct file *file)
{
	return single_open(file, proc_cgroupstats_show, NULL);
}

static const struct file_operations proc_cgroupstats_operations = {
	.open = cgroupstats_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static void **subsys_canfork_priv_p(void *ss_priv[CGROUP_CANFORK_COUNT], int i)
{
	if (CGROUP_CANFORK_START <= i && i < CGROUP_CANFORK_END)
		return &ss_priv[i - CGROUP_CANFORK_START];
	return NULL;
}

static void *subsys_canfork_priv(void *ss_priv[CGROUP_CANFORK_COUNT], int i)
{
	void **private = subsys_canfork_priv_p(ss_priv, i);
	return private ? *private : NULL;
}
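
/*
 * Example, assuming the pids controller is the only can_fork user, so
 * that CGROUP_CANFORK_START == pids_cgrp_id and CGROUP_CANFORK_COUNT == 1:
 * subsys_canfork_priv_p(ss_priv, pids_cgrp_id) yields &ss_priv[0], while
 * any other subsystem id yields NULL.
 */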

/**
 * cgroup_fork - initialize cgroup related fields during copy_process()
 * @child: pointer to task_struct of the forked child process.
 *
 * A task is associated with the init_css_set until cgroup_post_fork()
 * attaches it to the parent's css_set.  An empty cg_list indicates that
 * @child isn't holding a reference to its css_set.
 */
void cgroup_fork(struct task_struct *child)
{
	RCU_INIT_POINTER(child->cgroups, &init_css_set);
	INIT_LIST_HEAD(&child->cg_list);
}

/**
 * cgroup_can_fork - called on a new task before the process is exposed
 * @child: the task in question.
 *
 * This calls the subsystem can_fork() callbacks. If the can_fork() callback
 * returns an error, the fork aborts with that error code. This allows for
 * a cgroup subsystem to conditionally allow or deny new forks.
 */
int cgroup_can_fork(struct task_struct *child,
		    void *ss_priv[CGROUP_CANFORK_COUNT])
{
	struct cgroup_subsys *ss;
	int i, j, ret;

	for_each_subsys_which(ss, i, &have_canfork_callback) {
		ret = ss->can_fork(child, subsys_canfork_priv_p(ss_priv, i));
		if (ret)
			goto out_revert;
	}

	return 0;

out_revert:
	for_each_subsys(ss, j) {
		if (j >= i)
			break;
		if (ss->cancel_fork)
			ss->cancel_fork(child, subsys_canfork_priv(ss_priv, j));
	}

	return ret;
}

/**
 * cgroup_cancel_fork - called if a fork failed after cgroup_can_fork()
 * @child: the task in question
 *
 * This calls the cancel_fork() callbacks if a fork failed *after*
 * cgroup_can_fork() succeeded.
 */
void cgroup_cancel_fork(struct task_struct *child,
			void *ss_priv[CGROUP_CANFORK_COUNT])
{
	struct cgroup_subsys *ss;
	int i;

	for_each_subsys(ss, i)
		if (ss->cancel_fork)
			ss->cancel_fork(child, subsys_canfork_priv(ss_priv, i));
}
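
/*
 * A rough sketch of how copy_process() drives cgroup_fork(),
 * cgroup_can_fork(), cgroup_cancel_fork() and cgroup_post_fork() (below);
 * simplified, with error handling elided:
 *
 *	cgroup_fork(p);                         // point at init_css_set
 *	retval = cgroup_can_fork(p, ss_priv);   // subsystems may veto
 *	if (retval)
 *		goto bad_fork;
 *	...                                     // p linked on tasklist
 *	cgroup_post_fork(p, ss_priv);           // join parent's css_set
 *	return p;
 * bad_fork:
 *	cgroup_cancel_fork(p, ss_priv);         // undo can_fork() state
 */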

/**
 * cgroup_post_fork - called on a new task after adding it to the task list
 * @child: the task in question
 *
 * Adds the task to the list running through its css_set if necessary and
 * calls the subsystem fork() callbacks.  Has to be after the task is
 * visible on the task list in case we race with the first call to
 * css_task_iter_start() - to guarantee that the new task ends up on its
 * list.
 */
void cgroup_post_fork(struct task_struct *child,
		      void *old_ss_priv[CGROUP_CANFORK_COUNT])
{
	struct cgroup_subsys *ss;
	int i;

	/*
	 * This may race against cgroup_enable_task_cg_lists().  As that
	 * function sets use_task_css_set_links before grabbing
	 * tasklist_lock and we just went through tasklist_lock to add
	 * @child, it's guaranteed that either we see the set
	 * use_task_css_set_links or cgroup_enable_task_cg_lists() sees
	 * @child during its iteration.
	 *
	 * If we won the race, @child is associated with %current's
	 * css_set.  Grabbing css_set_rwsem guarantees both that the
	 * association is stable, and, on completion of the parent's
	 * migration, @child is visible in the source of migration or
	 * already in the destination cgroup.  This guarantee is necessary
	 * when implementing operations which need to migrate all tasks of
	 * a cgroup to another.
	 *
	 * Note that if we lose to cgroup_enable_task_cg_lists(), @child
	 * will remain in init_css_set.  This is safe because all tasks are
	 * in the init_css_set before cg_links is enabled and there's no
	 * operation which transfers all tasks out of init_css_set.
	 */
	if (use_task_css_set_links) {
		struct css_set *cset;

		down_write(&css_set_rwsem);
		cset = task_css_set(current);
		if (list_empty(&child->cg_list)) {
			rcu_assign_pointer(child->cgroups, cset);
			list_add(&child->cg_list, &cset->tasks);
			get_css_set(cset);
		}
		up_write(&css_set_rwsem);
	}

	/*
	 * Call ss->fork().  This must happen after @child is linked on
	 * css_set; otherwise, @child might change state between ->fork()
	 * and addition to css_set.
	 */
	for_each_subsys_which(ss, i, &have_fork_callback)
		ss->fork(child, subsys_canfork_priv(old_ss_priv, i));
}

/**
 * cgroup_exit - detach cgroup from exiting task
 * @tsk: pointer to task_struct of exiting process
 *
 * Description: Detach cgroup from @tsk and release it.
 *
 * Note that cgroups marked notify_on_release force every task in
 * them to take the global cgroup_mutex mutex when exiting.
 * This could impact scaling on very large systems.  Be reluctant to
 * use notify_on_release cgroups where very high task exit scaling
 * is required on large systems.
 *
 * We set the exiting task's cgroup to the root cgroup (top_cgroup).  We
 * call cgroup_exit() while the task is still competent to handle
 * notify_on_release(), then leave the task attached to the root cgroup in
 * each hierarchy for the remainder of its exit.  No need to bother with
 * init_css_set refcnting.  init_css_set never goes away and we can't race
 * with migration path - PF_EXITING is visible to migration path.
 */
void cgroup_exit(struct task_struct *tsk)
{
	struct cgroup_subsys *ss;
	struct css_set *cset;
	bool put_cset = false;
	int i;

	/*
	 * Unlink @tsk from its css_set.  As migration path can't race
	 * with us, we can check cg_list without grabbing css_set_rwsem.
	 */
	if (!list_empty(&tsk->cg_list)) {
		down_write(&css_set_rwsem);
		list_del_init(&tsk->cg_list);
		up_write(&css_set_rwsem);
		put_cset = true;
	}

	/* Reassign the task to the init_css_set. */
	cset = task_css_set(tsk);
	RCU_INIT_POINTER(tsk->cgroups, &init_css_set);

	/* see cgroup_post_fork() for details */
	for_each_subsys_which(ss, i, &have_exit_callback) {
		struct cgroup_subsys_state *old_css = cset->subsys[i];
		struct cgroup_subsys_state *css = task_css(tsk, i);

		ss->exit(css, old_css, tsk);
	}

	if (put_cset)
		put_css_set(cset);
}

static void check_for_release(struct cgroup *cgrp)
{
	if (notify_on_release(cgrp) && !cgroup_has_tasks(cgrp) &&
	    !css_has_online_children(&cgrp->self) && !cgroup_is_dead(cgrp))
		schedule_work(&cgrp->release_agent_work);
}

/*
 * Notify userspace when a cgroup is released, by running the
 * configured release agent with the name of the cgroup (path
 * relative to the root of cgroup file system) as the argument.
 *
 * Most likely, this user command will try to rmdir this cgroup.
 *
 * This races with the possibility that some other task will be
 * attached to this cgroup before it is removed, or that some other
 * user task will 'mkdir' a child cgroup of this cgroup.  That's ok.
 * The presumed 'rmdir' will fail quietly if this cgroup is no longer
 * unused, and this cgroup will be reprieved from its death sentence,
 * to continue to serve a useful existence.  Next time it's released,
 * we will get notified again, if it still has 'notify_on_release' set.
 *
 * The final arg to call_usermodehelper() is UMH_WAIT_EXEC, which
 * means only wait until the task is successfully execve()'d.  The
 * separate release agent task is forked by call_usermodehelper(),
 * then control in this thread returns here, without waiting for the
 * release agent task.  We don't bother to wait because the caller of
 * this routine has no use for the exit status of the release agent
 * task, so no sense holding our caller up for that.
 */
static void cgroup_release_agent(struct work_struct *work)
{
	struct cgroup *cgrp =
		container_of(work, struct cgroup, release_agent_work);
	char *pathbuf = NULL, *agentbuf = NULL, *path;
	char *argv[3], *envp[3];

	mutex_lock(&cgroup_mutex);

	pathbuf = kmalloc(PATH_MAX, GFP_KERNEL);
	agentbuf = kstrdup(cgrp->root->release_agent_path, GFP_KERNEL);
	if (!pathbuf || !agentbuf)
		goto out;

	path = cgroup_path(cgrp, pathbuf, PATH_MAX);
	if (!path)
		goto out;

	argv[0] = agentbuf;
	argv[1] = path;
	argv[2] = NULL;

	/* minimal command environment */
	envp[0] = "HOME=/";
	envp[1] = "PATH=/sbin:/bin:/usr/sbin:/usr/bin";
	envp[2] = NULL;

	mutex_unlock(&cgroup_mutex);
	call_usermodehelper(argv[0], argv, envp, UMH_WAIT_EXEC);
	goto out_free;
out:
	mutex_unlock(&cgroup_mutex);
out_free:
	kfree(agentbuf);
	kfree(pathbuf);
}
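
/*
 * For illustration, a hypothetical minimal release agent (a userspace
 * script, not kernel code) that removes the now-empty cgroup:
 *
 *	#!/bin/sh
 *	rmdir "/sys/fs/cgroup/memory$1"
 *
 * assuming the corresponding hierarchy is mounted at
 * /sys/fs/cgroup/memory; $1 is the cgroup path passed as argv[1] above.
 */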

static int __init cgroup_disable(char *str)
{
	struct cgroup_subsys *ss;
	char *token;
	int i;

	while ((token = strsep(&str, ",")) != NULL) {
		if (!*token)
			continue;

		for_each_subsys(ss, i) {
			if (strcmp(token, ss->name) &&
			    strcmp(token, ss->legacy_name))
				continue;

			ss->disabled = 1;
			printk(KERN_INFO "Disabling %s control group subsystem\n",
			       ss->name);
			break;
		}
	}
	return 1;
}
__setup("cgroup_disable=", cgroup_disable);
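
/*
 * For example, booting with "cgroup_disable=memory,perf_event" disables
 * those controllers on all hierarchies; each token is matched against
 * both ss->name and ss->legacy_name.
 */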
static int __init cgroup_set_legacy_files_on_dfl(char *str)
{
	printk("cgroup: using legacy files on the default hierarchy\n");
	cgroup_legacy_files_on_dfl = true;
	return 0;
}
__setup("cgroup__DEVEL__legacy_files_on_dfl", cgroup_set_legacy_files_on_dfl);

/**
 * css_tryget_online_from_dir - get corresponding css from a cgroup dentry
 * @dentry: directory dentry of interest
 * @ss: subsystem of interest
 *
 * If @dentry is a directory for a cgroup which has @ss enabled on it, try
 * to get the corresponding css and return it.  If such css doesn't exist
 * or can't be pinned, an ERR_PTR value is returned.
 */
struct cgroup_subsys_state *css_tryget_online_from_dir(struct dentry *dentry,
						       struct cgroup_subsys *ss)
{
	struct kernfs_node *kn = kernfs_node_from_dentry(dentry);
	struct cgroup_subsys_state *css = NULL;
	struct cgroup *cgrp;

	/* is @dentry a cgroup dir? */
	if (dentry->d_sb->s_type != &cgroup_fs_type || !kn ||
	    kernfs_type(kn) != KERNFS_DIR)
		return ERR_PTR(-EBADF);

	rcu_read_lock();

	/*
	 * This path doesn't originate from kernfs and @kn could already
	 * have been or be removed at any point.  @kn->priv is RCU
	 * protected for this access.  See css_release_work_fn() for details.
	 */
	cgrp = rcu_dereference(kn->priv);
	if (cgrp)
		css = cgroup_css(cgrp, ss);

	if (!css || !css_tryget_online(css))
		css = ERR_PTR(-ENOENT);

	rcu_read_unlock();
	return css;
}
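
/*
 * Sketch of a typical caller resolving a cgroup directory fd into a css
 * (hypothetical, error handling abbreviated):
 *
 *	css = css_tryget_online_from_dir(f.file->f_path.dentry,
 *					 &perf_event_cgrp_subsys);
 *	if (IS_ERR(css))
 *		return PTR_ERR(css);
 *	...	// use css, then drop the reference with css_put(css)
 */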

/**
 * css_from_id - lookup css by id
 * @id: the cgroup id
 * @ss: cgroup subsys to be looked into
 *
 * Returns the css if there's valid one with @id, otherwise returns NULL.
 * Should be called under rcu_read_lock().
 */
struct cgroup_subsys_state *css_from_id(int id, struct cgroup_subsys *ss)
{
	WARN_ON_ONCE(!rcu_read_lock_held());
	return id > 0 ? idr_find(&ss->css_idr, id) : NULL;
}
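
/*
 * Hypothetical usage sketch: the caller must be inside an RCU read-side
 * section and must pin the css before using it outside of it:
 *
 *	rcu_read_lock();
 *	css = css_from_id(id, ss);
 *	if (css && !css_tryget_online(css))
 *		css = NULL;
 *	rcu_read_unlock();
 */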

#ifdef CONFIG_CGROUP_DEBUG
static struct cgroup_subsys_state *
debug_css_alloc(struct cgroup_subsys_state *parent_css)
{
	struct cgroup_subsys_state *css = kzalloc(sizeof(*css), GFP_KERNEL);

	if (!css)
		return ERR_PTR(-ENOMEM);

	return css;
}

static void debug_css_free(struct cgroup_subsys_state *css)
{
	kfree(css);
}

static u64 debug_taskcount_read(struct cgroup_subsys_state *css,
				struct cftype *cft)
{
	return cgroup_task_count(css->cgroup);
}

static u64 current_css_set_read(struct cgroup_subsys_state *css,
				struct cftype *cft)
{
	return (u64)(unsigned long)current->cgroups;
}

static u64 current_css_set_refcount_read(struct cgroup_subsys_state *css,
					 struct cftype *cft)
{
	u64 count;

	rcu_read_lock();
	count = atomic_read(&task_css_set(current)->refcount);
	rcu_read_unlock();
	return count;
}

static int current_css_set_cg_links_read(struct seq_file *seq, void *v)
{
	struct cgrp_cset_link *link;
	struct css_set *cset;
	char *name_buf;

	name_buf = kmalloc(NAME_MAX + 1, GFP_KERNEL);
	if (!name_buf)
		return -ENOMEM;

	down_read(&css_set_rwsem);
	rcu_read_lock();
	cset = rcu_dereference(current->cgroups);
	list_for_each_entry(link, &cset->cgrp_links, cgrp_link) {
		struct cgroup *c = link->cgrp;

		cgroup_name(c, name_buf, NAME_MAX + 1);
		seq_printf(seq, "Root %d group %s\n",
			   c->root->hierarchy_id, name_buf);
	}
	rcu_read_unlock();
	up_read(&css_set_rwsem);
	kfree(name_buf);
	return 0;
}

#define MAX_TASKS_SHOWN_PER_CSS 25
static int cgroup_css_links_read(struct seq_file *seq, void *v)
{
	struct cgroup_subsys_state *css = seq_css(seq);
	struct cgrp_cset_link *link;

	down_read(&css_set_rwsem);
	list_for_each_entry(link, &css->cgroup->cset_links, cset_link) {
		struct css_set *cset = link->cset;
		struct task_struct *task;
		int count = 0;

		seq_printf(seq, "css_set %p\n", cset);

		list_for_each_entry(task, &cset->tasks, cg_list) {
			if (count++ > MAX_TASKS_SHOWN_PER_CSS)
				goto overflow;
			seq_printf(seq, "  task %d\n", task_pid_vnr(task));
		}

		list_for_each_entry(task, &cset->mg_tasks, cg_list) {
			if (count++ > MAX_TASKS_SHOWN_PER_CSS)
				goto overflow;
			seq_printf(seq, "  task %d\n", task_pid_vnr(task));
		}
		continue;
	overflow:
		seq_puts(seq, "  ...\n");
	}
	up_read(&css_set_rwsem);
	return 0;
}

static u64 releasable_read(struct cgroup_subsys_state *css, struct cftype *cft)
{
	return (!cgroup_has_tasks(css->cgroup) &&
		!css_has_online_children(&css->cgroup->self));
}

static struct cftype debug_files[] =  {
	{
		.name = "taskcount",
		.read_u64 = debug_taskcount_read,
	},

	{
		.name = "current_css_set",
		.read_u64 = current_css_set_read,
	},

	{
		.name = "current_css_set_refcount",
		.read_u64 = current_css_set_refcount_read,
	},

	{
		.name = "current_css_set_cg_links",
		.seq_show = current_css_set_cg_links_read,
	},

	{
		.name = "cgroup_css_links",
		.seq_show = cgroup_css_links_read,
	},

	{
		.name = "releasable",
		.read_u64 = releasable_read,
	},

	{ }	/* terminate */
};
struct cgroup_subsys debug_cgrp_subsys = {
	.css_alloc = debug_css_alloc,
	.css_free = debug_css_free,
	.legacy_cftypes = debug_files,
};
#endif /* CONFIG_CGROUP_DEBUG */