/*
 *  Generic process-grouping system.
 *
 *  Based originally on the cpuset system, extracted by Paul Menage
 *  Copyright (C) 2006 Google, Inc
 *
 *  Notifications support
 *  Copyright (C) 2009 Nokia Corporation
 *  Author: Kirill A. Shutemov
 *
 *  Copyright notices from the original cpuset code:
 *  --------------------------------------------------
 *  Copyright (C) 2003 BULL SA.
 *  Copyright (C) 2004-2006 Silicon Graphics, Inc.
 *
 *  Portions derived from Patrick Mochel's sysfs code.
 *  sysfs is Copyright (c) 2001-3 Patrick Mochel
 *
 *  2003-10-10 Written by Simon Derr.
 *  2003-10-22 Updates by Stephen Hemminger.
 *  2004 May-July Rework by Paul Jackson.
 *  ---------------------------------------------------
 *
 *  This file is subject to the terms and conditions of the GNU General Public
 *  License.  See the file COPYING in the main directory of the Linux
 *  distribution for more details.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/cgroup.h>
#include <linux/cred.h>
#include <linux/ctype.h>
#include <linux/errno.h>
#include <linux/init_task.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/magic.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/mount.h>
#include <linux/pagemap.h>
#include <linux/proc_fs.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/rwsem.h>
#include <linux/string.h>
#include <linux/sort.h>
#include <linux/kmod.h>
#include <linux/delayacct.h>
#include <linux/cgroupstats.h>
#include <linux/hashtable.h>
#include <linux/pid_namespace.h>
#include <linux/idr.h>
#include <linux/vmalloc.h> /* TODO: replace with more sophisticated array */
#include <linux/kthread.h>
#include <linux/delay.h>

#include <linux/atomic.h>

/*
 * pidlists linger the following amount before being destroyed.  The goal
 * is avoiding frequent destruction in the middle of consecutive read
 * calls.  Expiring in the middle is a performance problem, not a
 * correctness one.  1 sec should be enough.
 */
#define CGROUP_PIDLIST_DESTROY_DELAY	HZ

#define CGROUP_FILE_NAME_MAX		(MAX_CGROUP_TYPE_NAMELEN +	\
					 MAX_CFTYPE_NAME + 2)
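
/*
 * A subsystem file name is "<subsys>.<cftype>", e.g. "memory.limit_in_bytes";
 * the "+ 2" above covers the '.' separator and the terminating NUL.
 */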

/*
 * cgroup_mutex is the master lock.  Any modification to cgroup or its
 * hierarchy must be performed while holding it.
 *
 * css_set_rwsem protects task->cgroups pointer, the list of css_set
 * objects, and the chain of tasks off each css_set.
 *
 * These locks are exported if CONFIG_PROVE_RCU so that accessors in
 * cgroup.h can use them for lockdep annotations.
 */
#ifdef CONFIG_PROVE_RCU
DEFINE_MUTEX(cgroup_mutex);
DECLARE_RWSEM(css_set_rwsem);
EXPORT_SYMBOL_GPL(cgroup_mutex);
EXPORT_SYMBOL_GPL(css_set_rwsem);
#else
static DEFINE_MUTEX(cgroup_mutex);
static DECLARE_RWSEM(css_set_rwsem);
#endif

/*
 * Protects cgroup_idr and css_idr so that IDs can be released without
 * grabbing cgroup_mutex.
 */
static DEFINE_SPINLOCK(cgroup_idr_lock);

/*
 * Protects cgroup_subsys->release_agent_path.  Modifying it also requires
 * cgroup_mutex.  Reading requires either cgroup_mutex or this spinlock.
 */
static DEFINE_SPINLOCK(release_agent_path_lock);

#define cgroup_assert_mutex_or_rcu_locked()				\
	rcu_lockdep_assert(rcu_read_lock_held() ||			\
			   lockdep_is_held(&cgroup_mutex),		\
			   "cgroup_mutex or RCU read lock required");

/*
 * cgroup destruction makes heavy use of work items and there can be a lot
 * of concurrent destructions.  Use a separate workqueue so that cgroup
 * destruction work items don't end up filling up max_active of system_wq
 * which may lead to deadlock.
 */
static struct workqueue_struct *cgroup_destroy_wq;

/*
 * pidlist destructions need to be flushed on cgroup destruction.  Use a
 * separate workqueue as flush domain.
 */
static struct workqueue_struct *cgroup_pidlist_destroy_wq;

/* generate an array of cgroup subsystem pointers */
#define SUBSYS(_x) [_x ## _cgrp_id] = &_x ## _cgrp_subsys,
static struct cgroup_subsys *cgroup_subsys[] = {
#include <linux/cgroup_subsys.h>
};
#undef SUBSYS

/* array of cgroup subsystem names */
#define SUBSYS(_x) [_x ## _cgrp_id] = #_x,
static const char *cgroup_subsys_name[] = {
#include <linux/cgroup_subsys.h>
};
#undef SUBSYS

/*
 * The default hierarchy, reserved for the subsystems that are otherwise
 * unattached - it never has more than a single cgroup, and all tasks are
 * part of that cgroup.
 */
struct cgroup_root cgrp_dfl_root;

/*
 * The default hierarchy always exists but is hidden until mounted for the
 * first time.  This is for backward compatibility.
 */
static bool cgrp_dfl_root_visible;

/*
 * Set by the boot param of the same name and makes subsystems with NULL
 * ->dfl_files use ->legacy_files on the default hierarchy.
 */
static bool cgroup_legacy_files_on_dfl;

/* some controllers are not supported in the default hierarchy */
static unsigned int cgrp_dfl_root_inhibit_ss_mask;

/* The list of hierarchy roots */

static LIST_HEAD(cgroup_roots);
static int cgroup_root_count;

/* hierarchy ID allocation and mapping, protected by cgroup_mutex */
static DEFINE_IDR(cgroup_hierarchy_idr);

/*
 * Assign a monotonically increasing serial number to csses.  It guarantees
 * cgroups with bigger numbers are newer than those with smaller numbers.
 * Also, as csses are always appended to the parent's ->children list, it
 * guarantees that sibling csses are always sorted in the ascending serial
 * number order on the list.  Protected by cgroup_mutex.
 */
static u64 css_serial_nr_next = 1;

/* This flag indicates whether tasks in the fork and exit paths should
 * check for fork/exit handlers to call. This avoids us having to do
 * extra work in the fork/exit path if none of the subsystems need to
 * be called.
 */
static int need_forkexit_callback __read_mostly;

static struct cftype cgroup_dfl_base_files[];
static struct cftype cgroup_legacy_base_files[];

static void cgroup_put(struct cgroup *cgrp);
static int rebind_subsystems(struct cgroup_root *dst_root,
			     unsigned int ss_mask);
static int cgroup_destroy_locked(struct cgroup *cgrp);
static int create_css(struct cgroup *cgrp, struct cgroup_subsys *ss,
		      bool visible);
static void css_release(struct percpu_ref *ref);
static void kill_css(struct cgroup_subsys_state *css);
static int cgroup_addrm_files(struct cgroup *cgrp, struct cftype cfts[],
			      bool is_add);
static void cgroup_pidlist_destroy_all(struct cgroup *cgrp);

/* IDR wrappers which synchronize using cgroup_idr_lock */
static int cgroup_idr_alloc(struct idr *idr, void *ptr, int start, int end,
			    gfp_t gfp_mask)
{
	int ret;

	idr_preload(gfp_mask);
	spin_lock_bh(&cgroup_idr_lock);
	ret = idr_alloc(idr, ptr, start, end, gfp_mask);
	spin_unlock_bh(&cgroup_idr_lock);
	idr_preload_end();
	return ret;
}

static void *cgroup_idr_replace(struct idr *idr, void *ptr, int id)
{
	void *ret;

	spin_lock_bh(&cgroup_idr_lock);
	ret = idr_replace(idr, ptr, id);
	spin_unlock_bh(&cgroup_idr_lock);
	return ret;
}

static void cgroup_idr_remove(struct idr *idr, int id)
{
	spin_lock_bh(&cgroup_idr_lock);
	idr_remove(idr, id);
	spin_unlock_bh(&cgroup_idr_lock);
}
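
/*
 * Example (taken from cgroup_setup_root() below): the root cgroup's ID is
 * allocated with
 *
 *	ret = cgroup_idr_alloc(&root->cgroup_idr, root_cgrp, 1, 2, GFP_NOWAIT);
 *
 * The BH-safe spinlock (rather than cgroup_mutex) lets IDs be released
 * from contexts which cannot grab the mutex.
 */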

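/* return the parent cgroup of @cgrp, or %NULL if @cgrp is the root */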
static struct cgroup *cgroup_parent(struct cgroup *cgrp)
{
	struct cgroup_subsys_state *parent_css = cgrp->self.parent;

	if (parent_css)
		return container_of(parent_css, struct cgroup, self);
	return NULL;
}

/**
 * cgroup_css - obtain a cgroup's css for the specified subsystem
 * @cgrp: the cgroup of interest
 * @ss: the subsystem of interest (%NULL returns @cgrp->self)
 *
 * Return @cgrp's css (cgroup_subsys_state) associated with @ss.  This
 * function must be called either under cgroup_mutex or rcu_read_lock() and
 * the caller is responsible for pinning the returned css if it wants to
 * keep accessing it outside the said locks.  This function may return
 * %NULL if @cgrp doesn't have @ss enabled.
 */
static struct cgroup_subsys_state *cgroup_css(struct cgroup *cgrp,
					      struct cgroup_subsys *ss)
{
	if (ss)
		return rcu_dereference_check(cgrp->subsys[ss->id],
					lockdep_is_held(&cgroup_mutex));
	else
		return &cgrp->self;
}

/**
 * cgroup_e_css - obtain a cgroup's effective css for the specified subsystem
 * @cgrp: the cgroup of interest
 * @ss: the subsystem of interest (%NULL returns @cgrp->self)
 *
 * Similar to cgroup_css() but returns the effective css, which is defined
 * as the matching css of the nearest ancestor including self which has @ss
 * enabled.  If @ss is associated with the hierarchy @cgrp is on, this
 * function is guaranteed to return non-NULL css.
 */
static struct cgroup_subsys_state *cgroup_e_css(struct cgroup *cgrp,
						struct cgroup_subsys *ss)
{
	lockdep_assert_held(&cgroup_mutex);

	if (!ss)
		return &cgrp->self;

	if (!(cgrp->root->subsys_mask & (1 << ss->id)))
		return NULL;

	while (cgroup_parent(cgrp) &&
	       !(cgroup_parent(cgrp)->child_subsys_mask & (1 << ss->id)))
		cgrp = cgroup_parent(cgrp);

	return cgroup_css(cgrp, ss);
}
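
/*
 * For example (illustrative, not from the original source): with "memory"
 * enabled in A's subtree_control but not in A/B's, a cgroup A/B/C has no
 * memory css of its own and cgroup_e_css(C, ss) walks up and returns A/B's
 * memory css - the nearest ancestor css with the controller enabled.
 */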

/* convenient tests for these bits */
static inline bool cgroup_is_dead(const struct cgroup *cgrp)
{
	return !(cgrp->self.flags & CSS_ONLINE);
}

struct cgroup_subsys_state *of_css(struct kernfs_open_file *of)
{
	struct cgroup *cgrp = of->kn->parent->priv;
	struct cftype *cft = of_cft(of);

	/*
	 * This is an open and unprotected implementation of cgroup_css().
	 * seq_css() is only called from a kernfs file operation which has
	 * an active reference on the file.  Because all the subsystem
	 * files are drained before a css is disassociated with a cgroup,
	 * the matching css from the cgroup's subsys table is guaranteed to
	 * be and stay valid until the enclosing operation is complete.
	 */
	if (cft->ss)
		return rcu_dereference_raw(cgrp->subsys[cft->ss->id]);
	else
		return &cgrp->self;
}
EXPORT_SYMBOL_GPL(of_css);

/**
 * cgroup_is_descendant - test ancestry
 * @cgrp: the cgroup to be tested
 * @ancestor: possible ancestor of @cgrp
 *
 * Test whether @cgrp is a descendant of @ancestor.  It also returns %true
 * if @cgrp == @ancestor.  This function is safe to call as long as @cgrp
 * and @ancestor are accessible.
 */
bool cgroup_is_descendant(struct cgroup *cgrp, struct cgroup *ancestor)
{
	while (cgrp) {
		if (cgrp == ancestor)
			return true;
		cgrp = cgroup_parent(cgrp);
	}
	return false;
}

static int cgroup_is_releasable(const struct cgroup *cgrp)
{
	const int bits =
		(1 << CGRP_RELEASABLE) |
		(1 << CGRP_NOTIFY_ON_RELEASE);
	return (cgrp->flags & bits) == bits;
}

static int notify_on_release(const struct cgroup *cgrp)
{
	return test_bit(CGRP_NOTIFY_ON_RELEASE, &cgrp->flags);
}

/**
 * for_each_css - iterate all css's of a cgroup
 * @css: the iteration cursor
 * @ssid: the index of the subsystem, CGROUP_SUBSYS_COUNT after reaching the end
 * @cgrp: the target cgroup to iterate css's of
 *
 * Should be called under cgroup_[tree_]mutex.
 */
#define for_each_css(css, ssid, cgrp)					\
	for ((ssid) = 0; (ssid) < CGROUP_SUBSYS_COUNT; (ssid)++)	\
		if (!((css) = rcu_dereference_check(			\
				(cgrp)->subsys[(ssid)],			\
				lockdep_is_held(&cgroup_mutex)))) { }	\
		else

/**
 * for_each_e_css - iterate all effective css's of a cgroup
 * @css: the iteration cursor
 * @ssid: the index of the subsystem, CGROUP_SUBSYS_COUNT after reaching the end
 * @cgrp: the target cgroup to iterate css's of
 *
 * Should be called under cgroup_[tree_]mutex.
 */
#define for_each_e_css(css, ssid, cgrp)					\
	for ((ssid) = 0; (ssid) < CGROUP_SUBSYS_COUNT; (ssid)++)	\
		if (!((css) = cgroup_e_css(cgrp, cgroup_subsys[(ssid)]))) \
			;						\
		else

/**
 * for_each_subsys - iterate all enabled cgroup subsystems
 * @ss: the iteration cursor
 * @ssid: the index of @ss, CGROUP_SUBSYS_COUNT after reaching the end
 */
#define for_each_subsys(ss, ssid)					\
	for ((ssid) = 0; (ssid) < CGROUP_SUBSYS_COUNT &&		\
	     (((ss) = cgroup_subsys[ssid]) || true); (ssid)++)
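
/*
 * Illustrative only - typical usage mirrors the calls later in this file:
 *
 *	struct cgroup_subsys *ss;
 *	int ssid;
 *
 *	for_each_subsys(ss, ssid)
 *		pr_info("subsys %s has id %d\n", ss->name, ssid);
 */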

/* iterate across the hierarchies */
#define for_each_root(root)						\
	list_for_each_entry((root), &cgroup_roots, root_list)

/* iterate over child cgrps, lock should be held throughout iteration */
#define cgroup_for_each_live_child(child, cgrp)				\
	list_for_each_entry((child), &(cgrp)->self.children, self.sibling) \
		if (({ lockdep_assert_held(&cgroup_mutex);		\
		       cgroup_is_dead(child); }))			\
			;						\
		else

/* the list of cgroups eligible for automatic release. Protected by
 * release_list_lock */
static LIST_HEAD(release_list);
static DEFINE_RAW_SPINLOCK(release_list_lock);
static void cgroup_release_agent(struct work_struct *work);
static DECLARE_WORK(release_agent_work, cgroup_release_agent);
static void check_for_release(struct cgroup *cgrp);

/*
 * A cgroup can be associated with multiple css_sets as different tasks may
 * belong to different cgroups on different hierarchies.  In the other
 * direction, a css_set is naturally associated with multiple cgroups.
 * This M:N relationship is represented by the following link structure
 * which exists for each association and allows traversing the associations
 * from both sides.
 */
struct cgrp_cset_link {
	/* the cgroup and css_set this link associates */
	struct cgroup		*cgrp;
	struct css_set		*cset;

	/* list of cgrp_cset_links anchored at cgrp->cset_links */
	struct list_head	cset_link;

	/* list of cgrp_cset_links anchored at css_set->cgrp_links */
	struct list_head	cgrp_link;
};

/*
 * The default css_set - used by init and its children prior to any
 * hierarchies being mounted. It contains a pointer to the root state
 * for each subsystem. Also used to anchor the list of css_sets. Not
 * reference-counted, to improve performance when child cgroups
 * haven't been created.
 */
struct css_set init_css_set = {
	.refcount		= ATOMIC_INIT(1),
	.cgrp_links		= LIST_HEAD_INIT(init_css_set.cgrp_links),
	.tasks			= LIST_HEAD_INIT(init_css_set.tasks),
	.mg_tasks		= LIST_HEAD_INIT(init_css_set.mg_tasks),
	.mg_preload_node	= LIST_HEAD_INIT(init_css_set.mg_preload_node),
	.mg_node		= LIST_HEAD_INIT(init_css_set.mg_node),
};

static int css_set_count	= 1;	/* 1 for init_css_set */

/**
 * cgroup_update_populated - update the populated count of a cgroup
 * @cgrp: the target cgroup
 * @populated: inc or dec populated count
 *
 * @cgrp is either getting the first task (css_set) or losing the last.
 * Update @cgrp->populated_cnt accordingly.  The count is propagated
 * towards root so that a given cgroup's populated_cnt is zero iff the
 * cgroup and all its descendants are empty.
 *
 * @cgrp's interface file "cgroup.populated" is zero if
 * @cgrp->populated_cnt is zero and 1 otherwise.  When @cgrp->populated_cnt
 * changes from or to zero, userland is notified that the content of the
 * interface file has changed.  This can be used to detect when @cgrp and
 * its descendants become populated or empty.
 */
static void cgroup_update_populated(struct cgroup *cgrp, bool populated)
{
	lockdep_assert_held(&css_set_rwsem);

	do {
		bool trigger;

		if (populated)
			trigger = !cgrp->populated_cnt++;
		else
			trigger = !--cgrp->populated_cnt;

		if (!trigger)
			break;

		if (cgrp->populated_kn)
			kernfs_notify(cgrp->populated_kn);
		cgrp = cgroup_parent(cgrp);
	} while (cgrp);
}

/*
 * hash table for css_sets. This speeds up finding an existing css_set.
 * This hash doesn't (currently) take into account cgroups in empty
 * hierarchies.
 */
#define CSS_SET_HASH_BITS	7
static DEFINE_HASHTABLE(css_set_table, CSS_SET_HASH_BITS);

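/*
 * Hash the css pointer array of a css_set into a css_set_table key.  The
 * sum-and-fold below is an inexpensive way to mix slab-allocated pointer
 * values across the table's buckets.
 */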
static unsigned long css_set_hash(struct cgroup_subsys_state *css[])
{
	unsigned long key = 0UL;
	struct cgroup_subsys *ss;
	int i;

	for_each_subsys(ss, i)
		key += (unsigned long)css[i];
	key = (key >> 16) ^ key;

	return key;
}

static void put_css_set_locked(struct css_set *cset, bool taskexit)
{
	struct cgrp_cset_link *link, *tmp_link;
	struct cgroup_subsys *ss;
	int ssid;

	lockdep_assert_held(&css_set_rwsem);

	if (!atomic_dec_and_test(&cset->refcount))
		return;

	/* This css_set is dead. unlink it and release cgroup refcounts */
	for_each_subsys(ss, ssid)
		list_del(&cset->e_cset_node[ssid]);
	hash_del(&cset->hlist);
	css_set_count--;

	list_for_each_entry_safe(link, tmp_link, &cset->cgrp_links, cgrp_link) {
		struct cgroup *cgrp = link->cgrp;

		list_del(&link->cset_link);
		list_del(&link->cgrp_link);

		/* @cgrp can't go away while we're holding css_set_rwsem */
		if (list_empty(&cgrp->cset_links)) {
			cgroup_update_populated(cgrp, false);
			if (notify_on_release(cgrp)) {
				if (taskexit)
					set_bit(CGRP_RELEASABLE, &cgrp->flags);
				check_for_release(cgrp);
			}
		}

		kfree(link);
	}

	kfree_rcu(cset, rcu_head);
}

static void put_css_set(struct css_set *cset, bool taskexit)
{
	/*
	 * Ensure that the refcount doesn't hit zero while any readers
	 * can see it. Similar to atomic_dec_and_lock(), but for an
	 * rwlock
	 */
	if (atomic_add_unless(&cset->refcount, -1, 1))
		return;

	down_write(&css_set_rwsem);
	put_css_set_locked(cset, taskexit);
	up_write(&css_set_rwsem);
}

/*
 * refcounted get/put for css_set objects
 */
static inline void get_css_set(struct css_set *cset)
{
	atomic_inc(&cset->refcount);
}

/**
 * compare_css_sets - helper function for find_existing_css_set().
 * @cset: candidate css_set being tested
 * @old_cset: existing css_set for a task
 * @new_cgrp: cgroup that's being entered by the task
 * @template: desired set of css pointers in css_set (pre-calculated)
 *
 * Returns true if "cset" matches "old_cset" except for the hierarchy
 * which "new_cgrp" belongs to, for which it should match "new_cgrp".
 */
static bool compare_css_sets(struct css_set *cset,
			     struct css_set *old_cset,
			     struct cgroup *new_cgrp,
			     struct cgroup_subsys_state *template[])
{
	struct list_head *l1, *l2;

	/*
	 * On the default hierarchy, there can be csets which are
	 * associated with the same set of cgroups but different csses.
	 * Let's first ensure that csses match.
	 */
	if (memcmp(template, cset->subsys, sizeof(cset->subsys)))
		return false;

	/*
	 * Compare cgroup pointers in order to distinguish between
	 * different cgroups in hierarchies.  As different cgroups may
	 * share the same effective css, this comparison is always
	 * necessary.
	 */
	l1 = &cset->cgrp_links;
	l2 = &old_cset->cgrp_links;
	while (1) {
		struct cgrp_cset_link *link1, *link2;
		struct cgroup *cgrp1, *cgrp2;

		l1 = l1->next;
		l2 = l2->next;
		/* See if we reached the end - both lists are equal length. */
		if (l1 == &cset->cgrp_links) {
			BUG_ON(l2 != &old_cset->cgrp_links);
			break;
		} else {
			BUG_ON(l2 == &old_cset->cgrp_links);
		}
		/* Locate the cgroups associated with these links. */
		link1 = list_entry(l1, struct cgrp_cset_link, cgrp_link);
		link2 = list_entry(l2, struct cgrp_cset_link, cgrp_link);
		cgrp1 = link1->cgrp;
		cgrp2 = link2->cgrp;
		/* Hierarchies should be linked in the same order. */
		BUG_ON(cgrp1->root != cgrp2->root);

		/*
		 * If this hierarchy is the hierarchy of the cgroup
		 * that's changing, then we need to check that this
		 * css_set points to the new cgroup; if it's any other
		 * hierarchy, then this css_set should point to the
		 * same cgroup as the old css_set.
		 */
		if (cgrp1->root == new_cgrp->root) {
			if (cgrp1 != new_cgrp)
				return false;
		} else {
			if (cgrp1 != cgrp2)
				return false;
		}
	}
	return true;
}

/**
 * find_existing_css_set - init css array and find the matching css_set
 * @old_cset: the css_set that we're using before the cgroup transition
 * @cgrp: the cgroup that we're moving into
 * @template: out param for the new set of csses, should be clear on entry
 */
static struct css_set *find_existing_css_set(struct css_set *old_cset,
					struct cgroup *cgrp,
					struct cgroup_subsys_state *template[])
{
	struct cgroup_root *root = cgrp->root;
	struct cgroup_subsys *ss;
	struct css_set *cset;
	unsigned long key;
	int i;

	/*
	 * Build the set of subsystem state objects that we want to see in the
	 * new css_set. While subsystems can change globally, the entries here
	 * won't change, so no need for locking.
	 */
	for_each_subsys(ss, i) {
		if (root->subsys_mask & (1UL << i)) {
			/*
			 * @ss is in this hierarchy, so we want the
			 * effective css from @cgrp.
			 */
			template[i] = cgroup_e_css(cgrp, ss);
		} else {
			/*
			 * @ss is not in this hierarchy, so we don't want
			 * to change the css.
			 */
			template[i] = old_cset->subsys[i];
		}
	}

	key = css_set_hash(template);
	hash_for_each_possible(css_set_table, cset, hlist, key) {
		if (!compare_css_sets(cset, old_cset, cgrp, template))
			continue;

		/* This css_set matches what we need */
		return cset;
	}

	/* No existing css_set matched */
	return NULL;
}

static void free_cgrp_cset_links(struct list_head *links_to_free)
{
	struct cgrp_cset_link *link, *tmp_link;

	list_for_each_entry_safe(link, tmp_link, links_to_free, cset_link) {
		list_del(&link->cset_link);
		kfree(link);
	}
}

/**
 * allocate_cgrp_cset_links - allocate cgrp_cset_links
 * @count: the number of links to allocate
 * @tmp_links: list_head the allocated links are put on
 *
 * Allocate @count cgrp_cset_link structures and chain them on @tmp_links
 * through ->cset_link.  Returns 0 on success or -errno.
 */
static int allocate_cgrp_cset_links(int count, struct list_head *tmp_links)
{
	struct cgrp_cset_link *link;
	int i;

	INIT_LIST_HEAD(tmp_links);

	for (i = 0; i < count; i++) {
		link = kzalloc(sizeof(*link), GFP_KERNEL);
		if (!link) {
			free_cgrp_cset_links(tmp_links);
			return -ENOMEM;
		}
		list_add(&link->cset_link, tmp_links);
	}
	return 0;
}

/**
 * link_css_set - a helper function to link a css_set to a cgroup
 * @tmp_links: cgrp_cset_link objects allocated by allocate_cgrp_cset_links()
 * @cset: the css_set to be linked
 * @cgrp: the destination cgroup
 */
static void link_css_set(struct list_head *tmp_links, struct css_set *cset,
			 struct cgroup *cgrp)
{
	struct cgrp_cset_link *link;

	BUG_ON(list_empty(tmp_links));

	if (cgroup_on_dfl(cgrp))
		cset->dfl_cgrp = cgrp;

	link = list_first_entry(tmp_links, struct cgrp_cset_link, cset_link);
	link->cset = cset;
	link->cgrp = cgrp;

	if (list_empty(&cgrp->cset_links))
		cgroup_update_populated(cgrp, true);
	list_move(&link->cset_link, &cgrp->cset_links);

	/*
	 * Always add links to the tail of the list so that the list
	 * is sorted by order of hierarchy creation
	 */
	list_add_tail(&link->cgrp_link, &cset->cgrp_links);
}

/**
 * find_css_set - return a new css_set with one cgroup updated
 * @old_cset: the baseline css_set
 * @cgrp: the cgroup to be updated
 *
 * Return a new css_set that's equivalent to @old_cset, but with @cgrp
 * substituted into the appropriate hierarchy.
 */
static struct css_set *find_css_set(struct css_set *old_cset,
				    struct cgroup *cgrp)
{
	struct cgroup_subsys_state *template[CGROUP_SUBSYS_COUNT] = { };
	struct css_set *cset;
	struct list_head tmp_links;
	struct cgrp_cset_link *link;
	struct cgroup_subsys *ss;
	unsigned long key;
	int ssid;

	lockdep_assert_held(&cgroup_mutex);

	/* First see if we already have a css_set that matches
	 * the desired set */
	down_read(&css_set_rwsem);
	cset = find_existing_css_set(old_cset, cgrp, template);
	if (cset)
		get_css_set(cset);
	up_read(&css_set_rwsem);

	if (cset)
		return cset;

	cset = kzalloc(sizeof(*cset), GFP_KERNEL);
	if (!cset)
		return NULL;

	/* Allocate all the cgrp_cset_link objects that we'll need */
	if (allocate_cgrp_cset_links(cgroup_root_count, &tmp_links) < 0) {
		kfree(cset);
		return NULL;
	}

	atomic_set(&cset->refcount, 1);
	INIT_LIST_HEAD(&cset->cgrp_links);
	INIT_LIST_HEAD(&cset->tasks);
	INIT_LIST_HEAD(&cset->mg_tasks);
	INIT_LIST_HEAD(&cset->mg_preload_node);
	INIT_LIST_HEAD(&cset->mg_node);
	INIT_HLIST_NODE(&cset->hlist);

	/* Copy the set of subsystem state objects generated in
	 * find_existing_css_set() */
	memcpy(cset->subsys, template, sizeof(cset->subsys));

	down_write(&css_set_rwsem);
	/* Add reference counts and links from the new css_set. */
	list_for_each_entry(link, &old_cset->cgrp_links, cgrp_link) {
		struct cgroup *c = link->cgrp;

		if (c->root == cgrp->root)
			c = cgrp;
		link_css_set(&tmp_links, cset, c);
	}

	BUG_ON(!list_empty(&tmp_links));

	css_set_count++;

	/* Add @cset to the hash table */
	key = css_set_hash(cset->subsys);
	hash_add(css_set_table, &cset->hlist, key);

	for_each_subsys(ss, ssid)
		list_add_tail(&cset->e_cset_node[ssid],
			      &cset->subsys[ssid]->cgroup->e_csets[ssid]);

	up_write(&css_set_rwsem);

	return cset;
}

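/* map a kernfs hierarchy back to the cgroup_root it was created for */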
static struct cgroup_root *cgroup_root_from_kf(struct kernfs_root *kf_root)
{
	struct cgroup *root_cgrp = kf_root->kn->priv;

	return root_cgrp->root;
}

static int cgroup_init_root_id(struct cgroup_root *root)
{
	int id;

	lockdep_assert_held(&cgroup_mutex);

	id = idr_alloc_cyclic(&cgroup_hierarchy_idr, root, 0, 0, GFP_KERNEL);
	if (id < 0)
		return id;

	root->hierarchy_id = id;
	return 0;
}

static void cgroup_exit_root_id(struct cgroup_root *root)
{
	lockdep_assert_held(&cgroup_mutex);

	if (root->hierarchy_id) {
		idr_remove(&cgroup_hierarchy_idr, root->hierarchy_id);
		root->hierarchy_id = 0;
	}
}

static void cgroup_free_root(struct cgroup_root *root)
{
	if (root) {
		/* hierarchy ID should already have been released */
		WARN_ON_ONCE(root->hierarchy_id);

		idr_destroy(&root->cgroup_idr);
		kfree(root);
	}
}

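/*
 * Tear down @root: rebind its subsystems back to the default hierarchy,
 * drop all css_set links to its root cgroup and release the hierarchy ID.
 */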
static void cgroup_destroy_root(struct cgroup_root *root)
{
	struct cgroup *cgrp = &root->cgrp;
	struct cgrp_cset_link *link, *tmp_link;

	mutex_lock(&cgroup_mutex);

	BUG_ON(atomic_read(&root->nr_cgrps));
	BUG_ON(!list_empty(&cgrp->self.children));

	/* Rebind all subsystems back to the default hierarchy */
	rebind_subsystems(&cgrp_dfl_root, root->subsys_mask);

	/*
	 * Release all the links from cset_links to this hierarchy's
	 * root cgroup
	 */
	down_write(&css_set_rwsem);

	list_for_each_entry_safe(link, tmp_link, &cgrp->cset_links, cset_link) {
		list_del(&link->cset_link);
		list_del(&link->cgrp_link);
		kfree(link);
	}
	up_write(&css_set_rwsem);

	if (!list_empty(&root->root_list)) {
		list_del(&root->root_list);
		cgroup_root_count--;
	}

	cgroup_exit_root_id(root);

	mutex_unlock(&cgroup_mutex);

	kernfs_destroy_root(root->kf_root);
	cgroup_free_root(root);
}
/* look up cgroup associated with given css_set on the specified hierarchy */
static struct cgroup *cset_cgroup_from_root(struct css_set *cset,
					    struct cgroup_root *root)
{
	struct cgroup *res = NULL;

	lockdep_assert_held(&cgroup_mutex);
	lockdep_assert_held(&css_set_rwsem);

	if (cset == &init_css_set) {
		res = &root->cgrp;
	} else {
		struct cgrp_cset_link *link;

		list_for_each_entry(link, &cset->cgrp_links, cgrp_link) {
			struct cgroup *c = link->cgrp;

			if (c->root == root) {
				res = c;
				break;
			}
		}
	}

	BUG_ON(!res);
	return res;
}

/*
 * Return the cgroup for "task" from the given hierarchy. Must be
 * called with cgroup_mutex and css_set_rwsem held.
 */
static struct cgroup *task_cgroup_from_root(struct task_struct *task,
					    struct cgroup_root *root)
{
	/*
	 * No need to lock the task - since we hold cgroup_mutex the
	 * task can't change groups, so the only thing that can happen
	 * is that it exits and its css is set back to init_css_set.
	 */
	return cset_cgroup_from_root(task_css_set(task), root);
}

/*
 * A task must hold cgroup_mutex to modify cgroups.
 *
 * Any task can increment and decrement the count field without lock.
 * So in general, code holding cgroup_mutex can't rely on the count
 * field not changing.  However, if the count goes to zero, then only
 * cgroup_attach_task() can increment it again.  Because a count of zero
 * means that no tasks are currently attached, therefore there is no
 * way a task attached to that cgroup can fork (the other way to
 * increment the count).  So code holding cgroup_mutex can safely
 * assume that if the count is zero, it will stay zero. Similarly, if
 * a task holds cgroup_mutex on a cgroup with zero count, it
 * knows that the cgroup won't be removed, as cgroup_rmdir()
 * needs that mutex.
 *
 * The fork and exit callbacks cgroup_fork() and cgroup_exit(), don't
 * (usually) take cgroup_mutex.  These are the two most performance
 * critical pieces of code here.  The exception occurs on cgroup_exit(),
 * when a task in a notify_on_release cgroup exits.  Then cgroup_mutex
 * is taken, and if the cgroup count is zero, a usermode call made
 * to the release agent with the name of the cgroup (path relative to
 * the root of cgroup file system) as the argument.
 *
 * A cgroup can only be deleted if both its 'count' of using tasks
 * is zero, and its list of 'children' cgroups is empty.  Since all
 * tasks in the system use _some_ cgroup, and since there is always at
 * least one task in the system (init, pid == 1), therefore, root cgroup
 * always has either children cgroups and/or using tasks.  So we don't
 * need a special hack to ensure that root cgroup cannot be deleted.
 *
 * P.S.  One more locking exception.  RCU is used to guard the
 * update of a task's cgroup pointer by cgroup_attach_task()
 */

static int cgroup_populate_dir(struct cgroup *cgrp, unsigned int subsys_mask);
993
static const struct file_operations proc_cgroupstats_operations;
994

T
static char *cgroup_file_name(struct cgroup *cgrp, const struct cftype *cft,
			      char *buf)
{
	if (cft->ss && !(cft->flags & CFTYPE_NO_PREFIX) &&
	    !(cgrp->root->flags & CGRP_ROOT_NOPREFIX))
		snprintf(buf, CGROUP_FILE_NAME_MAX, "%s.%s",
			 cft->ss->name, cft->name);
	else
		strncpy(buf, cft->name, CGROUP_FILE_NAME_MAX);
	return buf;
}
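
/*
 * For example, the memory controller's "limit_in_bytes" cftype normally
 * yields "memory.limit_in_bytes", while files without an owning subsystem
 * (or flagged CFTYPE_NO_PREFIX, or on a CGRP_ROOT_NOPREFIX root) keep
 * their bare names.
 */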

/**
 * cgroup_file_mode - deduce file mode of a control file
 * @cft: the control file in question
 *
 * returns cft->mode if ->mode is not 0
 * returns S_IRUGO|S_IWUSR if it has both a read and a write handler
 * returns S_IRUGO if it has only a read handler
 * returns S_IWUSR if it has only a write handler
 */
static umode_t cgroup_file_mode(const struct cftype *cft)
{
	umode_t mode = 0;

	if (cft->mode)
		return cft->mode;

	if (cft->read_u64 || cft->read_s64 || cft->seq_show)
		mode |= S_IRUGO;

	if (cft->write_u64 || cft->write_s64 || cft->write)
		mode |= S_IWUSR;

	return mode;
}

static void cgroup_get(struct cgroup *cgrp)
{
	WARN_ON_ONCE(cgroup_is_dead(cgrp));
	css_get(&cgrp->self);
}

static void cgroup_put(struct cgroup *cgrp)
{
	css_put(&cgrp->self);
}

/**
 * cgroup_refresh_child_subsys_mask - update child_subsys_mask
 * @cgrp: the target cgroup
 *
 * On the default hierarchy, a subsystem may request other subsystems to be
 * enabled together through its ->depends_on mask.  In such cases, more
 * subsystems than specified in "cgroup.subtree_control" may be enabled.
 *
 * This function determines which subsystems need to be enabled given the
 * current @cgrp->subtree_control and records it in
 * @cgrp->child_subsys_mask.  The resulting mask is always a superset of
 * @cgrp->subtree_control and follows the usual hierarchy rules.
 */
static void cgroup_refresh_child_subsys_mask(struct cgroup *cgrp)
{
	struct cgroup *parent = cgroup_parent(cgrp);
	unsigned int cur_ss_mask = cgrp->subtree_control;
	struct cgroup_subsys *ss;
	int ssid;

	lockdep_assert_held(&cgroup_mutex);

	if (!cgroup_on_dfl(cgrp)) {
		cgrp->child_subsys_mask = cur_ss_mask;
		return;
	}

	while (true) {
		unsigned int new_ss_mask = cur_ss_mask;

		for_each_subsys(ss, ssid)
			if (cur_ss_mask & (1 << ssid))
				new_ss_mask |= ss->depends_on;

		/*
		 * Mask out subsystems which aren't available.  This can
		 * happen only if some depended-upon subsystems were bound
		 * to non-default hierarchies.
		 */
		if (parent)
			new_ss_mask &= parent->child_subsys_mask;
		else
			new_ss_mask &= cgrp->root->subsys_mask;

		if (new_ss_mask == cur_ss_mask)
			break;
		cur_ss_mask = new_ss_mask;
	}

	cgrp->child_subsys_mask = cur_ss_mask;
}

/**
 * cgroup_kn_unlock - unlocking helper for cgroup kernfs methods
 * @kn: the kernfs_node being serviced
 *
 * This helper undoes cgroup_kn_lock_live() and should be invoked before
 * the method finishes if locking succeeded.  Note that once this function
 * returns the cgroup returned by cgroup_kn_lock_live() may become
 * inaccessible any time.  If the caller intends to continue to access the
 * cgroup, it should pin it before invoking this function.
 */
static void cgroup_kn_unlock(struct kernfs_node *kn)
{
	struct cgroup *cgrp;

	if (kernfs_type(kn) == KERNFS_DIR)
		cgrp = kn->priv;
	else
		cgrp = kn->parent->priv;

	mutex_unlock(&cgroup_mutex);

	kernfs_unbreak_active_protection(kn);
	cgroup_put(cgrp);
}

/**
 * cgroup_kn_lock_live - locking helper for cgroup kernfs methods
 * @kn: the kernfs_node being serviced
 *
 * This helper is to be used by a cgroup kernfs method currently servicing
 * @kn.  It breaks the active protection, performs cgroup locking and
 * verifies that the associated cgroup is alive.  Returns the cgroup if
 * alive; otherwise, %NULL.  A successful return should be undone by a
 * matching cgroup_kn_unlock() invocation.
 *
 * Any cgroup kernfs method implementation which requires locking the
 * associated cgroup should use this helper.  It avoids nesting cgroup
 * locking under kernfs active protection and allows all kernfs operations
 * including self-removal.
 */
static struct cgroup *cgroup_kn_lock_live(struct kernfs_node *kn)
{
	struct cgroup *cgrp;

	if (kernfs_type(kn) == KERNFS_DIR)
		cgrp = kn->priv;
	else
		cgrp = kn->parent->priv;

	/*
	 * We're gonna grab cgroup_mutex which nests outside kernfs
	 * active_ref.  cgroup liveliness check alone provides enough
	 * protection against removal.  Ensure @cgrp stays accessible and
	 * break the active_ref protection.
	 */
	cgroup_get(cgrp);
	kernfs_break_active_protection(kn);

	mutex_lock(&cgroup_mutex);

	if (!cgroup_is_dead(cgrp))
		return cgrp;

	cgroup_kn_unlock(kn);
	return NULL;
}

static void cgroup_rm_file(struct cgroup *cgrp, const struct cftype *cft)
{
	char name[CGROUP_FILE_NAME_MAX];

	lockdep_assert_held(&cgroup_mutex);
	kernfs_remove_by_name(cgrp->kn, cgroup_file_name(cgrp, cft, name));
}

/**
 * cgroup_clear_dir - remove subsys files in a cgroup directory
 * @cgrp: target cgroup
 * @subsys_mask: mask of the subsystem ids whose files should be removed
 */
static void cgroup_clear_dir(struct cgroup *cgrp, unsigned int subsys_mask)
{
	struct cgroup_subsys *ss;
	int i;

	for_each_subsys(ss, i) {
		struct cftype *cfts;

		if (!(subsys_mask & (1 << i)))
			continue;
		list_for_each_entry(cfts, &ss->cfts, node)
			cgroup_addrm_files(cgrp, cfts, false);
	}
}

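/*
 * Move the subsystems in @ss_mask from their current hierarchies to
 * @dst_root.  Fails with -EBUSY if a subsystem still has non-root csses
 * attached or if the move would be between two non-default hierarchies.
 */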
static int rebind_subsystems(struct cgroup_root *dst_root, unsigned int ss_mask)
{
	struct cgroup_subsys *ss;
	unsigned int tmp_ss_mask;
	int ssid, i, ret;

	lockdep_assert_held(&cgroup_mutex);

	for_each_subsys(ss, ssid) {
		if (!(ss_mask & (1 << ssid)))
			continue;

		/* if @ss has non-root csses attached to it, can't move */
		if (css_next_child(NULL, cgroup_css(&ss->root->cgrp, ss)))
			return -EBUSY;

		/* can't move between two non-dummy roots either */
		if (ss->root != &cgrp_dfl_root && dst_root != &cgrp_dfl_root)
			return -EBUSY;
	}

	/* skip creating root files on dfl_root for inhibited subsystems */
	tmp_ss_mask = ss_mask;
	if (dst_root == &cgrp_dfl_root)
		tmp_ss_mask &= ~cgrp_dfl_root_inhibit_ss_mask;

	ret = cgroup_populate_dir(&dst_root->cgrp, tmp_ss_mask);
	if (ret) {
		if (dst_root != &cgrp_dfl_root)
			return ret;

		/*
		 * Rebinding back to the default root is not allowed to
		 * fail.  Using both default and non-default roots should
		 * be rare.  Moving subsystems back and forth even more so.
		 * Just warn about it and continue.
		 */
		if (cgrp_dfl_root_visible) {
			pr_warn("failed to create files (%d) while rebinding 0x%x to default root\n",
				ret, ss_mask);
			pr_warn("you may retry by moving them to a different hierarchy and unbinding\n");
		}
	}

	/*
	 * Nothing can fail from this point on.  Remove files for the
	 * removed subsystems and rebind each subsystem.
	 */
	for_each_subsys(ss, ssid)
		if (ss_mask & (1 << ssid))
			cgroup_clear_dir(&ss->root->cgrp, 1 << ssid);

	for_each_subsys(ss, ssid) {
		struct cgroup_root *src_root;
		struct cgroup_subsys_state *css;
		struct css_set *cset;

		if (!(ss_mask & (1 << ssid)))
			continue;

		src_root = ss->root;
		css = cgroup_css(&src_root->cgrp, ss);

		WARN_ON(!css || cgroup_css(&dst_root->cgrp, ss));

		RCU_INIT_POINTER(src_root->cgrp.subsys[ssid], NULL);
		rcu_assign_pointer(dst_root->cgrp.subsys[ssid], css);
		ss->root = dst_root;
		css->cgroup = &dst_root->cgrp;

		down_write(&css_set_rwsem);
		hash_for_each(css_set_table, i, cset, hlist)
			list_move_tail(&cset->e_cset_node[ss->id],
				       &dst_root->cgrp.e_csets[ss->id]);
		up_write(&css_set_rwsem);

		src_root->subsys_mask &= ~(1 << ssid);
		src_root->cgrp.subtree_control &= ~(1 << ssid);
		cgroup_refresh_child_subsys_mask(&src_root->cgrp);

		/* default hierarchy doesn't enable controllers by default */
		dst_root->subsys_mask |= 1 << ssid;
		if (dst_root != &cgrp_dfl_root) {
			dst_root->cgrp.subtree_control |= 1 << ssid;
			cgroup_refresh_child_subsys_mask(&dst_root->cgrp);
		}

		if (ss->bind)
			ss->bind(css);
	}

	kernfs_activate(dst_root->cgrp.kn);
	return 0;
}

static int cgroup_show_options(struct seq_file *seq,
			       struct kernfs_root *kf_root)
{
	struct cgroup_root *root = cgroup_root_from_kf(kf_root);
	struct cgroup_subsys *ss;
	int ssid;

	for_each_subsys(ss, ssid)
		if (root->subsys_mask & (1 << ssid))
			seq_printf(seq, ",%s", ss->name);
	if (root->flags & CGRP_ROOT_NOPREFIX)
		seq_puts(seq, ",noprefix");
	if (root->flags & CGRP_ROOT_XATTR)
		seq_puts(seq, ",xattr");

	spin_lock(&release_agent_path_lock);
	if (strlen(root->release_agent_path))
		seq_printf(seq, ",release_agent=%s", root->release_agent_path);
	spin_unlock(&release_agent_path_lock);

	if (test_bit(CGRP_CPUSET_CLONE_CHILDREN, &root->cgrp.flags))
		seq_puts(seq, ",clone_children");
	if (strlen(root->name))
		seq_printf(seq, ",name=%s", root->name);
	return 0;
}

struct cgroup_sb_opts {
	unsigned int subsys_mask;
	unsigned int flags;
	char *release_agent;
	bool cpuset_clone_children;
	char *name;
	/* User explicitly requested empty subsystem */
	bool none;
};

static int parse_cgroupfs_options(char *data, struct cgroup_sb_opts *opts)
{
	char *token, *o = data;
	bool all_ss = false, one_ss = false;
	unsigned int mask = -1U;
	struct cgroup_subsys *ss;
	int nr_opts = 0;
	int i;

#ifdef CONFIG_CPUSETS
	mask = ~(1U << cpuset_cgrp_id);
#endif

	memset(opts, 0, sizeof(*opts));

	while ((token = strsep(&o, ",")) != NULL) {
		nr_opts++;

		if (!*token)
			return -EINVAL;
		if (!strcmp(token, "none")) {
			/* Explicitly have no subsystems */
			opts->none = true;
			continue;
		}
		if (!strcmp(token, "all")) {
			/* Mutually exclusive option 'all' + subsystem name */
			if (one_ss)
				return -EINVAL;
			all_ss = true;
			continue;
		}
		if (!strcmp(token, "__DEVEL__sane_behavior")) {
			opts->flags |= CGRP_ROOT_SANE_BEHAVIOR;
			continue;
		}
		if (!strcmp(token, "noprefix")) {
			opts->flags |= CGRP_ROOT_NOPREFIX;
			continue;
		}
		if (!strcmp(token, "clone_children")) {
			opts->cpuset_clone_children = true;
			continue;
		}
		if (!strcmp(token, "xattr")) {
			opts->flags |= CGRP_ROOT_XATTR;
			continue;
		}
		if (!strncmp(token, "release_agent=", 14)) {
			/* Specifying two release agents is forbidden */
			if (opts->release_agent)
				return -EINVAL;
			opts->release_agent =
				kstrndup(token + 14, PATH_MAX - 1, GFP_KERNEL);
			if (!opts->release_agent)
				return -ENOMEM;
			continue;
		}
		if (!strncmp(token, "name=", 5)) {
			const char *name = token + 5;
			/* Can't specify an empty name */
			if (!strlen(name))
				return -EINVAL;
			/* Must match [\w.-]+ */
			for (i = 0; i < strlen(name); i++) {
				char c = name[i];
				if (isalnum(c))
					continue;
				if ((c == '.') || (c == '-') || (c == '_'))
					continue;
				return -EINVAL;
			}
			/* Specifying two names is forbidden */
			if (opts->name)
				return -EINVAL;
			opts->name = kstrndup(name,
					      MAX_CGROUP_ROOT_NAMELEN - 1,
					      GFP_KERNEL);
			if (!opts->name)
				return -ENOMEM;

			continue;
		}

		for_each_subsys(ss, i) {
			if (strcmp(token, ss->name))
				continue;
			if (ss->disabled)
				continue;

			/* Mutually exclusive option 'all' + subsystem name */
			if (all_ss)
				return -EINVAL;
			opts->subsys_mask |= (1 << i);
			one_ss = true;

			break;
		}
		if (i == CGROUP_SUBSYS_COUNT)
			return -ENOENT;
	}

	if (opts->flags & CGRP_ROOT_SANE_BEHAVIOR) {
		pr_warn("sane_behavior: this is still under development and its behaviors will change, proceed at your own risk\n");
		if (nr_opts != 1) {
			pr_err("sane_behavior: no other mount options allowed\n");
			return -EINVAL;
		}
		return 0;
	}

	/*
	 * If the 'all' option was specified select all the subsystems,
	 * otherwise if 'none', 'name=' and a subsystem name options were
	 * not specified, let's default to 'all'
	 */
	if (all_ss || (!one_ss && !opts->none && !opts->name))
		for_each_subsys(ss, i)
			if (!ss->disabled)
				opts->subsys_mask |= (1 << i);

	/*
	 * We either have to specify by name or by subsystems. (So all
	 * empty hierarchies must have a name).
	 */
	if (!opts->subsys_mask && !opts->name)
		return -EINVAL;

	/*
	 * Option noprefix was introduced just for backward compatibility
	 * with the old cpuset, so we allow noprefix only if mounting just
	 * the cpuset subsystem.
	 */
	if ((opts->flags & CGRP_ROOT_NOPREFIX) && (opts->subsys_mask & mask))
		return -EINVAL;

	/* Can't specify "none" and some subsystems */
	if (opts->subsys_mask && opts->none)
		return -EINVAL;

	return 0;
}
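
/*
 * For illustration (not from the original source), a legacy mount like
 *
 *	mount -t cgroup -o cpu,cpuacct,name=mygrp none /mnt
 *
 * reaches this parser as data = "cpu,cpuacct,name=mygrp" and produces
 * opts->subsys_mask with the cpu and cpuacct bits set and
 * opts->name = "mygrp".
 */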

static int cgroup_remount(struct kernfs_root *kf_root, int *flags, char *data)
{
	int ret = 0;
	struct cgroup_root *root = cgroup_root_from_kf(kf_root);
	struct cgroup_sb_opts opts;
	unsigned int added_mask, removed_mask;

	if (root == &cgrp_dfl_root) {
		pr_err("remount is not allowed\n");
		return -EINVAL;
	}

	mutex_lock(&cgroup_mutex);

	/* See what subsystems are wanted */
	ret = parse_cgroupfs_options(data, &opts);
	if (ret)
		goto out_unlock;

	if (opts.subsys_mask != root->subsys_mask || opts.release_agent)
		pr_warn("option changes via remount are deprecated (pid=%d comm=%s)\n",
			task_tgid_nr(current), current->comm);

	added_mask = opts.subsys_mask & ~root->subsys_mask;
	removed_mask = root->subsys_mask & ~opts.subsys_mask;

	/* Don't allow flags or name to change at remount */
	if ((opts.flags ^ root->flags) ||
	    (opts.name && strcmp(opts.name, root->name))) {
		pr_err("option or name mismatch, new: 0x%x \"%s\", old: 0x%x \"%s\"\n",
		       opts.flags, opts.name ?: "", root->flags, root->name);
		ret = -EINVAL;
		goto out_unlock;
	}

	/* remounting is not allowed for populated hierarchies */
	if (!list_empty(&root->cgrp.self.children)) {
		ret = -EBUSY;
		goto out_unlock;
	}

	ret = rebind_subsystems(root, added_mask);
	if (ret)
		goto out_unlock;

	rebind_subsystems(&cgrp_dfl_root, removed_mask);

	if (opts.release_agent) {
		spin_lock(&release_agent_path_lock);
		strcpy(root->release_agent_path, opts.release_agent);
		spin_unlock(&release_agent_path_lock);
	}
 out_unlock:
	kfree(opts.release_agent);
	kfree(opts.name);
	mutex_unlock(&cgroup_mutex);
	return ret;
}

/*
 * To reduce the fork() overhead for systems that are not actually using
 * their cgroups capability, we don't maintain the lists running through
 * each css_set to its tasks until we see the list actually used - in other
 * words after the first mount.
 */
static bool use_task_css_set_links __read_mostly;

static void cgroup_enable_task_cg_lists(void)
{
	struct task_struct *p, *g;

	down_write(&css_set_rwsem);

	if (use_task_css_set_links)
		goto out_unlock;

	use_task_css_set_links = true;

	/*
	 * We need tasklist_lock because RCU is not safe against
	 * while_each_thread(). Besides, a forking task that has passed
	 * cgroup_post_fork() without seeing use_task_css_set_links = 1
	 * is not guaranteed to have its child immediately visible in the
	 * tasklist if we walk through it with RCU.
	 */
	read_lock(&tasklist_lock);
	do_each_thread(g, p) {
		WARN_ON_ONCE(!list_empty(&p->cg_list) ||
			     task_css_set(p) != &init_css_set);

		/*
		 * We should check if the process is exiting, otherwise
		 * it will race with cgroup_exit() in that the list
		 * entry won't be deleted though the process has exited.
		 * Do it while holding siglock so that we don't end up
		 * racing against cgroup_exit().
		 */
		spin_lock_irq(&p->sighand->siglock);
		if (!(p->flags & PF_EXITING)) {
			struct css_set *cset = task_css_set(p);

			list_add(&p->cg_list, &cset->tasks);
			get_css_set(cset);
		}
		spin_unlock_irq(&p->sighand->siglock);
	} while_each_thread(g, p);
	read_unlock(&tasklist_lock);
out_unlock:
	up_write(&css_set_rwsem);
}

1576 1577
static void init_cgroup_housekeeping(struct cgroup *cgrp)
{
T
Tejun Heo 已提交
1578 1579 1580
	struct cgroup_subsys *ss;
	int ssid;

1581 1582
	INIT_LIST_HEAD(&cgrp->self.sibling);
	INIT_LIST_HEAD(&cgrp->self.children);
1583
	INIT_LIST_HEAD(&cgrp->cset_links);
1584
	INIT_LIST_HEAD(&cgrp->release_list);
1585 1586
	INIT_LIST_HEAD(&cgrp->pidlists);
	mutex_init(&cgrp->pidlist_mutex);
1587
	cgrp->self.cgroup = cgrp;
1588
	cgrp->self.flags |= CSS_ONLINE;
T
Tejun Heo 已提交
1589 1590 1591

	for_each_subsys(ss, ssid)
		INIT_LIST_HEAD(&cgrp->e_csets[ssid]);
1592 1593

	init_waitqueue_head(&cgrp->offline_waitq);
1594
}
1595

1596
static void init_cgroup_root(struct cgroup_root *root,
1597
			     struct cgroup_sb_opts *opts)
1598
{
1599
	struct cgroup *cgrp = &root->cgrp;
1600

1601
	INIT_LIST_HEAD(&root->root_list);
1602
	atomic_set(&root->nr_cgrps, 1);
1603
	cgrp->root = root;
1604
	init_cgroup_housekeeping(cgrp);
1605
	idr_init(&root->cgroup_idr);
1606 1607 1608 1609 1610 1611

	root->flags = opts->flags;
	if (opts->release_agent)
		strcpy(root->release_agent_path, opts->release_agent);
	if (opts->name)
		strcpy(root->name, opts->name);
1612
	if (opts->cpuset_clone_children)
1613
		set_bit(CGRP_CPUSET_CLONE_CHILDREN, &root->cgrp.flags);
1614 1615
}

1616
static int cgroup_setup_root(struct cgroup_root *root, unsigned int ss_mask)
1617
{
1618
	LIST_HEAD(tmp_links);
1619
	struct cgroup *root_cgrp = &root->cgrp;
1620
	struct cftype *base_files;
1621 1622
	struct css_set *cset;
	int i, ret;
1623

1624
	lockdep_assert_held(&cgroup_mutex);
1625

1626
	ret = cgroup_idr_alloc(&root->cgroup_idr, root_cgrp, 1, 2, GFP_NOWAIT);
1627
	if (ret < 0)
T
Tejun Heo 已提交
1628
		goto out;
1629
	root_cgrp->id = ret;
1630

1631 1632 1633 1634
	ret = percpu_ref_init(&root_cgrp->self.refcnt, css_release);
	if (ret)
		goto out;

1635
	/*
1636
	 * We're accessing css_set_count without locking css_set_rwsem here,
1637 1638 1639 1640 1641 1642
	 * but that's OK - it can only be increased by someone holding
	 * cgroup_lock, and that's us. The worst that can happen is that we
	 * have some link structures left over
	 */
	ret = allocate_cgrp_cset_links(css_set_count, &tmp_links);
	if (ret)
1643
		goto cancel_ref;
1644

1645
	ret = cgroup_init_root_id(root);
1646
	if (ret)
1647
		goto cancel_ref;
1648

T
Tejun Heo 已提交
1649 1650 1651 1652 1653 1654 1655 1656
	root->kf_root = kernfs_create_root(&cgroup_kf_syscall_ops,
					   KERNFS_ROOT_CREATE_DEACTIVATED,
					   root_cgrp);
	if (IS_ERR(root->kf_root)) {
		ret = PTR_ERR(root->kf_root);
		goto exit_root_id;
	}
	root_cgrp->kn = root->kf_root->kn;
1657

1658 1659 1660 1661 1662 1663
	if (root == &cgrp_dfl_root)
		base_files = cgroup_dfl_base_files;
	else
		base_files = cgroup_legacy_base_files;

	ret = cgroup_addrm_files(root_cgrp, base_files, true);
1664
	if (ret)
T
Tejun Heo 已提交
1665
		goto destroy_root;
1666

1667
	ret = rebind_subsystems(root, ss_mask);
1668
	if (ret)
T
Tejun Heo 已提交
1669
		goto destroy_root;
1670

1671 1672 1673 1674 1675 1676 1677
	/*
	 * There must be no failure case after here, since rebinding takes
	 * care of subsystems' refcounts, which are explicitly dropped in
	 * the failure exit path.
	 */
	list_add(&root->root_list, &cgroup_roots);
	cgroup_root_count++;
A
Al Viro 已提交
1678

1679
	/*
1680
	 * Link the root cgroup in this hierarchy into all the css_set
1681 1682
	 * objects.
	 */
1683
	down_write(&css_set_rwsem);
1684 1685
	hash_for_each(css_set_table, i, cset, hlist)
		link_css_set(&tmp_links, cset, root_cgrp);
1686
	up_write(&css_set_rwsem);
1687

1688
	BUG_ON(!list_empty(&root_cgrp->self.children));
1689
	BUG_ON(atomic_read(&root->nr_cgrps) != 1);

	kernfs_activate(root_cgrp->kn);
	ret = 0;
	goto out;

destroy_root:
	kernfs_destroy_root(root->kf_root);
	root->kf_root = NULL;
exit_root_id:
	cgroup_exit_root_id(root);
cancel_ref:
	percpu_ref_exit(&root_cgrp->self.refcnt);
out:
	free_cgrp_cset_links(&tmp_links);
	return ret;
}

static struct dentry *cgroup_mount(struct file_system_type *fs_type,
			 int flags, const char *unused_dev_name,
			 void *data)
{
	struct super_block *pinned_sb = NULL;
	struct cgroup_subsys *ss;
	struct cgroup_root *root;
	struct cgroup_sb_opts opts;
	struct dentry *dentry;
	int ret;
	int i;
	bool new_sb;

	/*
	 * The first time anyone tries to mount a cgroup, enable the list
	 * linking each css_set to its tasks and fix up all existing tasks.
	 */
	if (!use_task_css_set_links)
		cgroup_enable_task_cg_lists();

	mutex_lock(&cgroup_mutex);

	/* First find the desired set of subsystems */
	ret = parse_cgroupfs_options(data, &opts);
	if (ret)
		goto out_unlock;

	/* look for a matching existing root */
	if (opts.flags & CGRP_ROOT_SANE_BEHAVIOR) {
		cgrp_dfl_root_visible = true;
		root = &cgrp_dfl_root;
		cgroup_get(&root->cgrp);
		ret = 0;
		goto out_unlock;
	}

	/*
	 * Destruction of cgroup root is asynchronous, so subsystems may
	 * still be dying after the previous unmount.  Let's drain the
	 * dying subsystems.  We just need to ensure that the ones
	 * unmounted previously finish dying and don't care about new ones
	 * starting.  Testing ref liveness is good enough.
	 */
	for_each_subsys(ss, i) {
		if (!(opts.subsys_mask & (1 << i)) ||
		    ss->root == &cgrp_dfl_root)
			continue;

		if (!percpu_ref_tryget_live(&ss->root->cgrp.self.refcnt)) {
			mutex_unlock(&cgroup_mutex);
			msleep(10);
			ret = restart_syscall();
			goto out_free;
		}
		cgroup_put(&ss->root->cgrp);
	}

	for_each_root(root) {
		bool name_match = false;

		if (root == &cgrp_dfl_root)
			continue;

		/*
		 * If we asked for a name then it must match.  Also, if
		 * name matches but subsys_mask doesn't, we should fail.
		 * Remember whether name matched.
		 */
		if (opts.name) {
			if (strcmp(opts.name, root->name))
				continue;
			name_match = true;
		}

		/*
		 * If we asked for subsystems (or explicitly for no
		 * subsystems) then they must match.
		 */
		if ((opts.subsys_mask || opts.none) &&
		    (opts.subsys_mask != root->subsys_mask)) {
			if (!name_match)
				continue;
			ret = -EBUSY;
			goto out_unlock;
		}

		if (root->flags ^ opts.flags)
			pr_warn("new mount options do not match the existing superblock, will be ignored\n");

		/*
		 * We want to reuse @root whose lifetime is governed by its
		 * ->cgrp.  Let's check whether @root is alive and keep it
		 * that way.  As cgroup_kill_sb() can happen anytime, we
		 * want to block it by pinning the sb so that @root doesn't
		 * get killed before mount is complete.
		 *
		 * With the sb pinned, tryget_live can reliably indicate
		 * whether @root can be reused.  If it's being killed,
		 * drain it.  We can use wait_queue for the wait but this
		 * path is super cold.  Let's just sleep a bit and retry.
		 */
		pinned_sb = kernfs_pin_sb(root->kf_root, NULL);
		if (IS_ERR(pinned_sb) ||
		    !percpu_ref_tryget_live(&root->cgrp.self.refcnt)) {
			mutex_unlock(&cgroup_mutex);
			if (!IS_ERR_OR_NULL(pinned_sb))
				deactivate_super(pinned_sb);
			msleep(10);
			ret = restart_syscall();
			goto out_free;
		}

		ret = 0;
		goto out_unlock;
	}

	/*
	 * No such thing, create a new one.  name= matching without subsys
	 * specification is allowed for already existing hierarchies but we
	 * can't create a new one without subsys specification.
	 */
	if (!opts.subsys_mask && !opts.none) {
		ret = -EINVAL;
		goto out_unlock;
	}

	root = kzalloc(sizeof(*root), GFP_KERNEL);
	if (!root) {
		ret = -ENOMEM;
		goto out_unlock;
	}

	init_cgroup_root(root, &opts);

	ret = cgroup_setup_root(root, opts.subsys_mask);
	if (ret)
		cgroup_free_root(root);

out_unlock:
	mutex_unlock(&cgroup_mutex);
out_free:
	kfree(opts.release_agent);
	kfree(opts.name);

	if (ret)
		return ERR_PTR(ret);

	dentry = kernfs_mount(fs_type, flags, root->kf_root,
				CGROUP_SUPER_MAGIC, &new_sb);
	if (IS_ERR(dentry) || !new_sb)
		cgroup_put(&root->cgrp);

	/*
	 * If @pinned_sb, we're reusing an existing root and holding an
	 * extra ref on its sb.  Mount is complete.  Put the extra ref.
	 */
	if (pinned_sb) {
		WARN_ON(new_sb);
		deactivate_super(pinned_sb);
	}

	return dentry;
}

static void cgroup_kill_sb(struct super_block *sb)
{
	struct kernfs_root *kf_root = kernfs_root_from_sb(sb);
	struct cgroup_root *root = cgroup_root_from_kf(kf_root);

	/*
	 * If @root doesn't have any mounts or children, start killing it.
	 * This prevents new mounts by disabling percpu_ref_tryget_live().
	 * cgroup_mount() may wait for @root's release.
	 *
	 * And don't kill the default root.
	 */
	if (css_has_online_children(&root->cgrp.self) ||
	    root == &cgrp_dfl_root)
		cgroup_put(&root->cgrp);
	else
		percpu_ref_kill(&root->cgrp.self.refcnt);

	kernfs_kill_sb(sb);
}

static struct file_system_type cgroup_fs_type = {
	.name = "cgroup",
	.mount = cgroup_mount,
	.kill_sb = cgroup_kill_sb,
};

static struct kobject *cgroup_kobj;

/**
 * task_cgroup_path - cgroup path of a task in the first cgroup hierarchy
 * @task: target task
 * @buf: the buffer to write the path into
 * @buflen: the length of the buffer
 *
 * Determine @task's cgroup on the first (the one with the lowest non-zero
 * hierarchy_id) cgroup hierarchy and copy its path into @buf.  This
 * function grabs cgroup_mutex and shouldn't be used inside locks used by
 * cgroup controller callbacks.
 *
 * Return value is the same as kernfs_path().
 */
char *task_cgroup_path(struct task_struct *task, char *buf, size_t buflen)
{
	struct cgroup_root *root;
	struct cgroup *cgrp;
	int hierarchy_id = 1;
	char *path = NULL;

	mutex_lock(&cgroup_mutex);
	down_read(&css_set_rwsem);

	root = idr_get_next(&cgroup_hierarchy_idr, &hierarchy_id);

	if (root) {
		cgrp = task_cgroup_from_root(task, root);
		path = cgroup_path(cgrp, buf, buflen);
	} else {
		/* if no hierarchy exists, everyone is in "/" */
		if (strlcpy(buf, "/", buflen) < buflen)
			path = buf;
	}

	up_read(&css_set_rwsem);
	mutex_unlock(&cgroup_mutex);
	return path;
}
EXPORT_SYMBOL_GPL(task_cgroup_path);
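
/*
 * Usage sketch (illustrative only; the caller owns @buf):
 *
 *	char buf[PATH_MAX];
 *
 *	if (task_cgroup_path(current, buf, sizeof(buf)))
 *		pr_info("first-hierarchy cgroup: %s\n", buf);
 */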

/* used to track tasks and other necessary states during migration */
struct cgroup_taskset {
	/* the src and dst cset list running through cset->mg_node */
	struct list_head	src_csets;
	struct list_head	dst_csets;

	/*
	 * Fields for cgroup_taskset_*() iteration.
	 *
	 * Before migration is committed, the target migration tasks are on
	 * ->mg_tasks of the csets on ->src_csets.  After, on ->mg_tasks of
	 * the csets on ->dst_csets.  ->csets point to either ->src_csets
	 * or ->dst_csets depending on whether migration is committed.
	 *
	 * ->cur_csets and ->cur_task point to the current task position
	 * during iteration.
	 */
	struct list_head	*csets;
	struct css_set		*cur_cset;
	struct task_struct	*cur_task;
};

/**
 * cgroup_taskset_first - reset taskset and return the first task
 * @tset: taskset of interest
 *
 * @tset iteration is initialized and the first task is returned.
 */
struct task_struct *cgroup_taskset_first(struct cgroup_taskset *tset)
{
	tset->cur_cset = list_first_entry(tset->csets, struct css_set, mg_node);
	tset->cur_task = NULL;

	return cgroup_taskset_next(tset);
}

/**
 * cgroup_taskset_next - iterate to the next task in taskset
 * @tset: taskset of interest
 *
 * Return the next task in @tset.  Iteration must have been initialized
 * with cgroup_taskset_first().
 */
struct task_struct *cgroup_taskset_next(struct cgroup_taskset *tset)
{
	struct css_set *cset = tset->cur_cset;
	struct task_struct *task = tset->cur_task;

	while (&cset->mg_node != tset->csets) {
		if (!task)
			task = list_first_entry(&cset->mg_tasks,
						struct task_struct, cg_list);
		else
			task = list_next_entry(task, cg_list);

		if (&task->cg_list != &cset->mg_tasks) {
			tset->cur_cset = cset;
			tset->cur_task = task;
			return task;
		}

		cset = list_next_entry(cset, mg_node);
		task = NULL;
	}

	return NULL;
}
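
/*
 * Controllers normally walk a taskset with the cgroup_taskset_for_each()
 * helper from linux/cgroup.h rather than calling the two iterators
 * directly.  Sketch of a ->can_attach() callback (my_task_allowed() is a
 * hypothetical predicate):
 *
 *	struct task_struct *task;
 *
 *	cgroup_taskset_for_each(task, tset)
 *		if (!my_task_allowed(task))
 *			return -EINVAL;
 *	return 0;
 */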

/**
 * cgroup_task_migrate - move a task from one cgroup to another.
 * @old_cgrp: the cgroup @tsk is being migrated from
 * @tsk: the task being migrated
 * @new_cset: the new css_set @tsk is being attached to
 *
 * Must be called with cgroup_mutex, threadgroup and css_set_rwsem locked.
 */
static void cgroup_task_migrate(struct cgroup *old_cgrp,
				struct task_struct *tsk,
				struct css_set *new_cset)
{
	struct css_set *old_cset;

	lockdep_assert_held(&cgroup_mutex);
	lockdep_assert_held(&css_set_rwsem);

	/*
	 * We are synchronized through threadgroup_lock() against PF_EXITING
	 * setting such that we can't race against cgroup_exit() changing the
	 * css_set to init_css_set and dropping the old one.
	 */
	WARN_ON_ONCE(tsk->flags & PF_EXITING);
	old_cset = task_css_set(tsk);

	get_css_set(new_cset);
	rcu_assign_pointer(tsk->cgroups, new_cset);

	/*
	 * Use move_tail so that cgroup_taskset_first() still returns the
	 * leader after migration.  This works because cgroup_migrate()
	 * ensures that the dst_cset of the leader is the first on the
	 * tset's dst_csets list.
	 */
	list_move_tail(&tsk->cg_list, &new_cset->mg_tasks);

	/*
	 * We just gained a reference on old_cset by taking it from the
	 * task. As trading it for new_cset is protected by cgroup_mutex,
	 * we're safe to drop it here; it will be freed under RCU.
	 */
	set_bit(CGRP_RELEASABLE, &old_cgrp->flags);
	put_css_set_locked(old_cset, false);
}

/**
 * cgroup_migrate_finish - cleanup after attach
 * @preloaded_csets: list of preloaded css_sets
 *
 * Undo cgroup_migrate_add_src() and cgroup_migrate_prepare_dst().  See
 * those functions for details.
 */
static void cgroup_migrate_finish(struct list_head *preloaded_csets)
{
	struct css_set *cset, *tmp_cset;

	lockdep_assert_held(&cgroup_mutex);

	down_write(&css_set_rwsem);
	list_for_each_entry_safe(cset, tmp_cset, preloaded_csets, mg_preload_node) {
		cset->mg_src_cgrp = NULL;
		cset->mg_dst_cset = NULL;
		list_del_init(&cset->mg_preload_node);
		put_css_set_locked(cset, false);
	}
	up_write(&css_set_rwsem);
}

/**
 * cgroup_migrate_add_src - add a migration source css_set
 * @src_cset: the source css_set to add
 * @dst_cgrp: the destination cgroup
 * @preloaded_csets: list of preloaded css_sets
 *
 * Tasks belonging to @src_cset are about to be migrated to @dst_cgrp.  Pin
 * @src_cset and add it to @preloaded_csets, which should later be cleaned
 * up by cgroup_migrate_finish().
 *
 * This function may be called without holding threadgroup_lock even if the
 * target is a process.  Threads may be created and destroyed but as long
 * as cgroup_mutex is not dropped, no new css_set can be put into play and
 * the preloaded css_sets are guaranteed to cover all migrations.
 */
static void cgroup_migrate_add_src(struct css_set *src_cset,
				   struct cgroup *dst_cgrp,
				   struct list_head *preloaded_csets)
{
	struct cgroup *src_cgrp;

	lockdep_assert_held(&cgroup_mutex);
	lockdep_assert_held(&css_set_rwsem);

	src_cgrp = cset_cgroup_from_root(src_cset, dst_cgrp->root);

	if (!list_empty(&src_cset->mg_preload_node))
		return;

	WARN_ON(src_cset->mg_src_cgrp);
	WARN_ON(!list_empty(&src_cset->mg_tasks));
	WARN_ON(!list_empty(&src_cset->mg_node));

	src_cset->mg_src_cgrp = src_cgrp;
	get_css_set(src_cset);
	list_add(&src_cset->mg_preload_node, preloaded_csets);
}

/**
 * cgroup_migrate_prepare_dst - prepare destination css_sets for migration
 * @dst_cgrp: the destination cgroup (may be %NULL)
 * @preloaded_csets: list of preloaded source css_sets
 *
 * Tasks are about to be moved to @dst_cgrp and all the source css_sets
 * have been preloaded to @preloaded_csets.  This function looks up and
 * pins all destination css_sets, links each to its source, and appends them
 * to @preloaded_csets.  If @dst_cgrp is %NULL, the destination of each
 * source css_set is assumed to be its cgroup on the default hierarchy.
 *
 * This function must be called after cgroup_migrate_add_src() has been
 * called on each migration source css_set.  After migration is performed
 * using cgroup_migrate(), cgroup_migrate_finish() must be called on
 * @preloaded_csets.
 */
static int cgroup_migrate_prepare_dst(struct cgroup *dst_cgrp,
				      struct list_head *preloaded_csets)
{
	LIST_HEAD(csets);
	struct css_set *src_cset, *tmp_cset;

	lockdep_assert_held(&cgroup_mutex);

	/*
	 * Except for the root, child_subsys_mask must be zero for a cgroup
	 * with tasks so that child cgroups don't compete against tasks.
	 */
	if (dst_cgrp && cgroup_on_dfl(dst_cgrp) && cgroup_parent(dst_cgrp) &&
	    dst_cgrp->child_subsys_mask)
		return -EBUSY;

	/* look up the dst cset for each src cset and link it to src */
	list_for_each_entry_safe(src_cset, tmp_cset, preloaded_csets, mg_preload_node) {
		struct css_set *dst_cset;

		dst_cset = find_css_set(src_cset,
					dst_cgrp ?: src_cset->dfl_cgrp);
		if (!dst_cset)
			goto err;

		WARN_ON_ONCE(src_cset->mg_dst_cset || dst_cset->mg_dst_cset);

		/*
		 * If src cset equals dst, it's noop.  Drop the src.
		 * cgroup_migrate() will skip the cset too.  Note that we
		 * can't handle src == dst as some nodes are used by both.
		 */
		if (src_cset == dst_cset) {
			src_cset->mg_src_cgrp = NULL;
			list_del_init(&src_cset->mg_preload_node);
			put_css_set(src_cset, false);
			put_css_set(dst_cset, false);
			continue;
		}

		src_cset->mg_dst_cset = dst_cset;

		if (list_empty(&dst_cset->mg_preload_node))
			list_add(&dst_cset->mg_preload_node, &csets);
		else
			put_css_set(dst_cset, false);
	}

	list_splice_tail(&csets, preloaded_csets);
	return 0;
err:
	cgroup_migrate_finish(&csets);
	return -ENOMEM;
}

/**
 * cgroup_migrate - migrate a process or task to a cgroup
 * @cgrp: the destination cgroup
 * @leader: the leader of the process or the task to migrate
 * @threadgroup: whether @leader points to the whole process or a single task
 *
 * Migrate a process or task denoted by @leader to @cgrp.  If migrating a
 * process, the caller must be holding threadgroup_lock of @leader.  The
 * caller is also responsible for invoking cgroup_migrate_add_src() and
 * cgroup_migrate_prepare_dst() on the targets before invoking this
 * function and following up with cgroup_migrate_finish().
 *
 * As long as a controller's ->can_attach() doesn't fail, this function is
 * guaranteed to succeed.  This means that, excluding ->can_attach()
 * failure, when migrating multiple targets, the success or failure can be
 * decided for all targets by invoking cgroup_migrate_prepare_dst() before
 * actually starting migrating.
 */
static int cgroup_migrate(struct cgroup *cgrp, struct task_struct *leader,
			  bool threadgroup)
{
	struct cgroup_taskset tset = {
		.src_csets	= LIST_HEAD_INIT(tset.src_csets),
		.dst_csets	= LIST_HEAD_INIT(tset.dst_csets),
		.csets		= &tset.src_csets,
	};
	struct cgroup_subsys_state *css, *failed_css = NULL;
	struct css_set *cset, *tmp_cset;
	struct task_struct *task, *tmp_task;
	int i, ret;

	/*
	 * Prevent freeing of tasks while we take a snapshot. Tasks that are
	 * already PF_EXITING could be freed from underneath us unless we
	 * take an rcu_read_lock.
	 */
	down_write(&css_set_rwsem);
	rcu_read_lock();
	task = leader;
	do {
		/* @task either already exited or can't exit until the end */
		if (task->flags & PF_EXITING)
			goto next;

		/* leave @task alone if post_fork() hasn't linked it yet */
		if (list_empty(&task->cg_list))
			goto next;

		cset = task_css_set(task);
		if (!cset->mg_src_cgrp)
			goto next;

		/*
		 * cgroup_taskset_first() must always return the leader.
		 * Take care to avoid disturbing the ordering.
		 */
		list_move_tail(&task->cg_list, &cset->mg_tasks);
		if (list_empty(&cset->mg_node))
			list_add_tail(&cset->mg_node, &tset.src_csets);
		if (list_empty(&cset->mg_dst_cset->mg_node))
			list_move_tail(&cset->mg_dst_cset->mg_node,
				       &tset.dst_csets);
	next:
		if (!threadgroup)
			break;
	} while_each_thread(leader, task);
	rcu_read_unlock();
	up_write(&css_set_rwsem);

	/* methods shouldn't be called if no task is actually migrating */
	if (list_empty(&tset.src_csets))
		return 0;

	/* check that we can legitimately attach to the cgroup */
	for_each_e_css(css, i, cgrp) {
		if (css->ss->can_attach) {
			ret = css->ss->can_attach(css, &tset);
			if (ret) {
				failed_css = css;
				goto out_cancel_attach;
			}
		}
	}

	/*
	 * Now that we're guaranteed success, proceed to move all tasks to
	 * the new cgroup.  There are no failure cases after here, so this
	 * is the commit point.
	 */
	down_write(&css_set_rwsem);
	list_for_each_entry(cset, &tset.src_csets, mg_node) {
		list_for_each_entry_safe(task, tmp_task, &cset->mg_tasks, cg_list)
			cgroup_task_migrate(cset->mg_src_cgrp, task,
					    cset->mg_dst_cset);
	}
	up_write(&css_set_rwsem);

	/*
	 * Migration is committed, all target tasks are now on dst_csets.
	 * Nothing is sensitive to fork() after this point.  Notify
	 * controllers that migration is complete.
	 */
	tset.csets = &tset.dst_csets;

	for_each_e_css(css, i, cgrp)
		if (css->ss->attach)
			css->ss->attach(css, &tset);

	ret = 0;
	goto out_release_tset;

out_cancel_attach:
	for_each_e_css(css, i, cgrp) {
		if (css == failed_css)
			break;
		if (css->ss->cancel_attach)
			css->ss->cancel_attach(css, &tset);
	}
out_release_tset:
	down_write(&css_set_rwsem);
	list_splice_init(&tset.dst_csets, &tset.src_csets);
	list_for_each_entry_safe(cset, tmp_cset, &tset.src_csets, mg_node) {
		list_splice_tail_init(&cset->mg_tasks, &cset->tasks);
		list_del_init(&cset->mg_node);
	}
	up_write(&css_set_rwsem);
	return ret;
}

/**
 * cgroup_attach_task - attach a task or a whole threadgroup to a cgroup
 * @dst_cgrp: the cgroup to attach to
 * @leader: the task or the leader of the threadgroup to be attached
 * @threadgroup: attach the whole threadgroup?
 *
 * Call holding cgroup_mutex and threadgroup_lock of @leader.
 */
static int cgroup_attach_task(struct cgroup *dst_cgrp,
			      struct task_struct *leader, bool threadgroup)
{
	LIST_HEAD(preloaded_csets);
	struct task_struct *task;
	int ret;

	/* look up all src csets */
	down_read(&css_set_rwsem);
	rcu_read_lock();
	task = leader;
	do {
		cgroup_migrate_add_src(task_css_set(task), dst_cgrp,
				       &preloaded_csets);
		if (!threadgroup)
			break;
	} while_each_thread(leader, task);
	rcu_read_unlock();
	up_read(&css_set_rwsem);

	/* prepare dst csets and commit */
	ret = cgroup_migrate_prepare_dst(dst_cgrp, &preloaded_csets);
	if (!ret)
		ret = cgroup_migrate(dst_cgrp, leader, threadgroup);

	cgroup_migrate_finish(&preloaded_csets);
	return ret;
}

/*
 * Find the task_struct of the task to attach by vpid and pass it along to the
 * function to attach either it or all tasks in its threadgroup. Will lock
 * cgroup_mutex and threadgroup.
 */
static ssize_t __cgroup_procs_write(struct kernfs_open_file *of, char *buf,
				    size_t nbytes, loff_t off, bool threadgroup)
{
	struct task_struct *tsk;
	const struct cred *cred = current_cred(), *tcred;
	struct cgroup *cgrp;
	pid_t pid;
	int ret;

	if (kstrtoint(strstrip(buf), 0, &pid) || pid < 0)
		return -EINVAL;

	cgrp = cgroup_kn_lock_live(of->kn);
	if (!cgrp)
		return -ENODEV;

retry_find_task:
	rcu_read_lock();
	if (pid) {
		tsk = find_task_by_vpid(pid);
		if (!tsk) {
			rcu_read_unlock();
			ret = -ESRCH;
			goto out_unlock_cgroup;
		}
		/*
		 * even if we're attaching all tasks in the thread group, we
		 * only need to check permissions on one of them.
		 */
		tcred = __task_cred(tsk);
		if (!uid_eq(cred->euid, GLOBAL_ROOT_UID) &&
		    !uid_eq(cred->euid, tcred->uid) &&
		    !uid_eq(cred->euid, tcred->suid)) {
			rcu_read_unlock();
			ret = -EACCES;
			goto out_unlock_cgroup;
		}
	} else
		tsk = current;

	if (threadgroup)
		tsk = tsk->group_leader;

	/*
	 * Workqueue threads may acquire PF_NO_SETAFFINITY and become
	 * trapped in a cpuset, or an RT worker may be born in a cgroup
	 * with no rt_runtime allocated.  Just say no.
	 */
	if (tsk == kthreadd_task || (tsk->flags & PF_NO_SETAFFINITY)) {
		ret = -EINVAL;
		rcu_read_unlock();
		goto out_unlock_cgroup;
	}

	get_task_struct(tsk);
	rcu_read_unlock();

	threadgroup_lock(tsk);
	if (threadgroup) {
		if (!thread_group_leader(tsk)) {
			/*
			 * a race with de_thread from another thread's exec()
			 * may strip us of our leadership, if this happens,
			 * there is no choice but to throw this task away and
			 * try again; this is
			 * "double-double-toil-and-trouble-check locking".
			 */
			threadgroup_unlock(tsk);
			put_task_struct(tsk);
			goto retry_find_task;
		}
	}

	ret = cgroup_attach_task(cgrp, tsk, threadgroup);

	threadgroup_unlock(tsk);

	put_task_struct(tsk);
out_unlock_cgroup:
	cgroup_kn_unlock(of->kn);
	return ret ?: nbytes;
}

/**
 * cgroup_attach_task_all - attach task 'tsk' to all cgroups of task 'from'
 * @from: attach to all cgroups of a given task
 * @tsk: the task to be attached
 */
int cgroup_attach_task_all(struct task_struct *from, struct task_struct *tsk)
{
	struct cgroup_root *root;
	int retval = 0;

	mutex_lock(&cgroup_mutex);
	for_each_root(root) {
		struct cgroup *from_cgrp;

		if (root == &cgrp_dfl_root)
			continue;

		down_read(&css_set_rwsem);
		from_cgrp = task_cgroup_from_root(from, root);
		up_read(&css_set_rwsem);

		retval = cgroup_attach_task(from_cgrp, tsk, false);
		if (retval)
			break;
	}
	mutex_unlock(&cgroup_mutex);

	return retval;
}
EXPORT_SYMBOL_GPL(cgroup_attach_task_all);
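
/*
 * Usage sketch (hypothetical caller): make a freshly created helper
 * thread follow an existing worker into all of its hierarchies:
 *
 *	ret = cgroup_attach_task_all(worker, helper);
 *	if (ret)
 *		pr_warn("failed to inherit cgroups: %d\n", ret);
 */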

static ssize_t cgroup_tasks_write(struct kernfs_open_file *of,
				  char *buf, size_t nbytes, loff_t off)
{
	return __cgroup_procs_write(of, buf, nbytes, off, false);
}

static ssize_t cgroup_procs_write(struct kernfs_open_file *of,
				  char *buf, size_t nbytes, loff_t off)
{
	return __cgroup_procs_write(of, buf, nbytes, off, true);
}
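
/*
 * From userspace these two entry points are driven through the usual
 * interface files (paths illustrative):
 *
 *	echo $$ > /sys/fs/cgroup/<hier>/<grp>/cgroup.procs	(whole process)
 *	echo $TID > /sys/fs/cgroup/<hier>/<grp>/tasks		(single thread)
 */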

static ssize_t cgroup_release_agent_write(struct kernfs_open_file *of,
					  char *buf, size_t nbytes, loff_t off)
{
	struct cgroup *cgrp;

	BUILD_BUG_ON(sizeof(cgrp->root->release_agent_path) < PATH_MAX);

	cgrp = cgroup_kn_lock_live(of->kn);
	if (!cgrp)
		return -ENODEV;
	spin_lock(&release_agent_path_lock);
	strlcpy(cgrp->root->release_agent_path, strstrip(buf),
		sizeof(cgrp->root->release_agent_path));
	spin_unlock(&release_agent_path_lock);
	cgroup_kn_unlock(of->kn);
	return nbytes;
}

static int cgroup_release_agent_show(struct seq_file *seq, void *v)
{
	struct cgroup *cgrp = seq_css(seq)->cgroup;

	spin_lock(&release_agent_path_lock);
	seq_puts(seq, cgrp->root->release_agent_path);
	spin_unlock(&release_agent_path_lock);
	seq_putc(seq, '\n');
	return 0;
}
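
/*
 * Userspace view of the two handlers above (illustrative):
 *
 *	echo /sbin/my_release_agent > /sys/fs/cgroup/<hier>/release_agent
 *	cat /sys/fs/cgroup/<hier>/release_agent
 */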

static int cgroup_sane_behavior_show(struct seq_file *seq, void *v)
{
	seq_puts(seq, "0\n");
	return 0;
}

static void cgroup_print_ss_mask(struct seq_file *seq, unsigned int ss_mask)
{
	struct cgroup_subsys *ss;
	bool printed = false;
	int ssid;

	for_each_subsys(ss, ssid) {
		if (ss_mask & (1 << ssid)) {
			if (printed)
				seq_putc(seq, ' ');
			seq_printf(seq, "%s", ss->name);
			printed = true;
		}
	}
	if (printed)
		seq_putc(seq, '\n');
}

/* show controllers which are currently attached to the default hierarchy */
static int cgroup_root_controllers_show(struct seq_file *seq, void *v)
{
	struct cgroup *cgrp = seq_css(seq)->cgroup;

	cgroup_print_ss_mask(seq, cgrp->root->subsys_mask &
			     ~cgrp_dfl_root_inhibit_ss_mask);
	return 0;
}

/* show controllers which are enabled from the parent */
static int cgroup_controllers_show(struct seq_file *seq, void *v)
{
	struct cgroup *cgrp = seq_css(seq)->cgroup;

	cgroup_print_ss_mask(seq, cgroup_parent(cgrp)->subtree_control);
	return 0;
}

/* show controllers which are enabled for a given cgroup's children */
static int cgroup_subtree_control_show(struct seq_file *seq, void *v)
{
	struct cgroup *cgrp = seq_css(seq)->cgroup;

	cgroup_print_ss_mask(seq, cgrp->subtree_control);
	return 0;
}

/**
 * cgroup_update_dfl_csses - update css assoc of a subtree in default hierarchy
 * @cgrp: root of the subtree to update csses for
 *
 * @cgrp's child_subsys_mask has changed and its subtree's (self excluded)
 * css associations need to be updated accordingly.  This function looks up
 * all css_sets which are attached to the subtree, creates the matching
 * updated css_sets and migrates the tasks to the new ones.
 */
static int cgroup_update_dfl_csses(struct cgroup *cgrp)
{
	LIST_HEAD(preloaded_csets);
	struct cgroup_subsys_state *css;
	struct css_set *src_cset;
	int ret;

	lockdep_assert_held(&cgroup_mutex);

	/* look up all csses currently attached to @cgrp's subtree */
	down_read(&css_set_rwsem);
	css_for_each_descendant_pre(css, cgroup_css(cgrp, NULL)) {
		struct cgrp_cset_link *link;

		/* self is not affected by child_subsys_mask change */
		if (css->cgroup == cgrp)
			continue;

		list_for_each_entry(link, &css->cgroup->cset_links, cset_link)
			cgroup_migrate_add_src(link->cset, cgrp,
					       &preloaded_csets);
	}
	up_read(&css_set_rwsem);

	/* NULL dst indicates self on default hierarchy */
	ret = cgroup_migrate_prepare_dst(NULL, &preloaded_csets);
	if (ret)
		goto out_finish;

	list_for_each_entry(src_cset, &preloaded_csets, mg_preload_node) {
		struct task_struct *last_task = NULL, *task;

		/* src_csets precede dst_csets, break on the first dst_cset */
		if (!src_cset->mg_src_cgrp)
			break;

		/*
		 * All tasks in src_cset need to be migrated to the
		 * matching dst_cset.  Empty it process by process.  We
		 * walk tasks but migrate processes.  The leader might even
		 * belong to a different cset but such src_cset would also
		 * be among the target src_csets because the default
		 * hierarchy enforces per-process membership.
		 */
		while (true) {
			down_read(&css_set_rwsem);
			task = list_first_entry_or_null(&src_cset->tasks,
						struct task_struct, cg_list);
			if (task) {
				task = task->group_leader;
				WARN_ON_ONCE(!task_css_set(task)->mg_src_cgrp);
				get_task_struct(task);
			}
			up_read(&css_set_rwsem);

			if (!task)
				break;

			/* guard against possible infinite loop */
			if (WARN(last_task == task,
				 "cgroup: update_dfl_csses failed to make progress, aborting in inconsistent state\n"))
				goto out_finish;
			last_task = task;

			threadgroup_lock(task);
			/* raced against de_thread() from another thread? */
			if (!thread_group_leader(task)) {
				threadgroup_unlock(task);
				put_task_struct(task);
				continue;
			}

			ret = cgroup_migrate(src_cset->dfl_cgrp, task, true);

			threadgroup_unlock(task);
			put_task_struct(task);

			if (WARN(ret, "cgroup: failed to update controllers for the default hierarchy (%d), further operations may crash or hang\n", ret))
				goto out_finish;
		}
	}

out_finish:
	cgroup_migrate_finish(&preloaded_csets);
	return ret;
}

/* change the enabled child controllers for a cgroup in the default hierarchy */
static ssize_t cgroup_subtree_control_write(struct kernfs_open_file *of,
					    char *buf, size_t nbytes,
					    loff_t off)
{
	unsigned int enable = 0, disable = 0;
	unsigned int css_enable, css_disable, old_ctrl, new_ctrl;
	struct cgroup *cgrp, *child;
	struct cgroup_subsys *ss;
	char *tok;
	int ssid, ret;

	/*
	 * Parse input - space separated list of subsystem names prefixed
	 * with either + or -.
	 */
	buf = strstrip(buf);
	while ((tok = strsep(&buf, " "))) {
		if (tok[0] == '\0')
			continue;
		for_each_subsys(ss, ssid) {
			if (ss->disabled || strcmp(tok + 1, ss->name) ||
			    ((1 << ss->id) & cgrp_dfl_root_inhibit_ss_mask))
				continue;

			if (*tok == '+') {
				enable |= 1 << ssid;
				disable &= ~(1 << ssid);
			} else if (*tok == '-') {
				disable |= 1 << ssid;
				enable &= ~(1 << ssid);
			} else {
				return -EINVAL;
			}
			break;
		}
		if (ssid == CGROUP_SUBSYS_COUNT)
			return -EINVAL;
	}

	cgrp = cgroup_kn_lock_live(of->kn);
	if (!cgrp)
		return -ENODEV;

	for_each_subsys(ss, ssid) {
		if (enable & (1 << ssid)) {
			if (cgrp->subtree_control & (1 << ssid)) {
				enable &= ~(1 << ssid);
				continue;
			}

			/* unavailable or not enabled on the parent? */
			if (!(cgrp_dfl_root.subsys_mask & (1 << ssid)) ||
			    (cgroup_parent(cgrp) &&
			     !(cgroup_parent(cgrp)->subtree_control & (1 << ssid)))) {
				ret = -ENOENT;
				goto out_unlock;
			}

			/*
			 * @ss is already enabled through dependency and
			 * we'll just make it visible.  Skip draining.
			 */
			if (cgrp->child_subsys_mask & (1 << ssid))
				continue;

			/*
			 * Because css offlining is asynchronous, userland
			 * might try to re-enable the same controller while
			 * the previous instance is still around.  In such
			 * cases, wait till it's gone using offline_waitq.
			 */
			cgroup_for_each_live_child(child, cgrp) {
				DEFINE_WAIT(wait);

				if (!cgroup_css(child, ss))
					continue;

				cgroup_get(child);
				prepare_to_wait(&child->offline_waitq, &wait,
						TASK_UNINTERRUPTIBLE);
				cgroup_kn_unlock(of->kn);
				schedule();
				finish_wait(&child->offline_waitq, &wait);
				cgroup_put(child);

				return restart_syscall();
			}
		} else if (disable & (1 << ssid)) {
			if (!(cgrp->subtree_control & (1 << ssid))) {
				disable &= ~(1 << ssid);
				continue;
			}

			/* a child has it enabled? */
			cgroup_for_each_live_child(child, cgrp) {
				if (child->subtree_control & (1 << ssid)) {
					ret = -EBUSY;
					goto out_unlock;
				}
			}
		}
	}

	if (!enable && !disable) {
		ret = 0;
		goto out_unlock;
	}

	/*
	 * Except for the root, subtree_control must be zero for a cgroup
	 * with tasks so that child cgroups don't compete against tasks.
	 */
	if (enable && cgroup_parent(cgrp) && !list_empty(&cgrp->cset_links)) {
		ret = -EBUSY;
		goto out_unlock;
	}

	/*
	 * Update subsys masks and calculate what needs to be done.  More
	 * subsystems than specified may need to be enabled or disabled
	 * depending on subsystem dependencies.
	 */
	cgrp->subtree_control |= enable;
	cgrp->subtree_control &= ~disable;

	old_ctrl = cgrp->child_subsys_mask;
	cgroup_refresh_child_subsys_mask(cgrp);
	new_ctrl = cgrp->child_subsys_mask;

	css_enable = ~old_ctrl & new_ctrl;
	css_disable = old_ctrl & ~new_ctrl;
	enable |= css_enable;
	disable |= css_disable;

	/*
	 * Create new csses or make the existing ones visible.  A css is
	 * created invisible if it's being implicitly enabled through
	 * dependency.  An invisible css is made visible when the userland
	 * explicitly enables it.
	 */
	for_each_subsys(ss, ssid) {
		if (!(enable & (1 << ssid)))
			continue;

		cgroup_for_each_live_child(child, cgrp) {
			if (css_enable & (1 << ssid))
				ret = create_css(child, ss,
					cgrp->subtree_control & (1 << ssid));
			else
				ret = cgroup_populate_dir(child, 1 << ssid);
			if (ret)
				goto err_undo_css;
		}
	}

	/*
	 * At this point, cgroup_e_css() results reflect the new csses
	 * making the following cgroup_update_dfl_csses() properly update
	 * css associations of all tasks in the subtree.
	 */
	ret = cgroup_update_dfl_csses(cgrp);
	if (ret)
		goto err_undo_css;

	/*
	 * All tasks are migrated out of disabled csses.  Kill or hide
	 * them.  A css is hidden when the userland requests it to be
	 * disabled while other subsystems are still depending on it.  The
	 * css must not actively control resources and be in the vanilla
	 * state if it's made visible again later.  Controllers which may
	 * be depended upon should provide ->css_reset() for this purpose.
	 */
	for_each_subsys(ss, ssid) {
		if (!(disable & (1 << ssid)))
			continue;

		cgroup_for_each_live_child(child, cgrp) {
			struct cgroup_subsys_state *css = cgroup_css(child, ss);

			if (css_disable & (1 << ssid)) {
				kill_css(css);
			} else {
				cgroup_clear_dir(child, 1 << ssid);
				if (ss->css_reset)
					ss->css_reset(css);
			}
		}
	}

	kernfs_activate(cgrp->kn);
	ret = 0;
out_unlock:
	cgroup_kn_unlock(of->kn);
	return ret ?: nbytes;

err_undo_css:
	cgrp->subtree_control &= ~enable;
	cgrp->subtree_control |= disable;
	cgroup_refresh_child_subsys_mask(cgrp);

	for_each_subsys(ss, ssid) {
		if (!(enable & (1 << ssid)))
			continue;

		cgroup_for_each_live_child(child, cgrp) {
			struct cgroup_subsys_state *css = cgroup_css(child, ss);

			if (!css)
				continue;

			if (css_enable & (1 << ssid))
				kill_css(css);
			else
				cgroup_clear_dir(child, 1 << ssid);
		}
	}
	goto out_unlock;
}
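
/*
 * Userspace view of the above, on the default hierarchy (illustrative;
 * which controllers are available depends on the kernel configuration):
 *
 *	echo "+memory" > /sys/fs/cgroup/parent/cgroup.subtree_control
 *	echo "-memory" > /sys/fs/cgroup/parent/cgroup.subtree_control
 */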

static int cgroup_populated_show(struct seq_file *seq, void *v)
{
	seq_printf(seq, "%d\n", (bool)seq_css(seq)->cgroup->populated_cnt);
	return 0;
}

static ssize_t cgroup_file_write(struct kernfs_open_file *of, char *buf,
				 size_t nbytes, loff_t off)
{
	struct cgroup *cgrp = of->kn->parent->priv;
	struct cftype *cft = of->kn->priv;
	struct cgroup_subsys_state *css;
	int ret;

	if (cft->write)
		return cft->write(of, buf, nbytes, off);

	/*
	 * kernfs guarantees that a file isn't deleted with operations in
	 * flight, which means that the matching css is and stays alive and
	 * doesn't need to be pinned.  The RCU locking is not necessary
	 * either.  It's just for the convenience of using cgroup_css().
	 */
	rcu_read_lock();
	css = cgroup_css(cgrp, cft->ss);
	rcu_read_unlock();

	if (cft->write_u64) {
		unsigned long long v;
		ret = kstrtoull(buf, 0, &v);
		if (!ret)
			ret = cft->write_u64(css, cft, v);
	} else if (cft->write_s64) {
		long long v;
		ret = kstrtoll(buf, 0, &v);
		if (!ret)
			ret = cft->write_s64(css, cft, v);
	} else {
		ret = -EINVAL;
	}

	return ret ?: nbytes;
}

static void *cgroup_seqfile_start(struct seq_file *seq, loff_t *ppos)
{
	return seq_cft(seq)->seq_start(seq, ppos);
}

static void *cgroup_seqfile_next(struct seq_file *seq, void *v, loff_t *ppos)
{
	return seq_cft(seq)->seq_next(seq, v, ppos);
}

static void cgroup_seqfile_stop(struct seq_file *seq, void *v)
{
	seq_cft(seq)->seq_stop(seq, v);
}

static int cgroup_seqfile_show(struct seq_file *m, void *arg)
{
	struct cftype *cft = seq_cft(m);
	struct cgroup_subsys_state *css = seq_css(m);

	if (cft->seq_show)
		return cft->seq_show(m, arg);

	if (cft->read_u64)
		seq_printf(m, "%llu\n", cft->read_u64(css, cft));
	else if (cft->read_s64)
		seq_printf(m, "%lld\n", cft->read_s64(css, cft));
	else
		return -EINVAL;
	return 0;
}

static struct kernfs_ops cgroup_kf_single_ops = {
	.atomic_write_len	= PAGE_SIZE,
	.write			= cgroup_file_write,
	.seq_show		= cgroup_seqfile_show,
};

static struct kernfs_ops cgroup_kf_ops = {
	.atomic_write_len	= PAGE_SIZE,
	.write			= cgroup_file_write,
	.seq_start		= cgroup_seqfile_start,
	.seq_next		= cgroup_seqfile_next,
	.seq_stop		= cgroup_seqfile_stop,
	.seq_show		= cgroup_seqfile_show,
};

/*
 * cgroup_rename - Only allow simple rename of directories in place.
 */
static int cgroup_rename(struct kernfs_node *kn, struct kernfs_node *new_parent,
			 const char *new_name_str)
{
	struct cgroup *cgrp = kn->priv;
	int ret;

	if (kernfs_type(kn) != KERNFS_DIR)
		return -ENOTDIR;
	if (kn->parent != new_parent)
		return -EIO;

	/*
	 * This isn't a proper migration and its usefulness is very
	 * limited.  Disallow on the default hierarchy.
	 */
	if (cgroup_on_dfl(cgrp))
		return -EPERM;

	/*
	 * We're gonna grab cgroup_mutex which nests outside kernfs
	 * active_ref.  kernfs_rename() doesn't require active_ref
	 * protection.  Break them before grabbing cgroup_mutex.
	 */
	kernfs_break_active_protection(new_parent);
	kernfs_break_active_protection(kn);

	mutex_lock(&cgroup_mutex);

	ret = kernfs_rename(kn, new_parent, new_name_str);

	mutex_unlock(&cgroup_mutex);

	kernfs_unbreak_active_protection(kn);
	kernfs_unbreak_active_protection(new_parent);
	return ret;
}

/* set uid and gid of cgroup dirs and files to that of the creator */
static int cgroup_kn_set_ugid(struct kernfs_node *kn)
{
	struct iattr iattr = { .ia_valid = ATTR_UID | ATTR_GID,
			       .ia_uid = current_fsuid(),
			       .ia_gid = current_fsgid(), };

	if (uid_eq(iattr.ia_uid, GLOBAL_ROOT_UID) &&
	    gid_eq(iattr.ia_gid, GLOBAL_ROOT_GID))
		return 0;

	return kernfs_setattr(kn, &iattr);
}

static int cgroup_add_file(struct cgroup *cgrp, struct cftype *cft)
{
	char name[CGROUP_FILE_NAME_MAX];
	struct kernfs_node *kn;
	struct lock_class_key *key = NULL;
	int ret;

#ifdef CONFIG_DEBUG_LOCK_ALLOC
	key = &cft->lockdep_key;
#endif
	kn = __kernfs_create_file(cgrp->kn, cgroup_file_name(cgrp, cft, name),
				  cgroup_file_mode(cft), 0, cft->kf_ops, cft,
				  NULL, false, key);
	if (IS_ERR(kn))
		return PTR_ERR(kn);

	ret = cgroup_kn_set_ugid(kn);
	if (ret) {
		kernfs_remove(kn);
		return ret;
	}

	if (cft->seq_show == cgroup_populated_show)
		cgrp->populated_kn = kn;
	return 0;
}

/**
 * cgroup_addrm_files - add or remove files to a cgroup directory
 * @cgrp: the target cgroup
 * @cfts: array of cftypes to be added
 * @is_add: whether to add or remove
 *
 * Depending on @is_add, add or remove files defined by @cfts on @cgrp.
 * For removals, this function never fails.  If addition fails, this
 * function doesn't remove files already added.  The caller is responsible
 * for cleaning up.
 */
static int cgroup_addrm_files(struct cgroup *cgrp, struct cftype cfts[],
			      bool is_add)
{
	struct cftype *cft;
	int ret;

	lockdep_assert_held(&cgroup_mutex);

	for (cft = cfts; cft->name[0] != '\0'; cft++) {
		/* does cft->flags tell us to skip this file on @cgrp? */
		if ((cft->flags & __CFTYPE_ONLY_ON_DFL) && !cgroup_on_dfl(cgrp))
			continue;
		if ((cft->flags & __CFTYPE_NOT_ON_DFL) && cgroup_on_dfl(cgrp))
			continue;
		if ((cft->flags & CFTYPE_NOT_ON_ROOT) && !cgroup_parent(cgrp))
			continue;
		if ((cft->flags & CFTYPE_ONLY_ON_ROOT) && cgroup_parent(cgrp))
			continue;

		if (is_add) {
			ret = cgroup_add_file(cgrp, cft);
			if (ret) {
				pr_warn("%s: failed to add %s, err=%d\n",
					__func__, cft->name, ret);
				return ret;
			}
		} else {
			cgroup_rm_file(cgrp, cft);
		}
	}
	return 0;
}

static int cgroup_apply_cftypes(struct cftype *cfts, bool is_add)
{
	LIST_HEAD(pending);
	struct cgroup_subsys *ss = cfts[0].ss;
	struct cgroup *root = &ss->root->cgrp;
	struct cgroup_subsys_state *css;
	int ret = 0;

	lockdep_assert_held(&cgroup_mutex);

	/* add/rm files for all cgroups created before */
	css_for_each_descendant_pre(css, cgroup_css(root, ss)) {
		struct cgroup *cgrp = css->cgroup;

		if (cgroup_is_dead(cgrp))
			continue;

		ret = cgroup_addrm_files(cgrp, cfts, is_add);
		if (ret)
			break;
	}

	if (is_add && !ret)
		kernfs_activate(root->kn);
	return ret;
}

static void cgroup_exit_cftypes(struct cftype *cfts)
{
	struct cftype *cft;

	for (cft = cfts; cft->name[0] != '\0'; cft++) {
		/* free copy for custom atomic_write_len, see init_cftypes() */
		if (cft->max_write_len && cft->max_write_len != PAGE_SIZE)
			kfree(cft->kf_ops);
		cft->kf_ops = NULL;
		cft->ss = NULL;

		/* revert flags set by cgroup core while adding @cfts */
		cft->flags &= ~(__CFTYPE_ONLY_ON_DFL | __CFTYPE_NOT_ON_DFL);
	}
}

static int cgroup_init_cftypes(struct cgroup_subsys *ss, struct cftype *cfts)
{
	struct cftype *cft;

	for (cft = cfts; cft->name[0] != '\0'; cft++) {
		struct kernfs_ops *kf_ops;

		WARN_ON(cft->ss || cft->kf_ops);

		if (cft->seq_start)
			kf_ops = &cgroup_kf_ops;
		else
			kf_ops = &cgroup_kf_single_ops;

		/*
		 * Ugh... if @cft wants a custom max_write_len, we need to
		 * make a copy of kf_ops to set its atomic_write_len.
		 */
		if (cft->max_write_len && cft->max_write_len != PAGE_SIZE) {
			kf_ops = kmemdup(kf_ops, sizeof(*kf_ops), GFP_KERNEL);
			if (!kf_ops) {
				cgroup_exit_cftypes(cfts);
				return -ENOMEM;
			}
			kf_ops->atomic_write_len = cft->max_write_len;
		}

		cft->kf_ops = kf_ops;
		cft->ss = ss;
	}

	return 0;
}

static int cgroup_rm_cftypes_locked(struct cftype *cfts)
{
	lockdep_assert_held(&cgroup_mutex);

	if (!cfts || !cfts[0].ss)
		return -ENOENT;

	list_del(&cfts->node);
	cgroup_apply_cftypes(cfts, false);
	cgroup_exit_cftypes(cfts);
	return 0;
}

/**
 * cgroup_rm_cftypes - remove an array of cftypes from a subsystem
 * @cfts: zero-length name terminated array of cftypes
 *
 * Unregister @cfts.  Files described by @cfts are removed from all
 * existing cgroups and all future cgroups won't have them either.  This
 * function can be called anytime whether @cfts' subsys is attached or not.
 *
 * Returns 0 on successful unregistration, -ENOENT if @cfts is not
 * registered.
 */
int cgroup_rm_cftypes(struct cftype *cfts)
{
	int ret;

	mutex_lock(&cgroup_mutex);
	ret = cgroup_rm_cftypes_locked(cfts);
	mutex_unlock(&cgroup_mutex);
	return ret;
}

/**
 * cgroup_add_cftypes - add an array of cftypes to a subsystem
 * @ss: target cgroup subsystem
 * @cfts: zero-length name terminated array of cftypes
 *
 * Register @cfts to @ss.  Files described by @cfts are created for all
 * existing cgroups to which @ss is attached and all future cgroups will
 * have them too.  This function can be called anytime whether @ss is
 * attached or not.
 *
 * Returns 0 on successful registration, -errno on failure.  Note that this
 * function currently returns 0 as long as @cfts registration is successful
 * even if some file creation attempts on existing cgroups fail.
 */
static int cgroup_add_cftypes(struct cgroup_subsys *ss, struct cftype *cfts)
{
	int ret;

	if (ss->disabled)
		return 0;

	if (!cfts || cfts[0].name[0] == '\0')
		return 0;

	ret = cgroup_init_cftypes(ss, cfts);
	if (ret)
		return ret;

	mutex_lock(&cgroup_mutex);

	list_add_tail(&cfts->node, &ss->cfts);
	ret = cgroup_apply_cftypes(cfts, true);
	if (ret)
		cgroup_rm_cftypes_locked(cfts);

	mutex_unlock(&cgroup_mutex);
	return ret;
}

/**
 * cgroup_add_dfl_cftypes - add an array of cftypes for default hierarchy
 * @ss: target cgroup subsystem
 * @cfts: zero-length name terminated array of cftypes
 *
 * Similar to cgroup_add_cftypes() but the added files are only used for
 * the default hierarchy.
 */
int cgroup_add_dfl_cftypes(struct cgroup_subsys *ss, struct cftype *cfts)
{
	struct cftype *cft;

	for (cft = cfts; cft && cft->name[0] != '\0'; cft++)
		cft->flags |= __CFTYPE_ONLY_ON_DFL;
	return cgroup_add_cftypes(ss, cfts);
}

/**
 * cgroup_add_legacy_cftypes - add an array of cftypes for legacy hierarchies
 * @ss: target cgroup subsystem
 * @cfts: zero-length name terminated array of cftypes
 *
 * Similar to cgroup_add_cftypes() but the added files are only used for
 * the legacy hierarchies.
 */
int cgroup_add_legacy_cftypes(struct cgroup_subsys *ss, struct cftype *cfts)
{
	struct cftype *cft;

	/*
	 * If legacy_files_on_dfl, we want to show the legacy files on the
	 * dfl hierarchy but iff the target subsystem hasn't been updated
	 * for the dfl hierarchy yet.
	 */
	if (!cgroup_legacy_files_on_dfl ||
	    ss->dfl_cftypes != ss->legacy_cftypes) {
		for (cft = cfts; cft && cft->name[0] != '\0'; cft++)
			cft->flags |= __CFTYPE_NOT_ON_DFL;
	}

	return cgroup_add_cftypes(ss, cfts);
}
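
/*
 * A controller typically registers its interface files as a
 * sentinel-terminated cftype array.  Hypothetical sketch (all names
 * invented for illustration):
 *
 *	static u64 my_value_read(struct cgroup_subsys_state *css,
 *				 struct cftype *cft)
 *	{
 *		return 0;
 *	}
 *
 *	static struct cftype my_files[] = {
 *		{
 *			.name = "my.value",
 *			.read_u64 = my_value_read,
 *		},
 *		{ }	(zero-length name terminates the array)
 *	};
 *
 * followed by a call to cgroup_add_dfl_cftypes() and/or
 * cgroup_add_legacy_cftypes() during subsystem initialization.
 */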

/**
 * cgroup_task_count - count the number of tasks in a cgroup.
 * @cgrp: the cgroup in question
 *
 * Return the number of tasks in the cgroup.
 */
static int cgroup_task_count(const struct cgroup *cgrp)
{
	int count = 0;
	struct cgrp_cset_link *link;

	down_read(&css_set_rwsem);
	list_for_each_entry(link, &cgrp->cset_links, cset_link)
		count += atomic_read(&link->cset->refcount);
	up_read(&css_set_rwsem);
	return count;
}

/**
 * css_next_child - find the next child of a given css
 * @pos: the current position (%NULL to initiate traversal)
 * @parent: css whose children to walk
 *
 * This function returns the next child of @parent and should be called
 * under either cgroup_mutex or RCU read lock.  The only requirement is
 * that @parent and @pos are accessible.  The next sibling is guaranteed to
 * be returned regardless of their states.
 *
 * If a subsystem synchronizes ->css_online() and the start of iteration, a
 * css which finished ->css_online() is guaranteed to be visible in the
 * future iterations and will stay visible until the last reference is put.
 * A css which hasn't finished ->css_online() or already finished
 * ->css_offline() may show up during traversal.  It's each subsystem's
 * responsibility to synchronize against on/offlining.
 */
struct cgroup_subsys_state *css_next_child(struct cgroup_subsys_state *pos,
					   struct cgroup_subsys_state *parent)
{
	struct cgroup_subsys_state *next;

	cgroup_assert_mutex_or_rcu_locked();

	/*
	 * @pos could already have been unlinked from the sibling list.
	 * Once a cgroup is removed, its ->sibling.next is no longer
	 * updated when its next sibling changes.  CSS_RELEASED is set when
	 * @pos is taken off list, at which time its next pointer is valid,
	 * and, as releases are serialized, the one pointed to by the next
	 * pointer is guaranteed to not have started release yet.  This
	 * implies that if we observe !CSS_RELEASED on @pos in this RCU
	 * critical section, the one pointed to by its next pointer is
	 * guaranteed to not have finished its RCU grace period even if we
	 * have dropped rcu_read_lock() in between iterations.
	 *
	 * If @pos has CSS_RELEASED set, its next pointer can't be
	 * dereferenced; however, as each css is given a monotonically
	 * increasing unique serial number and always appended to the
	 * sibling list, the next one can be found by walking the parent's
	 * children until the first css with higher serial number than
	 * @pos's.  While this path can be slower, it happens iff iteration
	 * races against release and the race window is very small.
	 */
	if (!pos) {
		next = list_entry_rcu(parent->children.next, struct cgroup_subsys_state, sibling);
	} else if (likely(!(pos->flags & CSS_RELEASED))) {
		next = list_entry_rcu(pos->sibling.next, struct cgroup_subsys_state, sibling);
	} else {
		list_for_each_entry_rcu(next, &parent->children, sibling)
			if (next->serial_nr > pos->serial_nr)
				break;
	}

	/*
	 * @next, if not pointing to the head, can be dereferenced and is
	 * the next sibling.
	 */
	if (&next->sibling != &parent->children)
		return next;
	return NULL;
}
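
/*
 * Most callers use the css_for_each_child() wrapper instead of calling
 * this directly, e.g. (sketch):
 *
 *	struct cgroup_subsys_state *child;
 *	int nr = 0;
 *
 *	rcu_read_lock();
 *	css_for_each_child(child, parent_css)
 *		nr++;
 *	rcu_read_unlock();
 */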

/**
 * css_next_descendant_pre - find the next descendant for pre-order walk
 * @pos: the current position (%NULL to initiate traversal)
 * @root: css whose descendants to walk
 *
 * To be used by css_for_each_descendant_pre().  Find the next descendant
 * to visit for pre-order traversal of @root's descendants.  @root is
 * included in the iteration and the first node to be visited.
 *
 * While this function requires cgroup_mutex or RCU read locking, it
 * doesn't require the whole traversal to be contained in a single critical
 * section.  This function will return the correct next descendant as long
 * as both @pos and @root are accessible and @pos is a descendant of @root.
3382 3383 3384 3385 3386 3387 3388
 *
 * If a subsystem synchronizes ->css_online() and the start of iteration, a
 * css which finished ->css_online() is guaranteed to be visible in the
 * future iterations and will stay visible until the last reference is put.
 * A css which hasn't finished ->css_online() or already finished
 * ->css_offline() may show up during traversal.  It's each subsystem's
 * responsibility to synchronize against on/offlining.
3389
 */
3390 3391 3392
struct cgroup_subsys_state *
css_next_descendant_pre(struct cgroup_subsys_state *pos,
			struct cgroup_subsys_state *root)
3393
{
3394
	struct cgroup_subsys_state *next;
3395

T
Tejun Heo 已提交
3396
	cgroup_assert_mutex_or_rcu_locked();
3397

3398
	/* if first iteration, visit @root */
3399
	if (!pos)
3400
		return root;
3401 3402

	/* visit the first child if exists */
3403
	next = css_next_child(NULL, pos);
3404 3405 3406 3407
	if (next)
		return next;

	/* no child, visit my or the closest ancestor's next sibling */
3408
	while (pos != root) {
T
Tejun Heo 已提交
3409
		next = css_next_child(pos, pos->parent);
3410
		if (next)
3411
			return next;
T
Tejun Heo 已提交
3412
		pos = pos->parent;
3413
	}
3414 3415 3416 3417

	return NULL;
}
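
/*
 * Illustrative sketch (not part of cgroup core): a full pre-order walk
 * via the css_for_each_descendant_pre() wrapper.  Pre-order visits a
 * css before any of its descendants, so parent state can be propagated
 * downwards in a single pass; the pr_debug() is just a stand-in body.
 */
static void __maybe_unused walk_subtree_pre(struct cgroup_subsys_state *root)
{
	struct cgroup_subsys_state *css;

	rcu_read_lock();
	css_for_each_descendant_pre(css, root)
		pr_debug("pre-order visit, serial_nr=%llu\n",
			 (unsigned long long)css->serial_nr);
	rcu_read_unlock();
}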

/**
 * css_rightmost_descendant - return the rightmost descendant of a css
 * @pos: css of interest
 *
 * Return the rightmost descendant of @pos.  If there's no descendant, @pos
 * is returned.  This can be used during pre-order traversal to skip the
 * subtree of @pos.
 *
 * While this function requires cgroup_mutex or RCU read locking, it
 * doesn't require the whole traversal to be contained in a single critical
 * section.  This function will return the correct rightmost descendant as
 * long as @pos is accessible.
 */
struct cgroup_subsys_state *
css_rightmost_descendant(struct cgroup_subsys_state *pos)
{
	struct cgroup_subsys_state *last, *tmp;

	cgroup_assert_mutex_or_rcu_locked();

	do {
		last = pos;
		/* ->prev isn't RCU safe, walk ->next till the end */
		pos = NULL;
		css_for_each_child(tmp, last)
			pos = tmp;
	} while (pos);

	return last;
}
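
/*
 * Illustrative sketch (not part of cgroup core): pruning a subtree
 * during pre-order traversal.  Jumping @pos to its rightmost descendant
 * makes the next iteration resume at the subtree's next sibling.
 * should_skip() is a hypothetical predicate.
 */
static void __maybe_unused
walk_and_prune(struct cgroup_subsys_state *root,
	       bool (*should_skip)(struct cgroup_subsys_state *css))
{
	struct cgroup_subsys_state *pos;

	rcu_read_lock();
	css_for_each_descendant_pre(pos, root)
		if (should_skip(pos))
			pos = css_rightmost_descendant(pos);
	rcu_read_unlock();
}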

static struct cgroup_subsys_state *
css_leftmost_descendant(struct cgroup_subsys_state *pos)
{
	struct cgroup_subsys_state *last;

	do {
		last = pos;
		pos = css_next_child(NULL, pos);
	} while (pos);

	return last;
}

/**
 * css_next_descendant_post - find the next descendant for post-order walk
 * @pos: the current position (%NULL to initiate traversal)
 * @root: css whose descendants to walk
 *
 * To be used by css_for_each_descendant_post().  Find the next descendant
 * to visit for post-order traversal of @root's descendants.  @root is
 * included in the iteration and the last node to be visited.
 *
 * While this function requires cgroup_mutex or RCU read locking, it
 * doesn't require the whole traversal to be contained in a single critical
 * section.  This function will return the correct next descendant as long
 * as both @pos and @root are accessible and @pos is a descendant of
 * @root.
 *
 * If a subsystem synchronizes ->css_online() and the start of iteration, a
 * css which finished ->css_online() is guaranteed to be visible in the
 * future iterations and will stay visible until the last reference is put.
 * A css which hasn't finished ->css_online() or already finished
 * ->css_offline() may show up during traversal.  It's each subsystem's
 * responsibility to synchronize against on/offlining.
 */
struct cgroup_subsys_state *
css_next_descendant_post(struct cgroup_subsys_state *pos,
			 struct cgroup_subsys_state *root)
{
	struct cgroup_subsys_state *next;

	cgroup_assert_mutex_or_rcu_locked();

	/* if first iteration, visit leftmost descendant which may be @root */
	if (!pos)
		return css_leftmost_descendant(root);

	/* if we visited @root, we're done */
	if (pos == root)
		return NULL;

	/* if there's an unvisited sibling, visit its leftmost descendant */
	next = css_next_child(pos, pos->parent);
	if (next)
		return css_leftmost_descendant(next);

	/* no sibling left, visit parent */
	return pos->parent;
}
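
/*
 * Illustrative sketch (not part of cgroup core): post-order visits all
 * children before their parent, which suits bottom-up aggregation; the
 * counter below is a hypothetical stand-in for real per-css work.
 */
static unsigned int __maybe_unused
count_descendants_post(struct cgroup_subsys_state *root)
{
	struct cgroup_subsys_state *pos;
	unsigned int n = 0;

	rcu_read_lock();
	css_for_each_descendant_post(pos, root)
		n++;	/* @root itself is counted last */
	rcu_read_unlock();

	return n;
}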

/**
 * css_has_online_children - does a css have online children
 * @css: the target css
 *
 * Returns %true if @css has any online children; otherwise, %false.  This
 * function can be called from any context but the caller is responsible
 * for synchronizing against on/offlining as necessary.
 */
bool css_has_online_children(struct cgroup_subsys_state *css)
{
	struct cgroup_subsys_state *child;
	bool ret = false;

	rcu_read_lock();
	css_for_each_child(child, css) {
		if (child->flags & CSS_ONLINE) {
			ret = true;
			break;
		}
	}
	rcu_read_unlock();
	return ret;
}

/**
 * css_advance_task_iter - advance a task iterator to the next css_set
 * @it: the iterator to advance
 *
 * Advance @it to the next css_set to walk.
 */
static void css_advance_task_iter(struct css_task_iter *it)
{
	struct list_head *l = it->cset_pos;
	struct cgrp_cset_link *link;
	struct css_set *cset;

	/* Advance to the next non-empty css_set */
	do {
		l = l->next;
		if (l == it->cset_head) {
			it->cset_pos = NULL;
			return;
		}

		if (it->ss) {
			cset = container_of(l, struct css_set,
					    e_cset_node[it->ss->id]);
		} else {
			link = list_entry(l, struct cgrp_cset_link, cset_link);
			cset = link->cset;
		}
	} while (list_empty(&cset->tasks) && list_empty(&cset->mg_tasks));

	it->cset_pos = l;

	if (!list_empty(&cset->tasks))
		it->task_pos = cset->tasks.next;
	else
		it->task_pos = cset->mg_tasks.next;

	it->tasks_head = &cset->tasks;
	it->mg_tasks_head = &cset->mg_tasks;
}

/**
 * css_task_iter_start - initiate task iteration
 * @css: the css to walk tasks of
 * @it: the task iterator to use
 *
 * Initiate iteration through the tasks of @css.  The caller can call
 * css_task_iter_next() to walk through the tasks until the function
 * returns NULL.  On completion of iteration, css_task_iter_end() must be
 * called.
 *
 * Note that this function acquires a lock which is released when the
 * iteration finishes.  The caller can't sleep while iteration is in
 * progress.
 */
void css_task_iter_start(struct cgroup_subsys_state *css,
			 struct css_task_iter *it)
	__acquires(css_set_rwsem)
{
	/* no one should try to iterate before mounting cgroups */
	WARN_ON_ONCE(!use_task_css_set_links);

	down_read(&css_set_rwsem);

	it->ss = css->ss;

	if (it->ss)
		it->cset_pos = &css->cgroup->e_csets[css->ss->id];
	else
		it->cset_pos = &css->cgroup->cset_links;

	it->cset_head = it->cset_pos;

	css_advance_task_iter(it);
}

/**
 * css_task_iter_next - return the next task for the iterator
 * @it: the task iterator being iterated
 *
 * The "next" function for task iteration.  @it should have been
 * initialized via css_task_iter_start().  Returns NULL when the iteration
 * reaches the end.
 */
struct task_struct *css_task_iter_next(struct css_task_iter *it)
{
	struct task_struct *res;
	struct list_head *l = it->task_pos;

	/* If the iterator's css_set position is NULL, we have no tasks */
	if (!it->cset_pos)
		return NULL;
	res = list_entry(l, struct task_struct, cg_list);

	/*
	 * Advance iterator to find next entry.  cset->tasks is consumed
	 * first and then ->mg_tasks.  After ->mg_tasks, we move onto the
	 * next cset.
	 */
	l = l->next;

	if (l == it->tasks_head)
		l = it->mg_tasks_head->next;

	if (l == it->mg_tasks_head)
		css_advance_task_iter(it);
	else
		it->task_pos = l;

	return res;
}

/**
 * css_task_iter_end - finish task iteration
 * @it: the task iterator to finish
 *
 * Finish task iteration started by css_task_iter_start().
 */
void css_task_iter_end(struct css_task_iter *it)
	__releases(css_set_rwsem)
{
	up_read(&css_set_rwsem);
}
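
/*
 * Illustrative sketch (not part of cgroup core): the canonical
 * start/next/end loop.  css_set_rwsem is read-held between start and
 * end, so the loop body must not sleep; tally_css_tasks() is a
 * hypothetical helper.
 */
static int __maybe_unused tally_css_tasks(struct cgroup_subsys_state *css)
{
	struct css_task_iter it;
	struct task_struct *task;
	int n = 0;

	css_task_iter_start(css, &it);
	while ((task = css_task_iter_next(&it)))
		n++;
	css_task_iter_end(&it);

	return n;
}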

/**
 * cgroup_transfer_tasks - move tasks from one cgroup to another
 * @to: cgroup to which the tasks will be moved
 * @from: cgroup in which the tasks currently reside
 *
 * Locking rules between cgroup_post_fork() and the migration path
 * guarantee that, if a task is forking while being migrated, the new child
 * is guaranteed to be either visible in the source cgroup after the
 * parent's migration is complete or put into the target cgroup.  No task
 * can slip out of migration through forking.
 */
int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from)
{
	LIST_HEAD(preloaded_csets);
	struct cgrp_cset_link *link;
	struct css_task_iter it;
	struct task_struct *task;
	int ret;

	mutex_lock(&cgroup_mutex);

	/* all tasks in @from are being moved, all csets are source */
	down_read(&css_set_rwsem);
	list_for_each_entry(link, &from->cset_links, cset_link)
		cgroup_migrate_add_src(link->cset, to, &preloaded_csets);
	up_read(&css_set_rwsem);

	ret = cgroup_migrate_prepare_dst(to, &preloaded_csets);
	if (ret)
		goto out_err;

	/*
	 * Migrate tasks one-by-one until @from is empty.  This fails iff
	 * ->can_attach() fails.
	 */
	do {
		css_task_iter_start(&from->self, &it);
		task = css_task_iter_next(&it);
		if (task)
			get_task_struct(task);
		css_task_iter_end(&it);

		if (task) {
			ret = cgroup_migrate(to, task, false);
			put_task_struct(task);
		}
	} while (task && !ret);
out_err:
	cgroup_migrate_finish(&preloaded_csets);
	mutex_unlock(&cgroup_mutex);
	return ret;
}

/*
 * Stuff for reading the 'tasks'/'procs' files.
 *
 * Reading this file can return large amounts of data if a cgroup has
 * *lots* of attached tasks. So it may need several calls to read(),
 * but we cannot guarantee that the information we produce is correct
 * unless we produce it entirely atomically.
 */

/* which pidlist file are we talking about? */
enum cgroup_filetype {
	CGROUP_FILE_PROCS,
	CGROUP_FILE_TASKS,
};

/*
 * A pidlist is a list of pids that virtually represents the contents of one
 * of the cgroup files ("procs" or "tasks"). We keep a list of such pidlists,
 * a pair (one each for procs, tasks) for each pid namespace that's relevant
 * to the cgroup.
 */
struct cgroup_pidlist {
	/*
	 * used to find which pidlist is wanted. doesn't change as long as
	 * this particular list stays in the list.
	 */
	struct { enum cgroup_filetype type; struct pid_namespace *ns; } key;
	/* array of xids */
	pid_t *list;
	/* how many elements the above list has */
	int length;
	/* each of these stored in a list by its cgroup */
	struct list_head links;
	/* pointer to the cgroup we belong to, for list removal purposes */
	struct cgroup *owner;
	/* for delayed destruction */
	struct delayed_work destroy_dwork;
};

/*
 * The following two functions "fix" the issue where there are more pids
 * than kmalloc will give memory for; in such cases, we use vmalloc/vfree.
 * TODO: replace with a kernel-wide solution to this problem
 */
#define PIDLIST_TOO_LARGE(c) ((c) * sizeof(pid_t) > (PAGE_SIZE * 2))
static void *pidlist_allocate(int count)
{
	if (PIDLIST_TOO_LARGE(count))
		return vmalloc(count * sizeof(pid_t));
	else
		return kmalloc(count * sizeof(pid_t), GFP_KERNEL);
}

static void pidlist_free(void *p)
{
	if (is_vmalloc_addr(p))
		vfree(p);
	else
		kfree(p);
}
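
/*
 * For example (illustrative): with 4K pages and a 4-byte pid_t,
 * PIDLIST_TOO_LARGE() becomes true above 2048 entries, so larger
 * pidlists take the vmalloc path while smaller ones stay on kmalloc.
 */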

/*
 * Used to destroy all pidlists lingering, waiting for the destroy timer.
 * None should be left afterwards.
 */
static void cgroup_pidlist_destroy_all(struct cgroup *cgrp)
{
	struct cgroup_pidlist *l, *tmp_l;

	mutex_lock(&cgrp->pidlist_mutex);
	list_for_each_entry_safe(l, tmp_l, &cgrp->pidlists, links)
		mod_delayed_work(cgroup_pidlist_destroy_wq, &l->destroy_dwork, 0);
	mutex_unlock(&cgrp->pidlist_mutex);

	flush_workqueue(cgroup_pidlist_destroy_wq);
	BUG_ON(!list_empty(&cgrp->pidlists));
}

static void cgroup_pidlist_destroy_work_fn(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct cgroup_pidlist *l = container_of(dwork, struct cgroup_pidlist,
						destroy_dwork);
	struct cgroup_pidlist *tofree = NULL;

	mutex_lock(&l->owner->pidlist_mutex);

	/*
	 * Destroy iff we didn't get queued again.  The state won't change
	 * as destroy_dwork can only be queued while locked.
	 */
	if (!delayed_work_pending(dwork)) {
		list_del(&l->links);
		pidlist_free(l->list);
		put_pid_ns(l->key.ns);
		tofree = l;
	}

	mutex_unlock(&l->owner->pidlist_mutex);
	kfree(tofree);
}

/*
 * pidlist_uniq - given a kmalloc()ed and sorted list, strip out all
 * duplicate entries.  Returns the number of unique elements.
 */
static int pidlist_uniq(pid_t *list, int length)
{
	int src, dest = 1;

	/*
	 * we presume the 0th element is unique, so src starts at 1. trivial
	 * edge cases first; no work needs to be done for either
	 */
	if (length == 0 || length == 1)
		return length;
	/* src and dest walk down the list; dest counts unique elements */
	for (src = 1; src < length; src++) {
		/* find next unique element */
		while (list[src] == list[src-1]) {
			src++;
			if (src == length)
				goto after;
		}
		/* dest always points to where the next unique element goes */
		list[dest] = list[src];
		dest++;
	}
after:
	return dest;
}
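
/*
 * For example (illustrative): on the sorted input {1, 2, 2, 3, 3, 3},
 * pidlist_uniq() compacts the array to {1, 2, 3, ...} and returns 3;
 * entries past the returned length are simply stale leftovers.
 */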

/*
 * The two pid files - tasks and cgroup.procs - guaranteed that the result
 * is sorted, which forced this whole pidlist fiasco.  As pid order is
 * different per namespace, each namespace needs a differently sorted list,
 * making it impossible to use, for example, a single rbtree of member tasks
 * sorted by task pointer.  As pidlists can be fairly large, allocating one
 * per open file is dangerous, so cgroup had to implement a shared pool of
 * pidlists keyed by cgroup and namespace.
 *
 * All this extra complexity was caused by the original implementation
 * committing to an entirely unnecessary property.  In the long term, we
 * want to do away with it.  Explicitly scramble sort order if on the
 * default hierarchy so that no such expectation exists in the new
 * interface.
 *
 * Scrambling is done by swapping every two consecutive bits, which is a
 * non-identity one-to-one mapping which disturbs sort order sufficiently.
 */
static pid_t pid_fry(pid_t pid)
{
	unsigned a = pid & 0x55555555;
	unsigned b = pid & 0xAAAAAAAA;

	return (a << 1) | (b >> 1);
}

static pid_t cgroup_pid_fry(struct cgroup *cgrp, pid_t pid)
{
	if (cgroup_on_dfl(cgrp))
		return pid_fry(pid);
	else
		return pid;
}
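
/*
 * Illustrative sketch (not part of cgroup core): pid_fry() swaps the
 * even and odd bit positions, e.g. 1 <-> 2, 3 -> 3, 4 <-> 8, so
 * applying it twice restores the original pid.  A hypothetical
 * self-check:
 */
static void __maybe_unused pid_fry_selftest(void)
{
	pid_t pid;

	for (pid = 1; pid < 1024; pid++)
		WARN_ON_ONCE(pid_fry(pid_fry(pid)) != pid);
}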

static int cmppid(const void *a, const void *b)
{
	return *(pid_t *)a - *(pid_t *)b;
}

static int fried_cmppid(const void *a, const void *b)
{
	return pid_fry(*(pid_t *)a) - pid_fry(*(pid_t *)b);
}

static struct cgroup_pidlist *cgroup_pidlist_find(struct cgroup *cgrp,
						  enum cgroup_filetype type)
{
	struct cgroup_pidlist *l;
	/* don't need task_nsproxy() if we're looking at ourself */
	struct pid_namespace *ns = task_active_pid_ns(current);

	lockdep_assert_held(&cgrp->pidlist_mutex);

	list_for_each_entry(l, &cgrp->pidlists, links)
		if (l->key.type == type && l->key.ns == ns)
			return l;
	return NULL;
}

/*
 * find the appropriate pidlist for our purpose (given procs vs tasks);
 * must be called with cgrp->pidlist_mutex held.  Returns the found or
 * newly created pidlist, or NULL if we're out of memory.
 */
static struct cgroup_pidlist *cgroup_pidlist_find_create(struct cgroup *cgrp,
						enum cgroup_filetype type)
{
	struct cgroup_pidlist *l;

	lockdep_assert_held(&cgrp->pidlist_mutex);

	l = cgroup_pidlist_find(cgrp, type);
	if (l)
		return l;

	/* entry not found; create a new one */
	l = kzalloc(sizeof(struct cgroup_pidlist), GFP_KERNEL);
	if (!l)
		return l;

	INIT_DELAYED_WORK(&l->destroy_dwork, cgroup_pidlist_destroy_work_fn);
	l->key.type = type;
	/* don't need task_nsproxy() if we're looking at ourself */
	l->key.ns = get_pid_ns(task_active_pid_ns(current));
	l->owner = cgrp;
	list_add(&l->links, &cgrp->pidlists);
	return l;
}

/*
 * Load a cgroup's pidarray with either procs' tgids or tasks' pids
 */
static int pidlist_array_load(struct cgroup *cgrp, enum cgroup_filetype type,
			      struct cgroup_pidlist **lp)
{
	pid_t *array;
	int length;
	int pid, n = 0; /* used for populating the array */
	struct css_task_iter it;
	struct task_struct *tsk;
	struct cgroup_pidlist *l;

	lockdep_assert_held(&cgrp->pidlist_mutex);

	/*
	 * If cgroup gets more users after we read count, we won't have
	 * enough space - tough.  This race is indistinguishable to the
	 * caller from the case that the additional cgroup users didn't
	 * show up until sometime later on.
	 */
	length = cgroup_task_count(cgrp);
	array = pidlist_allocate(length);
	if (!array)
		return -ENOMEM;
	/* now, populate the array */
	css_task_iter_start(&cgrp->self, &it);
	while ((tsk = css_task_iter_next(&it))) {
		if (unlikely(n == length))
			break;
		/* get tgid or pid for procs or tasks file respectively */
		if (type == CGROUP_FILE_PROCS)
			pid = task_tgid_vnr(tsk);
		else
			pid = task_pid_vnr(tsk);
		if (pid > 0) /* make sure to only use valid results */
			array[n++] = pid;
	}
	css_task_iter_end(&it);
	length = n;
	/* now sort & (if procs) strip out duplicates */
	if (cgroup_on_dfl(cgrp))
		sort(array, length, sizeof(pid_t), fried_cmppid, NULL);
	else
		sort(array, length, sizeof(pid_t), cmppid, NULL);
	if (type == CGROUP_FILE_PROCS)
		length = pidlist_uniq(array, length);

	l = cgroup_pidlist_find_create(cgrp, type);
	if (!l) {
		mutex_unlock(&cgrp->pidlist_mutex);
		pidlist_free(array);
		return -ENOMEM;
	}

	/* store array, freeing old if necessary */
	pidlist_free(l->list);
	l->list = array;
	l->length = length;
	*lp = l;
	return 0;
}

/**
 * cgroupstats_build - build and fill cgroupstats
 * @stats: cgroupstats to fill information into
 * @dentry: A dentry entry belonging to the cgroup for which stats have
 * been requested.
 *
 * Build and fill cgroupstats so that taskstats can export it to user
 * space.
 */
int cgroupstats_build(struct cgroupstats *stats, struct dentry *dentry)
{
	struct kernfs_node *kn = kernfs_node_from_dentry(dentry);
	struct cgroup *cgrp;
	struct css_task_iter it;
	struct task_struct *tsk;

	/* it should be a kernfs_node belonging to cgroupfs and a directory */
	if (dentry->d_sb->s_type != &cgroup_fs_type || !kn ||
	    kernfs_type(kn) != KERNFS_DIR)
		return -EINVAL;

	mutex_lock(&cgroup_mutex);

	/*
	 * We aren't being called from kernfs and there's no guarantee on
	 * @kn->priv's validity.  For this and css_tryget_online_from_dir(),
	 * @kn->priv is RCU safe.  Let's do the RCU dancing.
	 */
	rcu_read_lock();
	cgrp = rcu_dereference(kn->priv);
	if (!cgrp || cgroup_is_dead(cgrp)) {
		rcu_read_unlock();
		mutex_unlock(&cgroup_mutex);
		return -ENOENT;
	}
	rcu_read_unlock();

	css_task_iter_start(&cgrp->self, &it);
	while ((tsk = css_task_iter_next(&it))) {
		switch (tsk->state) {
		case TASK_RUNNING:
			stats->nr_running++;
			break;
		case TASK_INTERRUPTIBLE:
			stats->nr_sleeping++;
			break;
		case TASK_UNINTERRUPTIBLE:
			stats->nr_uninterruptible++;
			break;
		case TASK_STOPPED:
			stats->nr_stopped++;
			break;
		default:
			if (delayacct_is_task_waiting_on_io(tsk))
				stats->nr_io_wait++;
			break;
		}
	}
	css_task_iter_end(&it);

	mutex_unlock(&cgroup_mutex);
	return 0;
}

/*
 * seq_file methods for the tasks/procs files. The seq_file position is the
 * next pid to display; the seq_file iterator is a pointer to the pid
 * in the pidlist's ->list array.
 */

static void *cgroup_pidlist_start(struct seq_file *s, loff_t *pos)
{
	/*
	 * Initially we receive a position value that corresponds to
	 * one more than the last pid shown (or 0 on the first call or
	 * after a seek to the start). Use a binary-search to find the
	 * next pid to display, if any
	 */
	struct kernfs_open_file *of = s->private;
	struct cgroup *cgrp = seq_css(s)->cgroup;
	struct cgroup_pidlist *l;
	enum cgroup_filetype type = seq_cft(s)->private;
	int index = 0, pid = *pos;
	int *iter, ret;

	mutex_lock(&cgrp->pidlist_mutex);

	/*
	 * !NULL @of->priv indicates that this isn't the first start()
	 * after open.  If the matching pidlist is around, we can use that.
	 * Look for it.  Note that @of->priv can't be used directly.  It
	 * could already have been destroyed.
	 */
	if (of->priv)
		of->priv = cgroup_pidlist_find(cgrp, type);

	/*
	 * Either this is the first start() after open or the matching
	 * pidlist has been destroyed in between.  Create a new one.
	 */
	if (!of->priv) {
		ret = pidlist_array_load(cgrp, type,
					 (struct cgroup_pidlist **)&of->priv);
		if (ret)
			return ERR_PTR(ret);
	}
	l = of->priv;

	if (pid) {
		int end = l->length;

		while (index < end) {
			int mid = (index + end) / 2;
			if (cgroup_pid_fry(cgrp, l->list[mid]) == pid) {
				index = mid;
				break;
			} else if (cgroup_pid_fry(cgrp, l->list[mid]) <= pid)
				index = mid + 1;
			else
				end = mid;
		}
	}
	/* If we're off the end of the array, we're done */
	if (index >= l->length)
		return NULL;
	/* Update the abstract position to be the actual pid that we found */
	iter = l->list + index;
	*pos = cgroup_pid_fry(cgrp, *iter);
	return iter;
}

static void cgroup_pidlist_stop(struct seq_file *s, void *v)
{
	struct kernfs_open_file *of = s->private;
	struct cgroup_pidlist *l = of->priv;

	if (l)
		mod_delayed_work(cgroup_pidlist_destroy_wq, &l->destroy_dwork,
				 CGROUP_PIDLIST_DESTROY_DELAY);
	mutex_unlock(&seq_css(s)->cgroup->pidlist_mutex);
}

static void *cgroup_pidlist_next(struct seq_file *s, void *v, loff_t *pos)
{
	struct kernfs_open_file *of = s->private;
	struct cgroup_pidlist *l = of->priv;
	pid_t *p = v;
	pid_t *end = l->list + l->length;
	/*
	 * Advance to the next pid in the array. If this goes off the
	 * end, we're done
	 */
	p++;
	if (p >= end) {
		return NULL;
	} else {
		*pos = cgroup_pid_fry(seq_css(s)->cgroup, *p);
		return p;
	}
}

static int cgroup_pidlist_show(struct seq_file *s, void *v)
{
	return seq_printf(s, "%d\n", *(int *)v);
}

static u64 cgroup_read_notify_on_release(struct cgroup_subsys_state *css,
					 struct cftype *cft)
{
	return notify_on_release(css->cgroup);
}

static int cgroup_write_notify_on_release(struct cgroup_subsys_state *css,
					  struct cftype *cft, u64 val)
{
	clear_bit(CGRP_RELEASABLE, &css->cgroup->flags);
	if (val)
		set_bit(CGRP_NOTIFY_ON_RELEASE, &css->cgroup->flags);
	else
		clear_bit(CGRP_NOTIFY_ON_RELEASE, &css->cgroup->flags);
	return 0;
}

static u64 cgroup_clone_children_read(struct cgroup_subsys_state *css,
				      struct cftype *cft)
{
	return test_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags);
}

static int cgroup_clone_children_write(struct cgroup_subsys_state *css,
				       struct cftype *cft, u64 val)
{
	if (val)
		set_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags);
	else
		clear_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags);
	return 0;
}

/* cgroup core interface files for the default hierarchy */
static struct cftype cgroup_dfl_base_files[] = {
	{
		.name = "cgroup.procs",
		.seq_start = cgroup_pidlist_start,
		.seq_next = cgroup_pidlist_next,
		.seq_stop = cgroup_pidlist_stop,
		.seq_show = cgroup_pidlist_show,
		.private = CGROUP_FILE_PROCS,
		.write = cgroup_procs_write,
		.mode = S_IRUGO | S_IWUSR,
	},
	{
		.name = "cgroup.controllers",
		.flags = CFTYPE_ONLY_ON_ROOT,
		.seq_show = cgroup_root_controllers_show,
	},
	{
		.name = "cgroup.controllers",
		.flags = CFTYPE_NOT_ON_ROOT,
		.seq_show = cgroup_controllers_show,
	},
	{
		.name = "cgroup.subtree_control",
		.seq_show = cgroup_subtree_control_show,
		.write = cgroup_subtree_control_write,
	},
	{
		.name = "cgroup.populated",
		.flags = CFTYPE_NOT_ON_ROOT,
		.seq_show = cgroup_populated_show,
	},
	{ }	/* terminate */
};

/* cgroup core interface files for the legacy hierarchies */
static struct cftype cgroup_legacy_base_files[] = {
	{
		.name = "cgroup.procs",
		.seq_start = cgroup_pidlist_start,
		.seq_next = cgroup_pidlist_next,
		.seq_stop = cgroup_pidlist_stop,
		.seq_show = cgroup_pidlist_show,
		.private = CGROUP_FILE_PROCS,
		.write = cgroup_procs_write,
		.mode = S_IRUGO | S_IWUSR,
	},
	{
		.name = "cgroup.clone_children",
		.read_u64 = cgroup_clone_children_read,
		.write_u64 = cgroup_clone_children_write,
	},
	{
		.name = "cgroup.sane_behavior",
		.flags = CFTYPE_ONLY_ON_ROOT,
		.seq_show = cgroup_sane_behavior_show,
	},
	{
		.name = "tasks",
		.seq_start = cgroup_pidlist_start,
		.seq_next = cgroup_pidlist_next,
		.seq_stop = cgroup_pidlist_stop,
		.seq_show = cgroup_pidlist_show,
		.private = CGROUP_FILE_TASKS,
		.write = cgroup_tasks_write,
		.mode = S_IRUGO | S_IWUSR,
	},
	{
		.name = "notify_on_release",
		.read_u64 = cgroup_read_notify_on_release,
		.write_u64 = cgroup_write_notify_on_release,
	},
	{
		.name = "release_agent",
		.flags = CFTYPE_ONLY_ON_ROOT,
		.seq_show = cgroup_release_agent_show,
		.write = cgroup_release_agent_write,
		.max_write_len = PATH_MAX - 1,
	},
	{ }	/* terminate */
};

/**
 * cgroup_populate_dir - create subsys files in a cgroup directory
 * @cgrp: target cgroup
 * @subsys_mask: mask of the subsystem ids whose files should be added
 *
 * On failure, no file is added.
 */
static int cgroup_populate_dir(struct cgroup *cgrp, unsigned int subsys_mask)
{
	struct cgroup_subsys *ss;
	int i, ret = 0;

	/* process cftsets of each subsystem */
	for_each_subsys(ss, i) {
		struct cftype *cfts;

		if (!(subsys_mask & (1 << i)))
			continue;

		list_for_each_entry(cfts, &ss->cfts, node) {
			ret = cgroup_addrm_files(cgrp, cfts, true);
			if (ret < 0)
				goto err;
		}
	}
	return 0;
err:
	cgroup_clear_dir(cgrp, subsys_mask);
	return ret;
}

/*
 * css destruction is a four-stage process.
 *
 * 1. Destruction starts.  Killing of the percpu_ref is initiated.
 *    Implemented in kill_css().
 *
 * 2. When the percpu_ref is confirmed to be visible as killed on all CPUs
 *    and thus css_tryget_online() is guaranteed to fail, the css can be
 *    offlined by invoking offline_css().  After offlining, the base ref is
 *    put.  Implemented in css_killed_work_fn().
 *
 * 3. When the percpu_ref reaches zero, the only possible remaining
 *    accessors are inside RCU read sections.  css_release() schedules the
 *    RCU callback.
 *
 * 4. After the grace period, the css can be freed.  Implemented in
 *    css_free_work_fn().
 *
 * It is actually hairier because both steps 2 and 4 require process context
 * and thus involve punting to css->destroy_work adding two additional
 * steps to the already complex sequence.
 */
static void css_free_work_fn(struct work_struct *work)
{
	struct cgroup_subsys_state *css =
		container_of(work, struct cgroup_subsys_state, destroy_work);
	struct cgroup *cgrp = css->cgroup;

	percpu_ref_exit(&css->refcnt);

	if (css->ss) {
		/* css free path */
		if (css->parent)
			css_put(css->parent);

		css->ss->css_free(css);
		cgroup_put(cgrp);
	} else {
		/* cgroup free path */
		atomic_dec(&cgrp->root->nr_cgrps);
		cgroup_pidlist_destroy_all(cgrp);

		if (cgroup_parent(cgrp)) {
			/*
			 * We get a ref to the parent, and put the ref when
			 * this cgroup is being freed, so it's guaranteed
			 * that the parent won't be destroyed before its
			 * children.
			 */
			cgroup_put(cgroup_parent(cgrp));
			kernfs_put(cgrp->kn);
			kfree(cgrp);
		} else {
			/*
			 * This is root cgroup's refcnt reaching zero,
			 * which indicates that the root should be
			 * released.
			 */
			cgroup_destroy_root(cgrp->root);
		}
	}
}

static void css_free_rcu_fn(struct rcu_head *rcu_head)
{
	struct cgroup_subsys_state *css =
		container_of(rcu_head, struct cgroup_subsys_state, rcu_head);

	INIT_WORK(&css->destroy_work, css_free_work_fn);
	queue_work(cgroup_destroy_wq, &css->destroy_work);
}

static void css_release_work_fn(struct work_struct *work)
{
	struct cgroup_subsys_state *css =
		container_of(work, struct cgroup_subsys_state, destroy_work);
	struct cgroup_subsys *ss = css->ss;
	struct cgroup *cgrp = css->cgroup;

	mutex_lock(&cgroup_mutex);

	css->flags |= CSS_RELEASED;
	list_del_rcu(&css->sibling);

	if (ss) {
		/* css release path */
		cgroup_idr_remove(&ss->css_idr, css->id);
	} else {
		/* cgroup release path */
		cgroup_idr_remove(&cgrp->root->cgroup_idr, cgrp->id);
		cgrp->id = -1;

		/*
		 * There are two control paths which try to determine
		 * cgroup from dentry without going through kernfs -
		 * cgroupstats_build() and css_tryget_online_from_dir().
		 * Those are supported by RCU protecting clearing of
		 * cgrp->kn->priv backpointer.
		 */
		RCU_INIT_POINTER(*(void __rcu __force **)&cgrp->kn->priv, NULL);
	}

	mutex_unlock(&cgroup_mutex);

	call_rcu(&css->rcu_head, css_free_rcu_fn);
}

static void css_release(struct percpu_ref *ref)
{
	struct cgroup_subsys_state *css =
		container_of(ref, struct cgroup_subsys_state, refcnt);

	INIT_WORK(&css->destroy_work, css_release_work_fn);
	queue_work(cgroup_destroy_wq, &css->destroy_work);
}

static void init_and_link_css(struct cgroup_subsys_state *css,
			      struct cgroup_subsys *ss, struct cgroup *cgrp)
{
	lockdep_assert_held(&cgroup_mutex);

	cgroup_get(cgrp);

	memset(css, 0, sizeof(*css));
	css->cgroup = cgrp;
	css->ss = ss;
	INIT_LIST_HEAD(&css->sibling);
	INIT_LIST_HEAD(&css->children);
	css->serial_nr = css_serial_nr_next++;

	if (cgroup_parent(cgrp)) {
		css->parent = cgroup_css(cgroup_parent(cgrp), ss);
		css_get(css->parent);
	}

	BUG_ON(cgroup_css(cgrp, ss));
}

/* invoke ->css_online() on a new CSS and mark it online if successful */
static int online_css(struct cgroup_subsys_state *css)
{
	struct cgroup_subsys *ss = css->ss;
	int ret = 0;

	lockdep_assert_held(&cgroup_mutex);

	if (ss->css_online)
		ret = ss->css_online(css);
	if (!ret) {
		css->flags |= CSS_ONLINE;
		rcu_assign_pointer(css->cgroup->subsys[ss->id], css);
	}
	return ret;
}

/* if the CSS is online, invoke ->css_offline() on it and mark it offline */
static void offline_css(struct cgroup_subsys_state *css)
{
	struct cgroup_subsys *ss = css->ss;

	lockdep_assert_held(&cgroup_mutex);

	if (!(css->flags & CSS_ONLINE))
		return;

	if (ss->css_offline)
		ss->css_offline(css);

	css->flags &= ~CSS_ONLINE;
	RCU_INIT_POINTER(css->cgroup->subsys[ss->id], NULL);

	wake_up_all(&css->cgroup->offline_waitq);
}

/**
 * create_css - create a cgroup_subsys_state
 * @cgrp: the cgroup new css will be associated with
 * @ss: the subsys of new css
 * @visible: whether to create control knobs for the new css or not
 *
 * Create a new css associated with @cgrp - @ss pair.  On success, the new
 * css is online and installed in @cgrp with all interface files created if
 * @visible.  Returns 0 on success, -errno on failure.
 */
static int create_css(struct cgroup *cgrp, struct cgroup_subsys *ss,
		      bool visible)
{
	struct cgroup *parent = cgroup_parent(cgrp);
	struct cgroup_subsys_state *parent_css = cgroup_css(parent, ss);
	struct cgroup_subsys_state *css;
	int err;

	lockdep_assert_held(&cgroup_mutex);

	css = ss->css_alloc(parent_css);
	if (IS_ERR(css))
		return PTR_ERR(css);

	init_and_link_css(css, ss, cgrp);

	err = percpu_ref_init(&css->refcnt, css_release);
	if (err)
		goto err_free_css;

	err = cgroup_idr_alloc(&ss->css_idr, NULL, 2, 0, GFP_NOWAIT);
	if (err < 0)
		goto err_free_percpu_ref;
	css->id = err;

	if (visible) {
		err = cgroup_populate_dir(cgrp, 1 << ss->id);
		if (err)
			goto err_free_id;
	}

	/* @css is ready to be brought online now, make it visible */
	list_add_tail_rcu(&css->sibling, &parent_css->children);
	cgroup_idr_replace(&ss->css_idr, css, css->id);

	err = online_css(css);
	if (err)
		goto err_list_del;

	if (ss->broken_hierarchy && !ss->warned_broken_hierarchy &&
	    cgroup_parent(parent)) {
		pr_warn("%s (%d) created nested cgroup for controller \"%s\" which has incomplete hierarchy support. Nested cgroups may change behavior in the future.\n",
			current->comm, current->pid, ss->name);
		if (!strcmp(ss->name, "memory"))
			pr_warn("\"memory\" requires setting use_hierarchy to 1 on the root\n");
		ss->warned_broken_hierarchy = true;
	}

	return 0;

err_list_del:
	list_del_rcu(&css->sibling);
	cgroup_clear_dir(css->cgroup, 1 << css->ss->id);
err_free_id:
	cgroup_idr_remove(&ss->css_idr, css->id);
err_free_percpu_ref:
	percpu_ref_exit(&css->refcnt);
err_free_css:
	call_rcu(&css->rcu_head, css_free_rcu_fn);
	return err;
}

static int cgroup_mkdir(struct kernfs_node *parent_kn, const char *name,
			umode_t mode)
{
	struct cgroup *parent, *cgrp;
	struct cgroup_root *root;
	struct cgroup_subsys *ss;
	struct kernfs_node *kn;
	struct cftype *base_files;
	int ssid, ret;

	/*
	 * Do not accept '\n' to prevent making /proc/<pid>/cgroup
	 * unparsable.
	 */
	if (strchr(name, '\n'))
		return -EINVAL;

	parent = cgroup_kn_lock_live(parent_kn);
	if (!parent)
		return -ENODEV;
	root = parent->root;

	/* allocate the cgroup and its ID, 0 is reserved for the root */
	cgrp = kzalloc(sizeof(*cgrp), GFP_KERNEL);
	if (!cgrp) {
		ret = -ENOMEM;
		goto out_unlock;
	}

	ret = percpu_ref_init(&cgrp->self.refcnt, css_release);
	if (ret)
		goto out_free_cgrp;

	/*
	 * Temporarily set the pointer to NULL, so idr_find() won't return
	 * a half-baked cgroup.
	 */
	cgrp->id = cgroup_idr_alloc(&root->cgroup_idr, NULL, 2, 0, GFP_NOWAIT);
	if (cgrp->id < 0) {
		ret = -ENOMEM;
		goto out_cancel_ref;
	}

	init_cgroup_housekeeping(cgrp);

	cgrp->self.parent = &parent->self;
	cgrp->root = root;

	if (notify_on_release(parent))
		set_bit(CGRP_NOTIFY_ON_RELEASE, &cgrp->flags);

	if (test_bit(CGRP_CPUSET_CLONE_CHILDREN, &parent->flags))
		set_bit(CGRP_CPUSET_CLONE_CHILDREN, &cgrp->flags);

	/* create the directory */
	kn = kernfs_create_dir(parent->kn, name, mode, cgrp);
	if (IS_ERR(kn)) {
		ret = PTR_ERR(kn);
		goto out_free_id;
	}
	cgrp->kn = kn;

	/*
	 * This extra ref will be put in css_free_work_fn() and guarantees
	 * that @cgrp->kn is always accessible.
	 */
	kernfs_get(kn);

	cgrp->self.serial_nr = css_serial_nr_next++;

	/* allocation complete, commit to creation */
	list_add_tail_rcu(&cgrp->self.sibling, &cgroup_parent(cgrp)->self.children);
	atomic_inc(&root->nr_cgrps);
	cgroup_get(parent);

	/*
	 * @cgrp is now fully operational.  If something fails after this
	 * point, it'll be released via the normal destruction path.
	 */
	cgroup_idr_replace(&root->cgroup_idr, cgrp, cgrp->id);

	ret = cgroup_kn_set_ugid(kn);
	if (ret)
		goto out_destroy;

	if (cgroup_on_dfl(cgrp))
		base_files = cgroup_dfl_base_files;
	else
		base_files = cgroup_legacy_base_files;

	ret = cgroup_addrm_files(cgrp, base_files, true);
	if (ret)
		goto out_destroy;

	/* let's create and online css's */
	for_each_subsys(ss, ssid) {
		if (parent->child_subsys_mask & (1 << ssid)) {
			ret = create_css(cgrp, ss,
					 parent->subtree_control & (1 << ssid));
			if (ret)
				goto out_destroy;
		}
	}

	/*
	 * On the default hierarchy, a child doesn't automatically inherit
	 * subtree_control from the parent.  Each is configured manually.
	 */
	if (!cgroup_on_dfl(cgrp)) {
		cgrp->subtree_control = parent->subtree_control;
		cgroup_refresh_child_subsys_mask(cgrp);
	}

	kernfs_activate(kn);

	ret = 0;
	goto out_unlock;

out_free_id:
	cgroup_idr_remove(&root->cgroup_idr, cgrp->id);
out_cancel_ref:
	percpu_ref_exit(&cgrp->self.refcnt);
out_free_cgrp:
	kfree(cgrp);
out_unlock:
	cgroup_kn_unlock(parent_kn);
	return ret;

out_destroy:
	cgroup_destroy_locked(cgrp);
	goto out_unlock;
}

/*
 * This is called when the refcnt of a css is confirmed to be killed.
 * css_tryget_online() is now guaranteed to fail.  Tell the subsystem to
 * initiate destruction and put the css ref from kill_css().
 */
static void css_killed_work_fn(struct work_struct *work)
{
	struct cgroup_subsys_state *css =
		container_of(work, struct cgroup_subsys_state, destroy_work);

	mutex_lock(&cgroup_mutex);
	offline_css(css);
	mutex_unlock(&cgroup_mutex);

	css_put(css);
}

/* css kill confirmation processing requires process context, bounce */
static void css_killed_ref_fn(struct percpu_ref *ref)
{
	struct cgroup_subsys_state *css =
		container_of(ref, struct cgroup_subsys_state, refcnt);

	INIT_WORK(&css->destroy_work, css_killed_work_fn);
	queue_work(cgroup_destroy_wq, &css->destroy_work);
}

/**
 * kill_css - destroy a css
 * @css: css to destroy
 *
 * This function initiates destruction of @css by removing cgroup interface
 * files and putting its base reference.  ->css_offline() will be invoked
 * asynchronously once css_tryget_online() is guaranteed to fail and when
 * the reference count reaches zero, @css will be released.
 */
static void kill_css(struct cgroup_subsys_state *css)
{
	lockdep_assert_held(&cgroup_mutex);

	/*
	 * This must happen before css is disassociated from its cgroup.
	 * See seq_css() for details.
	 */
	cgroup_clear_dir(css->cgroup, 1 << css->ss->id);

	/*
	 * Killing would put the base ref, but we need to keep it alive
	 * until after ->css_offline().
	 */
	css_get(css);

	/*
	 * cgroup core guarantees that, by the time ->css_offline() is
	 * invoked, no new css reference will be given out via
	 * css_tryget_online().  We can't simply call percpu_ref_kill() and
	 * proceed to offlining css's because percpu_ref_kill() doesn't
	 * guarantee that the ref is seen as killed on all CPUs on return.
	 *
	 * Use percpu_ref_kill_and_confirm() to get notifications as each
	 * css is confirmed to be seen as killed on all CPUs.
	 */
	percpu_ref_kill_and_confirm(&css->refcnt, css_killed_ref_fn);
}

/**
 * cgroup_destroy_locked - the first stage of cgroup destruction
 * @cgrp: cgroup to be destroyed
 *
 * css's make use of percpu refcnts whose killing latency shouldn't be
 * exposed to userland and are RCU protected.  Also, cgroup core needs to
 * guarantee that css_tryget_online() won't succeed by the time
 * ->css_offline() is invoked.  To satisfy all the requirements,
 * destruction is implemented in the following two steps.
 *
 * s1. Verify @cgrp can be destroyed and mark it dying.  Remove all
 *     userland visible parts and start killing the percpu refcnts of
 *     css's.  Set up so that the next stage will be kicked off once all
 *     the percpu refcnts are confirmed to be killed.
 *
 * s2. Invoke ->css_offline(), mark the cgroup dead and proceed with the
 *     rest of destruction.  Once all cgroup references are gone, the
 *     cgroup is RCU-freed.
 *
 * This function implements s1.  After this step, @cgrp is gone as far as
 * the userland is concerned and a new cgroup with the same name may be
 * created.  As cgroup doesn't care about the names internally, this
 * doesn't cause any problem.
 */
static int cgroup_destroy_locked(struct cgroup *cgrp)
	__releases(&cgroup_mutex) __acquires(&cgroup_mutex)
{
	struct cgroup_subsys_state *css;
	bool empty;
	int ssid;

	lockdep_assert_held(&cgroup_mutex);

	/*
	 * css_set_rwsem synchronizes access to ->cset_links and prevents
	 * @cgrp from being removed while put_css_set() is in progress.
	 */
	down_read(&css_set_rwsem);
	empty = list_empty(&cgrp->cset_links);
	up_read(&css_set_rwsem);
	if (!empty)
		return -EBUSY;

	/*
	 * Make sure there are no live children.  We can't test emptiness of
	 * ->self.children as dead children linger on it while being
	 * drained; otherwise, "rmdir parent/child parent" may fail.
	 */
	if (css_has_online_children(&cgrp->self))
		return -EBUSY;

	/*
	 * Mark @cgrp dead.  This prevents further task migration and child
	 * creation by disabling cgroup_lock_live_group().
	 */
	cgrp->self.flags &= ~CSS_ONLINE;

	/* initiate massacre of all css's */
	for_each_css(css, ssid, cgrp)
		kill_css(css);

	/* CSS_ONLINE is clear, remove from ->release_list for the last time */
	raw_spin_lock(&release_list_lock);
	if (!list_empty(&cgrp->release_list))
		list_del_init(&cgrp->release_list);
	raw_spin_unlock(&release_list_lock);

	/*
	 * Remove @cgrp directory along with the base files.  @cgrp has an
	 * extra ref on its kn.
	 */
	kernfs_remove(cgrp->kn);

	set_bit(CGRP_RELEASABLE, &cgroup_parent(cgrp)->flags);
	check_for_release(cgroup_parent(cgrp));

	/* put the base reference */
	percpu_ref_kill(&cgrp->self.refcnt);

	return 0;
}

static int cgroup_rmdir(struct kernfs_node *kn)
{
	struct cgroup *cgrp;
	int ret = 0;

	cgrp = cgroup_kn_lock_live(kn);
	if (!cgrp)
		return 0;
	cgroup_get(cgrp);	/* for @kn->priv clearing */

	ret = cgroup_destroy_locked(cgrp);

	cgroup_kn_unlock(kn);

	cgroup_put(cgrp);
	return ret;
}

static struct kernfs_syscall_ops cgroup_kf_syscall_ops = {
	.remount_fs		= cgroup_remount,
	.show_options		= cgroup_show_options,
	.mkdir			= cgroup_mkdir,
	.rmdir			= cgroup_rmdir,
	.rename			= cgroup_rename,
};

static void __init cgroup_init_subsys(struct cgroup_subsys *ss, bool early)
{
	struct cgroup_subsys_state *css;

	printk(KERN_INFO "Initializing cgroup subsys %s\n", ss->name);

	mutex_lock(&cgroup_mutex);

	idr_init(&ss->css_idr);
	INIT_LIST_HEAD(&ss->cfts);

	/* Create the root cgroup state for this subsystem */
	ss->root = &cgrp_dfl_root;
	css = ss->css_alloc(cgroup_css(&cgrp_dfl_root.cgrp, ss));
	/* We don't handle early failures gracefully */
	BUG_ON(IS_ERR(css));
	init_and_link_css(css, ss, &cgrp_dfl_root.cgrp);

	/*
	 * Root csses are never destroyed and we can't initialize
	 * percpu_ref during early init.  Disable refcnting.
	 */
	css->flags |= CSS_NO_REF;

	if (early) {
		/* allocation can't be done safely during early init */
		css->id = 1;
	} else {
		css->id = cgroup_idr_alloc(&ss->css_idr, css, 1, 2, GFP_KERNEL);
		BUG_ON(css->id < 0);
	}

	/*
	 * Update the init_css_set to contain a subsys pointer to this
	 * state - since the subsystem is newly registered, all tasks and
	 * hence the init_css_set is in the subsystem's root cgroup.
	 */
	init_css_set.subsys[ss->id] = css;

	need_forkexit_callback |= ss->fork || ss->exit;

	/*
	 * At system boot, before all subsystems have been registered, no
	 * tasks have been forked, so we don't need to invoke fork
	 * callbacks here.
	 */
	BUG_ON(!list_empty(&init_task.tasks));

	BUG_ON(online_css(css));

	mutex_unlock(&cgroup_mutex);
}

/**
 * cgroup_init_early - cgroup initialization at system boot
 *
 * Initialize cgroups at system boot, and initialize any
 * subsystems that request early init.
 */
int __init cgroup_init_early(void)
{
	static struct cgroup_sb_opts __initdata opts;
	struct cgroup_subsys *ss;
	int i;

	init_cgroup_root(&cgrp_dfl_root, &opts);
	cgrp_dfl_root.cgrp.self.flags |= CSS_NO_REF;

	RCU_INIT_POINTER(init_task.cgroups, &init_css_set);

	for_each_subsys(ss, i) {
		WARN(!ss->css_alloc || !ss->css_free || ss->name || ss->id,
		     "invalid cgroup_subsys %d:%s css_alloc=%p css_free=%p name:id=%d:%s\n",
		     i, cgroup_subsys_name[i], ss->css_alloc, ss->css_free,
		     ss->id, ss->name);
		WARN(strlen(cgroup_subsys_name[i]) > MAX_CGROUP_TYPE_NAMELEN,
		     "cgroup_subsys_name %s too long\n", cgroup_subsys_name[i]);

		ss->id = i;
		ss->name = cgroup_subsys_name[i];

		if (ss->early_init)
			cgroup_init_subsys(ss, true);
	}
	return 0;
}

/**
 * cgroup_init - cgroup initialization
 *
 * Register cgroup filesystem and /proc file, and initialize
 * any subsystems that didn't request early init.
 */
int __init cgroup_init(void)
{
	struct cgroup_subsys *ss;
	unsigned long key;
	int ssid, err;

	BUG_ON(cgroup_init_cftypes(NULL, cgroup_dfl_base_files));
	BUG_ON(cgroup_init_cftypes(NULL, cgroup_legacy_base_files));

	mutex_lock(&cgroup_mutex);

	/* Add init_css_set to the hash table */
	key = css_set_hash(init_css_set.subsys);
	hash_add(css_set_table, &init_css_set.hlist, key);

	BUG_ON(cgroup_setup_root(&cgrp_dfl_root, 0));

	mutex_unlock(&cgroup_mutex);

	for_each_subsys(ss, ssid) {
		if (ss->early_init) {
			struct cgroup_subsys_state *css =
				init_css_set.subsys[ss->id];

			css->id = cgroup_idr_alloc(&ss->css_idr, css, 1, 2,
						   GFP_KERNEL);
			BUG_ON(css->id < 0);
		} else {
			cgroup_init_subsys(ss, false);
		}

		list_add_tail(&init_css_set.e_cset_node[ssid],
			      &cgrp_dfl_root.cgrp.e_csets[ssid]);

		/*
		 * Setting dfl_root subsys_mask needs to consider the
		 * disabled flag and cftype registration needs kmalloc,
		 * both of which aren't available during early_init.
		 */
		if (ss->disabled)
			continue;

		cgrp_dfl_root.subsys_mask |= 1 << ss->id;

		if (cgroup_legacy_files_on_dfl && !ss->dfl_cftypes)
			ss->dfl_cftypes = ss->legacy_cftypes;

		if (!ss->dfl_cftypes)
			cgrp_dfl_root_inhibit_ss_mask |= 1 << ss->id;

		if (ss->dfl_cftypes == ss->legacy_cftypes) {
			WARN_ON(cgroup_add_cftypes(ss, ss->dfl_cftypes));
		} else {
			WARN_ON(cgroup_add_dfl_cftypes(ss, ss->dfl_cftypes));
			WARN_ON(cgroup_add_legacy_cftypes(ss, ss->legacy_cftypes));
		}
	}

	cgroup_kobj = kobject_create_and_add("cgroup", fs_kobj);
	if (!cgroup_kobj)
		return -ENOMEM;

	err = register_filesystem(&cgroup_fs_type);
	if (err < 0) {
		kobject_put(cgroup_kobj);
		return err;
	}

	proc_create("cgroups", 0, NULL, &proc_cgroupstats_operations);
	return 0;
}

static int __init cgroup_wq_init(void)
{
	/*
	 * There isn't much point in executing the destruction path in
	 * parallel.  A good chunk of it is serialized with cgroup_mutex
	 * anyway.  Use 1 for @max_active.
	 *
	 * We would prefer to do this in cgroup_init() above, but that
	 * is called before init_workqueues(): so leave this until after.
	 */
	cgroup_destroy_wq = alloc_workqueue("cgroup_destroy", 0, 1);
	BUG_ON(!cgroup_destroy_wq);

	/*
	 * Used to destroy pidlists and separate to serve as flush domain.
	 * Cap @max_active to 1 too.
	 */
	cgroup_pidlist_destroy_wq = alloc_workqueue("cgroup_pidlist_destroy",
						    0, 1);
	BUG_ON(!cgroup_pidlist_destroy_wq);

	return 0;
}
core_initcall(cgroup_wq_init);

5045 5046 5047 5048 5049 5050 5051
/*
 * proc_cgroup_show()
 *  - Print task's cgroup paths into seq_file, one line for each hierarchy
 *  - Used for /proc/<pid>/cgroup.
 */

/* TODO: Use a proper seq_file iterator */
5052
int proc_cgroup_show(struct seq_file *m, void *v)
5053 5054 5055
{
	struct pid *pid;
	struct task_struct *tsk;
T
Tejun Heo 已提交
5056
	char *buf, *path;
5057
	int retval;
5058
	struct cgroup_root *root;
5059 5060

	retval = -ENOMEM;
T
Tejun Heo 已提交
5061
	buf = kmalloc(PATH_MAX, GFP_KERNEL);
5062 5063 5064 5065 5066 5067 5068 5069 5070 5071 5072 5073
	if (!buf)
		goto out;

	retval = -ESRCH;
	pid = m->private;
	tsk = get_pid_task(pid, PIDTYPE_PID);
	if (!tsk)
		goto out_free;

	retval = 0;

	mutex_lock(&cgroup_mutex);
5074
	down_read(&css_set_rwsem);
5075

5076
	for_each_root(root) {
5077
		struct cgroup_subsys *ss;
5078
		struct cgroup *cgrp;
T
Tejun Heo 已提交
5079
		int ssid, count = 0;
5080

T
Tejun Heo 已提交
5081
		if (root == &cgrp_dfl_root && !cgrp_dfl_root_visible)
5082 5083
			continue;

5084
		seq_printf(m, "%d:", root->hierarchy_id);
T
Tejun Heo 已提交
5085
		for_each_subsys(ss, ssid)
5086
			if (root->subsys_mask & (1 << ssid))
T
Tejun Heo 已提交
5087
				seq_printf(m, "%s%s", count++ ? "," : "", ss->name);
5088 5089 5090
		if (strlen(root->name))
			seq_printf(m, "%sname=%s", count ? "," : "",
				   root->name);
5091
		seq_putc(m, ':');
5092
		cgrp = task_cgroup_from_root(tsk, root);
T
Tejun Heo 已提交
5093 5094 5095
		path = cgroup_path(cgrp, buf, PATH_MAX);
		if (!path) {
			retval = -ENAMETOOLONG;
5096
			goto out_unlock;
		}
		seq_puts(m, path);
		seq_putc(m, '\n');
	}

out_unlock:
	up_read(&css_set_rwsem);
	mutex_unlock(&cgroup_mutex);
	put_task_struct(tsk);
out_free:
	kfree(buf);
out:
	return retval;
}

/* Display information about each subsystem and each hierarchy */
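/*
 * Sample output (editor's illustration; values vary by system), in the
 * format produced below:
 *
 *	#subsys_name	hierarchy	num_cgroups	enabled
 *	cpuset	2	1	1
 *	memory	3	52	1
 */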
static int proc_cgroupstats_show(struct seq_file *m, void *v)
{
	struct cgroup_subsys *ss;
	int i;

	seq_puts(m, "#subsys_name\thierarchy\tnum_cgroups\tenabled\n");
	/*
	 * ideally we don't want subsystems moving around while we do this.
	 * cgroup_mutex is also necessary to guarantee an atomic snapshot of
	 * subsys/hierarchy state.
	 */
	mutex_lock(&cgroup_mutex);

	for_each_subsys(ss, i)
		seq_printf(m, "%s\t%d\t%d\t%d\n",
			   ss->name, ss->root->hierarchy_id,
			   atomic_read(&ss->root->nr_cgrps), !ss->disabled);

	mutex_unlock(&cgroup_mutex);
	return 0;
}

static int cgroupstats_open(struct inode *inode, struct file *file)
{
	return single_open(file, proc_cgroupstats_show, NULL);
}

static const struct file_operations proc_cgroupstats_operations = {
	.open = cgroupstats_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

/**
 * cgroup_fork - initialize cgroup related fields during copy_process()
 * @child: pointer to task_struct of the child process being forked.
 *
 * A task is associated with the init_css_set until cgroup_post_fork()
 * attaches it to the parent's css_set.  An empty cg_list indicates that
 * @child isn't holding a reference to its css_set.
 */
void cgroup_fork(struct task_struct *child)
{
	RCU_INIT_POINTER(child->cgroups, &init_css_set);
	INIT_LIST_HEAD(&child->cg_list);
}

/**
 * cgroup_post_fork - called on a new task after adding it to the task list
 * @child: the task in question
 *
 * Adds the task to the list running through its css_set if necessary and
 * calls the subsystem fork() callbacks.  Has to be after the task is
 * visible on the task list in case we race with the first call to
 * css_task_iter_start() - to guarantee that the new task ends up on its
 * list.
 */
void cgroup_post_fork(struct task_struct *child)
{
	struct cgroup_subsys *ss;
	int i;

	/*
	 * This may race against cgroup_enable_task_cg_lists().  As that
	 * function sets use_task_css_set_links before grabbing
	 * tasklist_lock and we just went through tasklist_lock to add
	 * @child, it's guaranteed that either we see the set
	 * use_task_css_set_links or cgroup_enable_task_cg_lists() sees
	 * @child during its iteration.
	 *
	 * If we won the race, @child is associated with %current's
	 * css_set.  Grabbing css_set_rwsem guarantees both that the
	 * association is stable, and, on completion of the parent's
	 * migration, @child is visible in the source of migration or
	 * already in the destination cgroup.  This guarantee is necessary
	 * when implementing operations which need to migrate all tasks of
	 * a cgroup to another.
	 *
	 * Note that if we lose to cgroup_enable_task_cg_lists(), @child
	 * will remain in init_css_set.  This is safe because all tasks are
	 * in the init_css_set before cg_lists is enabled and there's no
	 * operation which transfers all tasks out of init_css_set.
	 */
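	/*
	 * Illustrative interleavings (editor's sketch): with F =
	 * cgroup_post_fork() and E = cgroup_enable_task_cg_lists(),
	 * either
	 *
	 *	E sets use_task_css_set_links, F sees it  -> F links @child
	 * or
	 *	F misses the flag, E walks the tasklist   -> E links @child
	 *
	 * and @child ends up on a css_set's task list either way.
	 */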
	if (use_task_css_set_links) {
		struct css_set *cset;

		down_write(&css_set_rwsem);
		cset = task_css_set(current);
		if (list_empty(&child->cg_list)) {
			rcu_assign_pointer(child->cgroups, cset);
			list_add(&child->cg_list, &cset->tasks);
			get_css_set(cset);
		}
		up_write(&css_set_rwsem);
	}

	/*
	 * Call ss->fork().  This must happen after @child is linked on
	 * css_set; otherwise, @child might change state between ->fork()
	 * and addition to css_set.
	 */
	if (need_forkexit_callback) {
		for_each_subsys(ss, i)
			if (ss->fork)
				ss->fork(child);
	}
}

/**
 * cgroup_exit - detach cgroup from exiting task
 * @tsk: pointer to task_struct of exiting process
 *
 * Description: Detach cgroup from @tsk and release it.
 *
 * Note that cgroups marked notify_on_release force every task in
 * them to take the global cgroup_mutex when exiting.
 * This could impact scaling on very large systems.  Be reluctant to
 * use notify_on_release cgroups where very high task exit scaling
 * is required on large systems.
 *
 * We set the exiting task's cgroup to the root cgroup (top_cgroup).  We
 * call cgroup_exit() while the task is still competent to handle
 * notify_on_release(), then leave the task attached to the root cgroup in
 * each hierarchy for the remainder of its exit.  No need to bother with
 * init_css_set refcnting.  init_css_set never goes away and we can't race
 * with the migration path - PF_EXITING is visible to the migration path.
 */
void cgroup_exit(struct task_struct *tsk)
{
	struct cgroup_subsys *ss;
	struct css_set *cset;
	bool put_cset = false;
	int i;

	/*
	 * Unlink @tsk from its css_set.  As the migration path can't race
	 * with us, we can check cg_list without grabbing css_set_rwsem.
	 */
	if (!list_empty(&tsk->cg_list)) {
		down_write(&css_set_rwsem);
		list_del_init(&tsk->cg_list);
		up_write(&css_set_rwsem);
		put_cset = true;
	}

	/* Reassign the task to the init_css_set. */
	cset = task_css_set(tsk);
	RCU_INIT_POINTER(tsk->cgroups, &init_css_set);

	if (need_forkexit_callback) {
		/* see cgroup_post_fork() for details */
		for_each_subsys(ss, i) {
			if (ss->exit) {
				struct cgroup_subsys_state *old_css = cset->subsys[i];
				struct cgroup_subsys_state *css = task_css(tsk, i);

				ss->exit(css, old_css, tsk);
			}
		}
	}

	if (put_cset)
		put_css_set(cset, true);
}

static void check_for_release(struct cgroup *cgrp)
{
	if (cgroup_is_releasable(cgrp) && list_empty(&cgrp->cset_links) &&
	    !css_has_online_children(&cgrp->self)) {
		/*
		 * The control group is currently removable.  If it's not
		 * already queued for a userspace notification, queue it now.
		 */
		int need_schedule_work = 0;

		raw_spin_lock(&release_list_lock);
		if (!cgroup_is_dead(cgrp) &&
		    list_empty(&cgrp->release_list)) {
			list_add(&cgrp->release_list, &release_list);
			need_schedule_work = 1;
		}
		raw_spin_unlock(&release_list_lock);
		if (need_schedule_work)
			schedule_work(&release_agent_work);
	}
}

/*
 * Notify userspace when a cgroup is released, by running the
 * configured release agent with the name of the cgroup (path
 * relative to the root of cgroup file system) as the argument.
 *
 * Most likely, this user command will try to rmdir this cgroup.
 *
 * This races with the possibility that some other task will be
 * attached to this cgroup before it is removed, or that some other
 * user task will 'mkdir' a child cgroup of this cgroup.  That's ok.
 * The presumed 'rmdir' will fail quietly if this cgroup is no longer
 * unused, and this cgroup will be reprieved from its death sentence,
 * to continue to serve a useful existence.  Next time it's released,
 * we will get notified again, if it still has 'notify_on_release' set.
 *
 * The final arg to call_usermodehelper() is UMH_WAIT_EXEC, which
 * means only wait until the task is successfully execve()'d.  The
 * separate release agent task is forked by call_usermodehelper(),
 * then control in this thread returns here, without waiting for the
 * release agent task.  We don't bother to wait because the caller of
 * this routine has no use for the exit status of the release agent
 * task, so no sense holding our caller up for that.
 */
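/*
 * For illustration only (editor's sketch, not part of the original
 * source): a release agent could be a shell script like the
 * hypothetical /sbin/cgroup_release_agent below, invoked with the
 * released cgroup's path as its single argument:
 *
 *	#!/bin/sh
 *	# $1 = cgroup path relative to the hierarchy root
 *	rmdir /sys/fs/cgroup/memory/"$1"
 */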
static void cgroup_release_agent(struct work_struct *work)
{
	BUG_ON(work != &release_agent_work);
	mutex_lock(&cgroup_mutex);
	raw_spin_lock(&release_list_lock);
	while (!list_empty(&release_list)) {
		char *argv[3], *envp[3];
		int i;
		char *pathbuf = NULL, *agentbuf = NULL, *path;
		struct cgroup *cgrp = list_entry(release_list.next,
						    struct cgroup,
						    release_list);
		list_del_init(&cgrp->release_list);
		raw_spin_unlock(&release_list_lock);
		pathbuf = kmalloc(PATH_MAX, GFP_KERNEL);
		if (!pathbuf)
			goto continue_free;
		path = cgroup_path(cgrp, pathbuf, PATH_MAX);
		if (!path)
			goto continue_free;
		agentbuf = kstrdup(cgrp->root->release_agent_path, GFP_KERNEL);
		if (!agentbuf)
			goto continue_free;

		i = 0;
		argv[i++] = agentbuf;
		argv[i++] = path;
		argv[i] = NULL;

		i = 0;
		/* minimal command environment */
		envp[i++] = "HOME=/";
		envp[i++] = "PATH=/sbin:/bin:/usr/sbin:/usr/bin";
		envp[i] = NULL;

		/*
		 * Drop the lock while we invoke the usermode helper,
		 * since the exec could involve hitting disk and hence
		 * be a slow process.
		 */
		mutex_unlock(&cgroup_mutex);
		call_usermodehelper(argv[0], argv, envp, UMH_WAIT_EXEC);
		mutex_lock(&cgroup_mutex);
 continue_free:
		kfree(pathbuf);
		kfree(agentbuf);
		raw_spin_lock(&release_list_lock);
	}
	raw_spin_unlock(&release_list_lock);
	mutex_unlock(&cgroup_mutex);
}

static int __init cgroup_disable(char *str)
{
	struct cgroup_subsys *ss;
	char *token;
	int i;

	while ((token = strsep(&str, ",")) != NULL) {
		if (!*token)
			continue;

		for_each_subsys(ss, i) {
			if (!strcmp(token, ss->name)) {
				ss->disabled = 1;
				printk(KERN_INFO "Disabling %s control group"
					" subsystem\n", ss->name);
				break;
			}
		}
	}
	return 1;
}
__setup("cgroup_disable=", cgroup_disable);
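
/*
 * Example kernel command line usage (illustrative); the names must
 * match ss->name:
 *
 *	cgroup_disable=memory,blkio
 */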

static int __init cgroup_set_legacy_files_on_dfl(char *str)
{
	printk("cgroup: using legacy files on the default hierarchy\n");
	cgroup_legacy_files_on_dfl = true;
	return 0;
}
__setup("cgroup__DEVEL__legacy_files_on_dfl", cgroup_set_legacy_files_on_dfl);
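
/*
 * Illustrative usage: booting with "cgroup__DEVEL__legacy_files_on_dfl"
 * makes the default hierarchy expose the legacy interface files.
 */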

/**
 * css_tryget_online_from_dir - get corresponding css from a cgroup dentry
 * @dentry: directory dentry of interest
 * @ss: subsystem of interest
 *
 * If @dentry is a directory for a cgroup which has @ss enabled on it, try
 * to get the corresponding css and return it.  If such css doesn't exist
 * or can't be pinned, an ERR_PTR value is returned.
 */
struct cgroup_subsys_state *css_tryget_online_from_dir(struct dentry *dentry,
						       struct cgroup_subsys *ss)
{
	struct kernfs_node *kn = kernfs_node_from_dentry(dentry);
	struct cgroup_subsys_state *css = NULL;
	struct cgroup *cgrp;

	/* is @dentry a cgroup dir? */
	if (dentry->d_sb->s_type != &cgroup_fs_type || !kn ||
	    kernfs_type(kn) != KERNFS_DIR)
		return ERR_PTR(-EBADF);

	rcu_read_lock();

	/*
	 * This path doesn't originate from kernfs and @kn could already
	 * have been removed, or be removed at any point.  @kn->priv is
	 * RCU protected for this access.  See css_release_work_fn() for
	 * details.
T
Tejun Heo 已提交
5433 5434 5435 5436
	 */
	cgrp = rcu_dereference(kn->priv);
	if (cgrp)
		css = cgroup_css(cgrp, ss);

	if (!css || !css_tryget_online(css))
		css = ERR_PTR(-ENOENT);

	rcu_read_unlock();
	return css;
}
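
/*
 * Usage sketch (editor's illustration, loosely modeled on the
 * perf_event caller; error handling abbreviated):
 *
 *	css = css_tryget_online_from_dir(f.file->f_path.dentry,
 *					 &perf_event_cgrp_subsys);
 *	if (IS_ERR(css))
 *		return PTR_ERR(css);
 *	...
 *	css_put(css);
 *
 * On success the caller owns a reference and must drop it with
 * css_put() when done.
 */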

/**
 * css_from_id - lookup css by id
 * @id: the cgroup id
 * @ss: cgroup subsys to be looked into
 *
 * Returns the css if there's valid one with @id, otherwise returns NULL.
 * Should be called under rcu_read_lock().
 */
struct cgroup_subsys_state *css_from_id(int id, struct cgroup_subsys *ss)
{
	WARN_ON_ONCE(!rcu_read_lock_held());
	return idr_find(&ss->css_idr, id);
}
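
/*
 * Usage sketch (editor's illustration; @id stands for a previously
 * recorded css id):
 *
 *	rcu_read_lock();
 *	css = css_from_id(id, &memory_cgrp_subsys);
 *	if (css && css_tryget_online(css)) {
 *		...
 *		css_put(css);
 *	}
 *	rcu_read_unlock();
 */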

#ifdef CONFIG_CGROUP_DEBUG
static struct cgroup_subsys_state *
debug_css_alloc(struct cgroup_subsys_state *parent_css)
{
	struct cgroup_subsys_state *css = kzalloc(sizeof(*css), GFP_KERNEL);

	if (!css)
		return ERR_PTR(-ENOMEM);

	return css;
}

static void debug_css_free(struct cgroup_subsys_state *css)
{
	kfree(css);
}

static u64 debug_taskcount_read(struct cgroup_subsys_state *css,
				struct cftype *cft)
{
	return cgroup_task_count(css->cgroup);
}

static u64 current_css_set_read(struct cgroup_subsys_state *css,
				struct cftype *cft)
{
	return (u64)(unsigned long)current->cgroups;
}

static u64 current_css_set_refcount_read(struct cgroup_subsys_state *css,
					 struct cftype *cft)
{
	u64 count;

	rcu_read_lock();
	count = atomic_read(&task_css_set(current)->refcount);
	rcu_read_unlock();
	return count;
}

static int current_css_set_cg_links_read(struct seq_file *seq, void *v)
{
	struct cgrp_cset_link *link;
	struct css_set *cset;
	char *name_buf;

	name_buf = kmalloc(NAME_MAX + 1, GFP_KERNEL);
	if (!name_buf)
		return -ENOMEM;

	down_read(&css_set_rwsem);
	rcu_read_lock();
	cset = rcu_dereference(current->cgroups);
	list_for_each_entry(link, &cset->cgrp_links, cgrp_link) {
		struct cgroup *c = link->cgrp;

		cgroup_name(c, name_buf, NAME_MAX + 1);
		seq_printf(seq, "Root %d group %s\n",
			   c->root->hierarchy_id, name_buf);
	}
	rcu_read_unlock();
	up_read(&css_set_rwsem);
	kfree(name_buf);
	return 0;
}

#define MAX_TASKS_SHOWN_PER_CSS 25
static int cgroup_css_links_read(struct seq_file *seq, void *v)
{
	struct cgroup_subsys_state *css = seq_css(seq);
	struct cgrp_cset_link *link;

	down_read(&css_set_rwsem);
	list_for_each_entry(link, &css->cgroup->cset_links, cset_link) {
		struct css_set *cset = link->cset;
		struct task_struct *task;
		int count = 0;

		seq_printf(seq, "css_set %p\n", cset);

		list_for_each_entry(task, &cset->tasks, cg_list) {
			if (count++ > MAX_TASKS_SHOWN_PER_CSS)
				goto overflow;
			seq_printf(seq, "  task %d\n", task_pid_vnr(task));
		}

		list_for_each_entry(task, &cset->mg_tasks, cg_list) {
			if (count++ > MAX_TASKS_SHOWN_PER_CSS)
				goto overflow;
			seq_printf(seq, "  task %d\n", task_pid_vnr(task));
		}
		continue;
	overflow:
		seq_puts(seq, "  ...\n");
	}
	up_read(&css_set_rwsem);
	return 0;
}

static u64 releasable_read(struct cgroup_subsys_state *css, struct cftype *cft)
{
	return test_bit(CGRP_RELEASABLE, &css->cgroup->flags);
}

static struct cftype debug_files[] =  {
	{
		.name = "taskcount",
		.read_u64 = debug_taskcount_read,
	},

	{
		.name = "current_css_set",
		.read_u64 = current_css_set_read,
	},

	{
		.name = "current_css_set_refcount",
		.read_u64 = current_css_set_refcount_read,
	},

	{
		.name = "current_css_set_cg_links",
		.seq_show = current_css_set_cg_links_read,
	},

	{
		.name = "cgroup_css_links",
		.seq_show = cgroup_css_links_read,
	},

	{
		.name = "releasable",
		.read_u64 = releasable_read,
	},

	{ }	/* terminate */
};
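
/*
 * Example (editor's illustration): with CONFIG_CGROUP_DEBUG enabled,
 * these files appear on any legacy hierarchy that mounts the debug
 * controller, e.g.:
 *
 *	# mount -t cgroup -o debug none /tmp/debug
 *	# cat /tmp/debug/taskcount
 */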

struct cgroup_subsys debug_cgrp_subsys = {
	.css_alloc = debug_css_alloc,
	.css_free = debug_css_free,
	.legacy_cftypes = debug_files,
};
#endif /* CONFIG_CGROUP_DEBUG */