/*
 *  Generic process-grouping system.
 *
 *  Based originally on the cpuset system, extracted by Paul Menage
 *  Copyright (C) 2006 Google, Inc
 *
 *  Notifications support
 *  Copyright (C) 2009 Nokia Corporation
 *  Author: Kirill A. Shutemov
 *
 *  Copyright notices from the original cpuset code:
 *  --------------------------------------------------
 *  Copyright (C) 2003 BULL SA.
 *  Copyright (C) 2004-2006 Silicon Graphics, Inc.
 *
 *  Portions derived from Patrick Mochel's sysfs code.
 *  sysfs is Copyright (c) 2001-3 Patrick Mochel
 *
 *  2003-10-10 Written by Simon Derr.
 *  2003-10-22 Updates by Stephen Hemminger.
 *  2004 May-July Rework by Paul Jackson.
 *  ---------------------------------------------------
 *
 *  This file is subject to the terms and conditions of the GNU General Public
 *  License.  See the file COPYING in the main directory of the Linux
 *  distribution for more details.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/cgroup.h>
#include <linux/cred.h>
#include <linux/ctype.h>
#include <linux/errno.h>
#include <linux/init_task.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/magic.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/mount.h>
#include <linux/pagemap.h>
#include <linux/proc_fs.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/rwsem.h>
#include <linux/percpu-rwsem.h>
#include <linux/string.h>
#include <linux/sort.h>
#include <linux/kmod.h>
#include <linux/delayacct.h>
#include <linux/cgroupstats.h>
#include <linux/hashtable.h>
#include <linux/pid_namespace.h>
#include <linux/idr.h>
#include <linux/vmalloc.h> /* TODO: replace with more sophisticated array */
#include <linux/kthread.h>
#include <linux/delay.h>

#include <linux/atomic.h>

/*
 * pidlists linger the following amount before being destroyed.  The goal
 * is avoiding frequent destruction in the middle of consecutive read calls.
 * Expiring in the middle is a performance problem not a correctness one.
 * 1 sec should be enough.
 */
#define CGROUP_PIDLIST_DESTROY_DELAY	HZ

#define CGROUP_FILE_NAME_MAX		(MAX_CGROUP_TYPE_NAMELEN +	\
					 MAX_CFTYPE_NAME + 2)

/*
 * cgroup_mutex is the master lock.  Any modification to cgroup or its
 * hierarchy must be performed while holding it.
 *
 * css_set_rwsem protects task->cgroups pointer, the list of css_set
 * objects, and the chain of tasks off each css_set.
 *
 * These locks are exported if CONFIG_PROVE_RCU so that accessors in
 * cgroup.h can use them for lockdep annotations.
 */
#ifdef CONFIG_PROVE_RCU
DEFINE_MUTEX(cgroup_mutex);
DECLARE_RWSEM(css_set_rwsem);
EXPORT_SYMBOL_GPL(cgroup_mutex);
EXPORT_SYMBOL_GPL(css_set_rwsem);
#else
static DEFINE_MUTEX(cgroup_mutex);
static DECLARE_RWSEM(css_set_rwsem);
#endif

/*
 * Protects cgroup_idr and css_idr so that IDs can be released without
 * grabbing cgroup_mutex.
 */
static DEFINE_SPINLOCK(cgroup_idr_lock);

/*
 * Protects cgroup_subsys->release_agent_path.  Modifying it also requires
 * cgroup_mutex.  Reading requires either cgroup_mutex or this spinlock.
 */
static DEFINE_SPINLOCK(release_agent_path_lock);

struct percpu_rw_semaphore cgroup_threadgroup_rwsem;

#define cgroup_assert_mutex_or_rcu_locked()				\
	rcu_lockdep_assert(rcu_read_lock_held() ||			\
			   lockdep_is_held(&cgroup_mutex),		\
			   "cgroup_mutex or RCU read lock required");

/*
 * cgroup destruction makes heavy use of work items and there can be a lot
 * of concurrent destructions.  Use a separate workqueue so that cgroup
 * destruction work items don't end up filling up max_active of system_wq
 * which may lead to deadlock.
 */
static struct workqueue_struct *cgroup_destroy_wq;

/*
 * pidlist destructions need to be flushed on cgroup destruction.  Use a
 * separate workqueue as flush domain.
 */
static struct workqueue_struct *cgroup_pidlist_destroy_wq;

/* generate an array of cgroup subsystem pointers */
#define SUBSYS(_x) [_x ## _cgrp_id] = &_x ## _cgrp_subsys,
static struct cgroup_subsys *cgroup_subsys[] = {
#include <linux/cgroup_subsys.h>
};
#undef SUBSYS

/* array of cgroup subsystem names */
#define SUBSYS(_x) [_x ## _cgrp_id] = #_x,
static const char *cgroup_subsys_name[] = {
#include <linux/cgroup_subsys.h>
};
#undef SUBSYS

/*
 * The default hierarchy, reserved for the subsystems that are otherwise
 * unattached - it never has more than a single cgroup, and all tasks are
 * part of that cgroup.
 */
struct cgroup_root cgrp_dfl_root;
EXPORT_SYMBOL_GPL(cgrp_dfl_root);

/*
 * The default hierarchy always exists but is hidden until mounted for the
 * first time.  This is for backward compatibility.
 */
static bool cgrp_dfl_root_visible;

/*
 * Set by the boot param of the same name and makes subsystems with NULL
 * ->dfl_files to use ->legacy_files on the default hierarchy.
 */
static bool cgroup_legacy_files_on_dfl;

/* some controllers are not supported in the default hierarchy */
static unsigned long cgrp_dfl_root_inhibit_ss_mask;

/* The list of hierarchy roots */

static LIST_HEAD(cgroup_roots);
static int cgroup_root_count;

/* hierarchy ID allocation and mapping, protected by cgroup_mutex */
static DEFINE_IDR(cgroup_hierarchy_idr);

/*
 * Assign a monotonically increasing serial number to csses.  It guarantees
 * cgroups with bigger numbers are newer than those with smaller numbers.
 * Also, as csses are always appended to the parent's ->children list, it
 * guarantees that sibling csses are always sorted in the ascending serial
 * number order on the list.  Protected by cgroup_mutex.
 */
static u64 css_serial_nr_next = 1;

/*
 * These bitmask flags indicate whether tasks in the fork and exit paths have
 * fork/exit handlers to call. This avoids us having to do extra work in the
 * fork/exit path to check which subsystems have fork/exit callbacks.
 */
static unsigned long have_fork_callback __read_mostly;
static unsigned long have_exit_callback __read_mostly;

static struct cftype cgroup_dfl_base_files[];
static struct cftype cgroup_legacy_base_files[];

static int rebind_subsystems(struct cgroup_root *dst_root,
			     unsigned long ss_mask);
static int cgroup_destroy_locked(struct cgroup *cgrp);
static int create_css(struct cgroup *cgrp, struct cgroup_subsys *ss,
		      bool visible);
static void css_release(struct percpu_ref *ref);
static void kill_css(struct cgroup_subsys_state *css);
static int cgroup_addrm_files(struct cgroup *cgrp, struct cftype cfts[],
			      bool is_add);

/* IDR wrappers which synchronize using cgroup_idr_lock */
static int cgroup_idr_alloc(struct idr *idr, void *ptr, int start, int end,
			    gfp_t gfp_mask)
{
	int ret;

	idr_preload(gfp_mask);
	spin_lock_bh(&cgroup_idr_lock);
	ret = idr_alloc(idr, ptr, start, end, gfp_mask);
	spin_unlock_bh(&cgroup_idr_lock);
	idr_preload_end();
	return ret;
}

static void *cgroup_idr_replace(struct idr *idr, void *ptr, int id)
{
	void *ret;

	spin_lock_bh(&cgroup_idr_lock);
	ret = idr_replace(idr, ptr, id);
	spin_unlock_bh(&cgroup_idr_lock);
	return ret;
}

static void cgroup_idr_remove(struct idr *idr, int id)
{
	spin_lock_bh(&cgroup_idr_lock);
	idr_remove(idr, id);
	spin_unlock_bh(&cgroup_idr_lock);
}

static struct cgroup *cgroup_parent(struct cgroup *cgrp)
{
	struct cgroup_subsys_state *parent_css = cgrp->self.parent;

	if (parent_css)
		return container_of(parent_css, struct cgroup, self);
	return NULL;
}

/**
 * cgroup_css - obtain a cgroup's css for the specified subsystem
 * @cgrp: the cgroup of interest
 * @ss: the subsystem of interest (%NULL returns @cgrp->self)
 *
 * Return @cgrp's css (cgroup_subsys_state) associated with @ss.  This
 * function must be called either under cgroup_mutex or rcu_read_lock() and
 * the caller is responsible for pinning the returned css if it wants to
 * keep accessing it outside the said locks.  This function may return
 * %NULL if @cgrp doesn't have @ss enabled.
 */
static struct cgroup_subsys_state *cgroup_css(struct cgroup *cgrp,
					      struct cgroup_subsys *ss)
{
	if (ss)
		return rcu_dereference_check(cgrp->subsys[ss->id],
					lockdep_is_held(&cgroup_mutex));
	else
		return &cgrp->self;
}

/**
 * cgroup_e_css - obtain a cgroup's effective css for the specified subsystem
 * @cgrp: the cgroup of interest
 * @ss: the subsystem of interest (%NULL returns @cgrp->self)
 *
 * Similar to cgroup_css() but returns the effective css, which is defined
 * as the matching css of the nearest ancestor including self which has @ss
 * enabled.  If @ss is associated with the hierarchy @cgrp is on, this
 * function is guaranteed to return non-NULL css.
 */
static struct cgroup_subsys_state *cgroup_e_css(struct cgroup *cgrp,
						struct cgroup_subsys *ss)
{
	lockdep_assert_held(&cgroup_mutex);

	if (!ss)
		return &cgrp->self;

	if (!(cgrp->root->subsys_mask & (1 << ss->id)))
		return NULL;

	/*
	 * This function is used while updating css associations and thus
	 * can't test the csses directly.  Use ->child_subsys_mask.
	 */
	while (cgroup_parent(cgrp) &&
	       !(cgroup_parent(cgrp)->child_subsys_mask & (1 << ss->id)))
		cgrp = cgroup_parent(cgrp);

	return cgroup_css(cgrp, ss);
}

/**
 * cgroup_get_e_css - get a cgroup's effective css for the specified subsystem
 * @cgrp: the cgroup of interest
 * @ss: the subsystem of interest
 *
 * Find and get the effective css of @cgrp for @ss.  The effective css is
 * defined as the matching css of the nearest ancestor including self which
 * has @ss enabled.  If @ss is not mounted on the hierarchy @cgrp is on,
 * the root css is returned, so this function always returns a valid css.
 * The returned css must be put using css_put().
 */
struct cgroup_subsys_state *cgroup_get_e_css(struct cgroup *cgrp,
					     struct cgroup_subsys *ss)
{
	struct cgroup_subsys_state *css;

	rcu_read_lock();

	do {
		css = cgroup_css(cgrp, ss);

		if (css && css_tryget_online(css))
			goto out_unlock;
		cgrp = cgroup_parent(cgrp);
	} while (cgrp);

	css = init_css_set.subsys[ss->id];
	css_get(css);
out_unlock:
	rcu_read_unlock();
	return css;
}

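/*
 * Illustrative usage, not part of the original file: a typical caller
 * pairs cgroup_get_e_css() with css_put() once it is done with the css.
 * memory_cgrp_subsys is just an example and exists only with CONFIG_MEMCG.
 *
 *	struct cgroup_subsys_state *css;
 *
 *	css = cgroup_get_e_css(cgrp, &memory_cgrp_subsys);
 *	... use css, which is guaranteed non-NULL ...
 *	css_put(css);
 */
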
/* convenient tests for these bits */
static inline bool cgroup_is_dead(const struct cgroup *cgrp)
{
	return !(cgrp->self.flags & CSS_ONLINE);
}

struct cgroup_subsys_state *of_css(struct kernfs_open_file *of)
{
	struct cgroup *cgrp = of->kn->parent->priv;
	struct cftype *cft = of_cft(of);

	/*
	 * This is an open and unprotected implementation of cgroup_css().
	 * seq_css() is only called from a kernfs file operation which has
	 * an active reference on the file.  Because all the subsystem
	 * files are drained before a css is disassociated with a cgroup,
	 * the matching css from the cgroup's subsys table is guaranteed to
	 * be and stay valid until the enclosing operation is complete.
	 */
	if (cft->ss)
		return rcu_dereference_raw(cgrp->subsys[cft->ss->id]);
	else
		return &cgrp->self;
}
EXPORT_SYMBOL_GPL(of_css);

/**
 * cgroup_is_descendant - test ancestry
 * @cgrp: the cgroup to be tested
 * @ancestor: possible ancestor of @cgrp
 *
 * Test whether @cgrp is a descendant of @ancestor.  It also returns %true
 * if @cgrp == @ancestor.  This function is safe to call as long as @cgrp
 * and @ancestor are accessible.
 */
bool cgroup_is_descendant(struct cgroup *cgrp, struct cgroup *ancestor)
{
	while (cgrp) {
		if (cgrp == ancestor)
			return true;
		cgrp = cgroup_parent(cgrp);
	}
	return false;
}

static int notify_on_release(const struct cgroup *cgrp)
{
	return test_bit(CGRP_NOTIFY_ON_RELEASE, &cgrp->flags);
}

/**
 * for_each_css - iterate all css's of a cgroup
 * @css: the iteration cursor
 * @ssid: the index of the subsystem, CGROUP_SUBSYS_COUNT after reaching the end
 * @cgrp: the target cgroup to iterate css's of
 *
 * Should be called under cgroup_mutex.
 */
#define for_each_css(css, ssid, cgrp)					\
	for ((ssid) = 0; (ssid) < CGROUP_SUBSYS_COUNT; (ssid)++)	\
		if (!((css) = rcu_dereference_check(			\
				(cgrp)->subsys[(ssid)],			\
				lockdep_is_held(&cgroup_mutex)))) { }	\
		else

/**
 * for_each_e_css - iterate all effective css's of a cgroup
 * @css: the iteration cursor
 * @ssid: the index of the subsystem, CGROUP_SUBSYS_COUNT after reaching the end
 * @cgrp: the target cgroup to iterate css's of
 *
 * Should be called under cgroup_mutex.
 */
#define for_each_e_css(css, ssid, cgrp)					\
	for ((ssid) = 0; (ssid) < CGROUP_SUBSYS_COUNT; (ssid)++)	\
		if (!((css) = cgroup_e_css(cgrp, cgroup_subsys[(ssid)]))) \
			;						\
		else

/**
 * for_each_subsys - iterate all enabled cgroup subsystems
 * @ss: the iteration cursor
 * @ssid: the index of @ss, CGROUP_SUBSYS_COUNT after reaching the end
 */
#define for_each_subsys(ss, ssid)					\
	for ((ssid) = 0; (ssid) < CGROUP_SUBSYS_COUNT &&		\
	     (((ss) = cgroup_subsys[ssid]) || true); (ssid)++)

/**
 * for_each_subsys_which - filter for_each_subsys with a bitmask
 * @ss: the iteration cursor
 * @ssid: the index of @ss, CGROUP_SUBSYS_COUNT after reaching the end
 * @ss_maskp: a pointer to the bitmask
 *
 * The block will only run for cases where the ssid-th bit (1 << ssid) of
 * mask is set to 1.
 */
#define for_each_subsys_which(ss, ssid, ss_maskp)			\
	if (!CGROUP_SUBSYS_COUNT) /* to avoid spurious gcc warning */	\
		(ssid) = 0;						\
	else								\
		for_each_set_bit(ssid, ss_maskp, CGROUP_SUBSYS_COUNT)	\
			if (((ss) = cgroup_subsys[ssid]) && false)	\
				break;					\
			else

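/*
 * Illustrative usage, not part of the original file: walk only the
 * subsystems whose bits are set in a caller-provided mask.  cpu_cgrp_id
 * and memory_cgrp_id exist only when those controllers are built in.
 *
 *	unsigned long mask = (1UL << cpu_cgrp_id) | (1UL << memory_cgrp_id);
 *	struct cgroup_subsys *ss;
 *	int ssid;
 *
 *	for_each_subsys_which(ss, ssid, &mask)
 *		pr_info("subsys %s is in the mask\n", ss->name);
 */
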
/* iterate across the hierarchies */
#define for_each_root(root)						\
	list_for_each_entry((root), &cgroup_roots, root_list)

/* iterate over child cgrps, lock should be held throughout iteration */
#define cgroup_for_each_live_child(child, cgrp)				\
	list_for_each_entry((child), &(cgrp)->self.children, self.sibling) \
		if (({ lockdep_assert_held(&cgroup_mutex);		\
		       cgroup_is_dead(child); }))			\
			;						\
		else

static void cgroup_release_agent(struct work_struct *work);
static void check_for_release(struct cgroup *cgrp);

/*
 * A cgroup can be associated with multiple css_sets as different tasks may
 * belong to different cgroups on different hierarchies.  In the other
 * direction, a css_set is naturally associated with multiple cgroups.
 * This M:N relationship is represented by the following link structure
 * which exists for each association and allows traversing the associations
 * from both sides.
 */
struct cgrp_cset_link {
	/* the cgroup and css_set this link associates */
	struct cgroup		*cgrp;
	struct css_set		*cset;

	/* list of cgrp_cset_links anchored at cgrp->cset_links */
	struct list_head	cset_link;

	/* list of cgrp_cset_links anchored at css_set->cgrp_links */
	struct list_head	cgrp_link;
};

/*
 * The default css_set - used by init and its children prior to any
 * hierarchies being mounted. It contains a pointer to the root state
 * for each subsystem. Also used to anchor the list of css_sets. Not
 * reference-counted, to improve performance when child cgroups
 * haven't been created.
 */
struct css_set init_css_set = {
	.refcount		= ATOMIC_INIT(1),
	.cgrp_links		= LIST_HEAD_INIT(init_css_set.cgrp_links),
	.tasks			= LIST_HEAD_INIT(init_css_set.tasks),
	.mg_tasks		= LIST_HEAD_INIT(init_css_set.mg_tasks),
	.mg_preload_node	= LIST_HEAD_INIT(init_css_set.mg_preload_node),
	.mg_node		= LIST_HEAD_INIT(init_css_set.mg_node),
};

static int css_set_count	= 1;	/* 1 for init_css_set */

/**
 * cgroup_update_populated - update populated count of a cgroup
 * @cgrp: the target cgroup
 * @populated: inc or dec populated count
 *
 * @cgrp is either getting the first task (css_set) or losing the last.
 * Update @cgrp->populated_cnt accordingly.  The count is propagated
 * towards root so that a given cgroup's populated_cnt is zero iff the
 * cgroup and all its descendants are empty.
 *
 * @cgrp's interface file "cgroup.populated" is zero if
 * @cgrp->populated_cnt is zero and 1 otherwise.  When @cgrp->populated_cnt
 * changes from or to zero, userland is notified that the content of the
 * interface file has changed.  This can be used to detect when @cgrp and
 * its descendants become populated or empty.
 */
static void cgroup_update_populated(struct cgroup *cgrp, bool populated)
{
	lockdep_assert_held(&css_set_rwsem);

	do {
		bool trigger;

		if (populated)
			trigger = !cgrp->populated_cnt++;
		else
			trigger = !--cgrp->populated_cnt;

		if (!trigger)
			break;

		if (cgrp->populated_kn)
			kernfs_notify(cgrp->populated_kn);
		cgrp = cgroup_parent(cgrp);
	} while (cgrp);
}

/*
 * hash table for cgroup groups. This improves the performance of finding
 * an existing css_set. This hash doesn't (currently) take into
 * account cgroups in empty hierarchies.
 */
#define CSS_SET_HASH_BITS	7
static DEFINE_HASHTABLE(css_set_table, CSS_SET_HASH_BITS);

static unsigned long css_set_hash(struct cgroup_subsys_state *css[])
{
	unsigned long key = 0UL;
	struct cgroup_subsys *ss;
	int i;

	for_each_subsys(ss, i)
		key += (unsigned long)css[i];
	key = (key >> 16) ^ key;

	return key;
}

static void put_css_set_locked(struct css_set *cset)
{
	struct cgrp_cset_link *link, *tmp_link;
	struct cgroup_subsys *ss;
	int ssid;

	lockdep_assert_held(&css_set_rwsem);

	if (!atomic_dec_and_test(&cset->refcount))
		return;

	/* This css_set is dead. unlink it and release cgroup refcounts */
	for_each_subsys(ss, ssid)
		list_del(&cset->e_cset_node[ssid]);
	hash_del(&cset->hlist);
	css_set_count--;

	list_for_each_entry_safe(link, tmp_link, &cset->cgrp_links, cgrp_link) {
		struct cgroup *cgrp = link->cgrp;

		list_del(&link->cset_link);
		list_del(&link->cgrp_link);

		/* @cgrp can't go away while we're holding css_set_rwsem */
		if (list_empty(&cgrp->cset_links)) {
			cgroup_update_populated(cgrp, false);
			check_for_release(cgrp);
		}

		kfree(link);
	}

	kfree_rcu(cset, rcu_head);
}

static void put_css_set(struct css_set *cset)
{
	/*
	 * Ensure that the refcount doesn't hit zero while any readers
	 * can see it. Similar to atomic_dec_and_lock(), but for an
	 * rwsem
	 */
	if (atomic_add_unless(&cset->refcount, -1, 1))
		return;

	down_write(&css_set_rwsem);
	put_css_set_locked(cset);
	up_write(&css_set_rwsem);
}

/*
 * refcounted get/put for css_set objects
 */
static inline void get_css_set(struct css_set *cset)
{
	atomic_inc(&cset->refcount);
}

/**
 * compare_css_sets - helper function for find_existing_css_set().
 * @cset: candidate css_set being tested
 * @old_cset: existing css_set for a task
 * @new_cgrp: cgroup that's being entered by the task
 * @template: desired set of css pointers in css_set (pre-calculated)
 *
 * Returns true if "cset" matches "old_cset" except for the hierarchy
 * which "new_cgrp" belongs to, for which it should match "new_cgrp".
 */
static bool compare_css_sets(struct css_set *cset,
			     struct css_set *old_cset,
			     struct cgroup *new_cgrp,
			     struct cgroup_subsys_state *template[])
{
	struct list_head *l1, *l2;

	/*
	 * On the default hierarchy, there can be csets which are
	 * associated with the same set of cgroups but different csses.
	 * Let's first ensure that csses match.
	 */
	if (memcmp(template, cset->subsys, sizeof(cset->subsys)))
		return false;

	/*
	 * Compare cgroup pointers in order to distinguish between
	 * different cgroups in hierarchies.  As different cgroups may
	 * share the same effective css, this comparison is always
	 * necessary.
	 */
	l1 = &cset->cgrp_links;
	l2 = &old_cset->cgrp_links;
	while (1) {
		struct cgrp_cset_link *link1, *link2;
		struct cgroup *cgrp1, *cgrp2;

		l1 = l1->next;
		l2 = l2->next;
		/* See if we reached the end - both lists are equal length. */
		if (l1 == &cset->cgrp_links) {
			BUG_ON(l2 != &old_cset->cgrp_links);
			break;
		} else {
			BUG_ON(l2 == &old_cset->cgrp_links);
		}
		/* Locate the cgroups associated with these links. */
		link1 = list_entry(l1, struct cgrp_cset_link, cgrp_link);
		link2 = list_entry(l2, struct cgrp_cset_link, cgrp_link);
		cgrp1 = link1->cgrp;
		cgrp2 = link2->cgrp;
		/* Hierarchies should be linked in the same order. */
		BUG_ON(cgrp1->root != cgrp2->root);

		/*
		 * If this hierarchy is the hierarchy of the cgroup
		 * that's changing, then we need to check that this
		 * css_set points to the new cgroup; if it's any other
		 * hierarchy, then this css_set should point to the
		 * same cgroup as the old css_set.
		 */
		if (cgrp1->root == new_cgrp->root) {
			if (cgrp1 != new_cgrp)
				return false;
		} else {
			if (cgrp1 != cgrp2)
				return false;
		}
	}
	return true;
}

/**
 * find_existing_css_set - init css array and find the matching css_set
 * @old_cset: the css_set that we're using before the cgroup transition
 * @cgrp: the cgroup that we're moving into
 * @template: out param for the new set of csses, should be clear on entry
 */
static struct css_set *find_existing_css_set(struct css_set *old_cset,
					struct cgroup *cgrp,
					struct cgroup_subsys_state *template[])
{
	struct cgroup_root *root = cgrp->root;
	struct cgroup_subsys *ss;
	struct css_set *cset;
	unsigned long key;
	int i;

	/*
	 * Build the set of subsystem state objects that we want to see in the
	 * new css_set. While subsystems can change globally, the entries here
	 * won't change, so no need for locking.
	 */
	for_each_subsys(ss, i) {
		if (root->subsys_mask & (1UL << i)) {
			/*
			 * @ss is in this hierarchy, so we want the
			 * effective css from @cgrp.
			 */
			template[i] = cgroup_e_css(cgrp, ss);
		} else {
			/*
			 * @ss is not in this hierarchy, so we don't want
			 * to change the css.
			 */
			template[i] = old_cset->subsys[i];
		}
	}

	key = css_set_hash(template);
	hash_for_each_possible(css_set_table, cset, hlist, key) {
		if (!compare_css_sets(cset, old_cset, cgrp, template))
			continue;

		/* This css_set matches what we need */
		return cset;
	}

	/* No existing cgroup group matched */
	return NULL;
}

static void free_cgrp_cset_links(struct list_head *links_to_free)
{
	struct cgrp_cset_link *link, *tmp_link;

	list_for_each_entry_safe(link, tmp_link, links_to_free, cset_link) {
		list_del(&link->cset_link);
		kfree(link);
	}
}

/**
 * allocate_cgrp_cset_links - allocate cgrp_cset_links
 * @count: the number of links to allocate
 * @tmp_links: list_head the allocated links are put on
 *
 * Allocate @count cgrp_cset_link structures and chain them on @tmp_links
 * through ->cset_link.  Returns 0 on success or -errno.
 */
static int allocate_cgrp_cset_links(int count, struct list_head *tmp_links)
{
	struct cgrp_cset_link *link;
	int i;

	INIT_LIST_HEAD(tmp_links);

	for (i = 0; i < count; i++) {
		link = kzalloc(sizeof(*link), GFP_KERNEL);
		if (!link) {
			free_cgrp_cset_links(tmp_links);
			return -ENOMEM;
		}
		list_add(&link->cset_link, tmp_links);
	}
	return 0;
}

/**
 * link_css_set - a helper function to link a css_set to a cgroup
 * @tmp_links: cgrp_cset_link objects allocated by allocate_cgrp_cset_links()
 * @cset: the css_set to be linked
 * @cgrp: the destination cgroup
 */
static void link_css_set(struct list_head *tmp_links, struct css_set *cset,
			 struct cgroup *cgrp)
{
	struct cgrp_cset_link *link;

	BUG_ON(list_empty(tmp_links));

	if (cgroup_on_dfl(cgrp))
		cset->dfl_cgrp = cgrp;

	link = list_first_entry(tmp_links, struct cgrp_cset_link, cset_link);
	link->cset = cset;
	link->cgrp = cgrp;

	if (list_empty(&cgrp->cset_links))
		cgroup_update_populated(cgrp, true);
	list_move(&link->cset_link, &cgrp->cset_links);

	/*
	 * Always add links to the tail of the list so that the list
	 * is sorted by order of hierarchy creation
	 */
	list_add_tail(&link->cgrp_link, &cset->cgrp_links);
}

/**
 * find_css_set - return a new css_set with one cgroup updated
 * @old_cset: the baseline css_set
 * @cgrp: the cgroup to be updated
 *
 * Return a new css_set that's equivalent to @old_cset, but with @cgrp
 * substituted into the appropriate hierarchy.
 */
static struct css_set *find_css_set(struct css_set *old_cset,
				    struct cgroup *cgrp)
{
	struct cgroup_subsys_state *template[CGROUP_SUBSYS_COUNT] = { };
	struct css_set *cset;
	struct list_head tmp_links;
	struct cgrp_cset_link *link;
	struct cgroup_subsys *ss;
	unsigned long key;
	int ssid;

	lockdep_assert_held(&cgroup_mutex);

	/* First see if we already have a cgroup group that matches
	 * the desired set */
	down_read(&css_set_rwsem);
	cset = find_existing_css_set(old_cset, cgrp, template);
	if (cset)
		get_css_set(cset);
	up_read(&css_set_rwsem);

	if (cset)
		return cset;

	cset = kzalloc(sizeof(*cset), GFP_KERNEL);
	if (!cset)
		return NULL;

	/* Allocate all the cgrp_cset_link objects that we'll need */
	if (allocate_cgrp_cset_links(cgroup_root_count, &tmp_links) < 0) {
		kfree(cset);
		return NULL;
	}

	atomic_set(&cset->refcount, 1);
	INIT_LIST_HEAD(&cset->cgrp_links);
	INIT_LIST_HEAD(&cset->tasks);
	INIT_LIST_HEAD(&cset->mg_tasks);
	INIT_LIST_HEAD(&cset->mg_preload_node);
	INIT_LIST_HEAD(&cset->mg_node);
	INIT_HLIST_NODE(&cset->hlist);

	/* Copy the set of subsystem state objects generated in
	 * find_existing_css_set() */
	memcpy(cset->subsys, template, sizeof(cset->subsys));

	down_write(&css_set_rwsem);
	/* Add reference counts and links from the new css_set. */
	list_for_each_entry(link, &old_cset->cgrp_links, cgrp_link) {
		struct cgroup *c = link->cgrp;

		if (c->root == cgrp->root)
			c = cgrp;
		link_css_set(&tmp_links, cset, c);
	}

	BUG_ON(!list_empty(&tmp_links));

	css_set_count++;

	/* Add @cset to the hash table */
	key = css_set_hash(cset->subsys);
	hash_add(css_set_table, &cset->hlist, key);

	for_each_subsys(ss, ssid)
		list_add_tail(&cset->e_cset_node[ssid],
			      &cset->subsys[ssid]->cgroup->e_csets[ssid]);

	up_write(&css_set_rwsem);

	return cset;
}

static struct cgroup_root *cgroup_root_from_kf(struct kernfs_root *kf_root)
{
	struct cgroup *root_cgrp = kf_root->kn->priv;

	return root_cgrp->root;
}

static int cgroup_init_root_id(struct cgroup_root *root)
{
	int id;

	lockdep_assert_held(&cgroup_mutex);

	id = idr_alloc_cyclic(&cgroup_hierarchy_idr, root, 0, 0, GFP_KERNEL);
	if (id < 0)
		return id;

	root->hierarchy_id = id;
	return 0;
}

static void cgroup_exit_root_id(struct cgroup_root *root)
{
	lockdep_assert_held(&cgroup_mutex);

	if (root->hierarchy_id) {
		idr_remove(&cgroup_hierarchy_idr, root->hierarchy_id);
		root->hierarchy_id = 0;
	}
}

static void cgroup_free_root(struct cgroup_root *root)
{
	if (root) {
		/* hierarchy ID should already have been released */
		WARN_ON_ONCE(root->hierarchy_id);

		idr_destroy(&root->cgroup_idr);
		kfree(root);
	}
}

static void cgroup_destroy_root(struct cgroup_root *root)
{
	struct cgroup *cgrp = &root->cgrp;
	struct cgrp_cset_link *link, *tmp_link;

	mutex_lock(&cgroup_mutex);

	BUG_ON(atomic_read(&root->nr_cgrps));
	BUG_ON(!list_empty(&cgrp->self.children));

	/* Rebind all subsystems back to the default hierarchy */
	rebind_subsystems(&cgrp_dfl_root, root->subsys_mask);

	/*
	 * Release all the links from cset_links to this hierarchy's
	 * root cgroup
	 */
	down_write(&css_set_rwsem);

	list_for_each_entry_safe(link, tmp_link, &cgrp->cset_links, cset_link) {
		list_del(&link->cset_link);
		list_del(&link->cgrp_link);
		kfree(link);
	}
	up_write(&css_set_rwsem);

	if (!list_empty(&root->root_list)) {
		list_del(&root->root_list);
		cgroup_root_count--;
	}

	cgroup_exit_root_id(root);

	mutex_unlock(&cgroup_mutex);

	kernfs_destroy_root(root->kf_root);
	cgroup_free_root(root);
}

/* look up cgroup associated with given css_set on the specified hierarchy */
static struct cgroup *cset_cgroup_from_root(struct css_set *cset,
					    struct cgroup_root *root)
{
	struct cgroup *res = NULL;

	lockdep_assert_held(&cgroup_mutex);
	lockdep_assert_held(&css_set_rwsem);

	if (cset == &init_css_set) {
		res = &root->cgrp;
	} else {
		struct cgrp_cset_link *link;

		list_for_each_entry(link, &cset->cgrp_links, cgrp_link) {
			struct cgroup *c = link->cgrp;

			if (c->root == root) {
				res = c;
				break;
			}
		}
	}

	BUG_ON(!res);
	return res;
}

/*
 * Return the cgroup for "task" from the given hierarchy. Must be
 * called with cgroup_mutex and css_set_rwsem held.
 */
static struct cgroup *task_cgroup_from_root(struct task_struct *task,
					    struct cgroup_root *root)
{
	/*
	 * No need to lock the task - since we hold cgroup_mutex the
	 * task can't change groups, so the only thing that can happen
	 * is that it exits and its css is set back to init_css_set.
	 */
	return cset_cgroup_from_root(task_css_set(task), root);
}

/*
 * A task must hold cgroup_mutex to modify cgroups.
 *
 * Any task can increment and decrement the count field without lock.
 * So in general, code holding cgroup_mutex can't rely on the count
 * field not changing.  However, if the count goes to zero, then only
 * cgroup_attach_task() can increment it again.  Because a count of zero
 * means that no tasks are currently attached, therefore there is no
 * way a task attached to that cgroup can fork (the other way to
 * increment the count).  So code holding cgroup_mutex can safely
 * assume that if the count is zero, it will stay zero. Similarly, if
 * a task holds cgroup_mutex on a cgroup with zero count, it
 * knows that the cgroup won't be removed, as cgroup_rmdir()
 * needs that mutex.
 *
 * A cgroup can only be deleted if both its 'count' of using tasks
 * is zero, and its list of 'children' cgroups is empty.  Since all
 * tasks in the system use _some_ cgroup, and since there is always at
 * least one task in the system (init, pid == 1), therefore, root cgroup
 * always has either children cgroups and/or using tasks.  So we don't
 * need a special hack to ensure that root cgroup cannot be deleted.
 *
 * P.S.  One more locking exception.  RCU is used to guard the
 * update of a task's cgroup pointer by cgroup_attach_task()
 */

static int cgroup_populate_dir(struct cgroup *cgrp, unsigned long subsys_mask);
static struct kernfs_syscall_ops cgroup_kf_syscall_ops;
static const struct file_operations proc_cgroupstats_operations;

static char *cgroup_file_name(struct cgroup *cgrp, const struct cftype *cft,
			      char *buf)
{
	if (cft->ss && !(cft->flags & CFTYPE_NO_PREFIX) &&
	    !(cgrp->root->flags & CGRP_ROOT_NOPREFIX))
		snprintf(buf, CGROUP_FILE_NAME_MAX, "%s.%s",
			 cft->ss->name, cft->name);
	else
		strncpy(buf, cft->name, CGROUP_FILE_NAME_MAX);
	return buf;
}

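/*
 * Illustrative, not part of the original file: for a cftype owned by the
 * memory controller with .name = "limit_in_bytes", the helper above
 * produces "memory.limit_in_bytes".  Files with no owning subsystem
 * (e.g. "cgroup.procs") keep their bare name, as do files flagged
 * CFTYPE_NO_PREFIX and files on roots mounted with the noprefix option.
 */
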
/**
 * cgroup_file_mode - deduce file mode of a control file
 * @cft: the control file in question
 *
 * returns cft->mode if ->mode is not 0
 * returns S_IRUGO|S_IWUSR if it has both a read and a write handler
 * returns S_IRUGO if it has only a read handler
 * returns S_IWUSR if it has only a write handler
 */
static umode_t cgroup_file_mode(const struct cftype *cft)
{
	umode_t mode = 0;

	if (cft->mode)
		return cft->mode;

	if (cft->read_u64 || cft->read_s64 || cft->seq_show)
		mode |= S_IRUGO;

	if (cft->write_u64 || cft->write_s64 || cft->write)
		mode |= S_IWUSR;

	return mode;
}

static void cgroup_get(struct cgroup *cgrp)
{
	WARN_ON_ONCE(cgroup_is_dead(cgrp));
	css_get(&cgrp->self);
}

static bool cgroup_tryget(struct cgroup *cgrp)
{
	return css_tryget(&cgrp->self);
}

static void cgroup_put(struct cgroup *cgrp)
{
	css_put(&cgrp->self);
}

/**
 * cgroup_calc_child_subsys_mask - calculate child_subsys_mask
 * @cgrp: the target cgroup
 * @subtree_control: the new subtree_control mask to consider
 *
 * On the default hierarchy, a subsystem may request other subsystems to be
 * enabled together through its ->depends_on mask.  In such cases, more
 * subsystems than specified in "cgroup.subtree_control" may be enabled.
 *
 * This function calculates which subsystems need to be enabled if
 * @subtree_control is to be applied to @cgrp.  The returned mask is always
 * a superset of @subtree_control and follows the usual hierarchy rules.
 */
static unsigned long cgroup_calc_child_subsys_mask(struct cgroup *cgrp,
						  unsigned long subtree_control)
{
	struct cgroup *parent = cgroup_parent(cgrp);
	unsigned long cur_ss_mask = subtree_control;
	struct cgroup_subsys *ss;
	int ssid;

	lockdep_assert_held(&cgroup_mutex);

	if (!cgroup_on_dfl(cgrp))
		return cur_ss_mask;

	while (true) {
		unsigned long new_ss_mask = cur_ss_mask;

		for_each_subsys_which(ss, ssid, &cur_ss_mask)
			new_ss_mask |= ss->depends_on;

		/*
		 * Mask out subsystems which aren't available.  This can
		 * happen only if some depended-upon subsystems were bound
		 * to non-default hierarchies.
		 */
		if (parent)
			new_ss_mask &= parent->child_subsys_mask;
		else
			new_ss_mask &= cgrp->root->subsys_mask;

		if (new_ss_mask == cur_ss_mask)
			break;
		cur_ss_mask = new_ss_mask;
	}

	return cur_ss_mask;
}

/**
 * cgroup_refresh_child_subsys_mask - update child_subsys_mask
 * @cgrp: the target cgroup
 *
 * Update @cgrp->child_subsys_mask according to the current
 * @cgrp->subtree_control using cgroup_calc_child_subsys_mask().
 */
static void cgroup_refresh_child_subsys_mask(struct cgroup *cgrp)
{
	cgrp->child_subsys_mask =
		cgroup_calc_child_subsys_mask(cgrp, cgrp->subtree_control);
}

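/*
 * Illustrative, not part of the original file: if a controller's
 * ->depends_on mask names the memory controller, then writing that
 * controller into "cgroup.subtree_control" makes the fixed-point loop
 * above pull in the memory bit as well, provided memory is available on
 * the same hierarchy; otherwise the dependency is masked back out.
 */
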
/**
 * cgroup_kn_unlock - unlocking helper for cgroup kernfs methods
 * @kn: the kernfs_node being serviced
 *
 * This helper undoes cgroup_kn_lock_live() and should be invoked before
 * the method finishes if locking succeeded.  Note that once this function
 * returns the cgroup returned by cgroup_kn_lock_live() may become
 * inaccessible any time.  If the caller intends to continue to access the
 * cgroup, it should pin it before invoking this function.
 */
static void cgroup_kn_unlock(struct kernfs_node *kn)
{
	struct cgroup *cgrp;

	if (kernfs_type(kn) == KERNFS_DIR)
		cgrp = kn->priv;
	else
		cgrp = kn->parent->priv;

	mutex_unlock(&cgroup_mutex);

	kernfs_unbreak_active_protection(kn);
	cgroup_put(cgrp);
}

/**
 * cgroup_kn_lock_live - locking helper for cgroup kernfs methods
 * @kn: the kernfs_node being serviced
 *
 * This helper is to be used by a cgroup kernfs method currently servicing
 * @kn.  It breaks the active protection, performs cgroup locking and
 * verifies that the associated cgroup is alive.  Returns the cgroup if
 * alive; otherwise, %NULL.  A successful return should be undone by a
 * matching cgroup_kn_unlock() invocation.
 *
 * Any cgroup kernfs method implementation which requires locking the
 * associated cgroup should use this helper.  It avoids nesting cgroup
 * locking under kernfs active protection and allows all kernfs operations
 * including self-removal.
 */
static struct cgroup *cgroup_kn_lock_live(struct kernfs_node *kn)
{
	struct cgroup *cgrp;

	if (kernfs_type(kn) == KERNFS_DIR)
		cgrp = kn->priv;
	else
		cgrp = kn->parent->priv;

	/*
	 * We're gonna grab cgroup_mutex which nests outside kernfs
	 * active_ref.  cgroup liveliness check alone provides enough
	 * protection against removal.  Ensure @cgrp stays accessible and
	 * break the active_ref protection.
	 */
	if (!cgroup_tryget(cgrp))
		return NULL;
	kernfs_break_active_protection(kn);

	mutex_lock(&cgroup_mutex);

	if (!cgroup_is_dead(cgrp))
		return cgrp;

	cgroup_kn_unlock(kn);
	return NULL;
}

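/*
 * Illustrative usage pattern, not part of the original file: a cgroup
 * kernfs method brackets its work with the pair above.
 *
 *	cgrp = cgroup_kn_lock_live(of->kn);
 *	if (!cgrp)
 *		return -ENODEV;
 *	... operate on cgrp under cgroup_mutex ...
 *	cgroup_kn_unlock(of->kn);
 */
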
static void cgroup_rm_file(struct cgroup *cgrp, const struct cftype *cft)
{
	char name[CGROUP_FILE_NAME_MAX];

	lockdep_assert_held(&cgroup_mutex);
	kernfs_remove_by_name(cgrp->kn, cgroup_file_name(cgrp, cft, name));
}

/**
 * cgroup_clear_dir - remove subsys files in a cgroup directory
 * @cgrp: target cgroup
 * @subsys_mask: mask of the subsystem ids whose files should be removed
 */
static void cgroup_clear_dir(struct cgroup *cgrp, unsigned long subsys_mask)
{
	struct cgroup_subsys *ss;
	int i;

	for_each_subsys(ss, i) {
		struct cftype *cfts;

		if (!(subsys_mask & (1 << i)))
			continue;
		list_for_each_entry(cfts, &ss->cfts, node)
			cgroup_addrm_files(cgrp, cfts, false);
	}
}

static int rebind_subsystems(struct cgroup_root *dst_root,
			     unsigned long ss_mask)
{
	struct cgroup_subsys *ss;
	unsigned long tmp_ss_mask;
	int ssid, i, ret;

	lockdep_assert_held(&cgroup_mutex);

	for_each_subsys_which(ss, ssid, &ss_mask) {
		/* if @ss has non-root csses attached to it, can't move */
		if (css_next_child(NULL, cgroup_css(&ss->root->cgrp, ss)))
			return -EBUSY;

		/* can't move between two non-dummy roots either */
		if (ss->root != &cgrp_dfl_root && dst_root != &cgrp_dfl_root)
			return -EBUSY;
	}

	/* skip creating root files on dfl_root for inhibited subsystems */
	tmp_ss_mask = ss_mask;
	if (dst_root == &cgrp_dfl_root)
		tmp_ss_mask &= ~cgrp_dfl_root_inhibit_ss_mask;

	ret = cgroup_populate_dir(&dst_root->cgrp, tmp_ss_mask);
	if (ret) {
		if (dst_root != &cgrp_dfl_root)
			return ret;

		/*
		 * Rebinding back to the default root is not allowed to
		 * fail.  Using both default and non-default roots should
		 * be rare.  Moving subsystems back and forth even more so.
		 * Just warn about it and continue.
		 */
		if (cgrp_dfl_root_visible) {
			pr_warn("failed to create files (%d) while rebinding 0x%lx to default root\n",
				ret, ss_mask);
			pr_warn("you may retry by moving them to a different hierarchy and unbinding\n");
		}
	}

	/*
	 * Nothing can fail from this point on.  Remove files for the
	 * removed subsystems and rebind each subsystem.
	 */
	for_each_subsys_which(ss, ssid, &ss_mask)
		cgroup_clear_dir(&ss->root->cgrp, 1 << ssid);

	for_each_subsys_which(ss, ssid, &ss_mask) {
		struct cgroup_root *src_root;
		struct cgroup_subsys_state *css;
		struct css_set *cset;

		src_root = ss->root;
		css = cgroup_css(&src_root->cgrp, ss);

		WARN_ON(!css || cgroup_css(&dst_root->cgrp, ss));

		RCU_INIT_POINTER(src_root->cgrp.subsys[ssid], NULL);
		rcu_assign_pointer(dst_root->cgrp.subsys[ssid], css);
		ss->root = dst_root;
		css->cgroup = &dst_root->cgrp;

		down_write(&css_set_rwsem);
		hash_for_each(css_set_table, i, cset, hlist)
			list_move_tail(&cset->e_cset_node[ss->id],
				       &dst_root->cgrp.e_csets[ss->id]);
		up_write(&css_set_rwsem);

		src_root->subsys_mask &= ~(1 << ssid);
		src_root->cgrp.subtree_control &= ~(1 << ssid);
		cgroup_refresh_child_subsys_mask(&src_root->cgrp);

		/* default hierarchy doesn't enable controllers by default */
		dst_root->subsys_mask |= 1 << ssid;
		if (dst_root != &cgrp_dfl_root) {
			dst_root->cgrp.subtree_control |= 1 << ssid;
			cgroup_refresh_child_subsys_mask(&dst_root->cgrp);
		}

		if (ss->bind)
			ss->bind(css);
	}

	kernfs_activate(dst_root->cgrp.kn);
	return 0;
}

static int cgroup_show_options(struct seq_file *seq,
			       struct kernfs_root *kf_root)
{
	struct cgroup_root *root = cgroup_root_from_kf(kf_root);
	struct cgroup_subsys *ss;
	int ssid;

	for_each_subsys(ss, ssid)
		if (root->subsys_mask & (1 << ssid))
			seq_printf(seq, ",%s", ss->name);
	if (root->flags & CGRP_ROOT_NOPREFIX)
		seq_puts(seq, ",noprefix");
	if (root->flags & CGRP_ROOT_XATTR)
		seq_puts(seq, ",xattr");

	spin_lock(&release_agent_path_lock);
	if (strlen(root->release_agent_path))
		seq_printf(seq, ",release_agent=%s", root->release_agent_path);
	spin_unlock(&release_agent_path_lock);

	if (test_bit(CGRP_CPUSET_CLONE_CHILDREN, &root->cgrp.flags))
		seq_puts(seq, ",clone_children");
	if (strlen(root->name))
		seq_printf(seq, ",name=%s", root->name);
	return 0;
}

struct cgroup_sb_opts {
	unsigned long subsys_mask;
	unsigned int flags;
	char *release_agent;
	bool cpuset_clone_children;
	char *name;
	/* User explicitly requested empty subsystem */
	bool none;
};

static int parse_cgroupfs_options(char *data, struct cgroup_sb_opts *opts)
{
	char *token, *o = data;
	bool all_ss = false, one_ss = false;
	unsigned long mask = -1UL;
	struct cgroup_subsys *ss;
	int nr_opts = 0;
	int i;

#ifdef CONFIG_CPUSETS
	mask = ~(1U << cpuset_cgrp_id);
#endif

	memset(opts, 0, sizeof(*opts));

	while ((token = strsep(&o, ",")) != NULL) {
		nr_opts++;

		if (!*token)
			return -EINVAL;
		if (!strcmp(token, "none")) {
			/* Explicitly have no subsystems */
			opts->none = true;
			continue;
		}
		if (!strcmp(token, "all")) {
			/* Mutually exclusive option 'all' + subsystem name */
			if (one_ss)
				return -EINVAL;
			all_ss = true;
			continue;
		}
		if (!strcmp(token, "__DEVEL__sane_behavior")) {
			opts->flags |= CGRP_ROOT_SANE_BEHAVIOR;
			continue;
		}
		if (!strcmp(token, "noprefix")) {
			opts->flags |= CGRP_ROOT_NOPREFIX;
			continue;
		}
		if (!strcmp(token, "clone_children")) {
			opts->cpuset_clone_children = true;
			continue;
		}
		if (!strcmp(token, "xattr")) {
			opts->flags |= CGRP_ROOT_XATTR;
			continue;
		}
		if (!strncmp(token, "release_agent=", 14)) {
			/* Specifying two release agents is forbidden */
			if (opts->release_agent)
				return -EINVAL;
			opts->release_agent =
				kstrndup(token + 14, PATH_MAX - 1, GFP_KERNEL);
			if (!opts->release_agent)
				return -ENOMEM;
			continue;
		}
		if (!strncmp(token, "name=", 5)) {
			const char *name = token + 5;
			/* Can't specify an empty name */
			if (!strlen(name))
				return -EINVAL;
			/* Must match [\w.-]+ */
			for (i = 0; i < strlen(name); i++) {
				char c = name[i];
				if (isalnum(c))
					continue;
				if ((c == '.') || (c == '-') || (c == '_'))
					continue;
				return -EINVAL;
			}
			/* Specifying two names is forbidden */
			if (opts->name)
				return -EINVAL;
			opts->name = kstrndup(name,
					      MAX_CGROUP_ROOT_NAMELEN - 1,
					      GFP_KERNEL);
			if (!opts->name)
				return -ENOMEM;

			continue;
		}

		for_each_subsys(ss, i) {
			if (strcmp(token, ss->name))
				continue;
			if (ss->disabled)
				continue;

			/* Mutually exclusive option 'all' + subsystem name */
			if (all_ss)
				return -EINVAL;
			opts->subsys_mask |= (1 << i);
			one_ss = true;

			break;
		}
		if (i == CGROUP_SUBSYS_COUNT)
			return -ENOENT;
	}

	if (opts->flags & CGRP_ROOT_SANE_BEHAVIOR) {
		pr_warn("sane_behavior: this is still under development and its behaviors will change, proceed at your own risk\n");
		if (nr_opts != 1) {
			pr_err("sane_behavior: no other mount options allowed\n");
			return -EINVAL;
		}
		return 0;
	}

	/*
	 * If the 'all' option was specified select all the subsystems,
	 * otherwise if 'none', 'name=' and a subsystem name options were
	 * not specified, let's default to 'all'
	 */
	if (all_ss || (!one_ss && !opts->none && !opts->name))
		for_each_subsys(ss, i)
			if (!ss->disabled)
				opts->subsys_mask |= (1 << i);

	/*
	 * We either have to specify by name or by subsystems. (So all
	 * empty hierarchies must have a name).
	 */
	if (!opts->subsys_mask && !opts->name)
		return -EINVAL;

	/*
	 * Option noprefix was introduced just for backward compatibility
	 * with the old cpuset, so we allow noprefix only if mounting just
	 * the cpuset subsystem.
	 */
	if ((opts->flags & CGRP_ROOT_NOPREFIX) && (opts->subsys_mask & mask))
		return -EINVAL;

	/* Can't specify "none" and some subsystems */
	if (opts->subsys_mask && opts->none)
		return -EINVAL;

	return 0;
}

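/*
 * Illustrative, not part of the original file: the string parsed above is
 * the data argument of mount(2).  For example (mygrp is a made-up name),
 *
 *	mount -t cgroup -o cpu,cpuacct,name=mygrp none /mnt/cgroup
 *
 * arrives here as data = "cpu,cpuacct,name=mygrp" and yields
 * opts->subsys_mask covering cpu and cpuacct with opts->name = "mygrp".
 */
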
static int cgroup_remount(struct kernfs_root *kf_root, int *flags, char *data)
{
	int ret = 0;
	struct cgroup_root *root = cgroup_root_from_kf(kf_root);
	struct cgroup_sb_opts opts;
	unsigned long added_mask, removed_mask;

	if (root == &cgrp_dfl_root) {
		pr_err("remount is not allowed\n");
		return -EINVAL;
	}

	mutex_lock(&cgroup_mutex);

	/* See what subsystems are wanted */
	ret = parse_cgroupfs_options(data, &opts);
	if (ret)
		goto out_unlock;

	if (opts.subsys_mask != root->subsys_mask || opts.release_agent)
		pr_warn("option changes via remount are deprecated (pid=%d comm=%s)\n",
			task_tgid_nr(current), current->comm);

	added_mask = opts.subsys_mask & ~root->subsys_mask;
	removed_mask = root->subsys_mask & ~opts.subsys_mask;

	/* Don't allow flags or name to change at remount */
	if ((opts.flags ^ root->flags) ||
	    (opts.name && strcmp(opts.name, root->name))) {
		pr_err("option or name mismatch, new: 0x%x \"%s\", old: 0x%x \"%s\"\n",
		       opts.flags, opts.name ?: "", root->flags, root->name);
		ret = -EINVAL;
		goto out_unlock;
	}

	/* remounting is not allowed for populated hierarchies */
	if (!list_empty(&root->cgrp.self.children)) {
		ret = -EBUSY;
		goto out_unlock;
	}

	ret = rebind_subsystems(root, added_mask);
	if (ret)
		goto out_unlock;

	rebind_subsystems(&cgrp_dfl_root, removed_mask);

	if (opts.release_agent) {
		spin_lock(&release_agent_path_lock);
		strcpy(root->release_agent_path, opts.release_agent);
		spin_unlock(&release_agent_path_lock);
	}
 out_unlock:
	kfree(opts.release_agent);
	kfree(opts.name);
	mutex_unlock(&cgroup_mutex);
	return ret;
}

/*
 * To reduce the fork() overhead for systems that are not actually using
 * their cgroups capability, we don't maintain the lists running through
 * each css_set to its tasks until we see the list actually used - in other
 * words after the first mount.
 */
static bool use_task_css_set_links __read_mostly;

static void cgroup_enable_task_cg_lists(void)
{
	struct task_struct *p, *g;

	down_write(&css_set_rwsem);

	if (use_task_css_set_links)
		goto out_unlock;

	use_task_css_set_links = true;

	/*
	 * We need tasklist_lock because RCU is not safe against
	 * while_each_thread(). Besides, a forking task that has passed
	 * cgroup_post_fork() without seeing use_task_css_set_links = 1
	 * is not guaranteed to have its child immediately visible in the
	 * tasklist if we walk through it with RCU.
	 */
	read_lock(&tasklist_lock);
	do_each_thread(g, p) {
		WARN_ON_ONCE(!list_empty(&p->cg_list) ||
			     task_css_set(p) != &init_css_set);

		/*
		 * We should check if the process is exiting, otherwise
		 * it will race with cgroup_exit() in that the list
		 * entry won't be deleted though the process has exited.
		 * Do it while holding siglock so that we don't end up
		 * racing against cgroup_exit().
		 */
		spin_lock_irq(&p->sighand->siglock);
		if (!(p->flags & PF_EXITING)) {
			struct css_set *cset = task_css_set(p);

			list_add(&p->cg_list, &cset->tasks);
			get_css_set(cset);
		}
		spin_unlock_irq(&p->sighand->siglock);
	} while_each_thread(g, p);
	read_unlock(&tasklist_lock);
out_unlock:
	up_write(&css_set_rwsem);
}

static void init_cgroup_housekeeping(struct cgroup *cgrp)
{
	struct cgroup_subsys *ss;
	int ssid;

	INIT_LIST_HEAD(&cgrp->self.sibling);
	INIT_LIST_HEAD(&cgrp->self.children);
	INIT_LIST_HEAD(&cgrp->cset_links);
	INIT_LIST_HEAD(&cgrp->pidlists);
	mutex_init(&cgrp->pidlist_mutex);
	cgrp->self.cgroup = cgrp;
	cgrp->self.flags |= CSS_ONLINE;

	for_each_subsys(ss, ssid)
		INIT_LIST_HEAD(&cgrp->e_csets[ssid]);

	init_waitqueue_head(&cgrp->offline_waitq);
	INIT_WORK(&cgrp->release_agent_work, cgroup_release_agent);
}

static void init_cgroup_root(struct cgroup_root *root,
			     struct cgroup_sb_opts *opts)
{
	struct cgroup *cgrp = &root->cgrp;

	INIT_LIST_HEAD(&root->root_list);
	atomic_set(&root->nr_cgrps, 1);
	cgrp->root = root;
	init_cgroup_housekeeping(cgrp);
	idr_init(&root->cgroup_idr);

	root->flags = opts->flags;
	if (opts->release_agent)
		strcpy(root->release_agent_path, opts->release_agent);
	if (opts->name)
		strcpy(root->name, opts->name);
	if (opts->cpuset_clone_children)
		set_bit(CGRP_CPUSET_CLONE_CHILDREN, &root->cgrp.flags);
}

static int cgroup_setup_root(struct cgroup_root *root, unsigned long ss_mask)
{
	LIST_HEAD(tmp_links);
	struct cgroup *root_cgrp = &root->cgrp;
	struct cftype *base_files;
	struct css_set *cset;
	int i, ret;

	lockdep_assert_held(&cgroup_mutex);

	ret = cgroup_idr_alloc(&root->cgroup_idr, root_cgrp, 1, 2, GFP_NOWAIT);
	if (ret < 0)
		goto out;
	root_cgrp->id = ret;

	ret = percpu_ref_init(&root_cgrp->self.refcnt, css_release, 0,
			      GFP_KERNEL);
	if (ret)
		goto out;

	/*
	 * We're accessing css_set_count without locking css_set_rwsem here,
	 * but that's OK - it can only be increased by someone holding
	 * cgroup_lock, and that's us. The worst that can happen is that we
	 * have some link structures left over
	 */
	ret = allocate_cgrp_cset_links(css_set_count, &tmp_links);
	if (ret)
		goto cancel_ref;

	ret = cgroup_init_root_id(root);
	if (ret)
		goto cancel_ref;

	root->kf_root = kernfs_create_root(&cgroup_kf_syscall_ops,
					   KERNFS_ROOT_CREATE_DEACTIVATED,
					   root_cgrp);
	if (IS_ERR(root->kf_root)) {
		ret = PTR_ERR(root->kf_root);
		goto exit_root_id;
	}
	root_cgrp->kn = root->kf_root->kn;

	if (root == &cgrp_dfl_root)
		base_files = cgroup_dfl_base_files;
	else
		base_files = cgroup_legacy_base_files;

	ret = cgroup_addrm_files(root_cgrp, base_files, true);
	if (ret)
		goto destroy_root;

	ret = rebind_subsystems(root, ss_mask);
	if (ret)
		goto destroy_root;

	/*
	 * There must be no failure case after here, since rebinding takes
	 * care of subsystems' refcounts, which are explicitly dropped in
	 * the failure exit path.
	 */
	list_add(&root->root_list, &cgroup_roots);
	cgroup_root_count++;

	/*
	 * Link the root cgroup in this hierarchy into all the css_set
	 * objects.
	 */
	down_write(&css_set_rwsem);
	hash_for_each(css_set_table, i, cset, hlist)
		link_css_set(&tmp_links, cset, root_cgrp);
	up_write(&css_set_rwsem);

	BUG_ON(!list_empty(&root_cgrp->self.children));
	BUG_ON(atomic_read(&root->nr_cgrps) != 1);

	kernfs_activate(root_cgrp->kn);
	ret = 0;
	goto out;

destroy_root:
	kernfs_destroy_root(root->kf_root);
	root->kf_root = NULL;
exit_root_id:
	cgroup_exit_root_id(root);
cancel_ref:
	percpu_ref_exit(&root_cgrp->self.refcnt);
out:
	free_cgrp_cset_links(&tmp_links);
	return ret;
}

static struct dentry *cgroup_mount(struct file_system_type *fs_type,
			 int flags, const char *unused_dev_name,
			 void *data)
{
	struct super_block *pinned_sb = NULL;
	struct cgroup_subsys *ss;
	struct cgroup_root *root;
	struct cgroup_sb_opts opts;
	struct dentry *dentry;
	int ret;
	int i;
	bool new_sb;

	/*
	 * The first time anyone tries to mount a cgroup, enable the list
	 * linking each css_set to its tasks and fix up all existing tasks.
	 */
	if (!use_task_css_set_links)
		cgroup_enable_task_cg_lists();

	mutex_lock(&cgroup_mutex);

	/* First find the desired set of subsystems */
	ret = parse_cgroupfs_options(data, &opts);
	if (ret)
		goto out_unlock;

	/* look for a matching existing root */
	if (opts.flags & CGRP_ROOT_SANE_BEHAVIOR) {
		cgrp_dfl_root_visible = true;
		root = &cgrp_dfl_root;
		cgroup_get(&root->cgrp);
		ret = 0;
		goto out_unlock;
	}

	/*
	 * Destruction of cgroup root is asynchronous, so subsystems may
	 * still be dying after the previous unmount.  Let's drain the
	 * dying subsystems.  We just need to ensure that the ones
	 * unmounted previously finish dying and don't care about new ones
	 * starting.  Testing ref liveness is good enough.
	 */
	for_each_subsys(ss, i) {
		if (!(opts.subsys_mask & (1 << i)) ||
		    ss->root == &cgrp_dfl_root)
			continue;

		if (!percpu_ref_tryget_live(&ss->root->cgrp.self.refcnt)) {
			mutex_unlock(&cgroup_mutex);
			msleep(10);
			ret = restart_syscall();
			goto out_free;
		}
		cgroup_put(&ss->root->cgrp);
	}

	for_each_root(root) {
		bool name_match = false;

		if (root == &cgrp_dfl_root)
			continue;

		/*
		 * If we asked for a name then it must match.  Also, if
		 * name matches but subsys_mask doesn't, we should fail.
		 * Remember whether name matched.
		 */
		if (opts.name) {
			if (strcmp(opts.name, root->name))
				continue;
			name_match = true;
		}

		/*
		 * If we asked for subsystems (or explicitly for no
		 * subsystems) then they must match.
		 */
		if ((opts.subsys_mask || opts.none) &&
		    (opts.subsys_mask != root->subsys_mask)) {
			if (!name_match)
				continue;
			ret = -EBUSY;
			goto out_unlock;
		}

		if (root->flags ^ opts.flags)
			pr_warn("new mount options do not match the existing superblock, will be ignored\n");

		/*
		 * We want to reuse @root whose lifetime is governed by its
		 * ->cgrp.  Let's check whether @root is alive and keep it
		 * that way.  As cgroup_kill_sb() can happen anytime, we
		 * want to block it by pinning the sb so that @root doesn't
		 * get killed before mount is complete.
		 *
		 * With the sb pinned, tryget_live can reliably indicate
		 * whether @root can be reused.  If it's being killed,
		 * drain it.  We could use a wait_queue for the wait but
		 * this path is super cold.  Let's just sleep a bit and
		 * retry.
		 */
		pinned_sb = kernfs_pin_sb(root->kf_root, NULL);
		if (IS_ERR(pinned_sb) ||
		    !percpu_ref_tryget_live(&root->cgrp.self.refcnt)) {
			mutex_unlock(&cgroup_mutex);
			if (!IS_ERR_OR_NULL(pinned_sb))
				deactivate_super(pinned_sb);
			msleep(10);
			ret = restart_syscall();
			goto out_free;
		}

		ret = 0;
		goto out_unlock;
	}

	/*
	 * No such thing, create a new one.  name= matching without subsys
	 * specification is allowed for already existing hierarchies but we
	 * can't create a new one without subsys specification.
	 */
	if (!opts.subsys_mask && !opts.none) {
		ret = -EINVAL;
		goto out_unlock;
	}

	root = kzalloc(sizeof(*root), GFP_KERNEL);
	if (!root) {
		ret = -ENOMEM;
		goto out_unlock;
	}

	init_cgroup_root(root, &opts);

	ret = cgroup_setup_root(root, opts.subsys_mask);
	if (ret)
		cgroup_free_root(root);

out_unlock:
	mutex_unlock(&cgroup_mutex);
out_free:
	kfree(opts.release_agent);
	kfree(opts.name);

	if (ret)
		return ERR_PTR(ret);

	dentry = kernfs_mount(fs_type, flags, root->kf_root,
				CGROUP_SUPER_MAGIC, &new_sb);
	if (IS_ERR(dentry) || !new_sb)
		cgroup_put(&root->cgrp);

	/*
	 * If @pinned_sb, we're reusing an existing root and holding an
	 * extra ref on its sb.  Mount is complete.  Put the extra ref.
	 */
	if (pinned_sb) {
		WARN_ON(new_sb);
		deactivate_super(pinned_sb);
	}

	return dentry;
}

static void cgroup_kill_sb(struct super_block *sb)
{
	struct kernfs_root *kf_root = kernfs_root_from_sb(sb);
	struct cgroup_root *root = cgroup_root_from_kf(kf_root);

	/*
	 * If @root doesn't have any mounts or children, start killing it.
	 * This prevents new mounts by disabling percpu_ref_tryget_live().
	 * cgroup_mount() may wait for @root's release.
	 *
	 * And don't kill the default root.
	 */
	if (!list_empty(&root->cgrp.self.children) ||
	    root == &cgrp_dfl_root)
		cgroup_put(&root->cgrp);
	else
		percpu_ref_kill(&root->cgrp.self.refcnt);

	kernfs_kill_sb(sb);
}

static struct file_system_type cgroup_fs_type = {
	.name = "cgroup",
	.mount = cgroup_mount,
	.kill_sb = cgroup_kill_sb,
};
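
/*
 * Illustrative example (not part of the original source): a legacy
 * hierarchy served by cgroup_fs_type above is typically created from
 * userspace with a mount of type "cgroup", e.g.:
 *
 *	mount -t cgroup -o cpu,cpuacct none /sys/fs/cgroup/cpu,cpuacct
 *
 * The option string ("cpu,cpuacct") arrives in cgroup_mount() as @data
 * and is parsed by parse_cgroupfs_options().
 */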

/**
 * task_cgroup_path - cgroup path of a task in the first cgroup hierarchy
 * @task: target task
 * @buf: the buffer to write the path into
 * @buflen: the length of the buffer
 *
 * Determine @task's cgroup on the first (the one with the lowest non-zero
 * hierarchy_id) cgroup hierarchy and copy its path into @buf.  This
 * function grabs cgroup_mutex and shouldn't be used inside locks used by
 * cgroup controller callbacks.
 *
 * Return value is the same as kernfs_path().
 */
char *task_cgroup_path(struct task_struct *task, char *buf, size_t buflen)
{
	struct cgroup_root *root;
	struct cgroup *cgrp;
	int hierarchy_id = 1;
	char *path = NULL;

	mutex_lock(&cgroup_mutex);
	down_read(&css_set_rwsem);

	root = idr_get_next(&cgroup_hierarchy_idr, &hierarchy_id);

	if (root) {
		cgrp = task_cgroup_from_root(task, root);
		path = cgroup_path(cgrp, buf, buflen);
	} else {
		/* if no hierarchy exists, everyone is in "/" */
		if (strlcpy(buf, "/", buflen) < buflen)
			path = buf;
	}

	up_read(&css_set_rwsem);
	mutex_unlock(&cgroup_mutex);
	return path;
}
EXPORT_SYMBOL_GPL(task_cgroup_path);
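
/*
 * Usage sketch (illustrative only, not from the original source):
 *
 *	char buf[PATH_MAX];
 *
 *	if (task_cgroup_path(current, buf, sizeof(buf)))
 *		pr_info("pid %d: cgroup %s\n", current->pid, buf);
 *
 * A NULL return means the path did not fit in @buf, matching the
 * kernfs_path() convention noted in the kernel-doc above.
 */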

/* used to track tasks and other necessary states during migration */
struct cgroup_taskset {
	/* the src and dst cset lists running through cset->mg_node */
	struct list_head	src_csets;
	struct list_head	dst_csets;

	/*
	 * Fields for cgroup_taskset_*() iteration.
	 *
	 * Before migration is committed, the target migration tasks are on
	 * ->mg_tasks of the csets on ->src_csets.  After, on ->mg_tasks of
	 * the csets on ->dst_csets.  ->csets points to either ->src_csets
	 * or ->dst_csets depending on whether migration is committed.
	 *
	 * ->cur_cset and ->cur_task point to the current position
	 * during iteration.
	 */
	struct list_head	*csets;
	struct css_set		*cur_cset;
	struct task_struct	*cur_task;
};

/**
 * cgroup_taskset_first - reset taskset and return the first task
 * @tset: taskset of interest
 *
 * @tset iteration is initialized and the first task is returned.
 */
struct task_struct *cgroup_taskset_first(struct cgroup_taskset *tset)
{
	tset->cur_cset = list_first_entry(tset->csets, struct css_set, mg_node);
	tset->cur_task = NULL;

	return cgroup_taskset_next(tset);
}

/**
 * cgroup_taskset_next - iterate to the next task in taskset
 * @tset: taskset of interest
 *
 * Return the next task in @tset.  Iteration must have been initialized
 * with cgroup_taskset_first().
 */
struct task_struct *cgroup_taskset_next(struct cgroup_taskset *tset)
{
	struct css_set *cset = tset->cur_cset;
	struct task_struct *task = tset->cur_task;

	while (&cset->mg_node != tset->csets) {
		if (!task)
			task = list_first_entry(&cset->mg_tasks,
						struct task_struct, cg_list);
		else
			task = list_next_entry(task, cg_list);

		if (&task->cg_list != &cset->mg_tasks) {
			tset->cur_cset = cset;
			tset->cur_task = task;
			return task;
		}

		cset = list_next_entry(cset, mg_node);
		task = NULL;
	}

	return NULL;
}
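
/*
 * Illustrative sketch: controller callbacks walk the taskset with the
 * two helpers above (or the cgroup_taskset_for_each() wrapper), e.g.:
 *
 *	struct task_struct *task;
 *
 *	for (task = cgroup_taskset_first(tset); task;
 *	     task = cgroup_taskset_next(tset))
 *		do_something(task);	(hypothetical per-task hook)
 */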

/**
 * cgroup_task_migrate - move a task from one cgroup to another.
 * @old_cgrp: the cgroup @tsk is being migrated from
 * @tsk: the task being migrated
 * @new_cset: the new css_set @tsk is being attached to
 *
 * Must be called with cgroup_mutex, threadgroup and css_set_rwsem locked.
 */
static void cgroup_task_migrate(struct cgroup *old_cgrp,
				struct task_struct *tsk,
				struct css_set *new_cset)
{
	struct css_set *old_cset;

	lockdep_assert_held(&cgroup_mutex);
	lockdep_assert_held(&css_set_rwsem);

	/*
	 * We are synchronized through cgroup_threadgroup_rwsem against
	 * PF_EXITING setting such that we can't race against cgroup_exit()
	 * changing the css_set to init_css_set and dropping the old one.
	 */
	WARN_ON_ONCE(tsk->flags & PF_EXITING);
	old_cset = task_css_set(tsk);

	get_css_set(new_cset);
	rcu_assign_pointer(tsk->cgroups, new_cset);

	/*
	 * Use move_tail so that cgroup_taskset_first() still returns the
	 * leader after migration.  This works because cgroup_migrate()
	 * ensures that the dst_cset of the leader is the first on the
	 * tset's dst_csets list.
	 */
	list_move_tail(&tsk->cg_list, &new_cset->mg_tasks);

	/*
	 * We just gained a reference on old_cset by taking it from the
	 * task. As trading it for new_cset is protected by cgroup_mutex,
	 * we're safe to drop it here; it will be freed under RCU.
	 */
	put_css_set_locked(old_cset);
}

/**
 * cgroup_migrate_finish - cleanup after attach
 * @preloaded_csets: list of preloaded css_sets
 *
 * Undo cgroup_migrate_add_src() and cgroup_migrate_prepare_dst().  See
 * those functions for details.
 */
static void cgroup_migrate_finish(struct list_head *preloaded_csets)
{
	struct css_set *cset, *tmp_cset;

	lockdep_assert_held(&cgroup_mutex);

	down_write(&css_set_rwsem);
	list_for_each_entry_safe(cset, tmp_cset, preloaded_csets, mg_preload_node) {
		cset->mg_src_cgrp = NULL;
		cset->mg_dst_cset = NULL;
		list_del_init(&cset->mg_preload_node);
		put_css_set_locked(cset);
	}
	up_write(&css_set_rwsem);
}
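
/*
 * The helpers around here implement a multi-step migration protocol;
 * cgroup_attach_task() further down is the canonical sequence:
 *
 *	1. cgroup_migrate_add_src()	- pin and preload source csets
 *	2. cgroup_migrate_prepare_dst()	- look up/create destination csets
 *	3. cgroup_migrate()		- move the tasks
 *	4. cgroup_migrate_finish()	- drop the preloaded references
 */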

/**
 * cgroup_migrate_add_src - add a migration source css_set
 * @src_cset: the source css_set to add
 * @dst_cgrp: the destination cgroup
 * @preloaded_csets: list of preloaded css_sets
 *
 * Tasks belonging to @src_cset are about to be migrated to @dst_cgrp.  Pin
 * @src_cset and add it to @preloaded_csets, which should later be cleaned
 * up by cgroup_migrate_finish().
 *
 * This function may be called without holding cgroup_threadgroup_rwsem
 * even if the target is a process.  Threads may be created and destroyed
 * but as long as cgroup_mutex is not dropped, no new css_set can be put
 * into play and the preloaded css_sets are guaranteed to cover all
 * migrations.
 */
static void cgroup_migrate_add_src(struct css_set *src_cset,
				   struct cgroup *dst_cgrp,
				   struct list_head *preloaded_csets)
{
	struct cgroup *src_cgrp;

	lockdep_assert_held(&cgroup_mutex);
	lockdep_assert_held(&css_set_rwsem);

	src_cgrp = cset_cgroup_from_root(src_cset, dst_cgrp->root);

	if (!list_empty(&src_cset->mg_preload_node))
		return;

	WARN_ON(src_cset->mg_src_cgrp);
	WARN_ON(!list_empty(&src_cset->mg_tasks));
	WARN_ON(!list_empty(&src_cset->mg_node));

	src_cset->mg_src_cgrp = src_cgrp;
	get_css_set(src_cset);
	list_add(&src_cset->mg_preload_node, preloaded_csets);
}

/**
 * cgroup_migrate_prepare_dst - prepare destination css_sets for migration
 * @dst_cgrp: the destination cgroup (may be %NULL)
 * @preloaded_csets: list of preloaded source css_sets
 *
 * Tasks are about to be moved to @dst_cgrp and all the source css_sets
 * have been preloaded to @preloaded_csets.  This function looks up and
 * pins all destination css_sets, links each to its source, and appends
 * them to @preloaded_csets.  If @dst_cgrp is %NULL, the destination of
 * each source css_set is assumed to be its cgroup on the default
 * hierarchy.
 *
 * This function must be called after cgroup_migrate_add_src() has been
 * called on each migration source css_set.  After migration is performed
 * using cgroup_migrate(), cgroup_migrate_finish() must be called on
 * @preloaded_csets.
 */
static int cgroup_migrate_prepare_dst(struct cgroup *dst_cgrp,
				      struct list_head *preloaded_csets)
{
	LIST_HEAD(csets);
	struct css_set *src_cset, *tmp_cset;

	lockdep_assert_held(&cgroup_mutex);

	/*
	 * Except for the root, child_subsys_mask must be zero for a cgroup
	 * with tasks so that child cgroups don't compete against tasks.
	 */
	if (dst_cgrp && cgroup_on_dfl(dst_cgrp) && cgroup_parent(dst_cgrp) &&
	    dst_cgrp->child_subsys_mask)
		return -EBUSY;

	/* look up the dst cset for each src cset and link it to src */
	list_for_each_entry_safe(src_cset, tmp_cset, preloaded_csets, mg_preload_node) {
		struct css_set *dst_cset;

		dst_cset = find_css_set(src_cset,
					dst_cgrp ?: src_cset->dfl_cgrp);
		if (!dst_cset)
			goto err;

		WARN_ON_ONCE(src_cset->mg_dst_cset || dst_cset->mg_dst_cset);

		/*
		 * If src cset equals dst, it's noop.  Drop the src.
		 * cgroup_migrate() will skip the cset too.  Note that we
		 * can't handle src == dst as some nodes are used by both.
		 */
		if (src_cset == dst_cset) {
			src_cset->mg_src_cgrp = NULL;
			list_del_init(&src_cset->mg_preload_node);
			put_css_set(src_cset);
			put_css_set(dst_cset);
			continue;
		}

		src_cset->mg_dst_cset = dst_cset;

		if (list_empty(&dst_cset->mg_preload_node))
			list_add(&dst_cset->mg_preload_node, &csets);
		else
			put_css_set(dst_cset);
	}

	list_splice_tail(&csets, preloaded_csets);
	return 0;
err:
	cgroup_migrate_finish(&csets);
	return -ENOMEM;
}

/**
 * cgroup_migrate - migrate a process or task to a cgroup
 * @cgrp: the destination cgroup
 * @leader: the leader of the process or the task to migrate
 * @threadgroup: whether @leader points to the whole process or a single task
 *
 * Migrate a process or task denoted by @leader to @cgrp.  If migrating a
 * process, the caller must be holding cgroup_threadgroup_rwsem.  The
 * caller is also responsible for invoking cgroup_migrate_add_src() and
 * cgroup_migrate_prepare_dst() on the targets before invoking this
 * function and following up with cgroup_migrate_finish().
 *
 * As long as a controller's ->can_attach() doesn't fail, this function is
 * guaranteed to succeed.  This means that, excluding ->can_attach()
 * failure, when migrating multiple targets, the success or failure can be
 * decided for all targets by invoking cgroup_migrate_prepare_dst() before
 * actually starting migrating.
 */
static int cgroup_migrate(struct cgroup *cgrp, struct task_struct *leader,
			  bool threadgroup)
{
	struct cgroup_taskset tset = {
		.src_csets	= LIST_HEAD_INIT(tset.src_csets),
		.dst_csets	= LIST_HEAD_INIT(tset.dst_csets),
		.csets		= &tset.src_csets,
	};
	struct cgroup_subsys_state *css, *failed_css = NULL;
	struct css_set *cset, *tmp_cset;
	struct task_struct *task, *tmp_task;
	int i, ret;

	/*
	 * Prevent freeing of tasks while we take a snapshot. Tasks that are
	 * already PF_EXITING could be freed from underneath us unless we
	 * take an rcu_read_lock.
	 */
	down_write(&css_set_rwsem);
	rcu_read_lock();
	task = leader;
	do {
		/* @task either already exited or can't exit until the end */
		if (task->flags & PF_EXITING)
			goto next;

		/* leave @task alone if post_fork() hasn't linked it yet */
		if (list_empty(&task->cg_list))
			goto next;

		cset = task_css_set(task);
		if (!cset->mg_src_cgrp)
			goto next;

		/*
		 * cgroup_taskset_first() must always return the leader.
		 * Take care to avoid disturbing the ordering.
		 */
		list_move_tail(&task->cg_list, &cset->mg_tasks);
		if (list_empty(&cset->mg_node))
			list_add_tail(&cset->mg_node, &tset.src_csets);
		if (list_empty(&cset->mg_dst_cset->mg_node))
			list_move_tail(&cset->mg_dst_cset->mg_node,
				       &tset.dst_csets);
	next:
		if (!threadgroup)
			break;
	} while_each_thread(leader, task);
	rcu_read_unlock();
	up_write(&css_set_rwsem);

	/* methods shouldn't be called if no task is actually migrating */
	if (list_empty(&tset.src_csets))
		return 0;

	/* check that we can legitimately attach to the cgroup */
	for_each_e_css(css, i, cgrp) {
		if (css->ss->can_attach) {
			ret = css->ss->can_attach(css, &tset);
			if (ret) {
				failed_css = css;
				goto out_cancel_attach;
			}
		}
	}

	/*
	 * Now that we're guaranteed success, proceed to move all tasks to
	 * the new cgroup.  There are no failure cases after here, so this
	 * is the commit point.
	 */
	down_write(&css_set_rwsem);
	list_for_each_entry(cset, &tset.src_csets, mg_node) {
		list_for_each_entry_safe(task, tmp_task, &cset->mg_tasks, cg_list)
			cgroup_task_migrate(cset->mg_src_cgrp, task,
					    cset->mg_dst_cset);
	}
	up_write(&css_set_rwsem);

	/*
	 * Migration is committed, all target tasks are now on dst_csets.
	 * Nothing is sensitive to fork() after this point.  Notify
	 * controllers that migration is complete.
	 */
	tset.csets = &tset.dst_csets;

	for_each_e_css(css, i, cgrp)
		if (css->ss->attach)
			css->ss->attach(css, &tset);

	ret = 0;
	goto out_release_tset;

out_cancel_attach:
	for_each_e_css(css, i, cgrp) {
		if (css == failed_css)
			break;
		if (css->ss->cancel_attach)
			css->ss->cancel_attach(css, &tset);
	}
out_release_tset:
	down_write(&css_set_rwsem);
	list_splice_init(&tset.dst_csets, &tset.src_csets);
	list_for_each_entry_safe(cset, tmp_cset, &tset.src_csets, mg_node) {
		list_splice_tail_init(&cset->mg_tasks, &cset->tasks);
		list_del_init(&cset->mg_node);
	}
	up_write(&css_set_rwsem);
	return ret;
}

/**
 * cgroup_attach_task - attach a task or a whole threadgroup to a cgroup
 * @dst_cgrp: the cgroup to attach to
 * @leader: the task or the leader of the threadgroup to be attached
 * @threadgroup: attach the whole threadgroup?
 *
 * Call holding cgroup_mutex and cgroup_threadgroup_rwsem.
 */
static int cgroup_attach_task(struct cgroup *dst_cgrp,
			      struct task_struct *leader, bool threadgroup)
{
	LIST_HEAD(preloaded_csets);
	struct task_struct *task;
	int ret;

	/* look up all src csets */
	down_read(&css_set_rwsem);
	rcu_read_lock();
	task = leader;
	do {
		cgroup_migrate_add_src(task_css_set(task), dst_cgrp,
				       &preloaded_csets);
		if (!threadgroup)
			break;
	} while_each_thread(leader, task);
	rcu_read_unlock();
	up_read(&css_set_rwsem);

	/* prepare dst csets and commit */
	ret = cgroup_migrate_prepare_dst(dst_cgrp, &preloaded_csets);
	if (!ret)
		ret = cgroup_migrate(dst_cgrp, leader, threadgroup);

	cgroup_migrate_finish(&preloaded_csets);
	return ret;
}

static int cgroup_procs_write_permission(struct task_struct *task,
					 struct cgroup *dst_cgrp,
					 struct kernfs_open_file *of)
{
	const struct cred *cred = current_cred();
	const struct cred *tcred = get_task_cred(task);
	int ret = 0;

	/*
	 * even if we're attaching all tasks in the thread group, we only
	 * need to check permissions on one of them.
	 */
	if (!uid_eq(cred->euid, GLOBAL_ROOT_UID) &&
	    !uid_eq(cred->euid, tcred->uid) &&
	    !uid_eq(cred->euid, tcred->suid))
		ret = -EACCES;

	if (!ret && cgroup_on_dfl(dst_cgrp)) {
		struct super_block *sb = of->file->f_path.dentry->d_sb;
		struct cgroup *cgrp;
		struct inode *inode;

		down_read(&css_set_rwsem);
		cgrp = task_cgroup_from_root(task, &cgrp_dfl_root);
		up_read(&css_set_rwsem);

		while (!cgroup_is_descendant(dst_cgrp, cgrp))
			cgrp = cgroup_parent(cgrp);

		ret = -ENOMEM;
		inode = kernfs_get_inode(sb, cgrp->procs_kn);
		if (inode) {
			ret = inode_permission(inode, MAY_WRITE);
			iput(inode);
		}
	}

	put_cred(tcred);
	return ret;
}

/*
 * Find the task_struct of the task to attach by vpid and pass it along to the
 * function to attach either it or all tasks in its threadgroup. Will lock
 * cgroup_mutex and threadgroup.
 */
static ssize_t __cgroup_procs_write(struct kernfs_open_file *of, char *buf,
				    size_t nbytes, loff_t off, bool threadgroup)
{
	struct task_struct *tsk;
	struct cgroup *cgrp;
	pid_t pid;
	int ret;

	if (kstrtoint(strstrip(buf), 0, &pid) || pid < 0)
		return -EINVAL;

	cgrp = cgroup_kn_lock_live(of->kn);
	if (!cgrp)
		return -ENODEV;

	percpu_down_write(&cgroup_threadgroup_rwsem);
	rcu_read_lock();
	if (pid) {
		tsk = find_task_by_vpid(pid);
		if (!tsk) {
			ret = -ESRCH;
			goto out_unlock_rcu;
		}
	} else {
		tsk = current;
	}

	if (threadgroup)
		tsk = tsk->group_leader;

	/*
	 * Workqueue threads may acquire PF_NO_SETAFFINITY and become
	 * trapped in a cpuset, or an RT worker may be born in a cgroup
	 * with no rt_runtime allocated.  Just say no.
	 */
	if (tsk == kthreadd_task || (tsk->flags & PF_NO_SETAFFINITY)) {
		ret = -EINVAL;
		goto out_unlock_rcu;
	}

	get_task_struct(tsk);
	rcu_read_unlock();

	ret = cgroup_procs_write_permission(tsk, cgrp, of);
	if (!ret)
		ret = cgroup_attach_task(cgrp, tsk, threadgroup);

	put_task_struct(tsk);
	goto out_unlock_threadgroup;

out_unlock_rcu:
	rcu_read_unlock();
out_unlock_threadgroup:
	percpu_up_write(&cgroup_threadgroup_rwsem);
	cgroup_kn_unlock(of->kn);
	return ret ?: nbytes;
}

/**
 * cgroup_attach_task_all - attach task 'tsk' to all cgroups of task 'from'
 * @from: attach to all cgroups of a given task
 * @tsk: the task to be attached
 */
int cgroup_attach_task_all(struct task_struct *from, struct task_struct *tsk)
{
	struct cgroup_root *root;
	int retval = 0;

	mutex_lock(&cgroup_mutex);
	for_each_root(root) {
		struct cgroup *from_cgrp;

		if (root == &cgrp_dfl_root)
			continue;

		down_read(&css_set_rwsem);
		from_cgrp = task_cgroup_from_root(from, root);
		up_read(&css_set_rwsem);

		retval = cgroup_attach_task(from_cgrp, tsk, false);
		if (retval)
			break;
	}
	mutex_unlock(&cgroup_mutex);

	return retval;
}
EXPORT_SYMBOL_GPL(cgroup_attach_task_all);
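
/*
 * Illustrative only: a kernel user that spawns a helper thread can place
 * it alongside an existing task with something like
 *
 *	err = cgroup_attach_task_all(owner_task, worker_task);
 *
 * (vhost, for example, uses this to put its worker into the owner's
 * cgroups; "owner_task"/"worker_task" are hypothetical names here.)
 */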

static ssize_t cgroup_tasks_write(struct kernfs_open_file *of,
				  char *buf, size_t nbytes, loff_t off)
{
	return __cgroup_procs_write(of, buf, nbytes, off, false);
}

static ssize_t cgroup_procs_write(struct kernfs_open_file *of,
				  char *buf, size_t nbytes, loff_t off)
{
	return __cgroup_procs_write(of, buf, nbytes, off, true);
}
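
/*
 * From userspace (illustrative, paths are hypothetical): writing a PID
 * to the files backed by these handlers migrates a single thread or a
 * whole thread group:
 *
 *	echo 1234 > /sys/fs/cgroup/cpu/grp/tasks	(one thread)
 *	echo 1234 > /sys/fs/cgroup/cpu/grp/cgroup.procs	(whole group)
 *
 * Writing "0" moves the writing task itself, see __cgroup_procs_write().
 */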

static ssize_t cgroup_release_agent_write(struct kernfs_open_file *of,
					  char *buf, size_t nbytes, loff_t off)
{
	struct cgroup *cgrp;

	BUILD_BUG_ON(sizeof(cgrp->root->release_agent_path) < PATH_MAX);

	cgrp = cgroup_kn_lock_live(of->kn);
	if (!cgrp)
		return -ENODEV;
	spin_lock(&release_agent_path_lock);
	strlcpy(cgrp->root->release_agent_path, strstrip(buf),
		sizeof(cgrp->root->release_agent_path));
	spin_unlock(&release_agent_path_lock);
	cgroup_kn_unlock(of->kn);
	return nbytes;
}

static int cgroup_release_agent_show(struct seq_file *seq, void *v)
{
	struct cgroup *cgrp = seq_css(seq)->cgroup;

	spin_lock(&release_agent_path_lock);
	seq_puts(seq, cgrp->root->release_agent_path);
	spin_unlock(&release_agent_path_lock);
	seq_putc(seq, '\n');
	return 0;
}

static int cgroup_sane_behavior_show(struct seq_file *seq, void *v)
{
	seq_puts(seq, "0\n");
	return 0;
}

static void cgroup_print_ss_mask(struct seq_file *seq, unsigned long ss_mask)
{
	struct cgroup_subsys *ss;
	bool printed = false;
	int ssid;

	for_each_subsys_which(ss, ssid, &ss_mask) {
		if (printed)
			seq_putc(seq, ' ');
		seq_printf(seq, "%s", ss->name);
		printed = true;
	}
	if (printed)
		seq_putc(seq, '\n');
}

/* show controllers which are currently attached to the default hierarchy */
static int cgroup_root_controllers_show(struct seq_file *seq, void *v)
{
	struct cgroup *cgrp = seq_css(seq)->cgroup;

	cgroup_print_ss_mask(seq, cgrp->root->subsys_mask &
			     ~cgrp_dfl_root_inhibit_ss_mask);
	return 0;
}

/* show controllers which are enabled from the parent */
static int cgroup_controllers_show(struct seq_file *seq, void *v)
{
	struct cgroup *cgrp = seq_css(seq)->cgroup;

	cgroup_print_ss_mask(seq, cgroup_parent(cgrp)->subtree_control);
	return 0;
}

/* show controllers which are enabled for a given cgroup's children */
static int cgroup_subtree_control_show(struct seq_file *seq, void *v)
{
	struct cgroup *cgrp = seq_css(seq)->cgroup;

	cgroup_print_ss_mask(seq, cgrp->subtree_control);
	return 0;
}

/**
 * cgroup_update_dfl_csses - update css assoc of a subtree in default hierarchy
 * @cgrp: root of the subtree to update csses for
 *
 * @cgrp's child_subsys_mask has changed and its subtree's (self excluded)
 * css associations need to be updated accordingly.  This function looks up
 * all css_sets which are attached to the subtree, creates the matching
 * updated css_sets and migrates the tasks to the new ones.
 */
static int cgroup_update_dfl_csses(struct cgroup *cgrp)
{
	LIST_HEAD(preloaded_csets);
	struct cgroup_subsys_state *css;
	struct css_set *src_cset;
	int ret;

	lockdep_assert_held(&cgroup_mutex);

	percpu_down_write(&cgroup_threadgroup_rwsem);

	/* look up all csses currently attached to @cgrp's subtree */
	down_read(&css_set_rwsem);
	css_for_each_descendant_pre(css, cgroup_css(cgrp, NULL)) {
		struct cgrp_cset_link *link;

		/* self is not affected by child_subsys_mask change */
		if (css->cgroup == cgrp)
			continue;

		list_for_each_entry(link, &css->cgroup->cset_links, cset_link)
			cgroup_migrate_add_src(link->cset, cgrp,
					       &preloaded_csets);
	}
	up_read(&css_set_rwsem);

	/* NULL dst indicates self on default hierarchy */
	ret = cgroup_migrate_prepare_dst(NULL, &preloaded_csets);
	if (ret)
		goto out_finish;

	list_for_each_entry(src_cset, &preloaded_csets, mg_preload_node) {
		struct task_struct *last_task = NULL, *task;

		/* src_csets precede dst_csets, break on the first dst_cset */
		if (!src_cset->mg_src_cgrp)
			break;

		/*
		 * All tasks in src_cset need to be migrated to the
		 * matching dst_cset.  Empty it process by process.  We
		 * walk tasks but migrate processes.  The leader might even
		 * belong to a different cset but such src_cset would also
		 * be among the target src_csets because the default
		 * hierarchy enforces per-process membership.
		 */
		while (true) {
			down_read(&css_set_rwsem);
			task = list_first_entry_or_null(&src_cset->tasks,
						struct task_struct, cg_list);
			if (task) {
				task = task->group_leader;
				WARN_ON_ONCE(!task_css_set(task)->mg_src_cgrp);
				get_task_struct(task);
			}
			up_read(&css_set_rwsem);

			if (!task)
				break;

			/* guard against possible infinite loop */
			if (WARN(last_task == task,
				 "cgroup: update_dfl_csses failed to make progress, aborting in inconsistent state\n"))
				goto out_finish;
			last_task = task;

			ret = cgroup_migrate(src_cset->dfl_cgrp, task, true);

			put_task_struct(task);

			if (WARN(ret, "cgroup: failed to update controllers for the default hierarchy (%d), further operations may crash or hang\n", ret))
				goto out_finish;
		}
	}

out_finish:
	cgroup_migrate_finish(&preloaded_csets);
	percpu_up_write(&cgroup_threadgroup_rwsem);
	return ret;
}

/* change the enabled child controllers for a cgroup in the default hierarchy */
static ssize_t cgroup_subtree_control_write(struct kernfs_open_file *of,
					    char *buf, size_t nbytes,
					    loff_t off)
{
	unsigned long enable = 0, disable = 0;
	unsigned long css_enable, css_disable, old_sc, new_sc, old_ss, new_ss;
	struct cgroup *cgrp, *child;
	struct cgroup_subsys *ss;
	char *tok;
	int ssid, ret;

	/*
	 * Parse input - space separated list of subsystem names prefixed
	 * with either + or -.
	 */
	buf = strstrip(buf);
	while ((tok = strsep(&buf, " "))) {
		unsigned long tmp_ss_mask = ~cgrp_dfl_root_inhibit_ss_mask;

		if (tok[0] == '\0')
			continue;
		for_each_subsys_which(ss, ssid, &tmp_ss_mask) {
			if (ss->disabled || strcmp(tok + 1, ss->name))
				continue;

			if (*tok == '+') {
				enable |= 1 << ssid;
				disable &= ~(1 << ssid);
			} else if (*tok == '-') {
				disable |= 1 << ssid;
				enable &= ~(1 << ssid);
			} else {
				return -EINVAL;
			}
			break;
		}
		if (ssid == CGROUP_SUBSYS_COUNT)
			return -EINVAL;
	}

	cgrp = cgroup_kn_lock_live(of->kn);
	if (!cgrp)
		return -ENODEV;

	for_each_subsys(ss, ssid) {
		if (enable & (1 << ssid)) {
			if (cgrp->subtree_control & (1 << ssid)) {
				enable &= ~(1 << ssid);
				continue;
			}

			/* unavailable or not enabled on the parent? */
			if (!(cgrp_dfl_root.subsys_mask & (1 << ssid)) ||
			    (cgroup_parent(cgrp) &&
			     !(cgroup_parent(cgrp)->subtree_control & (1 << ssid)))) {
				ret = -ENOENT;
				goto out_unlock;
			}
		} else if (disable & (1 << ssid)) {
			if (!(cgrp->subtree_control & (1 << ssid))) {
				disable &= ~(1 << ssid);
				continue;
			}

			/* a child has it enabled? */
			cgroup_for_each_live_child(child, cgrp) {
				if (child->subtree_control & (1 << ssid)) {
					ret = -EBUSY;
					goto out_unlock;
				}
			}
		}
	}

	if (!enable && !disable) {
		ret = 0;
		goto out_unlock;
	}

	/*
	 * Except for the root, subtree_control must be zero for a cgroup
	 * with tasks so that child cgroups don't compete against tasks.
	 */
	if (enable && cgroup_parent(cgrp) && !list_empty(&cgrp->cset_links)) {
		ret = -EBUSY;
		goto out_unlock;
	}

	/*
	 * Update subsys masks and calculate what needs to be done.  More
	 * subsystems than specified may need to be enabled or disabled
	 * depending on subsystem dependencies.
	 */
	old_sc = cgrp->subtree_control;
	old_ss = cgrp->child_subsys_mask;
	new_sc = (old_sc | enable) & ~disable;
	new_ss = cgroup_calc_child_subsys_mask(cgrp, new_sc);

	css_enable = ~old_ss & new_ss;
	css_disable = old_ss & ~new_ss;
	enable |= css_enable;
	disable |= css_disable;

	/*
	 * Because css offlining is asynchronous, userland might try to
	 * re-enable the same controller while the previous instance is
	 * still around.  In such cases, wait till it's gone using
	 * offline_waitq.
	 */
	for_each_subsys_which(ss, ssid, &css_enable) {
		cgroup_for_each_live_child(child, cgrp) {
			DEFINE_WAIT(wait);

			if (!cgroup_css(child, ss))
				continue;

			cgroup_get(child);
			prepare_to_wait(&child->offline_waitq, &wait,
					TASK_UNINTERRUPTIBLE);
			cgroup_kn_unlock(of->kn);
			schedule();
			finish_wait(&child->offline_waitq, &wait);
			cgroup_put(child);

			return restart_syscall();
		}
	}

	cgrp->subtree_control = new_sc;
	cgrp->child_subsys_mask = new_ss;

	/*
	 * Create new csses or make the existing ones visible.  A css is
	 * created invisible if it's being implicitly enabled through
	 * dependency.  An invisible css is made visible when the userland
	 * explicitly enables it.
	 */
	for_each_subsys(ss, ssid) {
		if (!(enable & (1 << ssid)))
			continue;

		cgroup_for_each_live_child(child, cgrp) {
			if (css_enable & (1 << ssid))
				ret = create_css(child, ss,
					cgrp->subtree_control & (1 << ssid));
			else
				ret = cgroup_populate_dir(child, 1 << ssid);
			if (ret)
				goto err_undo_css;
		}
	}

	/*
	 * At this point, cgroup_e_css() results reflect the new csses
	 * making the following cgroup_update_dfl_csses() properly update
	 * css associations of all tasks in the subtree.
	 */
	ret = cgroup_update_dfl_csses(cgrp);
	if (ret)
		goto err_undo_css;

	/*
	 * All tasks are migrated out of disabled csses.  Kill or hide
	 * them.  A css is hidden when the userland requests it to be
	 * disabled while other subsystems are still depending on it.  The
	 * css must not actively control resources and be in the vanilla
	 * state if it's made visible again later.  Controllers which may
	 * be depended upon should provide ->css_reset() for this purpose.
	 */
	for_each_subsys(ss, ssid) {
		if (!(disable & (1 << ssid)))
			continue;

		cgroup_for_each_live_child(child, cgrp) {
			struct cgroup_subsys_state *css = cgroup_css(child, ss);

			if (css_disable & (1 << ssid)) {
				kill_css(css);
			} else {
				cgroup_clear_dir(child, 1 << ssid);
				if (ss->css_reset)
					ss->css_reset(css);
			}
		}
	}

	/*
	 * The effective csses of all the descendants (excluding @cgrp) may
	 * have changed.  Subsystems can optionally subscribe to this event
	 * by implementing ->css_e_css_changed() which is invoked if any of
	 * the effective csses seen from the css's cgroup may have changed.
	 */
	for_each_subsys(ss, ssid) {
		struct cgroup_subsys_state *this_css = cgroup_css(cgrp, ss);
		struct cgroup_subsys_state *css;

		if (!ss->css_e_css_changed || !this_css)
			continue;

		css_for_each_descendant_pre(css, this_css)
			if (css != this_css)
				ss->css_e_css_changed(css);
	}

	kernfs_activate(cgrp->kn);
	ret = 0;
out_unlock:
	cgroup_kn_unlock(of->kn);
	return ret ?: nbytes;

err_undo_css:
	cgrp->subtree_control = old_sc;
	cgrp->child_subsys_mask = old_ss;

	for_each_subsys(ss, ssid) {
		if (!(enable & (1 << ssid)))
			continue;

		cgroup_for_each_live_child(child, cgrp) {
			struct cgroup_subsys_state *css = cgroup_css(child, ss);

			if (!css)
				continue;

			if (css_enable & (1 << ssid))
				kill_css(css);
			else
				cgroup_clear_dir(child, 1 << ssid);
		}
	}
	goto out_unlock;
}
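
/*
 * Illustrative only: on the default hierarchy the handler above is
 * driven from userspace like
 *
 *	echo "+memory" > /sys/fs/cgroup/parent/cgroup.subtree_control
 *
 * which enables the memory controller for the children of "parent"
 * ("parent" is a hypothetical cgroup name).
 */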

static int cgroup_populated_show(struct seq_file *seq, void *v)
{
	seq_printf(seq, "%d\n", (bool)seq_css(seq)->cgroup->populated_cnt);
	return 0;
}

static ssize_t cgroup_file_write(struct kernfs_open_file *of, char *buf,
				 size_t nbytes, loff_t off)
{
	struct cgroup *cgrp = of->kn->parent->priv;
	struct cftype *cft = of->kn->priv;
	struct cgroup_subsys_state *css;
	int ret;

	if (cft->write)
		return cft->write(of, buf, nbytes, off);

	/*
	 * kernfs guarantees that a file isn't deleted with operations in
	 * flight, which means that the matching css is and stays alive and
	 * doesn't need to be pinned.  The RCU locking is not necessary
	 * either.  It's just for the convenience of using cgroup_css().
	 */
	rcu_read_lock();
	css = cgroup_css(cgrp, cft->ss);
	rcu_read_unlock();

	if (cft->write_u64) {
		unsigned long long v;
		ret = kstrtoull(buf, 0, &v);
		if (!ret)
			ret = cft->write_u64(css, cft, v);
	} else if (cft->write_s64) {
		long long v;
		ret = kstrtoll(buf, 0, &v);
		if (!ret)
			ret = cft->write_s64(css, cft, v);
	} else {
		ret = -EINVAL;
	}

	return ret ?: nbytes;
}

static void *cgroup_seqfile_start(struct seq_file *seq, loff_t *ppos)
{
	return seq_cft(seq)->seq_start(seq, ppos);
}

static void *cgroup_seqfile_next(struct seq_file *seq, void *v, loff_t *ppos)
{
	return seq_cft(seq)->seq_next(seq, v, ppos);
}

static void cgroup_seqfile_stop(struct seq_file *seq, void *v)
{
	seq_cft(seq)->seq_stop(seq, v);
}

static int cgroup_seqfile_show(struct seq_file *m, void *arg)
{
	struct cftype *cft = seq_cft(m);
	struct cgroup_subsys_state *css = seq_css(m);

	if (cft->seq_show)
		return cft->seq_show(m, arg);

	if (cft->read_u64)
		seq_printf(m, "%llu\n", cft->read_u64(css, cft));
	else if (cft->read_s64)
		seq_printf(m, "%lld\n", cft->read_s64(css, cft));
	else
		return -EINVAL;
	return 0;
}

static struct kernfs_ops cgroup_kf_single_ops = {
	.atomic_write_len	= PAGE_SIZE,
	.write			= cgroup_file_write,
	.seq_show		= cgroup_seqfile_show,
};

static struct kernfs_ops cgroup_kf_ops = {
	.atomic_write_len	= PAGE_SIZE,
	.write			= cgroup_file_write,
	.seq_start		= cgroup_seqfile_start,
	.seq_next		= cgroup_seqfile_next,
	.seq_stop		= cgroup_seqfile_stop,
	.seq_show		= cgroup_seqfile_show,
};
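
/*
 * Sketch (illustrative, all names hypothetical): a plain u64 knob needs
 * neither seq_start nor a custom write and is served by
 * cgroup_kf_single_ops via cgroup_init_cftypes() below:
 *
 *	static struct cftype demo_files[] = {
 *		{
 *			.name = "demo.weight",
 *			.read_u64 = demo_weight_read,
 *			.write_u64 = demo_weight_write,
 *		},
 *		{ }	(zero-length name terminates the array)
 *	};
 */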

/*
 * cgroup_rename - Only allow simple rename of directories in place.
 */
static int cgroup_rename(struct kernfs_node *kn, struct kernfs_node *new_parent,
			 const char *new_name_str)
{
	struct cgroup *cgrp = kn->priv;
	int ret;

	if (kernfs_type(kn) != KERNFS_DIR)
		return -ENOTDIR;
	if (kn->parent != new_parent)
		return -EIO;

	/*
	 * This isn't a proper migration and its usefulness is very
	 * limited.  Disallow on the default hierarchy.
	 */
	if (cgroup_on_dfl(cgrp))
		return -EPERM;

	/*
	 * We're gonna grab cgroup_mutex which nests outside kernfs
	 * active_ref.  kernfs_rename() doesn't require active_ref
	 * protection.  Break them before grabbing cgroup_mutex.
	 */
	kernfs_break_active_protection(new_parent);
	kernfs_break_active_protection(kn);

	mutex_lock(&cgroup_mutex);

	ret = kernfs_rename(kn, new_parent, new_name_str);

	mutex_unlock(&cgroup_mutex);

	kernfs_unbreak_active_protection(kn);
	kernfs_unbreak_active_protection(new_parent);
	return ret;
}

/* set uid and gid of cgroup dirs and files to that of the creator */
static int cgroup_kn_set_ugid(struct kernfs_node *kn)
{
	struct iattr iattr = { .ia_valid = ATTR_UID | ATTR_GID,
			       .ia_uid = current_fsuid(),
			       .ia_gid = current_fsgid(), };

	if (uid_eq(iattr.ia_uid, GLOBAL_ROOT_UID) &&
	    gid_eq(iattr.ia_gid, GLOBAL_ROOT_GID))
		return 0;

	return kernfs_setattr(kn, &iattr);
}

static int cgroup_add_file(struct cgroup *cgrp, struct cftype *cft)
{
	char name[CGROUP_FILE_NAME_MAX];
	struct kernfs_node *kn;
	struct lock_class_key *key = NULL;
	int ret;

#ifdef CONFIG_DEBUG_LOCK_ALLOC
	key = &cft->lockdep_key;
#endif
	kn = __kernfs_create_file(cgrp->kn, cgroup_file_name(cgrp, cft, name),
				  cgroup_file_mode(cft), 0, cft->kf_ops, cft,
				  NULL, key);
	if (IS_ERR(kn))
		return PTR_ERR(kn);

	ret = cgroup_kn_set_ugid(kn);
	if (ret) {
		kernfs_remove(kn);
		return ret;
	}

	if (cft->write == cgroup_procs_write)
		cgrp->procs_kn = kn;
	else if (cft->seq_show == cgroup_populated_show)
		cgrp->populated_kn = kn;
	return 0;
}

/**
 * cgroup_addrm_files - add or remove files to a cgroup directory
 * @cgrp: the target cgroup
 * @cfts: array of cftypes to be added
 * @is_add: whether to add or remove
 *
 * Depending on @is_add, add or remove files defined by @cfts on @cgrp.
 * For removals, this function never fails.  If addition fails, this
 * function doesn't remove files already added.  The caller is responsible
 * for cleaning up.
 */
static int cgroup_addrm_files(struct cgroup *cgrp, struct cftype cfts[],
			      bool is_add)
{
	struct cftype *cft;
	int ret;

	lockdep_assert_held(&cgroup_mutex);

	for (cft = cfts; cft->name[0] != '\0'; cft++) {
		/* does cft->flags tell us to skip this file on @cgrp? */
		if ((cft->flags & __CFTYPE_ONLY_ON_DFL) && !cgroup_on_dfl(cgrp))
			continue;
		if ((cft->flags & __CFTYPE_NOT_ON_DFL) && cgroup_on_dfl(cgrp))
			continue;
		if ((cft->flags & CFTYPE_NOT_ON_ROOT) && !cgroup_parent(cgrp))
			continue;
		if ((cft->flags & CFTYPE_ONLY_ON_ROOT) && cgroup_parent(cgrp))
			continue;

		if (is_add) {
			ret = cgroup_add_file(cgrp, cft);
			if (ret) {
				pr_warn("%s: failed to add %s, err=%d\n",
					__func__, cft->name, ret);
				return ret;
			}
		} else {
			cgroup_rm_file(cgrp, cft);
		}
	}
	return 0;
}

static int cgroup_apply_cftypes(struct cftype *cfts, bool is_add)
{
	LIST_HEAD(pending);
	struct cgroup_subsys *ss = cfts[0].ss;
	struct cgroup *root = &ss->root->cgrp;
	struct cgroup_subsys_state *css;
	int ret = 0;

	lockdep_assert_held(&cgroup_mutex);

	/* add/rm files for all cgroups created before */
	css_for_each_descendant_pre(css, cgroup_css(root, ss)) {
		struct cgroup *cgrp = css->cgroup;

		if (cgroup_is_dead(cgrp))
			continue;

		ret = cgroup_addrm_files(cgrp, cfts, is_add);
		if (ret)
			break;
	}

	if (is_add && !ret)
		kernfs_activate(root->kn);
	return ret;
}

static void cgroup_exit_cftypes(struct cftype *cfts)
{
	struct cftype *cft;

	for (cft = cfts; cft->name[0] != '\0'; cft++) {
		/* free copy for custom atomic_write_len, see init_cftypes() */
		if (cft->max_write_len && cft->max_write_len != PAGE_SIZE)
			kfree(cft->kf_ops);
		cft->kf_ops = NULL;
		cft->ss = NULL;

		/* revert flags set by cgroup core while adding @cfts */
		cft->flags &= ~(__CFTYPE_ONLY_ON_DFL | __CFTYPE_NOT_ON_DFL);
	}
}

static int cgroup_init_cftypes(struct cgroup_subsys *ss, struct cftype *cfts)
{
	struct cftype *cft;

	for (cft = cfts; cft->name[0] != '\0'; cft++) {
		struct kernfs_ops *kf_ops;

		WARN_ON(cft->ss || cft->kf_ops);

		if (cft->seq_start)
			kf_ops = &cgroup_kf_ops;
		else
			kf_ops = &cgroup_kf_single_ops;

		/*
		 * Ugh... if @cft wants a custom max_write_len, we need to
		 * make a copy of kf_ops to set its atomic_write_len.
		 */
		if (cft->max_write_len && cft->max_write_len != PAGE_SIZE) {
			kf_ops = kmemdup(kf_ops, sizeof(*kf_ops), GFP_KERNEL);
			if (!kf_ops) {
				cgroup_exit_cftypes(cfts);
				return -ENOMEM;
			}
			kf_ops->atomic_write_len = cft->max_write_len;
		}

		cft->kf_ops = kf_ops;
		cft->ss = ss;
	}

	return 0;
}

static int cgroup_rm_cftypes_locked(struct cftype *cfts)
{
	lockdep_assert_held(&cgroup_mutex);

	if (!cfts || !cfts[0].ss)
		return -ENOENT;

	list_del(&cfts->node);
	cgroup_apply_cftypes(cfts, false);
	cgroup_exit_cftypes(cfts);
	return 0;
}

/**
 * cgroup_rm_cftypes - remove an array of cftypes from a subsystem
 * @cfts: zero-length name terminated array of cftypes
 *
 * Unregister @cfts.  Files described by @cfts are removed from all
 * existing cgroups and all future cgroups won't have them either.  This
 * function can be called anytime whether @cfts' subsys is attached or not.
 *
 * Returns 0 on successful unregistration, -ENOENT if @cfts is not
 * registered.
 */
int cgroup_rm_cftypes(struct cftype *cfts)
{
	int ret;

	mutex_lock(&cgroup_mutex);
	ret = cgroup_rm_cftypes_locked(cfts);
	mutex_unlock(&cgroup_mutex);
	return ret;
}

/**
 * cgroup_add_cftypes - add an array of cftypes to a subsystem
 * @ss: target cgroup subsystem
 * @cfts: zero-length name terminated array of cftypes
 *
 * Register @cfts to @ss.  Files described by @cfts are created for all
 * existing cgroups to which @ss is attached and all future cgroups will
 * have them too.  This function can be called anytime whether @ss is
 * attached or not.
 *
 * Returns 0 on successful registration, -errno on failure.  Note that this
 * function currently returns 0 as long as @cfts registration is successful
 * even if some file creation attempts on existing cgroups fail.
 */
static int cgroup_add_cftypes(struct cgroup_subsys *ss, struct cftype *cfts)
{
	int ret;

	if (ss->disabled)
		return 0;

	if (!cfts || cfts[0].name[0] == '\0')
		return 0;

	ret = cgroup_init_cftypes(ss, cfts);
	if (ret)
		return ret;

	mutex_lock(&cgroup_mutex);

	list_add_tail(&cfts->node, &ss->cfts);
	ret = cgroup_apply_cftypes(cfts, true);
	if (ret)
		cgroup_rm_cftypes_locked(cfts);

	mutex_unlock(&cgroup_mutex);
	return ret;
}

/**
 * cgroup_add_dfl_cftypes - add an array of cftypes for default hierarchy
 * @ss: target cgroup subsystem
 * @cfts: zero-length name terminated array of cftypes
 *
 * Similar to cgroup_add_cftypes() but the added files are only used for
 * the default hierarchy.
 */
int cgroup_add_dfl_cftypes(struct cgroup_subsys *ss, struct cftype *cfts)
{
	struct cftype *cft;

	for (cft = cfts; cft && cft->name[0] != '\0'; cft++)
		cft->flags |= __CFTYPE_ONLY_ON_DFL;
	return cgroup_add_cftypes(ss, cfts);
}

/**
 * cgroup_add_legacy_cftypes - add an array of cftypes for legacy hierarchies
 * @ss: target cgroup subsystem
 * @cfts: zero-length name terminated array of cftypes
 *
 * Similar to cgroup_add_cftypes() but the added files are only used for
 * the legacy hierarchies.
 */
int cgroup_add_legacy_cftypes(struct cgroup_subsys *ss, struct cftype *cfts)
{
	struct cftype *cft;

	/*
	 * If cgroup_legacy_files_on_dfl, we want to show the legacy files
	 * on the dfl hierarchy but iff the target subsystem hasn't been
	 * updated for the dfl hierarchy yet.
	 */
	if (!cgroup_legacy_files_on_dfl ||
	    ss->dfl_cftypes != ss->legacy_cftypes) {
		for (cft = cfts; cft && cft->name[0] != '\0'; cft++)
			cft->flags |= __CFTYPE_NOT_ON_DFL;
	}

	return cgroup_add_cftypes(ss, cfts);
}
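
/*
 * Usage sketch (illustrative): a controller registers its files once at
 * init time, e.g.:
 *
 *	WARN_ON(cgroup_add_legacy_cftypes(&demo_cgrp_subsys, demo_files));
 *
 * demo_cgrp_subsys and demo_files are hypothetical; real controllers
 * hang their arrays off cgroup_subsys->legacy_cftypes/dfl_cftypes.
 */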

/**
 * cgroup_task_count - count the number of tasks in a cgroup.
 * @cgrp: the cgroup in question
 *
 * Return the number of tasks in the cgroup.
 */
static int cgroup_task_count(const struct cgroup *cgrp)
{
	int count = 0;
	struct cgrp_cset_link *link;

	down_read(&css_set_rwsem);
	list_for_each_entry(link, &cgrp->cset_links, cset_link)
		count += atomic_read(&link->cset->refcount);
	up_read(&css_set_rwsem);
	return count;
}

/**
 * css_next_child - find the next child of a given css
 * @pos: the current position (%NULL to initiate traversal)
 * @parent: css whose children to walk
 *
 * This function returns the next child of @parent and should be called
 * under either cgroup_mutex or RCU read lock.  The only requirement is
 * that @parent and @pos are accessible.  The next sibling is guaranteed to
 * be returned regardless of their states.
 *
 * If a subsystem synchronizes ->css_online() and the start of iteration, a
 * css which finished ->css_online() is guaranteed to be visible in the
 * future iterations and will stay visible until the last reference is put.
 * A css which hasn't finished ->css_online() or already finished
 * ->css_offline() may show up during traversal.  It's each subsystem's
 * responsibility to synchronize against on/offlining.
 */
struct cgroup_subsys_state *css_next_child(struct cgroup_subsys_state *pos,
					   struct cgroup_subsys_state *parent)
{
	struct cgroup_subsys_state *next;

	cgroup_assert_mutex_or_rcu_locked();

	/*
	 * @pos could already have been unlinked from the sibling list.
	 * Once a cgroup is removed, its ->sibling.next is no longer
	 * updated when its next sibling changes.  CSS_RELEASED is set when
	 * @pos is taken off list, at which time its next pointer is valid,
	 * and, as releases are serialized, the one pointed to by the next
	 * pointer is guaranteed to not have started release yet.  This
	 * implies that if we observe !CSS_RELEASED on @pos in this RCU
	 * critical section, the one pointed to by its next pointer is
	 * guaranteed to not have finished its RCU grace period even if we
	 * have dropped rcu_read_lock() in between iterations.
	 *
	 * If @pos has CSS_RELEASED set, its next pointer can't be
	 * dereferenced; however, as each css is given a monotonically
	 * increasing unique serial number and always appended to the
	 * sibling list, the next one can be found by walking the parent's
	 * children until the first css with higher serial number than
	 * @pos's.  While this path can be slower, it happens iff iteration
	 * races against release and the race window is very small.
	 */
	if (!pos) {
		next = list_entry_rcu(parent->children.next, struct cgroup_subsys_state, sibling);
	} else if (likely(!(pos->flags & CSS_RELEASED))) {
		next = list_entry_rcu(pos->sibling.next, struct cgroup_subsys_state, sibling);
	} else {
		list_for_each_entry_rcu(next, &parent->children, sibling)
			if (next->serial_nr > pos->serial_nr)
				break;
	}

	/*
	 * @next, if not pointing to the head, can be dereferenced and is
	 * the next sibling.
	 */
	if (&next->sibling != &parent->children)
		return next;
	return NULL;
}

/**
 * css_next_descendant_pre - find the next descendant for pre-order walk
 * @pos: the current position (%NULL to initiate traversal)
 * @root: css whose descendants to walk
 *
 * To be used by css_for_each_descendant_pre().  Find the next descendant
 * to visit for pre-order traversal of @root's descendants.  @root is
 * included in the iteration and the first node to be visited.
 *
 * While this function requires cgroup_mutex or RCU read locking, it
 * doesn't require the whole traversal to be contained in a single critical
 * section.  This function will return the correct next descendant as long
 * as both @pos and @root are accessible and @pos is a descendant of @root.
 *
 * If a subsystem synchronizes ->css_online() and the start of iteration, a
 * css which finished ->css_online() is guaranteed to be visible in the
 * future iterations and will stay visible until the last reference is put.
 * A css which hasn't finished ->css_online() or already finished
 * ->css_offline() may show up during traversal.  It's each subsystem's
 * responsibility to synchronize against on/offlining.
 */
struct cgroup_subsys_state *
css_next_descendant_pre(struct cgroup_subsys_state *pos,
			struct cgroup_subsys_state *root)
{
	struct cgroup_subsys_state *next;

	cgroup_assert_mutex_or_rcu_locked();

	/* if first iteration, visit @root */
	if (!pos)
		return root;

	/* visit the first child if one exists */
	next = css_next_child(NULL, pos);
	if (next)
		return next;

	/* no child, visit my or the closest ancestor's next sibling */
	while (pos != root) {
		next = css_next_child(pos, pos->parent);
		if (next)
			return next;
		pos = pos->parent;
	}

	return NULL;
}

/**
 * css_rightmost_descendant - return the rightmost descendant of a css
 * @pos: css of interest
 *
 * Return the rightmost descendant of @pos.  If there's no descendant, @pos
 * is returned.  This can be used during pre-order traversal to skip the
 * subtree of @pos.
 *
 * While this function requires cgroup_mutex or RCU read locking, it
 * doesn't require the whole traversal to be contained in a single critical
 * section.  This function will return the correct rightmost descendant as
 * long as @pos is accessible.
 */
struct cgroup_subsys_state *
css_rightmost_descendant(struct cgroup_subsys_state *pos)
{
	struct cgroup_subsys_state *last, *tmp;

	cgroup_assert_mutex_or_rcu_locked();

	do {
		last = pos;
		/* ->prev isn't RCU safe, walk ->next till the end */
		pos = NULL;
		css_for_each_child(tmp, last)
			pos = tmp;
	} while (pos);

	return last;
}
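
/*
 * Illustrative sketch, not part of the kernel proper: how a caller might
 * combine css_for_each_descendant_pre() (which wraps
 * css_next_descendant_pre() below) with css_rightmost_descendant() to
 * prune whole subtrees during a pre-order walk.  The function name and the
 * CSS_ONLINE-based pruning condition are hypothetical; a real controller
 * would substitute its own predicate and its own synchronization against
 * on/offlining.
 */
static void __maybe_unused
example_walk_skipping_offline(struct cgroup_subsys_state *root)
{
	struct cgroup_subsys_state *pos;

	rcu_read_lock();
	css_for_each_descendant_pre(pos, root) {
		/* setting @pos to its rightmost descendant skips the subtree */
		if (!(pos->flags & CSS_ONLINE))
			pos = css_rightmost_descendant(pos);
	}
	rcu_read_unlock();
}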

static struct cgroup_subsys_state *
css_leftmost_descendant(struct cgroup_subsys_state *pos)
{
	struct cgroup_subsys_state *last;

	do {
		last = pos;
		pos = css_next_child(NULL, pos);
	} while (pos);

	return last;
}

/**
 * css_next_descendant_post - find the next descendant for post-order walk
 * @pos: the current position (%NULL to initiate traversal)
 * @root: css whose descendants to walk
 *
 * To be used by css_for_each_descendant_post().  Find the next descendant
 * to visit for post-order traversal of @root's descendants.  @root is
 * included in the iteration and the last node to be visited.
 *
 * While this function requires cgroup_mutex or RCU read locking, it
 * doesn't require the whole traversal to be contained in a single critical
 * section.  This function will return the correct next descendant as long
 * as both @pos and @root are accessible and @pos is a descendant of
 * @root.
 *
 * If a subsystem synchronizes ->css_online() and the start of iteration, a
 * css which finished ->css_online() is guaranteed to be visible in the
 * future iterations and will stay visible until the last reference is put.
 * A css which hasn't finished ->css_online() or already finished
 * ->css_offline() may show up during traversal.  It's each subsystem's
 * responsibility to synchronize against on/offlining.
 */
struct cgroup_subsys_state *
css_next_descendant_post(struct cgroup_subsys_state *pos,
			 struct cgroup_subsys_state *root)
{
	struct cgroup_subsys_state *next;

	cgroup_assert_mutex_or_rcu_locked();

	/* if first iteration, visit leftmost descendant which may be @root */
	if (!pos)
		return css_leftmost_descendant(root);

	/* if we visited @root, we're done */
	if (pos == root)
		return NULL;

	/* if there's an unvisited sibling, visit its leftmost descendant */
	next = css_next_child(pos, pos->parent);
	if (next)
		return css_leftmost_descendant(next);

	/* no sibling left, visit parent */
	return pos->parent;
}

/**
 * css_has_online_children - does a css have online children
 * @css: the target css
 *
 * Returns %true if @css has any online children; otherwise, %false.  This
 * function can be called from any context but the caller is responsible
 * for synchronizing against on/offlining as necessary.
 */
bool css_has_online_children(struct cgroup_subsys_state *css)
{
	struct cgroup_subsys_state *child;
	bool ret = false;

	rcu_read_lock();
	css_for_each_child(child, css) {
		if (child->flags & CSS_ONLINE) {
			ret = true;
			break;
		}
	}
	rcu_read_unlock();
	return ret;
}

/**
 * css_advance_task_iter - advance a task iterator to the next css_set
 * @it: the iterator to advance
 *
 * Advance @it to the next css_set to walk.
 */
static void css_advance_task_iter(struct css_task_iter *it)
{
	struct list_head *l = it->cset_pos;
	struct cgrp_cset_link *link;
	struct css_set *cset;

	/* Advance to the next non-empty css_set */
	do {
		l = l->next;
		if (l == it->cset_head) {
			it->cset_pos = NULL;
			return;
		}

		if (it->ss) {
			cset = container_of(l, struct css_set,
					    e_cset_node[it->ss->id]);
		} else {
			link = list_entry(l, struct cgrp_cset_link, cset_link);
			cset = link->cset;
		}
	} while (list_empty(&cset->tasks) && list_empty(&cset->mg_tasks));

	it->cset_pos = l;

	if (!list_empty(&cset->tasks))
		it->task_pos = cset->tasks.next;
	else
		it->task_pos = cset->mg_tasks.next;

	it->tasks_head = &cset->tasks;
	it->mg_tasks_head = &cset->mg_tasks;
}

/**
 * css_task_iter_start - initiate task iteration
 * @css: the css to walk tasks of
 * @it: the task iterator to use
 *
 * Initiate iteration through the tasks of @css.  The caller can call
 * css_task_iter_next() to walk through the tasks until the function
 * returns NULL.  On completion of iteration, css_task_iter_end() must be
 * called.
 *
 * Note that this function acquires a lock which is released when the
 * iteration finishes.  The caller can't sleep while iteration is in
 * progress.
 */
void css_task_iter_start(struct cgroup_subsys_state *css,
			 struct css_task_iter *it)
	__acquires(css_set_rwsem)
{
	/* no one should try to iterate before mounting cgroups */
	WARN_ON_ONCE(!use_task_css_set_links);

	down_read(&css_set_rwsem);

	it->ss = css->ss;

	if (it->ss)
		it->cset_pos = &css->cgroup->e_csets[css->ss->id];
	else
		it->cset_pos = &css->cgroup->cset_links;

	it->cset_head = it->cset_pos;

	css_advance_task_iter(it);
}

/**
 * css_task_iter_next - return the next task for the iterator
 * @it: the task iterator being iterated
 *
 * The "next" function for task iteration.  @it should have been
 * initialized via css_task_iter_start().  Returns NULL when the iteration
 * reaches the end.
 */
struct task_struct *css_task_iter_next(struct css_task_iter *it)
{
	struct task_struct *res;
	struct list_head *l = it->task_pos;

	/* If the iterator's cset position is NULL, we have no tasks */
	if (!it->cset_pos)
		return NULL;
	res = list_entry(l, struct task_struct, cg_list);

	/*
	 * Advance iterator to find next entry.  cset->tasks is consumed
	 * first and then ->mg_tasks.  After ->mg_tasks, we move onto the
	 * next cset.
	 */
	l = l->next;

	if (l == it->tasks_head)
		l = it->mg_tasks_head->next;

	if (l == it->mg_tasks_head)
		css_advance_task_iter(it);
	else
		it->task_pos = l;

	return res;
}

/**
 * css_task_iter_end - finish task iteration
 * @it: the task iterator to finish
 *
 * Finish task iteration started by css_task_iter_start().
 */
void css_task_iter_end(struct css_task_iter *it)
	__releases(css_set_rwsem)
{
	up_read(&css_set_rwsem);
}
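
/*
 * Illustrative sketch, not part of the kernel proper: the canonical
 * start/next/end pattern for the iterator above, here counting runnable
 * tasks attached to a css.  The function name is hypothetical.  Note that
 * css_set_rwsem is read-held between _start() and _end(), so the loop
 * body must not sleep.
 */
static int __maybe_unused
example_count_running(struct cgroup_subsys_state *css)
{
	struct css_task_iter it;
	struct task_struct *task;
	int nr_running = 0;

	css_task_iter_start(css, &it);
	while ((task = css_task_iter_next(&it)))
		if (task->state == TASK_RUNNING)
			nr_running++;
	css_task_iter_end(&it);

	return nr_running;
}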

/**
 * cgroup_transfer_tasks - move tasks from one cgroup to another
 * @to: cgroup to which the tasks will be moved
 * @from: cgroup in which the tasks currently reside
 *
 * Locking rules between cgroup_post_fork() and the migration path
 * guarantee that, if a task is forking while being migrated, the new child
 * is guaranteed to be either visible in the source cgroup after the
 * parent's migration is complete or put into the target cgroup.  No task
 * can slip out of migration through forking.
 */
int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from)
{
	LIST_HEAD(preloaded_csets);
	struct cgrp_cset_link *link;
	struct css_task_iter it;
	struct task_struct *task;
	int ret;

	mutex_lock(&cgroup_mutex);

	/* all tasks in @from are being moved, all csets are source */
	down_read(&css_set_rwsem);
	list_for_each_entry(link, &from->cset_links, cset_link)
		cgroup_migrate_add_src(link->cset, to, &preloaded_csets);
	up_read(&css_set_rwsem);

	ret = cgroup_migrate_prepare_dst(to, &preloaded_csets);
	if (ret)
		goto out_err;

	/*
	 * Migrate tasks one-by-one until @from is empty.  This fails iff
	 * ->can_attach() fails.
	 */
	do {
		css_task_iter_start(&from->self, &it);
		task = css_task_iter_next(&it);
		if (task)
			get_task_struct(task);
		css_task_iter_end(&it);

		if (task) {
			ret = cgroup_migrate(to, task, false);
			put_task_struct(task);
		}
	} while (task && !ret);
out_err:
	cgroup_migrate_finish(&preloaded_csets);
	mutex_unlock(&cgroup_mutex);
	return ret;
}

/*
 * Stuff for reading the 'tasks'/'procs' files.
 *
 * Reading this file can return large amounts of data if a cgroup has
 * *lots* of attached tasks. So it may need several calls to read(),
 * but we cannot guarantee that the information we produce is correct
 * unless we produce it entirely atomically.
 *
 */

/* which pidlist file are we talking about? */
enum cgroup_filetype {
	CGROUP_FILE_PROCS,
	CGROUP_FILE_TASKS,
};

/*
 * A pidlist is a list of pids that virtually represents the contents of one
 * of the cgroup files ("procs" or "tasks"). We keep a list of such pidlists,
 * a pair (one each for procs, tasks) for each pid namespace that's relevant
 * to the cgroup.
 */
struct cgroup_pidlist {
	/*
	 * used to find which pidlist is wanted. doesn't change as long as
	 * this particular list stays in the list.
	 */
	struct { enum cgroup_filetype type; struct pid_namespace *ns; } key;
	/* array of xids */
	pid_t *list;
	/* how many elements the above list has */
	int length;
	/* each of these stored in a list by its cgroup */
	struct list_head links;
	/* pointer to the cgroup we belong to, for list removal purposes */
	struct cgroup *owner;
	/* for delayed destruction */
	struct delayed_work destroy_dwork;
};

/*
 * The following two functions "fix" the issue where there are more pids
 * than kmalloc will give memory for; in such cases, we use vmalloc/vfree.
 * TODO: replace with a kernel-wide solution to this problem
 */
#define PIDLIST_TOO_LARGE(c) ((c) * sizeof(pid_t) > (PAGE_SIZE * 2))
static void *pidlist_allocate(int count)
{
	if (PIDLIST_TOO_LARGE(count))
		return vmalloc(count * sizeof(pid_t));
	else
		return kmalloc(count * sizeof(pid_t), GFP_KERNEL);
}

static void pidlist_free(void *p)
{
	kvfree(p);
}

/*
 * Used to destroy all pidlists lingering waiting for destroy timer.  None
 * should be left afterwards.
 */
static void cgroup_pidlist_destroy_all(struct cgroup *cgrp)
{
	struct cgroup_pidlist *l, *tmp_l;

	mutex_lock(&cgrp->pidlist_mutex);
	list_for_each_entry_safe(l, tmp_l, &cgrp->pidlists, links)
		mod_delayed_work(cgroup_pidlist_destroy_wq, &l->destroy_dwork, 0);
	mutex_unlock(&cgrp->pidlist_mutex);

	flush_workqueue(cgroup_pidlist_destroy_wq);
	BUG_ON(!list_empty(&cgrp->pidlists));
}

static void cgroup_pidlist_destroy_work_fn(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct cgroup_pidlist *l = container_of(dwork, struct cgroup_pidlist,
						destroy_dwork);
	struct cgroup_pidlist *tofree = NULL;

	mutex_lock(&l->owner->pidlist_mutex);

	/*
	 * Destroy iff we didn't get queued again.  The state won't change
	 * as destroy_dwork can only be queued while locked.
	 */
	if (!delayed_work_pending(dwork)) {
		list_del(&l->links);
		pidlist_free(l->list);
		put_pid_ns(l->key.ns);
		tofree = l;
	}

	mutex_unlock(&l->owner->pidlist_mutex);
	kfree(tofree);
}

/*
 * pidlist_uniq - given a kmalloc()ed list, strip out all duplicate entries
 * Returns the number of unique elements.
 */
static int pidlist_uniq(pid_t *list, int length)
{
	int src, dest = 1;

	/*
	 * we presume the 0th element is unique, so src starts at 1. trivial
	 * edge cases first; no work needs to be done for either
	 */
	if (length == 0 || length == 1)
		return length;
	/* src and dest walk down the list; dest counts unique elements */
	for (src = 1; src < length; src++) {
		/* find next unique element */
		while (list[src] == list[src-1]) {
			src++;
			if (src == length)
				goto after;
		}
		/* dest always points to where the next unique element goes */
		list[dest] = list[src];
		dest++;
	}
after:
	return dest;
}

/*
 * The two pid files - tasks and cgroup.procs - guaranteed that the result
 * is sorted, which forced this whole pidlist fiasco.  As pid order is
 * different per namespace, each namespace needs a differently sorted list,
 * making it impossible to use, for example, a single rbtree of member
 * tasks sorted by task pointer.  As pidlists can be fairly large,
 * allocating one per open file is dangerous, so cgroup had to implement a
 * shared pool of pidlists keyed by cgroup and namespace.
 *
 * All this extra complexity was caused by the original implementation
 * committing to an entirely unnecessary property.  In the long term, we
 * want to do away with it.  Explicitly scramble sort order if on the
 * default hierarchy so that no such expectation exists in the new
 * interface.
 *
 * Scrambling is done by swapping every two consecutive bits, which is a
 * non-identity one-to-one mapping which disturbs sort order sufficiently.
 */
static pid_t pid_fry(pid_t pid)
{
	unsigned a = pid & 0x55555555;
	unsigned b = pid & 0xAAAAAAAA;

	return (a << 1) | (b >> 1);
}

static pid_t cgroup_pid_fry(struct cgroup *cgrp, pid_t pid)
{
	if (cgroup_on_dfl(cgrp))
		return pid_fry(pid);
	else
		return pid;
}
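
/*
 * Illustrative sketch, not part of the kernel proper: swapping adjacent
 * bit pairs is its own inverse, so applying pid_fry() twice recovers the
 * original pid.  This hypothetical self-test makes that round-trip
 * property explicit.
 */
static void __maybe_unused example_pid_fry_selftest(void)
{
	pid_t pid;

	for (pid = 1; pid < 1024; pid++)
		WARN_ON_ONCE(pid_fry(pid_fry(pid)) != pid);
}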

static int cmppid(const void *a, const void *b)
{
	return *(pid_t *)a - *(pid_t *)b;
}

static int fried_cmppid(const void *a, const void *b)
{
	return pid_fry(*(pid_t *)a) - pid_fry(*(pid_t *)b);
}

static struct cgroup_pidlist *cgroup_pidlist_find(struct cgroup *cgrp,
						  enum cgroup_filetype type)
{
	struct cgroup_pidlist *l;
	/* don't need task_nsproxy() if we're looking at ourself */
	struct pid_namespace *ns = task_active_pid_ns(current);

	lockdep_assert_held(&cgrp->pidlist_mutex);

	list_for_each_entry(l, &cgrp->pidlists, links)
		if (l->key.type == type && l->key.ns == ns)
			return l;
	return NULL;
}

/*
 * find the appropriate pidlist for our purpose (given procs vs tasks)
 * returns with the lock on that pidlist already held, and takes care
 * of the use count, or returns NULL with no locks held if we're out of
 * memory.
 */
static struct cgroup_pidlist *cgroup_pidlist_find_create(struct cgroup *cgrp,
						enum cgroup_filetype type)
{
	struct cgroup_pidlist *l;

	lockdep_assert_held(&cgrp->pidlist_mutex);

	l = cgroup_pidlist_find(cgrp, type);
	if (l)
		return l;

	/* entry not found; create a new one */
	l = kzalloc(sizeof(struct cgroup_pidlist), GFP_KERNEL);
	if (!l)
		return l;

	INIT_DELAYED_WORK(&l->destroy_dwork, cgroup_pidlist_destroy_work_fn);
	l->key.type = type;
	/* don't need task_nsproxy() if we're looking at ourself */
	l->key.ns = get_pid_ns(task_active_pid_ns(current));
	l->owner = cgrp;
	list_add(&l->links, &cgrp->pidlists);
	return l;
}

/*
 * Load a cgroup's pidarray with either procs' tgids or tasks' pids
 */
static int pidlist_array_load(struct cgroup *cgrp, enum cgroup_filetype type,
			      struct cgroup_pidlist **lp)
{
	pid_t *array;
	int length;
	int pid, n = 0; /* used for populating the array */
	struct css_task_iter it;
	struct task_struct *tsk;
	struct cgroup_pidlist *l;

	lockdep_assert_held(&cgrp->pidlist_mutex);

	/*
	 * If cgroup gets more users after we read count, we won't have
	 * enough space - tough.  This race is indistinguishable to the
	 * caller from the case that the additional cgroup users didn't
	 * show up until sometime later on.
	 */
	length = cgroup_task_count(cgrp);
	array = pidlist_allocate(length);
	if (!array)
		return -ENOMEM;
	/* now, populate the array */
	css_task_iter_start(&cgrp->self, &it);
	while ((tsk = css_task_iter_next(&it))) {
		if (unlikely(n == length))
			break;
		/* get tgid or pid for procs or tasks file respectively */
		if (type == CGROUP_FILE_PROCS)
			pid = task_tgid_vnr(tsk);
		else
			pid = task_pid_vnr(tsk);
		if (pid > 0) /* make sure to only use valid results */
			array[n++] = pid;
	}
	css_task_iter_end(&it);
	length = n;
	/* now sort & (if procs) strip out duplicates */
	if (cgroup_on_dfl(cgrp))
		sort(array, length, sizeof(pid_t), fried_cmppid, NULL);
	else
		sort(array, length, sizeof(pid_t), cmppid, NULL);
	if (type == CGROUP_FILE_PROCS)
		length = pidlist_uniq(array, length);

	l = cgroup_pidlist_find_create(cgrp, type);
	if (!l) {
		pidlist_free(array);
		return -ENOMEM;
	}

	/* store array, freeing old if necessary */
	pidlist_free(l->list);
	l->list = array;
	l->length = length;
	*lp = l;
	return 0;
}

/**
 * cgroupstats_build - build and fill cgroupstats
 * @stats: cgroupstats to fill information into
 * @dentry: A dentry entry belonging to the cgroup for which stats have
 * been requested.
 *
 * Build and fill cgroupstats so that taskstats can export it to user
 * space.
 */
int cgroupstats_build(struct cgroupstats *stats, struct dentry *dentry)
{
	struct kernfs_node *kn = kernfs_node_from_dentry(dentry);
	struct cgroup *cgrp;
	struct css_task_iter it;
	struct task_struct *tsk;

	/* it should be kernfs_node belonging to cgroupfs and is a directory */
	if (dentry->d_sb->s_type != &cgroup_fs_type || !kn ||
	    kernfs_type(kn) != KERNFS_DIR)
		return -EINVAL;

	mutex_lock(&cgroup_mutex);

	/*
	 * We aren't being called from kernfs and there's no guarantee on
	 * @kn->priv's validity.  For this and css_tryget_online_from_dir(),
	 * @kn->priv is RCU safe.  Let's do the RCU dancing.
	 */
	rcu_read_lock();
	cgrp = rcu_dereference(kn->priv);
	if (!cgrp || cgroup_is_dead(cgrp)) {
		rcu_read_unlock();
		mutex_unlock(&cgroup_mutex);
		return -ENOENT;
	}
	rcu_read_unlock();

	css_task_iter_start(&cgrp->self, &it);
	while ((tsk = css_task_iter_next(&it))) {
		switch (tsk->state) {
		case TASK_RUNNING:
			stats->nr_running++;
			break;
		case TASK_INTERRUPTIBLE:
			stats->nr_sleeping++;
			break;
		case TASK_UNINTERRUPTIBLE:
			stats->nr_uninterruptible++;
			break;
		case TASK_STOPPED:
			stats->nr_stopped++;
			break;
		default:
			if (delayacct_is_task_waiting_on_io(tsk))
				stats->nr_io_wait++;
			break;
		}
	}
	css_task_iter_end(&it);

	mutex_unlock(&cgroup_mutex);
	return 0;
}


/*
 * seq_file methods for the tasks/procs files. The seq_file position is the
 * next pid to display; the seq_file iterator is a pointer to the pid
 * in the cgroup->l->list array.
 */

static void *cgroup_pidlist_start(struct seq_file *s, loff_t *pos)
{
	/*
	 * Initially we receive a position value that corresponds to
	 * one more than the last pid shown (or 0 on the first call or
	 * after a seek to the start). Use a binary-search to find the
	 * next pid to display, if any
	 */
	struct kernfs_open_file *of = s->private;
	struct cgroup *cgrp = seq_css(s)->cgroup;
	struct cgroup_pidlist *l;
	enum cgroup_filetype type = seq_cft(s)->private;
	int index = 0, pid = *pos;
	int *iter, ret;

	mutex_lock(&cgrp->pidlist_mutex);

	/*
	 * !NULL @of->priv indicates that this isn't the first start()
	 * after open.  If the matching pidlist is around, we can use that.
	 * Look for it.  Note that @of->priv can't be used directly.  It
	 * could already have been destroyed.
	 */
	if (of->priv)
		of->priv = cgroup_pidlist_find(cgrp, type);

	/*
	 * Either this is the first start() after open or the matching
	 * pidlist has been destroyed in between.  Create a new one.
	 */
	if (!of->priv) {
		ret = pidlist_array_load(cgrp, type,
					 (struct cgroup_pidlist **)&of->priv);
		if (ret)
			return ERR_PTR(ret);
	}
	l = of->priv;

	if (pid) {
		int end = l->length;

		while (index < end) {
			int mid = (index + end) / 2;
			if (cgroup_pid_fry(cgrp, l->list[mid]) == pid) {
				index = mid;
				break;
			} else if (cgroup_pid_fry(cgrp, l->list[mid]) <= pid)
				index = mid + 1;
			else
				end = mid;
		}
	}
	/* If we're off the end of the array, we're done */
	if (index >= l->length)
		return NULL;
	/* Update the abstract position to be the actual pid that we found */
	iter = l->list + index;
	*pos = cgroup_pid_fry(cgrp, *iter);
	return iter;
}

static void cgroup_pidlist_stop(struct seq_file *s, void *v)
{
	struct kernfs_open_file *of = s->private;
	struct cgroup_pidlist *l = of->priv;

	if (l)
		mod_delayed_work(cgroup_pidlist_destroy_wq, &l->destroy_dwork,
				 CGROUP_PIDLIST_DESTROY_DELAY);
	mutex_unlock(&seq_css(s)->cgroup->pidlist_mutex);
}

static void *cgroup_pidlist_next(struct seq_file *s, void *v, loff_t *pos)
{
	struct kernfs_open_file *of = s->private;
	struct cgroup_pidlist *l = of->priv;
	pid_t *p = v;
	pid_t *end = l->list + l->length;
	/*
	 * Advance to the next pid in the array. If this goes off the
	 * end, we're done
	 */
	p++;
	if (p >= end) {
		return NULL;
	} else {
		*pos = cgroup_pid_fry(seq_css(s)->cgroup, *p);
		return p;
	}
}

static int cgroup_pidlist_show(struct seq_file *s, void *v)
{
	seq_printf(s, "%d\n", *(int *)v);

	return 0;
}

static u64 cgroup_read_notify_on_release(struct cgroup_subsys_state *css,
					 struct cftype *cft)
{
	return notify_on_release(css->cgroup);
}

static int cgroup_write_notify_on_release(struct cgroup_subsys_state *css,
					  struct cftype *cft, u64 val)
{
	if (val)
		set_bit(CGRP_NOTIFY_ON_RELEASE, &css->cgroup->flags);
	else
		clear_bit(CGRP_NOTIFY_ON_RELEASE, &css->cgroup->flags);
	return 0;
}

static u64 cgroup_clone_children_read(struct cgroup_subsys_state *css,
				      struct cftype *cft)
{
	return test_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags);
}

static int cgroup_clone_children_write(struct cgroup_subsys_state *css,
				       struct cftype *cft, u64 val)
{
	if (val)
		set_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags);
	else
		clear_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags);
	return 0;
}

/* cgroup core interface files for the default hierarchy */
static struct cftype cgroup_dfl_base_files[] = {
	{
		.name = "cgroup.procs",
		.seq_start = cgroup_pidlist_start,
		.seq_next = cgroup_pidlist_next,
		.seq_stop = cgroup_pidlist_stop,
		.seq_show = cgroup_pidlist_show,
		.private = CGROUP_FILE_PROCS,
		.write = cgroup_procs_write,
		.mode = S_IRUGO | S_IWUSR,
	},
	{
		.name = "cgroup.controllers",
		.flags = CFTYPE_ONLY_ON_ROOT,
		.seq_show = cgroup_root_controllers_show,
	},
	{
		.name = "cgroup.controllers",
		.flags = CFTYPE_NOT_ON_ROOT,
		.seq_show = cgroup_controllers_show,
	},
	{
		.name = "cgroup.subtree_control",
		.seq_show = cgroup_subtree_control_show,
		.write = cgroup_subtree_control_write,
	},
	{
		.name = "cgroup.populated",
		.flags = CFTYPE_NOT_ON_ROOT,
		.seq_show = cgroup_populated_show,
	},
	{ }	/* terminate */
};

/* cgroup core interface files for the legacy hierarchies */
static struct cftype cgroup_legacy_base_files[] = {
	{
		.name = "cgroup.procs",
		.seq_start = cgroup_pidlist_start,
		.seq_next = cgroup_pidlist_next,
		.seq_stop = cgroup_pidlist_stop,
		.seq_show = cgroup_pidlist_show,
		.private = CGROUP_FILE_PROCS,
		.write = cgroup_procs_write,
		.mode = S_IRUGO | S_IWUSR,
	},
	{
		.name = "cgroup.clone_children",
		.read_u64 = cgroup_clone_children_read,
		.write_u64 = cgroup_clone_children_write,
	},
	{
		.name = "cgroup.sane_behavior",
		.flags = CFTYPE_ONLY_ON_ROOT,
		.seq_show = cgroup_sane_behavior_show,
	},
	{
		.name = "tasks",
		.seq_start = cgroup_pidlist_start,
		.seq_next = cgroup_pidlist_next,
		.seq_stop = cgroup_pidlist_stop,
		.seq_show = cgroup_pidlist_show,
		.private = CGROUP_FILE_TASKS,
		.write = cgroup_tasks_write,
		.mode = S_IRUGO | S_IWUSR,
	},
	{
		.name = "notify_on_release",
		.read_u64 = cgroup_read_notify_on_release,
		.write_u64 = cgroup_write_notify_on_release,
	},
	{
		.name = "release_agent",
		.flags = CFTYPE_ONLY_ON_ROOT,
		.seq_show = cgroup_release_agent_show,
		.write = cgroup_release_agent_write,
		.max_write_len = PATH_MAX - 1,
	},
	{ }	/* terminate */
};

/**
 * cgroup_populate_dir - create subsys files in a cgroup directory
 * @cgrp: target cgroup
 * @subsys_mask: mask of the subsystem ids whose files should be added
 *
 * On failure, no file is added.
 */
static int cgroup_populate_dir(struct cgroup *cgrp, unsigned long subsys_mask)
{
	struct cgroup_subsys *ss;
	int i, ret = 0;

	/* process cftsets of each subsystem */
	for_each_subsys(ss, i) {
		struct cftype *cfts;

		if (!(subsys_mask & (1 << i)))
			continue;

		list_for_each_entry(cfts, &ss->cfts, node) {
			ret = cgroup_addrm_files(cgrp, cfts, true);
			if (ret < 0)
				goto err;
		}
	}
	return 0;
err:
	cgroup_clear_dir(cgrp, subsys_mask);
	return ret;
}

/*
 * css destruction is a four-stage process.
 *
 * 1. Destruction starts.  Killing of the percpu_ref is initiated.
 *    Implemented in kill_css().
 *
 * 2. When the percpu_ref is confirmed to be visible as killed on all CPUs
 *    and thus css_tryget_online() is guaranteed to fail, the css can be
 *    offlined by invoking offline_css().  After offlining, the base ref is
 *    put.  Implemented in css_killed_work_fn().
 *
 * 3. When the percpu_ref reaches zero, the only possible remaining
 *    accessors are inside RCU read sections.  css_release() schedules the
 *    RCU callback.
 *
 * 4. After the grace period, the css can be freed.  Implemented in
 *    css_free_work_fn().
 *
 * It is actually hairier because both steps 2 and 4 require process
 * context and thus involve punting to css->destroy_work adding two
 * additional steps to the already complex sequence.
 */
static void css_free_work_fn(struct work_struct *work)
{
	struct cgroup_subsys_state *css =
		container_of(work, struct cgroup_subsys_state, destroy_work);
	struct cgroup_subsys *ss = css->ss;
	struct cgroup *cgrp = css->cgroup;

	percpu_ref_exit(&css->refcnt);

	if (ss) {
		/* css free path */
		int id = css->id;

		if (css->parent)
			css_put(css->parent);

		ss->css_free(css);
		cgroup_idr_remove(&ss->css_idr, id);
		cgroup_put(cgrp);
	} else {
		/* cgroup free path */
		atomic_dec(&cgrp->root->nr_cgrps);
		cgroup_pidlist_destroy_all(cgrp);
		cancel_work_sync(&cgrp->release_agent_work);

		if (cgroup_parent(cgrp)) {
			/*
			 * We get a ref to the parent, and put the ref when
			 * this cgroup is being freed, so it's guaranteed
			 * that the parent won't be destroyed before its
			 * children.
			 */
			cgroup_put(cgroup_parent(cgrp));
			kernfs_put(cgrp->kn);
			kfree(cgrp);
		} else {
			/*
			 * This is root cgroup's refcnt reaching zero,
			 * which indicates that the root should be
			 * released.
			 */
			cgroup_destroy_root(cgrp->root);
		}
	}
}

static void css_free_rcu_fn(struct rcu_head *rcu_head)
{
	struct cgroup_subsys_state *css =
		container_of(rcu_head, struct cgroup_subsys_state, rcu_head);

	INIT_WORK(&css->destroy_work, css_free_work_fn);
	queue_work(cgroup_destroy_wq, &css->destroy_work);
}

static void css_release_work_fn(struct work_struct *work)
{
	struct cgroup_subsys_state *css =
		container_of(work, struct cgroup_subsys_state, destroy_work);
	struct cgroup_subsys *ss = css->ss;
	struct cgroup *cgrp = css->cgroup;

	mutex_lock(&cgroup_mutex);

	css->flags |= CSS_RELEASED;
	list_del_rcu(&css->sibling);

	if (ss) {
		/* css release path */
		cgroup_idr_replace(&ss->css_idr, NULL, css->id);
		if (ss->css_released)
			ss->css_released(css);
	} else {
		/* cgroup release path */
		cgroup_idr_remove(&cgrp->root->cgroup_idr, cgrp->id);
		cgrp->id = -1;

		/*
		 * There are two control paths which try to determine
		 * cgroup from dentry without going through kernfs -
		 * cgroupstats_build() and css_tryget_online_from_dir().
		 * Those are supported by RCU protecting clearing of
		 * cgrp->kn->priv backpointer.
		 */
		RCU_INIT_POINTER(*(void __rcu __force **)&cgrp->kn->priv, NULL);
	}

	mutex_unlock(&cgroup_mutex);

	call_rcu(&css->rcu_head, css_free_rcu_fn);
}

static void css_release(struct percpu_ref *ref)
{
	struct cgroup_subsys_state *css =
		container_of(ref, struct cgroup_subsys_state, refcnt);

	INIT_WORK(&css->destroy_work, css_release_work_fn);
	queue_work(cgroup_destroy_wq, &css->destroy_work);
}

static void init_and_link_css(struct cgroup_subsys_state *css,
			      struct cgroup_subsys *ss, struct cgroup *cgrp)
{
	lockdep_assert_held(&cgroup_mutex);

	cgroup_get(cgrp);

	memset(css, 0, sizeof(*css));
	css->cgroup = cgrp;
	css->ss = ss;
	INIT_LIST_HEAD(&css->sibling);
	INIT_LIST_HEAD(&css->children);
	css->serial_nr = css_serial_nr_next++;

	if (cgroup_parent(cgrp)) {
		css->parent = cgroup_css(cgroup_parent(cgrp), ss);
		css_get(css->parent);
	}

	BUG_ON(cgroup_css(cgrp, ss));
}

/* invoke ->css_online() on a new CSS and mark it online if successful */
static int online_css(struct cgroup_subsys_state *css)
{
	struct cgroup_subsys *ss = css->ss;
	int ret = 0;

	lockdep_assert_held(&cgroup_mutex);

	if (ss->css_online)
		ret = ss->css_online(css);
	if (!ret) {
		css->flags |= CSS_ONLINE;
		rcu_assign_pointer(css->cgroup->subsys[ss->id], css);
	}
	return ret;
}

/* if the CSS is online, invoke ->css_offline() on it and mark it offline */
static void offline_css(struct cgroup_subsys_state *css)
{
	struct cgroup_subsys *ss = css->ss;

	lockdep_assert_held(&cgroup_mutex);

	if (!(css->flags & CSS_ONLINE))
		return;

	if (ss->css_offline)
		ss->css_offline(css);

	css->flags &= ~CSS_ONLINE;
	RCU_INIT_POINTER(css->cgroup->subsys[ss->id], NULL);

	wake_up_all(&css->cgroup->offline_waitq);
}

/**
 * create_css - create a cgroup_subsys_state
 * @cgrp: the cgroup new css will be associated with
 * @ss: the subsys of new css
 * @visible: whether to create control knobs for the new css or not
 *
 * Create a new css associated with @cgrp - @ss pair.  On success, the new
 * css is online and installed in @cgrp with all interface files created if
 * @visible.  Returns 0 on success, -errno on failure.
 */
static int create_css(struct cgroup *cgrp, struct cgroup_subsys *ss,
		      bool visible)
{
	struct cgroup *parent = cgroup_parent(cgrp);
	struct cgroup_subsys_state *parent_css = cgroup_css(parent, ss);
	struct cgroup_subsys_state *css;
	int err;

	lockdep_assert_held(&cgroup_mutex);

	css = ss->css_alloc(parent_css);
	if (IS_ERR(css))
		return PTR_ERR(css);

	init_and_link_css(css, ss, cgrp);

	err = percpu_ref_init(&css->refcnt, css_release, 0, GFP_KERNEL);
	if (err)
		goto err_free_css;

	err = cgroup_idr_alloc(&ss->css_idr, NULL, 2, 0, GFP_NOWAIT);
	if (err < 0)
		goto err_free_percpu_ref;
	css->id = err;

	if (visible) {
		err = cgroup_populate_dir(cgrp, 1 << ss->id);
		if (err)
			goto err_free_id;
	}

	/* @css is ready to be brought online now, make it visible */
	list_add_tail_rcu(&css->sibling, &parent_css->children);
	cgroup_idr_replace(&ss->css_idr, css, css->id);

	err = online_css(css);
	if (err)
		goto err_list_del;

	if (ss->broken_hierarchy && !ss->warned_broken_hierarchy &&
	    cgroup_parent(parent)) {
		pr_warn("%s (%d) created nested cgroup for controller \"%s\" which has incomplete hierarchy support. Nested cgroups may change behavior in the future.\n",
			current->comm, current->pid, ss->name);
		if (!strcmp(ss->name, "memory"))
			pr_warn("\"memory\" requires setting use_hierarchy to 1 on the root\n");
		ss->warned_broken_hierarchy = true;
	}

	return 0;

err_list_del:
	list_del_rcu(&css->sibling);
	cgroup_clear_dir(css->cgroup, 1 << css->ss->id);
err_free_id:
	cgroup_idr_remove(&ss->css_idr, css->id);
err_free_percpu_ref:
	percpu_ref_exit(&css->refcnt);
err_free_css:
	call_rcu(&css->rcu_head, css_free_rcu_fn);
	return err;
}

static int cgroup_mkdir(struct kernfs_node *parent_kn, const char *name,
			umode_t mode)
{
	struct cgroup *parent, *cgrp;
	struct cgroup_root *root;
	struct cgroup_subsys *ss;
	struct kernfs_node *kn;
	struct cftype *base_files;
	int ssid, ret;

	/* Do not accept '\n' to prevent making /proc/<pid>/cgroup unparsable.
	 */
	if (strchr(name, '\n'))
		return -EINVAL;

	parent = cgroup_kn_lock_live(parent_kn);
	if (!parent)
		return -ENODEV;
	root = parent->root;

	/* allocate the cgroup and its ID, 0 is reserved for the root */
	cgrp = kzalloc(sizeof(*cgrp), GFP_KERNEL);
	if (!cgrp) {
		ret = -ENOMEM;
		goto out_unlock;
	}

	ret = percpu_ref_init(&cgrp->self.refcnt, css_release, 0, GFP_KERNEL);
	if (ret)
		goto out_free_cgrp;

	/*
	 * Temporarily set the pointer to NULL, so idr_find() won't return
	 * a half-baked cgroup.
	 */
	cgrp->id = cgroup_idr_alloc(&root->cgroup_idr, NULL, 2, 0, GFP_NOWAIT);
	if (cgrp->id < 0) {
		ret = -ENOMEM;
		goto out_cancel_ref;
	}

	init_cgroup_housekeeping(cgrp);

	cgrp->self.parent = &parent->self;
	cgrp->root = root;

	if (notify_on_release(parent))
		set_bit(CGRP_NOTIFY_ON_RELEASE, &cgrp->flags);

	if (test_bit(CGRP_CPUSET_CLONE_CHILDREN, &parent->flags))
		set_bit(CGRP_CPUSET_CLONE_CHILDREN, &cgrp->flags);

	/* create the directory */
	kn = kernfs_create_dir(parent->kn, name, mode, cgrp);
	if (IS_ERR(kn)) {
		ret = PTR_ERR(kn);
		goto out_free_id;
	}
	cgrp->kn = kn;

	/*
	 * This extra ref will be put in cgroup_free_fn() and guarantees
	 * that @cgrp->kn is always accessible.
	 */
	kernfs_get(kn);

	cgrp->self.serial_nr = css_serial_nr_next++;

	/* allocation complete, commit to creation */
	list_add_tail_rcu(&cgrp->self.sibling, &cgroup_parent(cgrp)->self.children);
	atomic_inc(&root->nr_cgrps);
	cgroup_get(parent);

	/*
	 * @cgrp is now fully operational.  If something fails after this
	 * point, it'll be released via the normal destruction path.
	 */
	cgroup_idr_replace(&root->cgroup_idr, cgrp, cgrp->id);

	ret = cgroup_kn_set_ugid(kn);
	if (ret)
		goto out_destroy;

	if (cgroup_on_dfl(cgrp))
		base_files = cgroup_dfl_base_files;
	else
		base_files = cgroup_legacy_base_files;

	ret = cgroup_addrm_files(cgrp, base_files, true);
	if (ret)
		goto out_destroy;

	/* let's create and online css's */
	for_each_subsys(ss, ssid) {
		if (parent->child_subsys_mask & (1 << ssid)) {
			ret = create_css(cgrp, ss,
					 parent->subtree_control & (1 << ssid));
			if (ret)
				goto out_destroy;
		}
	}

	/*
	 * On the default hierarchy, a child doesn't automatically inherit
	 * subtree_control from the parent.  Each is configured manually.
	 */
	if (!cgroup_on_dfl(cgrp)) {
		cgrp->subtree_control = parent->subtree_control;
		cgroup_refresh_child_subsys_mask(cgrp);
	}

	kernfs_activate(kn);

	ret = 0;
	goto out_unlock;

out_free_id:
	cgroup_idr_remove(&root->cgroup_idr, cgrp->id);
out_cancel_ref:
	percpu_ref_exit(&cgrp->self.refcnt);
out_free_cgrp:
	kfree(cgrp);
out_unlock:
	cgroup_kn_unlock(parent_kn);
	return ret;

out_destroy:
	cgroup_destroy_locked(cgrp);
	goto out_unlock;
}

/*
 * This is called when the refcnt of a css is confirmed to be killed.
 * css_tryget_online() is now guaranteed to fail.  Tell the subsystem to
 * initiate destruction and put the css ref from kill_css().
 */
static void css_killed_work_fn(struct work_struct *work)
{
	struct cgroup_subsys_state *css =
		container_of(work, struct cgroup_subsys_state, destroy_work);

	mutex_lock(&cgroup_mutex);
	offline_css(css);
	mutex_unlock(&cgroup_mutex);

	css_put(css);
}

/* css kill confirmation processing requires process context, bounce */
static void css_killed_ref_fn(struct percpu_ref *ref)
{
	struct cgroup_subsys_state *css =
		container_of(ref, struct cgroup_subsys_state, refcnt);

	INIT_WORK(&css->destroy_work, css_killed_work_fn);
	queue_work(cgroup_destroy_wq, &css->destroy_work);
}

/**
 * kill_css - destroy a css
 * @css: css to destroy
 *
 * This function initiates destruction of @css by removing cgroup interface
 * files and putting its base reference.  ->css_offline() will be invoked
 * asynchronously once css_tryget_online() is guaranteed to fail and when
 * the reference count reaches zero, @css will be released.
 */
static void kill_css(struct cgroup_subsys_state *css)
{
	lockdep_assert_held(&cgroup_mutex);
	/*
	 * This must happen before css is disassociated from its cgroup.
	 * See seq_css() for details.
	 */
	cgroup_clear_dir(css->cgroup, 1 << css->ss->id);
	/*
	 * Killing would put the base ref, but we need to keep it alive
	 * until after ->css_offline().
	 */
	css_get(css);

	/*
	 * cgroup core guarantees that, by the time ->css_offline() is
	 * invoked, no new css reference will be given out via
	 * css_tryget_online().  We can't simply call percpu_ref_kill() and
	 * proceed to offlining css's because percpu_ref_kill() doesn't
	 * guarantee that the ref is seen as killed on all CPUs on return.
	 *
	 * Use percpu_ref_kill_and_confirm() to get notifications as each
	 * css is confirmed to be seen as killed on all CPUs.
	 */
	percpu_ref_kill_and_confirm(&css->refcnt, css_killed_ref_fn);
}

/**
 * cgroup_destroy_locked - the first stage of cgroup destruction
 * @cgrp: cgroup to be destroyed
 *
 * css's make use of percpu refcnts whose killing latency shouldn't be
 * exposed to userland and are RCU protected.  Also, cgroup core needs to
 * guarantee that css_tryget_online() won't succeed by the time
 * ->css_offline() is invoked.  To satisfy all the requirements,
 * destruction is implemented in the following two steps.
 *
 * s1. Verify @cgrp can be destroyed and mark it dying.  Remove all
 *     userland visible parts and start killing the percpu refcnts of
 *     css's.  Set up so that the next stage will be kicked off once all
 *     the percpu refcnts are confirmed to be killed.
 *
 * s2. Invoke ->css_offline(), mark the cgroup dead and proceed with the
 *     rest of destruction.  Once all cgroup references are gone, the
 *     cgroup is RCU-freed.
 *
 * This function implements s1.  After this step, @cgrp is gone as far as
 * the userland is concerned and a new cgroup with the same name may be
 * created.  As cgroup doesn't care about the names internally, this
 * doesn't cause any problem.
 */
static int cgroup_destroy_locked(struct cgroup *cgrp)
	__releases(&cgroup_mutex) __acquires(&cgroup_mutex)
{
	struct cgroup_subsys_state *css;
	bool empty;
	int ssid;

	lockdep_assert_held(&cgroup_mutex);

	/*
	 * css_set_rwsem synchronizes access to ->cset_links and prevents
	 * @cgrp from being removed while put_css_set() is in progress.
	 */
	down_read(&css_set_rwsem);
	empty = list_empty(&cgrp->cset_links);
	up_read(&css_set_rwsem);
	if (!empty)
		return -EBUSY;

	/*
	 * Make sure there are no live children.  We can't test emptiness
	 * of ->self.children as dead children linger on it while being
	 * drained; otherwise, "rmdir parent/child parent" may fail.
	 */
	if (css_has_online_children(&cgrp->self))
		return -EBUSY;

	/*
	 * Mark @cgrp dead.  This prevents further task migration and child
	 * creation by disabling cgroup_lock_live_group().
	 */
	cgrp->self.flags &= ~CSS_ONLINE;

	/* initiate massacre of all css's */
	for_each_css(css, ssid, cgrp)
		kill_css(css);

	/*
	 * Remove @cgrp directory along with the base files.  @cgrp has an
	 * extra ref on its kn.
	 */
	kernfs_remove(cgrp->kn);

	check_for_release(cgroup_parent(cgrp));

	/* put the base reference */
	percpu_ref_kill(&cgrp->self.refcnt);

	return 0;
};

static int cgroup_rmdir(struct kernfs_node *kn)
{
	struct cgroup *cgrp;
	int ret = 0;

	cgrp = cgroup_kn_lock_live(kn);
	if (!cgrp)
		return 0;

	ret = cgroup_destroy_locked(cgrp);

	cgroup_kn_unlock(kn);
	return ret;
}

static struct kernfs_syscall_ops cgroup_kf_syscall_ops = {
	.remount_fs		= cgroup_remount,
	.show_options		= cgroup_show_options,
	.mkdir			= cgroup_mkdir,
	.rmdir			= cgroup_rmdir,
	.rename			= cgroup_rename,
};

static void __init cgroup_init_subsys(struct cgroup_subsys *ss, bool early)
{
	struct cgroup_subsys_state *css;

	printk(KERN_INFO "Initializing cgroup subsys %s\n", ss->name);

	mutex_lock(&cgroup_mutex);

	idr_init(&ss->css_idr);
	INIT_LIST_HEAD(&ss->cfts);

	/* Create the root cgroup state for this subsystem */
	ss->root = &cgrp_dfl_root;
	css = ss->css_alloc(cgroup_css(&cgrp_dfl_root.cgrp, ss));
	/* We don't handle early failures gracefully */
	BUG_ON(IS_ERR(css));
	init_and_link_css(css, ss, &cgrp_dfl_root.cgrp);

	/*
	 * Root csses are never destroyed and we can't initialize
	 * percpu_ref during early init.  Disable refcnting.
	 */
	css->flags |= CSS_NO_REF;

	if (early) {
		/* allocation can't be done safely during early init */
		css->id = 1;
	} else {
		css->id = cgroup_idr_alloc(&ss->css_idr, css, 1, 2, GFP_KERNEL);
		BUG_ON(css->id < 0);
	}

	/* Update the init_css_set to contain a subsys
	 * pointer to this state - since the subsystem is
	 * newly registered, all tasks and hence the
	 * init_css_set is in the subsystem's root cgroup. */
	init_css_set.subsys[ss->id] = css;

	have_fork_callback |= (bool)ss->fork << ss->id;
	have_exit_callback |= (bool)ss->exit << ss->id;

	/* At system boot, before all subsystems have been
	 * registered, no tasks have been forked, so we don't
	 * need to invoke fork callbacks here. */
	BUG_ON(!list_empty(&init_task.tasks));

	BUG_ON(online_css(css));

	mutex_unlock(&cgroup_mutex);
}

/**
 * cgroup_init_early - cgroup initialization at system boot
 *
 * Initialize cgroups at system boot, and initialize any
 * subsystems that request early init.
 */
int __init cgroup_init_early(void)
{
	static struct cgroup_sb_opts __initdata opts;
	struct cgroup_subsys *ss;
	int i;

	init_cgroup_root(&cgrp_dfl_root, &opts);
	cgrp_dfl_root.cgrp.self.flags |= CSS_NO_REF;

	RCU_INIT_POINTER(init_task.cgroups, &init_css_set);

	for_each_subsys(ss, i) {
		WARN(!ss->css_alloc || !ss->css_free || ss->name || ss->id,
		     "invalid cgroup_subsys %d:%s css_alloc=%p css_free=%p name:id=%d:%s\n",
		     i, cgroup_subsys_name[i], ss->css_alloc, ss->css_free,
		     ss->id, ss->name);
		WARN(strlen(cgroup_subsys_name[i]) > MAX_CGROUP_TYPE_NAMELEN,
		     "cgroup_subsys_name %s too long\n", cgroup_subsys_name[i]);

		ss->id = i;
		ss->name = cgroup_subsys_name[i];

		if (ss->early_init)
			cgroup_init_subsys(ss, true);
	}
	return 0;
}

/**
 * cgroup_init - cgroup initialization
 *
 * Register cgroup filesystem and /proc file, and initialize
 * any subsystems that didn't request early init.
 */
int __init cgroup_init(void)
{
	struct cgroup_subsys *ss;
	unsigned long key;
	int ssid, err;

	BUG_ON(percpu_init_rwsem(&cgroup_threadgroup_rwsem));
	BUG_ON(cgroup_init_cftypes(NULL, cgroup_dfl_base_files));
	BUG_ON(cgroup_init_cftypes(NULL, cgroup_legacy_base_files));

	mutex_lock(&cgroup_mutex);

	/* Add init_css_set to the hash table */
	key = css_set_hash(init_css_set.subsys);
	hash_add(css_set_table, &init_css_set.hlist, key);

	BUG_ON(cgroup_setup_root(&cgrp_dfl_root, 0));

	mutex_unlock(&cgroup_mutex);

	for_each_subsys(ss, ssid) {
		if (ss->early_init) {
			struct cgroup_subsys_state *css =
				init_css_set.subsys[ss->id];

			css->id = cgroup_idr_alloc(&ss->css_idr, css, 1, 2,
						   GFP_KERNEL);
			BUG_ON(css->id < 0);
		} else {
			cgroup_init_subsys(ss, false);
		}

		list_add_tail(&init_css_set.e_cset_node[ssid],
			      &cgrp_dfl_root.cgrp.e_csets[ssid]);

		/*
		 * Setting dfl_root subsys_mask needs to consider the
		 * disabled flag and cftype registration needs kmalloc,
		 * both of which aren't available during early_init.
		 */
		if (ss->disabled)
			continue;

		cgrp_dfl_root.subsys_mask |= 1 << ss->id;

		if (cgroup_legacy_files_on_dfl && !ss->dfl_cftypes)
			ss->dfl_cftypes = ss->legacy_cftypes;

		if (!ss->dfl_cftypes)
			cgrp_dfl_root_inhibit_ss_mask |= 1 << ss->id;

		if (ss->dfl_cftypes == ss->legacy_cftypes) {
			WARN_ON(cgroup_add_cftypes(ss, ss->dfl_cftypes));
		} else {
			WARN_ON(cgroup_add_dfl_cftypes(ss, ss->dfl_cftypes));
			WARN_ON(cgroup_add_legacy_cftypes(ss, ss->legacy_cftypes));
		}

		if (ss->bind)
			ss->bind(init_css_set.subsys[ssid]);
	}

	err = sysfs_create_mount_point(fs_kobj, "cgroup");
	if (err)
		return err;

	err = register_filesystem(&cgroup_fs_type);
	if (err < 0) {
		sysfs_remove_mount_point(fs_kobj, "cgroup");
		return err;
	}

	proc_create("cgroups", 0, NULL, &proc_cgroupstats_operations);
	return 0;
}

static int __init cgroup_wq_init(void)
{
	/*
	 * There isn't much point in executing destruction path in
	 * parallel.  Good chunk is serialized with cgroup_mutex anyway.
	 * Use 1 for @max_active.
	 *
	 * We would prefer to do this in cgroup_init() above, but that
	 * is called before init_workqueues(): so leave this until after.
	 */
	cgroup_destroy_wq = alloc_workqueue("cgroup_destroy", 0, 1);
	BUG_ON(!cgroup_destroy_wq);

	/*
	 * Used to destroy pidlists and separate to serve as flush domain.
	 * Cap @max_active to 1 too.
	 */
	cgroup_pidlist_destroy_wq = alloc_workqueue("cgroup_pidlist_destroy",
						    0, 1);
	BUG_ON(!cgroup_pidlist_destroy_wq);

5107 5108 5109 5110
	return 0;
}
core_initcall(cgroup_wq_init);

/*
 * proc_cgroup_show()
 *  - Print task's cgroup paths into seq_file, one line for each hierarchy
 *  - Used for /proc/<pid>/cgroup.
 */
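/*
 * Example output, one line per hierarchy (hierarchy ids and paths are
 * illustrative, not from a real system):
 *
 *   3:cpu,cpuacct:/user/app
 *   2:memory:/user
 *   1:name=systemd:/user.slice
 */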
int proc_cgroup_show(struct seq_file *m, struct pid_namespace *ns,
		     struct pid *pid, struct task_struct *tsk)
{
	char *buf, *path;
	int retval;
	struct cgroup_root *root;

	retval = -ENOMEM;
	buf = kmalloc(PATH_MAX, GFP_KERNEL);
	if (!buf)
		goto out;

	mutex_lock(&cgroup_mutex);
	down_read(&css_set_rwsem);

	for_each_root(root) {
		struct cgroup_subsys *ss;
		struct cgroup *cgrp;
		int ssid, count = 0;

		if (root == &cgrp_dfl_root && !cgrp_dfl_root_visible)
			continue;

		seq_printf(m, "%d:", root->hierarchy_id);
		for_each_subsys(ss, ssid)
			if (root->subsys_mask & (1 << ssid))
				seq_printf(m, "%s%s", count++ ? "," : "", ss->name);
		if (strlen(root->name))
			seq_printf(m, "%sname=%s", count ? "," : "",
				   root->name);
		seq_putc(m, ':');
		cgrp = task_cgroup_from_root(tsk, root);
		path = cgroup_path(cgrp, buf, PATH_MAX);
		if (!path) {
			retval = -ENAMETOOLONG;
			goto out_unlock;
		}
		seq_puts(m, path);
		seq_putc(m, '\n');
	}

	retval = 0;
out_unlock:
	up_read(&css_set_rwsem);
	mutex_unlock(&cgroup_mutex);
	kfree(buf);
out:
	return retval;
}

/* Display information about each subsystem and each hierarchy */
static int proc_cgroupstats_show(struct seq_file *m, void *v)
{
	struct cgroup_subsys *ss;
	int i;

	seq_puts(m, "#subsys_name\thierarchy\tnum_cgroups\tenabled\n");
	/*
	 * ideally we don't want subsystems moving around while we do this.
	 * cgroup_mutex is also necessary to guarantee an atomic snapshot of
	 * subsys/hierarchy state.
	 */
	mutex_lock(&cgroup_mutex);

	for_each_subsys(ss, i)
		seq_printf(m, "%s\t%d\t%d\t%d\n",
			   ss->name, ss->root->hierarchy_id,
			   atomic_read(&ss->root->nr_cgrps), !ss->disabled);

	mutex_unlock(&cgroup_mutex);
	return 0;
}
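
/*
 * Sample /proc/cgroups output matching the format strings above (values
 * are illustrative):
 *
 *   #subsys_name	hierarchy	num_cgroups	enabled
 *   cpuset	2	1	1
 *   cpu	3	4	1
 *   memory	0	1	0
 */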

static int cgroupstats_open(struct inode *inode, struct file *file)
{
	return single_open(file, proc_cgroupstats_show, NULL);
}

static const struct file_operations proc_cgroupstats_operations = {
	.open = cgroupstats_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

/**
 * cgroup_fork - initialize cgroup related fields during copy_process()
 * @child: pointer to task_struct of the child process
 *
 * A task is associated with the init_css_set until cgroup_post_fork()
 * attaches it to the parent's css_set.  Empty cg_list indicates that
 * @child isn't holding reference to its css_set.
 */
void cgroup_fork(struct task_struct *child)
{
	RCU_INIT_POINTER(child->cgroups, &init_css_set);
	INIT_LIST_HEAD(&child->cg_list);
}

/**
 * cgroup_post_fork - called on a new task after adding it to the task list
 * @child: the task in question
 *
 * Adds the task to the list running through its css_set if necessary and
 * calls the subsystem fork() callbacks.  Has to be after the task is
 * visible on the task list in case we race with the first call to
 * css_task_iter_start() - to guarantee that the new task ends up on its
 * list.
 */
void cgroup_post_fork(struct task_struct *child)
{
	struct cgroup_subsys *ss;
	int i;

	/*
	 * This may race against cgroup_enable_task_cg_lists().  As that
	 * function sets use_task_css_set_links before grabbing
	 * tasklist_lock and we just went through tasklist_lock to add
	 * @child, it's guaranteed that either we see the set
	 * use_task_css_set_links or cgroup_enable_task_cg_lists() sees
	 * @child during its iteration.
	 *
	 * If we won the race, @child is associated with %current's
	 * css_set.  Grabbing css_set_rwsem guarantees both that the
	 * association is stable, and, on completion of the parent's
	 * migration, @child is visible in the source of migration or
	 * already in the destination cgroup.  This guarantee is necessary
	 * when implementing operations which need to migrate all tasks of
	 * a cgroup to another.
	 *
	 * Note that if we lose to cgroup_enable_task_cg_lists(), @child
	 * will remain in init_css_set.  This is safe because all tasks are
	 * in the init_css_set before cg_links is enabled and there's no
	 * operation which transfers all tasks out of init_css_set.
	 */
	if (use_task_css_set_links) {
		struct css_set *cset;

		down_write(&css_set_rwsem);
		cset = task_css_set(current);
		if (list_empty(&child->cg_list)) {
			rcu_assign_pointer(child->cgroups, cset);
			list_add(&child->cg_list, &cset->tasks);
			get_css_set(cset);
		}
		up_write(&css_set_rwsem);
	}

	/*
	 * Call ss->fork().  This must happen after @child is linked on
	 * css_set; otherwise, @child might change state between ->fork()
	 * and addition to css_set.
	 */
	for_each_subsys_which(ss, i, &have_fork_callback)
		ss->fork(child);
}

/**
 * cgroup_exit - detach cgroup from exiting task
 * @tsk: pointer to task_struct of exiting process
 *
 * Description: Detach cgroup from @tsk and release it.
 *
 * Note that cgroups marked notify_on_release force every task in
 * them to take the global cgroup_mutex mutex when exiting.
 * This could impact scaling on very large systems.  Be reluctant to
 * use notify_on_release cgroups where very high task exit scaling
 * is required on large systems.
 *
 * We set the exiting task's cgroup to the root cgroup (top_cgroup).  We
 * call cgroup_exit() while the task is still competent to handle
 * notify_on_release(), then leave the task attached to the root cgroup in
 * each hierarchy for the remainder of its exit.  No need to bother with
 * init_css_set refcnting.  init_css_set never goes away and we can't race
 * with migration path - PF_EXITING is visible to migration path.
 */
void cgroup_exit(struct task_struct *tsk)
{
	struct cgroup_subsys *ss;
	struct css_set *cset;
	bool put_cset = false;
	int i;

	/*
	 * Unlink @tsk from its css_set.  As migration path can't race
	 * with us, we can check cg_list without grabbing css_set_rwsem.
	 */
	if (!list_empty(&tsk->cg_list)) {
		down_write(&css_set_rwsem);
		list_del_init(&tsk->cg_list);
		up_write(&css_set_rwsem);
		put_cset = true;
	}

	/* Reassign the task to the init_css_set. */
	cset = task_css_set(tsk);
	RCU_INIT_POINTER(tsk->cgroups, &init_css_set);

	/* see cgroup_post_fork() for details */
	for_each_subsys_which(ss, i, &have_exit_callback) {
		struct cgroup_subsys_state *old_css = cset->subsys[i];
		struct cgroup_subsys_state *css = task_css(tsk, i);

		ss->exit(css, old_css, tsk);
	}

	if (put_cset)
		put_css_set(cset);
}

static void check_for_release(struct cgroup *cgrp)
{
	if (notify_on_release(cgrp) && !cgroup_has_tasks(cgrp) &&
	    !css_has_online_children(&cgrp->self) && !cgroup_is_dead(cgrp))
		schedule_work(&cgrp->release_agent_work);
}

/*
 * Notify userspace when a cgroup is released, by running the
 * configured release agent with the name of the cgroup (path
 * relative to the root of cgroup file system) as the argument.
 *
 * Most likely, this user command will try to rmdir this cgroup.
 *
 * This races with the possibility that some other task will be
 * attached to this cgroup before it is removed, or that some other
 * user task will 'mkdir' a child cgroup of this cgroup.  That's ok.
 * The presumed 'rmdir' will fail quietly if this cgroup is no longer
 * unused, and this cgroup will be reprieved from its death sentence,
 * to continue to serve a useful existence.  Next time it's released,
 * we will get notified again, if it still has 'notify_on_release' set.
 *
 * The final arg to call_usermodehelper() is UMH_WAIT_EXEC, which
 * means only wait until the task is successfully execve()'d.  The
 * separate release agent task is forked by call_usermodehelper(),
 * then control in this thread returns here, without waiting for the
 * release agent task.  We don't bother to wait because the caller of
 * this routine has no use for the exit status of the release agent
 * task, so no sense holding our caller up for that.
 */
static void cgroup_release_agent(struct work_struct *work)
{
	struct cgroup *cgrp =
		container_of(work, struct cgroup, release_agent_work);
	char *pathbuf = NULL, *agentbuf = NULL, *path;
	char *argv[3], *envp[3];

	mutex_lock(&cgroup_mutex);

	pathbuf = kmalloc(PATH_MAX, GFP_KERNEL);
	agentbuf = kstrdup(cgrp->root->release_agent_path, GFP_KERNEL);
	if (!pathbuf || !agentbuf)
		goto out;

	path = cgroup_path(cgrp, pathbuf, PATH_MAX);
	if (!path)
		goto out;

	argv[0] = agentbuf;
	argv[1] = path;
	argv[2] = NULL;

	/* minimal command environment */
	envp[0] = "HOME=/";
	envp[1] = "PATH=/sbin:/bin:/usr/sbin:/usr/bin";
	envp[2] = NULL;

	mutex_unlock(&cgroup_mutex);
	call_usermodehelper(argv[0], argv, envp, UMH_WAIT_EXEC);
	goto out_free;
out:
	mutex_unlock(&cgroup_mutex);
out_free:
	kfree(agentbuf);
	kfree(pathbuf);
}
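
/*
 * As a sketch: with release_agent set to the hypothetical path
 * /sbin/cgroup_release and an emptied cgroup /foo/bar, the above is
 * roughly equivalent to running
 *
 *   HOME=/ PATH=/sbin:/bin:/usr/sbin:/usr/bin /sbin/cgroup_release /foo/bar
 *
 * without waiting for the agent to exit (UMH_WAIT_EXEC).
 */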

static int __init cgroup_disable(char *str)
{
	struct cgroup_subsys *ss;
	char *token;
	int i;

	while ((token = strsep(&str, ",")) != NULL) {
		if (!*token)
			continue;

		for_each_subsys(ss, i) {
			if (!strcmp(token, ss->name)) {
				ss->disabled = 1;
				printk(KERN_INFO "Disabling %s control group"
					" subsystem\n", ss->name);
				break;
			}
		}
	}
	return 1;
}
__setup("cgroup_disable=", cgroup_disable);
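
/*
 * For example, booting with "cgroup_disable=memory,cpuset" marks both
 * controllers disabled so they are skipped when hierarchies are set up.
 */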

static int __init cgroup_set_legacy_files_on_dfl(char *str)
{
	printk("cgroup: using legacy files on the default hierarchy\n");
	cgroup_legacy_files_on_dfl = true;
	return 0;
}
__setup("cgroup__DEVEL__legacy_files_on_dfl", cgroup_set_legacy_files_on_dfl);
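
/*
 * i.e. booting with "cgroup__DEVEL__legacy_files_on_dfl" makes controllers
 * which provide no dfl_cftypes fall back to their legacy files on the
 * default hierarchy (see cgroup_init() above).
 */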

/**
 * css_tryget_online_from_dir - get corresponding css from a cgroup dentry
 * @dentry: directory dentry of interest
 * @ss: subsystem of interest
 *
 * If @dentry is a directory for a cgroup which has @ss enabled on it, try
 * to get the corresponding css and return it.  If such css doesn't exist
 * or can't be pinned, an ERR_PTR value is returned.
 */
struct cgroup_subsys_state *css_tryget_online_from_dir(struct dentry *dentry,
						       struct cgroup_subsys *ss)
{
	struct kernfs_node *kn = kernfs_node_from_dentry(dentry);
	struct cgroup_subsys_state *css = NULL;
	struct cgroup *cgrp;

	/* is @dentry a cgroup dir? */
	if (dentry->d_sb->s_type != &cgroup_fs_type || !kn ||
	    kernfs_type(kn) != KERNFS_DIR)
		return ERR_PTR(-EBADF);

	rcu_read_lock();

	/*
	 * This path doesn't originate from kernfs and @kn could already
	 * have been or be removed at any point.  @kn->priv is RCU
	 * protected for this access.  See css_release_work_fn() for details.
	 */
	cgrp = rcu_dereference(kn->priv);
	if (cgrp)
		css = cgroup_css(cgrp, ss);

	if (!css || !css_tryget_online(css))
		css = ERR_PTR(-ENOENT);

	rcu_read_unlock();
	return css;
}
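
/*
 * Sketch of typical use, as in the perf_event code which resolves a
 * user-supplied cgroup directory fd (error handling elided):
 *
 *	css = css_tryget_online_from_dir(f.file->f_path.dentry,
 *					 &perf_event_cgrp_subsys);
 *	if (IS_ERR(css))
 *		return PTR_ERR(css);
 *	...
 *	css_put(css);
 */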

/**
 * css_from_id - lookup css by id
 * @id: the cgroup id
 * @ss: cgroup subsys to be looked into
 *
 * Returns the css if there's a valid one with @id, otherwise returns NULL.
 * Should be called under rcu_read_lock().
 */
struct cgroup_subsys_state *css_from_id(int id, struct cgroup_subsys *ss)
{
	WARN_ON_ONCE(!rcu_read_lock_held());
	return id > 0 ? idr_find(&ss->css_idr, id) : NULL;
}

#ifdef CONFIG_CGROUP_DEBUG
static struct cgroup_subsys_state *
debug_css_alloc(struct cgroup_subsys_state *parent_css)
{
	struct cgroup_subsys_state *css = kzalloc(sizeof(*css), GFP_KERNEL);

	if (!css)
		return ERR_PTR(-ENOMEM);

	return css;
}

static void debug_css_free(struct cgroup_subsys_state *css)
{
	kfree(css);
}

static u64 debug_taskcount_read(struct cgroup_subsys_state *css,
				struct cftype *cft)
{
	return cgroup_task_count(css->cgroup);
}

static u64 current_css_set_read(struct cgroup_subsys_state *css,
				struct cftype *cft)
{
	return (u64)(unsigned long)current->cgroups;
}

static u64 current_css_set_refcount_read(struct cgroup_subsys_state *css,
					 struct cftype *cft)
{
	u64 count;

	rcu_read_lock();
	count = atomic_read(&task_css_set(current)->refcount);
	rcu_read_unlock();
	return count;
}

static int current_css_set_cg_links_read(struct seq_file *seq, void *v)
{
	struct cgrp_cset_link *link;
	struct css_set *cset;
	char *name_buf;

	name_buf = kmalloc(NAME_MAX + 1, GFP_KERNEL);
	if (!name_buf)
		return -ENOMEM;

	down_read(&css_set_rwsem);
	rcu_read_lock();
	cset = rcu_dereference(current->cgroups);
	list_for_each_entry(link, &cset->cgrp_links, cgrp_link) {
		struct cgroup *c = link->cgrp;

		cgroup_name(c, name_buf, NAME_MAX + 1);
		seq_printf(seq, "Root %d group %s\n",
			   c->root->hierarchy_id, name_buf);
	}
	rcu_read_unlock();
	up_read(&css_set_rwsem);
	kfree(name_buf);
	return 0;
}

#define MAX_TASKS_SHOWN_PER_CSS 25
static int cgroup_css_links_read(struct seq_file *seq, void *v)
{
	struct cgroup_subsys_state *css = seq_css(seq);
	struct cgrp_cset_link *link;

	down_read(&css_set_rwsem);
	list_for_each_entry(link, &css->cgroup->cset_links, cset_link) {
		struct css_set *cset = link->cset;
		struct task_struct *task;
		int count = 0;

		seq_printf(seq, "css_set %p\n", cset);

		list_for_each_entry(task, &cset->tasks, cg_list) {
			if (count++ > MAX_TASKS_SHOWN_PER_CSS)
				goto overflow;
			seq_printf(seq, "  task %d\n", task_pid_vnr(task));
		}

		list_for_each_entry(task, &cset->mg_tasks, cg_list) {
			if (count++ > MAX_TASKS_SHOWN_PER_CSS)
				goto overflow;
			seq_printf(seq, "  task %d\n", task_pid_vnr(task));
		}
		continue;
	overflow:
		seq_puts(seq, "  ...\n");
	}
	up_read(&css_set_rwsem);
	return 0;
}

static u64 releasable_read(struct cgroup_subsys_state *css, struct cftype *cft)
{
	return (!cgroup_has_tasks(css->cgroup) &&
		!css_has_online_children(&css->cgroup->self));
}

static struct cftype debug_files[] =  {
	{
		.name = "taskcount",
		.read_u64 = debug_taskcount_read,
	},

	{
		.name = "current_css_set",
		.read_u64 = current_css_set_read,
	},

	{
		.name = "current_css_set_refcount",
		.read_u64 = current_css_set_refcount_read,
	},

	{
		.name = "current_css_set_cg_links",
		.seq_show = current_css_set_cg_links_read,
	},

	{
		.name = "cgroup_css_links",
		.seq_show = cgroup_css_links_read,
	},

	{
		.name = "releasable",
		.read_u64 = releasable_read,
	},

	{ }	/* terminate */
};
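
/*
 * Illustrative use of the debug controller on a legacy hierarchy (the
 * mount point is arbitrary; legacy controller files carry a "debug."
 * prefix):
 *
 *   # mount -t cgroup -o debug none /sys/fs/cgroup/debug
 *   # cat /sys/fs/cgroup/debug/debug.taskcount
 */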

struct cgroup_subsys debug_cgrp_subsys = {
	.css_alloc = debug_css_alloc,
	.css_free = debug_css_free,
	.legacy_cftypes = debug_files,
};
#endif /* CONFIG_CGROUP_DEBUG */