/*
 *  Generic process-grouping system.
 *
 *  Based originally on the cpuset system, extracted by Paul Menage
 *  Copyright (C) 2006 Google, Inc
 *
 *  Notifications support
 *  Copyright (C) 2009 Nokia Corporation
 *  Author: Kirill A. Shutemov
 *
 *  Copyright notices from the original cpuset code:
 *  --------------------------------------------------
 *  Copyright (C) 2003 BULL SA.
 *  Copyright (C) 2004-2006 Silicon Graphics, Inc.
 *
 *  Portions derived from Patrick Mochel's sysfs code.
 *  sysfs is Copyright (c) 2001-3 Patrick Mochel
 *
 *  2003-10-10 Written by Simon Derr.
 *  2003-10-22 Updates by Stephen Hemminger.
 *  2004 May-July Rework by Paul Jackson.
 *  ---------------------------------------------------
 *
 *  This file is subject to the terms and conditions of the GNU General Public
 *  License.  See the file COPYING in the main directory of the Linux
 *  distribution for more details.
 */

#include <linux/cgroup.h>
#include <linux/cred.h>
#include <linux/ctype.h>
#include <linux/errno.h>
#include <linux/init_task.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/mount.h>
#include <linux/pagemap.h>
#include <linux/proc_fs.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
#include <linux/backing-dev.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/magic.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/sort.h>
#include <linux/kmod.h>
#include <linux/module.h>
#include <linux/delayacct.h>
#include <linux/cgroupstats.h>
#include <linux/hashtable.h>
#include <linux/namei.h>
#include <linux/pid_namespace.h>
#include <linux/idr.h>
#include <linux/vmalloc.h> /* TODO: replace with more sophisticated array */
#include <linux/eventfd.h>
#include <linux/poll.h>
#include <linux/flex_array.h> /* used in cgroup_attach_task */
#include <linux/kthread.h>

#include <linux/atomic.h>

/*
 * cgroup_mutex is the master lock.  Any modification to cgroup or its
 * hierarchy must be performed while holding it.
 *
 * cgroup_root_mutex nests inside cgroup_mutex and should be held to modify
 * cgroupfs_root of any cgroup hierarchy - subsys list, flags,
 * release_agent_path and so on.  Modifying requires both cgroup_mutex and
 * cgroup_root_mutex.  Readers can acquire either of the two.  This is to
 * break the following locking order cycle.
 *
 *  A. cgroup_mutex -> cred_guard_mutex -> s_type->i_mutex_key -> namespace_sem
 *  B. namespace_sem -> cgroup_mutex
 *
 * B happens only through cgroup_show_options() and using cgroup_root_mutex
 * breaks it.
 */
#ifdef CONFIG_PROVE_RCU
DEFINE_MUTEX(cgroup_mutex);
EXPORT_SYMBOL_GPL(cgroup_mutex);	/* only for lockdep */
#else
static DEFINE_MUTEX(cgroup_mutex);
#endif

static DEFINE_MUTEX(cgroup_root_mutex);

/*
 * Generate an array of cgroup subsystem pointers. At boot time, this is
 * populated with the built in subsystems, and modular subsystems are
 * registered after that. The mutable section of this array is protected by
 * cgroup_mutex.
 */
#define SUBSYS(_x) [_x ## _subsys_id] = &_x ## _subsys,
#define IS_SUBSYS_ENABLED(option) IS_BUILTIN(option)
static struct cgroup_subsys *cgroup_subsys[CGROUP_SUBSYS_COUNT] = {
#include <linux/cgroup_subsys.h>
};

/*
 * The dummy hierarchy, reserved for the subsystems that are otherwise
 * unattached - it never has more than a single cgroup, and all tasks are
 * part of that cgroup.
 */
static struct cgroupfs_root cgroup_dummy_root;

/* dummy_top is a shorthand for the dummy hierarchy's top cgroup */
static struct cgroup * const cgroup_dummy_top = &cgroup_dummy_root.top_cgroup;

/*
 * cgroupfs file entry, pointed to from leaf dentry->d_fsdata.
 */
struct cfent {
	struct list_head		node;
	struct dentry			*dentry;
	struct cftype			*type;
	struct cgroup_subsys_state	*css;

	/* file xattrs */
	struct simple_xattrs		xattrs;
};

/*
 * CSS ID -- ID per subsys's Cgroup Subsys State(CSS). used only when
 * cgroup_subsys->use_id != 0.
 */
#define CSS_ID_MAX	(65535)
struct css_id {
	/*
	 * The css to which this ID points. This pointer is set to valid value
	 * after cgroup is populated. If cgroup is removed, this will be NULL.
	 * This pointer is expected to be RCU-safe because destroy()
	 * is called after synchronize_rcu(). But for safe use, css_tryget()
	 * should be used for avoiding race.
	 */
	struct cgroup_subsys_state __rcu *css;
	/*
	 * ID of this css.
	 */
	unsigned short id;
	/*
	 * Depth in hierarchy which this ID belongs to.
	 */
	unsigned short depth;
	/*
	 * ID is freed by RCU. (and lookup routine is RCU safe.)
	 */
	struct rcu_head rcu_head;
	/*
	 * Hierarchy of CSS ID belongs to.
	 */
	unsigned short stack[0]; /* Array of Length (depth+1) */
};

/*
 * cgroup_event represents events which userspace wants to receive.
 */
struct cgroup_event {
	/*
	 * css which the event belongs to.
	 */
	struct cgroup_subsys_state *css;
	/*
	 * Control file which the event is associated with.
	 */
	struct cftype *cft;
	/*
	 * eventfd to signal userspace about the event.
	 */
	struct eventfd_ctx *eventfd;
	/*
	 * Each of these is stored in a list by the cgroup.
	 */
	struct list_head list;
	/*
	 * All fields below are needed to unregister the event when
	 * userspace closes the eventfd.
	 */
	poll_table pt;
	wait_queue_head_t *wqh;
	wait_queue_t wait;
	struct work_struct remove;
};

/* The list of hierarchy roots */

static LIST_HEAD(cgroup_roots);
static int cgroup_root_count;

/*
 * Hierarchy ID allocation and mapping.  It follows the same exclusion
 * rules as other root ops - both cgroup_mutex and cgroup_root_mutex for
 * writes, either for reads.
 */
static DEFINE_IDR(cgroup_hierarchy_idr);

static struct cgroup_name root_cgroup_name = { .name = "/" };

/*
 * Assign a monotonically increasing serial number to cgroups.  It
 * guarantees cgroups with bigger numbers are newer than those with smaller
 * numbers.  Also, as cgroups are always appended to the parent's
 * ->children list, it guarantees that sibling cgroups are always sorted in
 * the ascending serial number order on the list.  Protected by
 * cgroup_mutex.
 */
static u64 cgroup_serial_nr_next = 1;

/* This flag indicates whether tasks in the fork and exit paths should
 * check for fork/exit handlers to call. This avoids us having to do
 * extra work in the fork/exit path if none of the subsystems need to
 * be called.
 */
static int need_forkexit_callback __read_mostly;

static struct cftype cgroup_base_files[];

static void cgroup_destroy_css_killed(struct cgroup *cgrp);
static int cgroup_destroy_locked(struct cgroup *cgrp);
static int cgroup_addrm_files(struct cgroup *cgrp, struct cftype cfts[],
			      bool is_add);

/**
 * cgroup_css - obtain a cgroup's css for the specified subsystem
 * @cgrp: the cgroup of interest
 * @subsys_id: the subsystem of interest
 *
 * Return @cgrp's css (cgroup_subsys_state) associated with @subsys_id.
 * This function must be called either under cgroup_mutex or
 * rcu_read_lock() and the caller is responsible for pinning the returned
 * css if it wants to keep accessing it outside the said locks.  This
 * function may return %NULL if @cgrp doesn't have @subsys_id enabled.
 */
static struct cgroup_subsys_state *cgroup_css(struct cgroup *cgrp,
					      int subsys_id)
{
	return rcu_dereference_check(cgrp->subsys[subsys_id],
				     lockdep_is_held(&cgroup_mutex));
}

/* convenient tests for these bits */
static inline bool cgroup_is_dead(const struct cgroup *cgrp)
{
	return test_bit(CGRP_DEAD, &cgrp->flags);
}

/**
 * cgroup_is_descendant - test ancestry
 * @cgrp: the cgroup to be tested
 * @ancestor: possible ancestor of @cgrp
 *
 * Test whether @cgrp is a descendant of @ancestor.  It also returns %true
 * if @cgrp == @ancestor.  This function is safe to call as long as @cgrp
 * and @ancestor are accessible.
 */
bool cgroup_is_descendant(struct cgroup *cgrp, struct cgroup *ancestor)
{
	while (cgrp) {
		if (cgrp == ancestor)
			return true;
		cgrp = cgrp->parent;
	}
	return false;
}
EXPORT_SYMBOL_GPL(cgroup_is_descendant);
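
/*
 * Example (illustrative, not part of the original file): a cgroup counts
 * as its own ancestor, so both calls below are true for any child @c of
 * @parent:
 *
 *	cgroup_is_descendant(parent, parent);
 *	cgroup_is_descendant(c, parent);
 */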

static int cgroup_is_releasable(const struct cgroup *cgrp)
{
	const int bits =
		(1 << CGRP_RELEASABLE) |
		(1 << CGRP_NOTIFY_ON_RELEASE);
	return (cgrp->flags & bits) == bits;
}

static int notify_on_release(const struct cgroup *cgrp)
{
	return test_bit(CGRP_NOTIFY_ON_RELEASE, &cgrp->flags);
}

/**
 * for_each_subsys - iterate all loaded cgroup subsystems
 * @ss: the iteration cursor
 * @i: the index of @ss, CGROUP_SUBSYS_COUNT after reaching the end
 *
 * Should be called under cgroup_mutex.
 */
#define for_each_subsys(ss, i)						\
	for ((i) = 0; (i) < CGROUP_SUBSYS_COUNT; (i)++)			\
		if (({ lockdep_assert_held(&cgroup_mutex);		\
		       !((ss) = cgroup_subsys[i]); })) { }		\
		else

/**
 * for_each_builtin_subsys - iterate all built-in cgroup subsystems
 * @ss: the iteration cursor
 * @i: the index of @ss, CGROUP_BUILTIN_SUBSYS_COUNT after reaching the end
 *
 * Built-in subsystems are always present and iteration itself doesn't
 * require any synchronization.
 */
#define for_each_builtin_subsys(ss, i)					\
	for ((i) = 0; (i) < CGROUP_BUILTIN_SUBSYS_COUNT &&		\
	     (((ss) = cgroup_subsys[i]) || true); (i)++)

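/*
 * Example (illustrative only; @ss and @i are caller-local variables and
 * the pr_info() is just for demonstration):
 *
 *	struct cgroup_subsys *ss;
 *	int i;
 *
 *	mutex_lock(&cgroup_mutex);
 *	for_each_subsys(ss, i)
 *		pr_info("subsys %s at index %d\n", ss->name, i);
 *	mutex_unlock(&cgroup_mutex);
 */
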
/* iterate each subsystem attached to a hierarchy */
#define for_each_root_subsys(root, ss)					\
	list_for_each_entry((ss), &(root)->subsys_list, sibling)

/* iterate across the active hierarchies */
#define for_each_active_root(root)					\
	list_for_each_entry((root), &cgroup_roots, root_list)

static inline struct cgroup *__d_cgrp(struct dentry *dentry)
{
	return dentry->d_fsdata;
}

static inline struct cfent *__d_cfe(struct dentry *dentry)
{
	return dentry->d_fsdata;
}

static inline struct cftype *__d_cft(struct dentry *dentry)
{
	return __d_cfe(dentry)->type;
}

/**
 * cgroup_lock_live_group - take cgroup_mutex and check that cgrp is alive.
 * @cgrp: the cgroup to be checked for liveness
 *
 * On success, returns true; the mutex should be later unlocked.  On
 * failure returns false with no lock held.
 */
static bool cgroup_lock_live_group(struct cgroup *cgrp)
{
	mutex_lock(&cgroup_mutex);
	if (cgroup_is_dead(cgrp)) {
		mutex_unlock(&cgroup_mutex);
		return false;
	}
	return true;
}

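/*
 * Example (illustrative caller pattern; the -ENODEV convention matches
 * how write handlers elsewhere in this file use the helper):
 *
 *	if (!cgroup_lock_live_group(cgrp))
 *		return -ENODEV;
 *	... operate on a cgroup that cannot be removed under us ...
 *	mutex_unlock(&cgroup_mutex);
 */
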
/* the list of cgroups eligible for automatic release. Protected by
 * release_list_lock */
static LIST_HEAD(release_list);
static DEFINE_RAW_SPINLOCK(release_list_lock);
static void cgroup_release_agent(struct work_struct *work);
static DECLARE_WORK(release_agent_work, cgroup_release_agent);
static void check_for_release(struct cgroup *cgrp);

/*
 * A cgroup can be associated with multiple css_sets as different tasks may
 * belong to different cgroups on different hierarchies.  In the other
 * direction, a css_set is naturally associated with multiple cgroups.
 * This M:N relationship is represented by the following link structure
 * which exists for each association and allows traversing the associations
 * from both sides.
 */
struct cgrp_cset_link {
	/* the cgroup and css_set this link associates */
	struct cgroup		*cgrp;
	struct css_set		*cset;

	/* list of cgrp_cset_links anchored at cgrp->cset_links */
	struct list_head	cset_link;

	/* list of cgrp_cset_links anchored at css_set->cgrp_links */
	struct list_head	cgrp_link;
};

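/*
 * Illustrative example (cgroup names invented): a task in cgroup "/a" of
 * hierarchy 1 and "/b" of hierarchy 2 has one css_set with two
 * cgrp_cset_links -- one anchored at "/a"->cset_links, one at
 * "/b"->cset_links -- and both links sit on that css_set's ->cgrp_links
 * list.
 */
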
/* The default css_set - used by init and its children prior to any
 * hierarchies being mounted. It contains a pointer to the root state
 * for each subsystem. Also used to anchor the list of css_sets. Not
 * reference-counted, to improve performance when child cgroups
 * haven't been created.
 */

static struct css_set init_css_set;
static struct cgrp_cset_link init_cgrp_cset_link;

static int cgroup_init_idr(struct cgroup_subsys *ss,
			   struct cgroup_subsys_state *css);

/*
 * css_set_lock protects the list of css_set objects, and the chain of
 * tasks off each css_set.  Nests outside task->alloc_lock due to
 * css_task_iter_start().
 */
static DEFINE_RWLOCK(css_set_lock);
static int css_set_count;

/*
 * hash table for cgroup groups. This improves the performance of finding
 * an existing css_set. This hash doesn't (currently) take into
 * account cgroups in empty hierarchies.
 */
#define CSS_SET_HASH_BITS	7
static DEFINE_HASHTABLE(css_set_table, CSS_SET_HASH_BITS);

static unsigned long css_set_hash(struct cgroup_subsys_state *css[])
{
	unsigned long key = 0UL;
	struct cgroup_subsys *ss;
	int i;

	for_each_subsys(ss, i)
		key += (unsigned long)css[i];
	key = (key >> 16) ^ key;

	return key;
}

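/*
 * Illustrative note: the key above is the sum of the css pointers folded
 * once by (key >> 16) ^ key, so css_sets differing in any subsystem
 * pointer almost always hash to different css_set_table buckets.
 */
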
/*
 * We don't maintain the lists running through each css_set to its task
 * until after the first call to css_task_iter_start().  This reduces the
 * fork()/exit() overhead for people who have cgroups compiled into their
 * kernel but not actually in use.
 */
static int use_task_css_set_links __read_mostly;

static void __put_css_set(struct css_set *cset, int taskexit)
{
	struct cgrp_cset_link *link, *tmp_link;

	/*
	 * Ensure that the refcount doesn't hit zero while any readers
	 * can see it. Similar to atomic_dec_and_lock(), but for an
	 * rwlock
	 */
	if (atomic_add_unless(&cset->refcount, -1, 1))
		return;
	write_lock(&css_set_lock);
	if (!atomic_dec_and_test(&cset->refcount)) {
		write_unlock(&css_set_lock);
		return;
	}

	/* This css_set is dead. unlink it and release cgroup refcounts */
	hash_del(&cset->hlist);
	css_set_count--;

	list_for_each_entry_safe(link, tmp_link, &cset->cgrp_links, cgrp_link) {
		struct cgroup *cgrp = link->cgrp;

		list_del(&link->cset_link);
		list_del(&link->cgrp_link);

		/* @cgrp can't go away while we're holding css_set_lock */
		if (list_empty(&cgrp->cset_links) && notify_on_release(cgrp)) {
			if (taskexit)
				set_bit(CGRP_RELEASABLE, &cgrp->flags);
			check_for_release(cgrp);
		}

		kfree(link);
	}

	write_unlock(&css_set_lock);
	kfree_rcu(cset, rcu_head);
}

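/*
 * Illustrative sketch of the fast path in __put_css_set() -- conceptually
 * "decrement unless the count could hit zero, else lock and decrement":
 *
 *	if (refcount > 1)                  // lockless: cannot reach zero
 *		refcount--;
 *	else {                             // may reach zero: serialize
 *		write_lock(&css_set_lock);
 *		if (--refcount == 0)
 *			unlink and free the css_set;
 *		write_unlock(&css_set_lock);
 *	}
 */
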
/*
 * refcounted get/put for css_set objects
 */
static inline void get_css_set(struct css_set *cset)
{
	atomic_inc(&cset->refcount);
}

static inline void put_css_set(struct css_set *cset)
{
	__put_css_set(cset, 0);
}

static inline void put_css_set_taskexit(struct css_set *cset)
{
	__put_css_set(cset, 1);
}

/**
 * compare_css_sets - helper function for find_existing_css_set().
 * @cset: candidate css_set being tested
 * @old_cset: existing css_set for a task
 * @new_cgrp: cgroup that's being entered by the task
 * @template: desired set of css pointers in css_set (pre-calculated)
 *
 * Returns true if "cset" matches "old_cset" except for the hierarchy
 * which "new_cgrp" belongs to, for which it should match "new_cgrp".
 */
static bool compare_css_sets(struct css_set *cset,
			     struct css_set *old_cset,
			     struct cgroup *new_cgrp,
			     struct cgroup_subsys_state *template[])
{
	struct list_head *l1, *l2;

	if (memcmp(template, cset->subsys, sizeof(cset->subsys))) {
		/* Not all subsystems matched */
		return false;
	}

	/*
	 * Compare cgroup pointers in order to distinguish between
	 * different cgroups in hierarchies with no subsystems. We
	 * could get by with just this check alone (and skip the
	 * memcmp above) but on most setups the memcmp check will
	 * avoid the need for this more expensive check on almost all
	 * candidates.
	 */

	l1 = &cset->cgrp_links;
	l2 = &old_cset->cgrp_links;
	while (1) {
		struct cgrp_cset_link *link1, *link2;
		struct cgroup *cgrp1, *cgrp2;

		l1 = l1->next;
		l2 = l2->next;
		/* See if we reached the end - both lists are equal length. */
		if (l1 == &cset->cgrp_links) {
			BUG_ON(l2 != &old_cset->cgrp_links);
			break;
		} else {
			BUG_ON(l2 == &old_cset->cgrp_links);
		}
		/* Locate the cgroups associated with these links. */
		link1 = list_entry(l1, struct cgrp_cset_link, cgrp_link);
		link2 = list_entry(l2, struct cgrp_cset_link, cgrp_link);
		cgrp1 = link1->cgrp;
		cgrp2 = link2->cgrp;
		/* Hierarchies should be linked in the same order. */
		BUG_ON(cgrp1->root != cgrp2->root);

		/*
		 * If this hierarchy is the hierarchy of the cgroup
		 * that's changing, then we need to check that this
		 * css_set points to the new cgroup; if it's any other
		 * hierarchy, then this css_set should point to the
		 * same cgroup as the old css_set.
		 */
		if (cgrp1->root == new_cgrp->root) {
			if (cgrp1 != new_cgrp)
				return false;
		} else {
			if (cgrp1 != cgrp2)
				return false;
		}
	}
	return true;
}

/**
 * find_existing_css_set - init css array and find the matching css_set
 * @old_cset: the css_set that we're using before the cgroup transition
 * @cgrp: the cgroup that we're moving into
 * @template: out param for the new set of csses, should be clear on entry
 */
static struct css_set *find_existing_css_set(struct css_set *old_cset,
					struct cgroup *cgrp,
					struct cgroup_subsys_state *template[])
{
	struct cgroupfs_root *root = cgrp->root;
	struct cgroup_subsys *ss;
	struct css_set *cset;
	unsigned long key;
	int i;

	/*
	 * Build the set of subsystem state objects that we want to see in the
	 * new css_set.  While subsystems can change globally, the entries here
	 * won't change, so no need for locking.
	 */
	for_each_subsys(ss, i) {
		if (root->subsys_mask & (1UL << i)) {
			/* Subsystem is in this hierarchy. So we want
			 * the subsystem state from the new
			 * cgroup */
			template[i] = cgroup_css(cgrp, i);
		} else {
			/* Subsystem is not in this hierarchy, so we
			 * don't want to change the subsystem state */
			template[i] = old_cset->subsys[i];
		}
	}

	key = css_set_hash(template);
	hash_for_each_possible(css_set_table, cset, hlist, key) {
		if (!compare_css_sets(cset, old_cset, cgrp, template))
			continue;

		/* This css_set matches what we need */
		return cset;
	}

	/* No existing cgroup group matched */
	return NULL;
}

static void free_cgrp_cset_links(struct list_head *links_to_free)
{
	struct cgrp_cset_link *link, *tmp_link;

	list_for_each_entry_safe(link, tmp_link, links_to_free, cset_link) {
		list_del(&link->cset_link);
		kfree(link);
	}
}

/**
 * allocate_cgrp_cset_links - allocate cgrp_cset_links
 * @count: the number of links to allocate
 * @tmp_links: list_head the allocated links are put on
 *
 * Allocate @count cgrp_cset_link structures and chain them on @tmp_links
 * through ->cset_link.  Returns 0 on success or -errno.
 */
static int allocate_cgrp_cset_links(int count, struct list_head *tmp_links)
{
	struct cgrp_cset_link *link;
	int i;

	INIT_LIST_HEAD(tmp_links);

	for (i = 0; i < count; i++) {
		link = kzalloc(sizeof(*link), GFP_KERNEL);
		if (!link) {
			free_cgrp_cset_links(tmp_links);
			return -ENOMEM;
		}
		list_add(&link->cset_link, tmp_links);
	}
	return 0;
}

/**
 * link_css_set - a helper function to link a css_set to a cgroup
 * @tmp_links: cgrp_cset_link objects allocated by allocate_cgrp_cset_links()
 * @cset: the css_set to be linked
 * @cgrp: the destination cgroup
 */
static void link_css_set(struct list_head *tmp_links, struct css_set *cset,
			 struct cgroup *cgrp)
{
	struct cgrp_cset_link *link;

	BUG_ON(list_empty(tmp_links));
	link = list_first_entry(tmp_links, struct cgrp_cset_link, cset_link);
	link->cset = cset;
	link->cgrp = cgrp;
	list_move(&link->cset_link, &cgrp->cset_links);
	/*
	 * Always add links to the tail of the list so that the list
	 * is sorted by order of hierarchy creation
	 */
	list_add_tail(&link->cgrp_link, &cset->cgrp_links);
}

/**
 * find_css_set - return a new css_set with one cgroup updated
 * @old_cset: the baseline css_set
 * @cgrp: the cgroup to be updated
 *
 * Return a new css_set that's equivalent to @old_cset, but with @cgrp
 * substituted into the appropriate hierarchy.
 */
static struct css_set *find_css_set(struct css_set *old_cset,
				    struct cgroup *cgrp)
{
	struct cgroup_subsys_state *template[CGROUP_SUBSYS_COUNT] = { };
	struct css_set *cset;
	struct list_head tmp_links;
	struct cgrp_cset_link *link;
	unsigned long key;

	lockdep_assert_held(&cgroup_mutex);

	/* First see if we already have a cgroup group that matches
	 * the desired set */
	read_lock(&css_set_lock);
	cset = find_existing_css_set(old_cset, cgrp, template);
	if (cset)
		get_css_set(cset);
	read_unlock(&css_set_lock);

	if (cset)
		return cset;

	cset = kzalloc(sizeof(*cset), GFP_KERNEL);
	if (!cset)
		return NULL;

	/* Allocate all the cgrp_cset_link objects that we'll need */
	if (allocate_cgrp_cset_links(cgroup_root_count, &tmp_links) < 0) {
		kfree(cset);
		return NULL;
	}

	atomic_set(&cset->refcount, 1);
	INIT_LIST_HEAD(&cset->cgrp_links);
	INIT_LIST_HEAD(&cset->tasks);
	INIT_HLIST_NODE(&cset->hlist);

	/* Copy the set of subsystem state objects generated in
	 * find_existing_css_set() */
	memcpy(cset->subsys, template, sizeof(cset->subsys));

	write_lock(&css_set_lock);
	/* Add reference counts and links from the new css_set. */
	list_for_each_entry(link, &old_cset->cgrp_links, cgrp_link) {
		struct cgroup *c = link->cgrp;

		if (c->root == cgrp->root)
			c = cgrp;
		link_css_set(&tmp_links, cset, c);
	}

	BUG_ON(!list_empty(&tmp_links));

	css_set_count++;

	/* Add this cgroup group to the hash table */
	key = css_set_hash(cset->subsys);
	hash_add(css_set_table, &cset->hlist, key);

	write_unlock(&css_set_lock);

	return cset;
}

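/*
 * Illustrative walk-through (not from the original file): when a task
 * using css_set S is attached to cgroup C on hierarchy H, find_css_set(S,
 * C) first probes css_set_table for a set matching S on every hierarchy
 * except H, where it must match C.  Only on a miss does it allocate a
 * fresh css_set plus one cgrp_cset_link per mounted root.
 */
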
/*
 * Return the cgroup for "task" from the given hierarchy. Must be
 * called with cgroup_mutex held.
 */
static struct cgroup *task_cgroup_from_root(struct task_struct *task,
					    struct cgroupfs_root *root)
{
	struct css_set *cset;
	struct cgroup *res = NULL;

	BUG_ON(!mutex_is_locked(&cgroup_mutex));
	read_lock(&css_set_lock);
	/*
	 * No need to lock the task - since we hold cgroup_mutex the
	 * task can't change groups, so the only thing that can happen
	 * is that it exits and its css is set back to init_css_set.
	 */
	cset = task_css_set(task);
	if (cset == &init_css_set) {
		res = &root->top_cgroup;
	} else {
		struct cgrp_cset_link *link;

		list_for_each_entry(link, &cset->cgrp_links, cgrp_link) {
			struct cgroup *c = link->cgrp;

			if (c->root == root) {
				res = c;
				break;
			}
		}
	}
	read_unlock(&css_set_lock);
	BUG_ON(!res);
	return res;
}

/*
 * There is one global cgroup mutex. We also require taking
 * task_lock() when dereferencing a task's cgroup subsys pointers.
 * See "The task_lock() exception", at the end of this comment.
 *
 * A task must hold cgroup_mutex to modify cgroups.
 *
 * Any task can increment and decrement the count field without lock.
 * So in general, code holding cgroup_mutex can't rely on the count
 * field not changing.  However, if the count goes to zero, then only
 * cgroup_attach_task() can increment it again.  Because a count of zero
 * means that no tasks are currently attached, therefore there is no
 * way a task attached to that cgroup can fork (the other way to
 * increment the count).  So code holding cgroup_mutex can safely
 * assume that if the count is zero, it will stay zero. Similarly, if
 * a task holds cgroup_mutex on a cgroup with zero count, it
 * knows that the cgroup won't be removed, as cgroup_rmdir()
 * needs that mutex.
 *
 * The fork and exit callbacks cgroup_fork() and cgroup_exit(), don't
 * (usually) take cgroup_mutex.  These are the two most performance
 * critical pieces of code here.  The exception occurs on cgroup_exit(),
 * when a task in a notify_on_release cgroup exits.  Then cgroup_mutex
 * is taken, and if the cgroup count is zero, a usermode call is made
 * to the release agent with the name of the cgroup (path relative to
 * the root of cgroup file system) as the argument.
 *
 * A cgroup can only be deleted if both its 'count' of using tasks
 * is zero, and its list of 'children' cgroups is empty.  Since all
 * tasks in the system use _some_ cgroup, and since there is always at
 * least one task in the system (init, pid == 1), therefore, top_cgroup
 * always has either children cgroups and/or using tasks.  So we don't
 * need a special hack to ensure that top_cgroup cannot be deleted.
 *
 *	The task_lock() exception
 *
 * The need for this exception arises from the action of
 * cgroup_attach_task(), which overwrites one task's cgroup pointer with
 * another.  It does so using cgroup_mutex, however there are
 * several performance critical places that need to reference
 * task->cgroup without the expense of grabbing a system global
 * mutex.  Therefore except as noted below, when dereferencing or, as
 * in cgroup_attach_task(), modifying a task's cgroup pointer we use
 * task_lock(), which acts on a spinlock (task->alloc_lock) already in
 * the task_struct routinely used for such matters.
 *
 * P.S.  One more locking exception.  RCU is used to guard the
 * update of a task's cgroup pointer by cgroup_attach_task().
 */

/*
 * A couple of forward declarations required, due to cyclic reference loop:
 * cgroup_mkdir -> cgroup_create -> cgroup_populate_dir ->
 * cgroup_add_file -> cgroup_create_file -> cgroup_dir_inode_operations
 * -> cgroup_mkdir.
 */

static int cgroup_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode);
static struct dentry *cgroup_lookup(struct inode *, struct dentry *, unsigned int);
static int cgroup_rmdir(struct inode *unused_dir, struct dentry *dentry);
static int cgroup_populate_dir(struct cgroup *cgrp, unsigned long subsys_mask);
static const struct inode_operations cgroup_dir_inode_operations;
static const struct file_operations proc_cgroupstats_operations;

static struct backing_dev_info cgroup_backing_dev_info = {
	.name		= "cgroup",
	.capabilities	= BDI_CAP_NO_ACCT_AND_WRITEBACK,
};

static int alloc_css_id(struct cgroup_subsys_state *child_css);

static struct inode *cgroup_new_inode(umode_t mode, struct super_block *sb)
{
	struct inode *inode = new_inode(sb);

	if (inode) {
		inode->i_ino = get_next_ino();
		inode->i_mode = mode;
		inode->i_uid = current_fsuid();
		inode->i_gid = current_fsgid();
		inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
		inode->i_mapping->backing_dev_info = &cgroup_backing_dev_info;
	}
	return inode;
}

static struct cgroup_name *cgroup_alloc_name(struct dentry *dentry)
{
	struct cgroup_name *name;

	name = kmalloc(sizeof(*name) + dentry->d_name.len + 1, GFP_KERNEL);
	if (!name)
		return NULL;
	strcpy(name->name, dentry->d_name.name);
	return name;
}

static void cgroup_free_fn(struct work_struct *work)
{
	struct cgroup *cgrp = container_of(work, struct cgroup, destroy_work);

	mutex_lock(&cgroup_mutex);
	cgrp->root->number_of_cgroups--;
	mutex_unlock(&cgroup_mutex);

	/*
	 * We get a ref to the parent's dentry, and put the ref when
	 * this cgroup is being freed, so it's guaranteed that the
	 * parent won't be destroyed before its children.
	 */
	dput(cgrp->parent->dentry);

	/*
	 * Drop the active superblock reference that we took when we
	 * created the cgroup. This will free cgrp->root, if we are
	 * holding the last reference to @sb.
	 */
	deactivate_super(cgrp->root->sb);

	/*
	 * if we're getting rid of the cgroup, refcount should ensure
	 * that there are no pidlists left.
	 */
	BUG_ON(!list_empty(&cgrp->pidlists));

	simple_xattrs_free(&cgrp->xattrs);

	kfree(rcu_dereference_raw(cgrp->name));
	kfree(cgrp);
}

static void cgroup_free_rcu(struct rcu_head *head)
{
	struct cgroup *cgrp = container_of(head, struct cgroup, rcu_head);

	INIT_WORK(&cgrp->destroy_work, cgroup_free_fn);
	schedule_work(&cgrp->destroy_work);
}

static void cgroup_diput(struct dentry *dentry, struct inode *inode)
{
	/* is dentry a directory ? if so, kfree() associated cgroup */
	if (S_ISDIR(inode->i_mode)) {
		struct cgroup *cgrp = dentry->d_fsdata;

		BUG_ON(!(cgroup_is_dead(cgrp)));
		call_rcu(&cgrp->rcu_head, cgroup_free_rcu);
	} else {
		struct cfent *cfe = __d_cfe(dentry);
		struct cgroup *cgrp = dentry->d_parent->d_fsdata;

		WARN_ONCE(!list_empty(&cfe->node) &&
			  cgrp != &cgrp->root->top_cgroup,
			  "cfe still linked for %s\n", cfe->type->name);

		simple_xattrs_free(&cfe->xattrs);
		kfree(cfe);
	}
	iput(inode);
}

static int cgroup_delete(const struct dentry *d)
{
	return 1;
}

static void remove_dir(struct dentry *d)
{
	struct dentry *parent = dget(d->d_parent);

	d_delete(d);
	simple_rmdir(parent->d_inode, d);
	dput(parent);
}

static void cgroup_rm_file(struct cgroup *cgrp, const struct cftype *cft)
{
	struct cfent *cfe;

	lockdep_assert_held(&cgrp->dentry->d_inode->i_mutex);
	lockdep_assert_held(&cgroup_mutex);

	/*
	 * If we're doing cleanup due to failure of cgroup_create(),
	 * the corresponding @cfe may not exist.
	 */
	list_for_each_entry(cfe, &cgrp->files, node) {
		struct dentry *d = cfe->dentry;

		if (cft && cfe->type != cft)
			continue;

		dget(d);
		d_delete(d);
		simple_unlink(cgrp->dentry->d_inode, d);
		list_del_init(&cfe->node);
		dput(d);

		break;
	}
}

/**
 * cgroup_clear_dir - remove subsys files in a cgroup directory
 * @cgrp: target cgroup
 * @subsys_mask: mask of the subsystem ids whose files should be removed
 */
static void cgroup_clear_dir(struct cgroup *cgrp, unsigned long subsys_mask)
{
	struct cgroup_subsys *ss;
	int i;

	for_each_subsys(ss, i) {
		struct cftype_set *set;

		if (!test_bit(i, &subsys_mask))
			continue;
		list_for_each_entry(set, &ss->cftsets, node)
			cgroup_addrm_files(cgrp, set->cfts, false);
	}
}

/*
 * NOTE : the dentry must have been dget()'ed
 */
static void cgroup_d_remove_dir(struct dentry *dentry)
{
	struct dentry *parent;

	parent = dentry->d_parent;
	spin_lock(&parent->d_lock);
	spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
	list_del_init(&dentry->d_u.d_child);
	spin_unlock(&dentry->d_lock);
	spin_unlock(&parent->d_lock);
	remove_dir(dentry);
}

/*
 * Call with cgroup_mutex held. Drops reference counts on modules, including
 * any duplicate ones that parse_cgroupfs_options took. If this function
 * returns an error, no reference counts are touched.
 */
static int rebind_subsystems(struct cgroupfs_root *root,
			     unsigned long added_mask, unsigned removed_mask)
{
	struct cgroup *cgrp = &root->top_cgroup;
	struct cgroup_subsys *ss;
	unsigned long pinned = 0;
	int i, ret;

	BUG_ON(!mutex_is_locked(&cgroup_mutex));
	BUG_ON(!mutex_is_locked(&cgroup_root_mutex));

	/* Check that any added subsystems are currently free */
	for_each_subsys(ss, i) {
		if (!(added_mask & (1 << i)))
			continue;

		/* is the subsystem mounted elsewhere? */
		if (ss->root != &cgroup_dummy_root) {
			ret = -EBUSY;
			goto out_put;
		}

		/* pin the module */
		if (!try_module_get(ss->module)) {
			ret = -ENOENT;
			goto out_put;
		}
		pinned |= 1 << i;
	}

	/* subsys could be missing if unloaded between parsing and here */
	if (added_mask != pinned) {
		ret = -ENOENT;
		goto out_put;
	}

	ret = cgroup_populate_dir(cgrp, added_mask);
	if (ret)
		goto out_put;

	/*
	 * Nothing can fail from this point on.  Remove files for the
	 * removed subsystems and rebind each subsystem.
	 */
	cgroup_clear_dir(cgrp, removed_mask);

	for_each_subsys(ss, i) {
		unsigned long bit = 1UL << i;

		if (bit & added_mask) {
			/* We're binding this subsystem to this hierarchy */
			BUG_ON(cgroup_css(cgrp, i));
			BUG_ON(!cgroup_css(cgroup_dummy_top, i));
			BUG_ON(cgroup_css(cgroup_dummy_top, i)->cgroup != cgroup_dummy_top);

			rcu_assign_pointer(cgrp->subsys[i],
					   cgroup_css(cgroup_dummy_top, i));
			cgroup_css(cgrp, i)->cgroup = cgrp;

			list_move(&ss->sibling, &root->subsys_list);
			ss->root = root;
			if (ss->bind)
				ss->bind(cgroup_css(cgrp, i));

			/* refcount was already taken, and we're keeping it */
			root->subsys_mask |= bit;
		} else if (bit & removed_mask) {
			/* We're removing this subsystem */
			BUG_ON(cgroup_css(cgrp, i) != cgroup_css(cgroup_dummy_top, i));
			BUG_ON(cgroup_css(cgrp, i)->cgroup != cgrp);

			if (ss->bind)
				ss->bind(cgroup_css(cgroup_dummy_top, i));

			cgroup_css(cgroup_dummy_top, i)->cgroup = cgroup_dummy_top;
			RCU_INIT_POINTER(cgrp->subsys[i], NULL);

			cgroup_subsys[i]->root = &cgroup_dummy_root;
			list_move(&ss->sibling, &cgroup_dummy_root.subsys_list);

			/* subsystem is now free - drop reference on module */
			module_put(ss->module);
			root->subsys_mask &= ~bit;
		}
	}

	/*
	 * Mark @root has finished binding subsystems.  @root->subsys_mask
	 * now matches the bound subsystems.
	 */
	root->flags |= CGRP_ROOT_SUBSYS_BOUND;

	return 0;

out_put:
	for_each_subsys(ss, i)
		if (pinned & (1 << i))
			module_put(ss->module);
	return ret;
}

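/*
 * Illustrative note: rebind_subsystems() backs both a fresh mount and a
 * remount that changes the subsystem set.  Bits in @added_mask re-point a
 * subsystem's css from cgroup_dummy_top to this root's top cgroup; bits
 * in @removed_mask move it back and drop the module reference pinned
 * above.
 */
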
static int cgroup_show_options(struct seq_file *seq, struct dentry *dentry)
{
	struct cgroupfs_root *root = dentry->d_sb->s_fs_info;
	struct cgroup_subsys *ss;

	mutex_lock(&cgroup_root_mutex);
	for_each_root_subsys(root, ss)
		seq_printf(seq, ",%s", ss->name);
	if (root->flags & CGRP_ROOT_SANE_BEHAVIOR)
		seq_puts(seq, ",sane_behavior");
	if (root->flags & CGRP_ROOT_NOPREFIX)
		seq_puts(seq, ",noprefix");
	if (root->flags & CGRP_ROOT_XATTR)
		seq_puts(seq, ",xattr");
	if (strlen(root->release_agent_path))
		seq_printf(seq, ",release_agent=%s", root->release_agent_path);
	if (test_bit(CGRP_CPUSET_CLONE_CHILDREN, &root->top_cgroup.flags))
		seq_puts(seq, ",clone_children");
	if (strlen(root->name))
		seq_printf(seq, ",name=%s", root->name);
	mutex_unlock(&cgroup_root_mutex);
	return 0;
}

struct cgroup_sb_opts {
	unsigned long subsys_mask;
	unsigned long flags;
	char *release_agent;
	bool cpuset_clone_children;
	char *name;
	/* User explicitly requested empty subsystem */
	bool none;

	struct cgroupfs_root *new_root;

};

/*
 * Convert a hierarchy specifier into a bitmask of subsystems and
 * flags. Call with cgroup_mutex held to protect the cgroup_subsys[]
 * array. This function takes refcounts on subsystems to be used, unless it
 * returns error, in which case no refcounts are taken.
 */
static int parse_cgroupfs_options(char *data, struct cgroup_sb_opts *opts)
{
	char *token, *o = data;
	bool all_ss = false, one_ss = false;
	unsigned long mask = (unsigned long)-1;
	struct cgroup_subsys *ss;
	int i;

	BUG_ON(!mutex_is_locked(&cgroup_mutex));

#ifdef CONFIG_CPUSETS
	mask = ~(1UL << cpuset_subsys_id);
#endif

	memset(opts, 0, sizeof(*opts));

	while ((token = strsep(&o, ",")) != NULL) {
		if (!*token)
			return -EINVAL;
		if (!strcmp(token, "none")) {
			/* Explicitly have no subsystems */
			opts->none = true;
			continue;
		}
		if (!strcmp(token, "all")) {
			/* Mutually exclusive option 'all' + subsystem name */
			if (one_ss)
				return -EINVAL;
			all_ss = true;
			continue;
		}
		if (!strcmp(token, "__DEVEL__sane_behavior")) {
			opts->flags |= CGRP_ROOT_SANE_BEHAVIOR;
			continue;
		}
		if (!strcmp(token, "noprefix")) {
			opts->flags |= CGRP_ROOT_NOPREFIX;
			continue;
		}
		if (!strcmp(token, "clone_children")) {
			opts->cpuset_clone_children = true;
			continue;
		}
		if (!strcmp(token, "xattr")) {
			opts->flags |= CGRP_ROOT_XATTR;
			continue;
		}
		if (!strncmp(token, "release_agent=", 14)) {
			/* Specifying two release agents is forbidden */
			if (opts->release_agent)
				return -EINVAL;
			opts->release_agent =
				kstrndup(token + 14, PATH_MAX - 1, GFP_KERNEL);
			if (!opts->release_agent)
				return -ENOMEM;
			continue;
		}
		if (!strncmp(token, "name=", 5)) {
			const char *name = token + 5;
			/* Can't specify an empty name */
			if (!strlen(name))
				return -EINVAL;
			/* Must match [\w.-]+ */
			for (i = 0; i < strlen(name); i++) {
				char c = name[i];
				if (isalnum(c))
					continue;
				if ((c == '.') || (c == '-') || (c == '_'))
					continue;
				return -EINVAL;
			}
			/* Specifying two names is forbidden */
			if (opts->name)
				return -EINVAL;
			opts->name = kstrndup(name,
					      MAX_CGROUP_ROOT_NAMELEN - 1,
					      GFP_KERNEL);
			if (!opts->name)
				return -ENOMEM;

			continue;
		}

		for_each_subsys(ss, i) {
			if (strcmp(token, ss->name))
				continue;
			if (ss->disabled)
				continue;

			/* Mutually exclusive option 'all' + subsystem name */
			if (all_ss)
				return -EINVAL;
			set_bit(i, &opts->subsys_mask);
			one_ss = true;

			break;
		}
		if (i == CGROUP_SUBSYS_COUNT)
			return -ENOENT;
	}

	/*
	 * If the 'all' option was specified, select all the subsystems;
	 * otherwise, if neither 'none', 'name=' nor any subsystem name
	 * was specified, default to 'all'.
	 */
	if (all_ss || (!one_ss && !opts->none && !opts->name))
		for_each_subsys(ss, i)
			if (!ss->disabled)
				set_bit(i, &opts->subsys_mask);

	/* Consistency checks */

	if (opts->flags & CGRP_ROOT_SANE_BEHAVIOR) {
		pr_warning("cgroup: sane_behavior: this is still under development and its behaviors will change, proceed at your own risk\n");

		if (opts->flags & CGRP_ROOT_NOPREFIX) {
			pr_err("cgroup: sane_behavior: noprefix is not allowed\n");
			return -EINVAL;
		}

		if (opts->cpuset_clone_children) {
			pr_err("cgroup: sane_behavior: clone_children is not allowed\n");
			return -EINVAL;
		}
	}

	/*
	 * Option noprefix was introduced just for backward compatibility
	 * with the old cpuset, so we allow noprefix only if mounting just
	 * the cpuset subsystem.
	 */
	if ((opts->flags & CGRP_ROOT_NOPREFIX) && (opts->subsys_mask & mask))
		return -EINVAL;

	/* Can't specify "none" and some subsystems */
	if (opts->subsys_mask && opts->none)
		return -EINVAL;

	/*
	 * We either have to specify by name or by subsystems. (So all
	 * empty hierarchies must have a name).
	 */
	if (!opts->subsys_mask && !opts->name)
		return -EINVAL;

	return 0;
}

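/*
 * Example (illustrative mount data; the agent path and name are
 * invented): parsing
 *
 *	"cpuset,noprefix,release_agent=/sbin/cgroup_agent"
 *
 * sets the cpuset bit in ->subsys_mask, CGRP_ROOT_NOPREFIX in ->flags and
 * duplicates the agent path, while "none,name=mygroup" requests an empty
 * hierarchy identified only by its name.
 */
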
static int cgroup_remount(struct super_block *sb, int *flags, char *data)
{
	int ret = 0;
	struct cgroupfs_root *root = sb->s_fs_info;
	struct cgroup *cgrp = &root->top_cgroup;
	struct cgroup_sb_opts opts;
	unsigned long added_mask, removed_mask;

	if (root->flags & CGRP_ROOT_SANE_BEHAVIOR) {
		pr_err("cgroup: sane_behavior: remount is not allowed\n");
		return -EINVAL;
	}

	mutex_lock(&cgrp->dentry->d_inode->i_mutex);
	mutex_lock(&cgroup_mutex);
	mutex_lock(&cgroup_root_mutex);

	/* See what subsystems are wanted */
	ret = parse_cgroupfs_options(data, &opts);
	if (ret)
		goto out_unlock;

	if (opts.subsys_mask != root->subsys_mask || opts.release_agent)
		pr_warning("cgroup: option changes via remount are deprecated (pid=%d comm=%s)\n",
			   task_tgid_nr(current), current->comm);

	added_mask = opts.subsys_mask & ~root->subsys_mask;
	removed_mask = root->subsys_mask & ~opts.subsys_mask;

	/* Don't allow flags or name to change at remount */
	if (((opts.flags ^ root->flags) & CGRP_ROOT_OPTION_MASK) ||
	    (opts.name && strcmp(opts.name, root->name))) {
		pr_err("cgroup: option or name mismatch, new: 0x%lx \"%s\", old: 0x%lx \"%s\"\n",
		       opts.flags & CGRP_ROOT_OPTION_MASK, opts.name ?: "",
		       root->flags & CGRP_ROOT_OPTION_MASK, root->name);
		ret = -EINVAL;
		goto out_unlock;
	}

	/* remounting is not allowed for populated hierarchies */
	if (root->number_of_cgroups > 1) {
		ret = -EBUSY;
		goto out_unlock;
	}

	ret = rebind_subsystems(root, added_mask, removed_mask);
	if (ret)
		goto out_unlock;

	if (opts.release_agent)
		strcpy(root->release_agent_path, opts.release_agent);
 out_unlock:
	kfree(opts.release_agent);
	kfree(opts.name);
	mutex_unlock(&cgroup_root_mutex);
	mutex_unlock(&cgroup_mutex);
	mutex_unlock(&cgrp->dentry->d_inode->i_mutex);
	return ret;
}

static const struct super_operations cgroup_ops = {
	.statfs = simple_statfs,
	.drop_inode = generic_delete_inode,
	.show_options = cgroup_show_options,
	.remount_fs = cgroup_remount,
};

static void init_cgroup_housekeeping(struct cgroup *cgrp)
{
	INIT_LIST_HEAD(&cgrp->sibling);
	INIT_LIST_HEAD(&cgrp->children);
	INIT_LIST_HEAD(&cgrp->files);
	INIT_LIST_HEAD(&cgrp->cset_links);
	INIT_LIST_HEAD(&cgrp->release_list);
	INIT_LIST_HEAD(&cgrp->pidlists);
	mutex_init(&cgrp->pidlist_mutex);
	cgrp->dummy_css.cgroup = cgrp;
	INIT_LIST_HEAD(&cgrp->event_list);
	spin_lock_init(&cgrp->event_list_lock);
	simple_xattrs_init(&cgrp->xattrs);
}

static void init_cgroup_root(struct cgroupfs_root *root)
{
	struct cgroup *cgrp = &root->top_cgroup;

	INIT_LIST_HEAD(&root->subsys_list);
	INIT_LIST_HEAD(&root->root_list);
	root->number_of_cgroups = 1;
	cgrp->root = root;
	RCU_INIT_POINTER(cgrp->name, &root_cgroup_name);
	init_cgroup_housekeeping(cgrp);
	idr_init(&root->cgroup_idr);
}

static int cgroup_init_root_id(struct cgroupfs_root *root, int start, int end)
{
	int id;

	lockdep_assert_held(&cgroup_mutex);
	lockdep_assert_held(&cgroup_root_mutex);

	id = idr_alloc_cyclic(&cgroup_hierarchy_idr, root, start, end,
			      GFP_KERNEL);
	if (id < 0)
		return id;

	root->hierarchy_id = id;
	return 0;
}

static void cgroup_exit_root_id(struct cgroupfs_root *root)
{
	lockdep_assert_held(&cgroup_mutex);
	lockdep_assert_held(&cgroup_root_mutex);

	if (root->hierarchy_id) {
		idr_remove(&cgroup_hierarchy_idr, root->hierarchy_id);
		root->hierarchy_id = 0;
	}
}

static int cgroup_test_super(struct super_block *sb, void *data)
{
	struct cgroup_sb_opts *opts = data;
	struct cgroupfs_root *root = sb->s_fs_info;

	/* If we asked for a name then it must match */
	if (opts->name && strcmp(opts->name, root->name))
		return 0;

	/*
	 * If we asked for subsystems (or explicitly for no
	 * subsystems) then they must match
	 */
	if ((opts->subsys_mask || opts->none)
	    && (opts->subsys_mask != root->subsys_mask))
		return 0;

	return 1;
}

static struct cgroupfs_root *cgroup_root_from_opts(struct cgroup_sb_opts *opts)
{
	struct cgroupfs_root *root;

	if (!opts->subsys_mask && !opts->none)
		return NULL;

	root = kzalloc(sizeof(*root), GFP_KERNEL);
	if (!root)
		return ERR_PTR(-ENOMEM);

	init_cgroup_root(root);

	/*
	 * We need to set @root->subsys_mask now so that @root can be
	 * matched by cgroup_test_super() before it finishes
	 * initialization; otherwise, competing mounts with the same
	 * options may try to bind the same subsystems instead of waiting
	 * for the first one leading to unexpected mount errors.
	 * SUBSYS_BOUND will be set once actual binding is complete.
	 */
	root->subsys_mask = opts->subsys_mask;
	root->flags = opts->flags;
	if (opts->release_agent)
		strcpy(root->release_agent_path, opts->release_agent);
	if (opts->name)
		strcpy(root->name, opts->name);
	if (opts->cpuset_clone_children)
		set_bit(CGRP_CPUSET_CLONE_CHILDREN, &root->top_cgroup.flags);
	return root;
}

static void cgroup_free_root(struct cgroupfs_root *root)
{
	if (root) {
		/* hierarchy ID should already have been released */
		WARN_ON_ONCE(root->hierarchy_id);

		idr_destroy(&root->cgroup_idr);
		kfree(root);
	}
}

static int cgroup_set_super(struct super_block *sb, void *data)
{
	int ret;
	struct cgroup_sb_opts *opts = data;

	/* If we don't have a new root, we can't set up a new sb */
	if (!opts->new_root)
		return -EINVAL;

	BUG_ON(!opts->subsys_mask && !opts->none);

	ret = set_anon_super(sb, NULL);
	if (ret)
		return ret;

	sb->s_fs_info = opts->new_root;
	opts->new_root->sb = sb;

	sb->s_blocksize = PAGE_CACHE_SIZE;
	sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
	sb->s_magic = CGROUP_SUPER_MAGIC;
	sb->s_op = &cgroup_ops;

	return 0;
}

static int cgroup_get_rootdir(struct super_block *sb)
{
	static const struct dentry_operations cgroup_dops = {
		.d_iput = cgroup_diput,
		.d_delete = cgroup_delete,
	};

	struct inode *inode =
		cgroup_new_inode(S_IFDIR | S_IRUGO | S_IXUGO | S_IWUSR, sb);

	if (!inode)
		return -ENOMEM;

	inode->i_fop = &simple_dir_operations;
	inode->i_op = &cgroup_dir_inode_operations;
	/* directories start off with i_nlink == 2 (for "." entry) */
	inc_nlink(inode);
	sb->s_root = d_make_root(inode);
	if (!sb->s_root)
		return -ENOMEM;
	/* for everything else we want ->d_op set */
	sb->s_d_op = &cgroup_dops;
	return 0;
}

A
Al Viro 已提交
1544
static struct dentry *cgroup_mount(struct file_system_type *fs_type,
1545
			 int flags, const char *unused_dev_name,
A
Al Viro 已提交
1546
			 void *data)
1547 1548
{
	struct cgroup_sb_opts opts;
1549
	struct cgroupfs_root *root;
1550 1551
	int ret = 0;
	struct super_block *sb;
1552
	struct cgroupfs_root *new_root;
1553
	struct list_head tmp_links;
T
Tejun Heo 已提交
1554
	struct inode *inode;
1555
	const struct cred *cred;
1556 1557

	/* First find the desired set of subsystems */
B
Ben Blum 已提交
1558
	mutex_lock(&cgroup_mutex);
1559
	ret = parse_cgroupfs_options(data, &opts);
B
Ben Blum 已提交
1560
	mutex_unlock(&cgroup_mutex);
1561 1562
	if (ret)
		goto out_err;
1563

1564 1565 1566 1567 1568 1569 1570
	/*
	 * Allocate a new cgroup root. We may not need it if we're
	 * reusing an existing hierarchy.
	 */
	new_root = cgroup_root_from_opts(&opts);
	if (IS_ERR(new_root)) {
		ret = PTR_ERR(new_root);
1571
		goto out_err;
1572
	}
1573
	opts.new_root = new_root;
1574

1575
	/* Locate an existing or new sb for this hierarchy */
D
David Howells 已提交
1576
	sb = sget(fs_type, cgroup_test_super, cgroup_set_super, 0, &opts);
1577
	if (IS_ERR(sb)) {
1578
		ret = PTR_ERR(sb);
1579
		cgroup_free_root(opts.new_root);
1580
		goto out_err;
1581 1582
	}

1583 1584 1585 1586
	root = sb->s_fs_info;
	BUG_ON(!root);
	if (root == opts.new_root) {
		/* We used the new root structure, so this is a new hierarchy */
1587
		struct cgroup *root_cgrp = &root->top_cgroup;
1588
		struct cgroupfs_root *existing_root;
1589
		int i;
1590
		struct css_set *cset;
1591 1592 1593 1594 1595 1596

		BUG_ON(sb->s_root != NULL);

		ret = cgroup_get_rootdir(sb);
		if (ret)
			goto drop_new_super;
1597
		inode = sb->s_root->d_inode;
1598

1599
		mutex_lock(&inode->i_mutex);
1600
		mutex_lock(&cgroup_mutex);
T
Tejun Heo 已提交
1601
		mutex_lock(&cgroup_root_mutex);
1602

1603 1604 1605 1606 1607
		root_cgrp->id = idr_alloc(&root->cgroup_idr, root_cgrp,
					   0, 1, GFP_KERNEL);
		if (root_cgrp->id < 0)
			goto unlock_drop;

		/* Check for name clashes with existing mounts */
		ret = -EBUSY;
		if (strlen(root->name))
			for_each_active_root(existing_root)
				if (!strcmp(existing_root->name, root->name))
					goto unlock_drop;

		/*
		 * We're accessing css_set_count without locking
		 * css_set_lock here, but that's OK - it can only be
		 * increased by someone holding cgroup_lock, and
		 * that's us. The worst that can happen is that we
		 * have some link structures left over
		 */
		ret = allocate_cgrp_cset_links(css_set_count, &tmp_links);
		if (ret)
			goto unlock_drop;

		/* ID 0 is reserved for dummy root, 1 for unified hierarchy */
		ret = cgroup_init_root_id(root, 2, 0);
		if (ret)
			goto unlock_drop;

		sb->s_root->d_fsdata = root_cgrp;
		root_cgrp->dentry = sb->s_root;

		/*
		 * We're inside get_sb() and will call lookup_one_len() to
		 * create the root files, which doesn't work if SELinux is
		 * in use.  The following cred dancing somehow works around
		 * it.  See 2ce9738ba ("cgroupfs: use init_cred when
		 * populating new cgroupfs mount") for more details.
		 */
		cred = override_creds(&init_cred);

		ret = cgroup_addrm_files(root_cgrp, cgroup_base_files, true);
		if (ret)
			goto rm_base_files;

		ret = rebind_subsystems(root, root->subsys_mask, 0);
		if (ret)
			goto rm_base_files;

		revert_creds(cred);

		/*
		 * There must be no failure case after here, since rebinding
		 * takes care of subsystems' refcounts, which are explicitly
		 * dropped in the failure exit path.
		 */

		list_add(&root->root_list, &cgroup_roots);
		cgroup_root_count++;

		/* Link the top cgroup in this hierarchy into all
		 * the css_set objects */
		write_lock(&css_set_lock);
		hash_for_each(css_set_table, i, cset, hlist)
			link_css_set(&tmp_links, cset, root_cgrp);
		write_unlock(&css_set_lock);

		free_cgrp_cset_links(&tmp_links);

		BUG_ON(!list_empty(&root_cgrp->children));
		BUG_ON(root->number_of_cgroups != 1);

		mutex_unlock(&cgroup_root_mutex);
		mutex_unlock(&cgroup_mutex);
		mutex_unlock(&inode->i_mutex);
	} else {
		/*
		 * We re-used an existing hierarchy - the new root (if
		 * any) is not needed
		 */
		cgroup_free_root(opts.new_root);

		if ((root->flags ^ opts.flags) & CGRP_ROOT_OPTION_MASK) {
			if ((root->flags | opts.flags) & CGRP_ROOT_SANE_BEHAVIOR) {
				pr_err("cgroup: sane_behavior: new mount options should match the existing superblock\n");
				ret = -EINVAL;
				goto drop_new_super;
			} else {
				pr_warning("cgroup: new mount options do not match the existing superblock, will be ignored\n");
			}
		}
	}

	kfree(opts.release_agent);
	kfree(opts.name);
	return dget(sb->s_root);

 rm_base_files:
	free_cgrp_cset_links(&tmp_links);
	cgroup_addrm_files(&root->top_cgroup, cgroup_base_files, false);
	revert_creds(cred);
 unlock_drop:
	cgroup_exit_root_id(root);
	mutex_unlock(&cgroup_root_mutex);
	mutex_unlock(&cgroup_mutex);
	mutex_unlock(&inode->i_mutex);
 drop_new_super:
	deactivate_locked_super(sb);
 out_err:
	kfree(opts.release_agent);
	kfree(opts.name);
	return ERR_PTR(ret);
}
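
/*
 * Illustrative sketch (not part of the original file): this ->mount()
 * path is what runs when userspace issues, roughly,
 *
 *	mount("cgroup", "/sys/fs/cgroup/cpu", "cgroup", 0, "cpu");
 *
 * i.e. mount(2) with fstype "cgroup" and a comma-separated list of
 * subsystems plus options such as "noprefix" or "xattr" as the data
 * string.  The mount point shown is hypothetical.
 */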

static void cgroup_kill_sb(struct super_block *sb)
{
	struct cgroupfs_root *root = sb->s_fs_info;
	struct cgroup *cgrp = &root->top_cgroup;
	struct cgrp_cset_link *link, *tmp_link;
	int ret;

	BUG_ON(!root);

	BUG_ON(root->number_of_cgroups != 1);
	BUG_ON(!list_empty(&cgrp->children));

	mutex_lock(&cgrp->dentry->d_inode->i_mutex);
	mutex_lock(&cgroup_mutex);
	mutex_lock(&cgroup_root_mutex);

	/* Rebind all subsystems back to the default hierarchy */
	if (root->flags & CGRP_ROOT_SUBSYS_BOUND) {
		ret = rebind_subsystems(root, 0, root->subsys_mask);
		/* Shouldn't be able to fail ... */
		BUG_ON(ret);
	}

	/*
	 * Release all the links from cset_links to this hierarchy's
	 * root cgroup
	 */
	write_lock(&css_set_lock);

	list_for_each_entry_safe(link, tmp_link, &cgrp->cset_links, cset_link) {
		list_del(&link->cset_link);
		list_del(&link->cgrp_link);
		kfree(link);
	}
	write_unlock(&css_set_lock);

	if (!list_empty(&root->root_list)) {
		list_del(&root->root_list);
		cgroup_root_count--;
	}

	cgroup_exit_root_id(root);

	mutex_unlock(&cgroup_root_mutex);
	mutex_unlock(&cgroup_mutex);
	mutex_unlock(&cgrp->dentry->d_inode->i_mutex);

	simple_xattrs_free(&cgrp->xattrs);

	kill_litter_super(sb);
	cgroup_free_root(root);
}

static struct file_system_type cgroup_fs_type = {
	.name = "cgroup",
	.mount = cgroup_mount,
	.kill_sb = cgroup_kill_sb,
};

static struct kobject *cgroup_kobj;

/**
 * cgroup_path - generate the path of a cgroup
 * @cgrp: the cgroup in question
 * @buf: the buffer to write the path into
 * @buflen: the length of the buffer
 *
 * Writes path of cgroup into buf.  Returns 0 on success, -errno on error.
 *
 * We can't generate cgroup path using dentry->d_name, as accessing
 * dentry->name must be protected by irq-unsafe dentry->d_lock or parent
 * inode's i_mutex, while on the other hand cgroup_path() can be called
 * with some irq-safe spinlocks held.
 */
int cgroup_path(const struct cgroup *cgrp, char *buf, int buflen)
{
	int ret = -ENAMETOOLONG;
	char *start;

	if (!cgrp->parent) {
		if (strlcpy(buf, "/", buflen) >= buflen)
			return -ENAMETOOLONG;
		return 0;
	}

	start = buf + buflen - 1;
	*start = '\0';

	rcu_read_lock();
	do {
		const char *name = cgroup_name(cgrp);
		int len;

		len = strlen(name);
		if ((start -= len) < buf)
			goto out;
		memcpy(start, name, len);

		if (--start < buf)
			goto out;
		*start = '/';

		cgrp = cgrp->parent;
	} while (cgrp->parent);
	ret = 0;
	memmove(buf, start, buf + buflen - start);
out:
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(cgroup_path);
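
/*
 * Illustrative sketch (not part of the original file): a typical caller
 * formats the path into a stack buffer and checks for truncation, e.g.
 *
 *	char path[PATH_MAX];
 *
 *	if (!cgroup_path(cgrp, path, sizeof(path)))
 *		pr_info("cgroup path: %s\n", path);
 *
 * The buffer size and the pr_info() consumer are hypothetical; the
 * function itself may be called with irq-safe spinlocks held.
 */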

/**
 * task_cgroup_path - cgroup path of a task in the first cgroup hierarchy
 * @task: target task
 * @buf: the buffer to write the path into
 * @buflen: the length of the buffer
 *
 * Determine @task's cgroup on the first (the one with the lowest non-zero
 * hierarchy_id) cgroup hierarchy and copy its path into @buf.  This
 * function grabs cgroup_mutex and shouldn't be used inside locks used by
 * cgroup controller callbacks.
 *
 * Returns 0 on success, fails with -%ENAMETOOLONG if @buflen is too short.
 */
int task_cgroup_path(struct task_struct *task, char *buf, size_t buflen)
{
	struct cgroupfs_root *root;
	struct cgroup *cgrp;
	int hierarchy_id = 1, ret = 0;

	if (buflen < 2)
		return -ENAMETOOLONG;

	mutex_lock(&cgroup_mutex);

	root = idr_get_next(&cgroup_hierarchy_idr, &hierarchy_id);

	if (root) {
		cgrp = task_cgroup_from_root(task, root);
		ret = cgroup_path(cgrp, buf, buflen);
	} else {
		/* if no hierarchy exists, everyone is in "/" */
		memcpy(buf, "/", 2);
	}

	mutex_unlock(&cgroup_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(task_cgroup_path);
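
/*
 * Illustrative sketch (not part of the original file): debugging code
 * could log where a task lives in the first hierarchy, e.g.
 *
 *	char path[PATH_MAX];
 *
 *	if (!task_cgroup_path(task, path, sizeof(path)))
 *		pr_debug("%s is in %s\n", task->comm, path);
 *
 * This must not run under cgroup_mutex or controller locks, since the
 * function takes cgroup_mutex itself; the buffer and message are
 * hypothetical.
 */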

/*
 * Control Group taskset
 */
struct task_and_cgroup {
	struct task_struct	*task;
	struct cgroup		*cgrp;
	struct css_set		*cset;
};

struct cgroup_taskset {
	struct task_and_cgroup	single;
	struct flex_array	*tc_array;
	int			tc_array_len;
	int			idx;
	struct cgroup		*cur_cgrp;
};

/**
 * cgroup_taskset_first - reset taskset and return the first task
 * @tset: taskset of interest
 *
 * @tset iteration is initialized and the first task is returned.
 */
struct task_struct *cgroup_taskset_first(struct cgroup_taskset *tset)
{
	if (tset->tc_array) {
		tset->idx = 0;
		return cgroup_taskset_next(tset);
	} else {
		tset->cur_cgrp = tset->single.cgrp;
		return tset->single.task;
	}
}
EXPORT_SYMBOL_GPL(cgroup_taskset_first);

/**
 * cgroup_taskset_next - iterate to the next task in taskset
 * @tset: taskset of interest
 *
 * Return the next task in @tset.  Iteration must have been initialized
 * with cgroup_taskset_first().
 */
struct task_struct *cgroup_taskset_next(struct cgroup_taskset *tset)
{
	struct task_and_cgroup *tc;

	if (!tset->tc_array || tset->idx >= tset->tc_array_len)
		return NULL;

	tc = flex_array_get(tset->tc_array, tset->idx++);
	tset->cur_cgrp = tc->cgrp;
	return tc->task;
}
EXPORT_SYMBOL_GPL(cgroup_taskset_next);
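
/*
 * Illustrative sketch (not part of the original file): a controller's
 * ->can_attach() callback typically walks the taskset with the two
 * iterators above, e.g.
 *
 *	struct task_struct *task;
 *
 *	for (task = cgroup_taskset_first(tset); task;
 *	     task = cgroup_taskset_next(tset))
 *		if (!my_task_is_acceptable(task))
 *			return -EINVAL;
 *
 * my_task_is_acceptable() is a hypothetical predicate.
 */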

/**
 * cgroup_taskset_cur_css - return the matching css for the current task
 * @tset: taskset of interest
 * @subsys_id: the ID of the target subsystem
 *
 * Return the css for the current (last returned) task of @tset for
 * subsystem specified by @subsys_id.  This function must be preceded by
 * either cgroup_taskset_first() or cgroup_taskset_next().
 */
struct cgroup_subsys_state *cgroup_taskset_cur_css(struct cgroup_taskset *tset,
						   int subsys_id)
{
	return cgroup_css(tset->cur_cgrp, subsys_id);
}
EXPORT_SYMBOL_GPL(cgroup_taskset_cur_css);

/**
 * cgroup_taskset_size - return the number of tasks in taskset
 * @tset: taskset of interest
 */
int cgroup_taskset_size(struct cgroup_taskset *tset)
{
	return tset->tc_array ? tset->tc_array_len : 1;
}
EXPORT_SYMBOL_GPL(cgroup_taskset_size);


/*
 * cgroup_task_migrate - move a task from one cgroup to another.
 *
 * Must be called with cgroup_mutex and threadgroup locked.
 */
static void cgroup_task_migrate(struct cgroup *old_cgrp,
				struct task_struct *tsk,
				struct css_set *new_cset)
{
	struct css_set *old_cset;

	/*
	 * We are synchronized through threadgroup_lock() against PF_EXITING
	 * setting such that we can't race against cgroup_exit() changing the
	 * css_set to init_css_set and dropping the old one.
	 */
	WARN_ON_ONCE(tsk->flags & PF_EXITING);
	old_cset = task_css_set(tsk);

	task_lock(tsk);
	rcu_assign_pointer(tsk->cgroups, new_cset);
	task_unlock(tsk);

	/* Update the css_set linked lists if we're using them */
	write_lock(&css_set_lock);
	if (!list_empty(&tsk->cg_list))
		list_move(&tsk->cg_list, &new_cset->tasks);
	write_unlock(&css_set_lock);

	/*
	 * We just gained a reference on old_cset by taking it from the
	 * task. As trading it for new_cset is protected by cgroup_mutex,
	 * we're safe to drop it here; it will be freed under RCU.
	 */
	set_bit(CGRP_RELEASABLE, &old_cgrp->flags);
	put_css_set(old_cset);
}

/**
 * cgroup_attach_task - attach a task or a whole threadgroup to a cgroup
 * @cgrp: the cgroup to attach to
 * @tsk: the task or the leader of the threadgroup to be attached
 * @threadgroup: attach the whole threadgroup?
 *
 * Call holding cgroup_mutex and the group_rwsem of the leader. Will take
 * task_lock of @tsk or each thread in the threadgroup individually in turn.
 */
static int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk,
			      bool threadgroup)
{
	int retval, i, group_size;
	struct cgroup_subsys *ss, *failed_ss = NULL;
	struct cgroupfs_root *root = cgrp->root;
	/* threadgroup list cursor and array */
	struct task_struct *leader = tsk;
	struct task_and_cgroup *tc;
	struct flex_array *group;
	struct cgroup_taskset tset = { };

	/*
	 * step 0: in order to do expensive, possibly blocking operations for
	 * every thread, we cannot iterate the thread group list, since it needs
	 * rcu or tasklist locked. instead, build an array of all threads in the
	 * group - group_rwsem prevents new threads from appearing, and if
	 * threads exit, this will just be an over-estimate.
	 */
	if (threadgroup)
		group_size = get_nr_threads(tsk);
	else
		group_size = 1;
	/* flex_array supports very large thread-groups better than kmalloc. */
	group = flex_array_alloc(sizeof(*tc), group_size, GFP_KERNEL);
	if (!group)
		return -ENOMEM;
	/* pre-allocate to guarantee space while iterating in rcu read-side. */
	retval = flex_array_prealloc(group, 0, group_size, GFP_KERNEL);
	if (retval)
		goto out_free_group_list;

	i = 0;
	/*
	 * Prevent freeing of tasks while we take a snapshot. Tasks that are
	 * already PF_EXITING could be freed from underneath us unless we
	 * take an rcu_read_lock.
	 */
	rcu_read_lock();
	do {
		struct task_and_cgroup ent;

		/* @tsk either already exited or can't exit until the end */
		if (tsk->flags & PF_EXITING)
			continue;

		/* as per above, nr_threads may decrease, but not increase. */
		BUG_ON(i >= group_size);
		ent.task = tsk;
		ent.cgrp = task_cgroup_from_root(tsk, root);
		/* nothing to do if this task is already in the cgroup */
		if (ent.cgrp == cgrp)
			continue;
		/*
		 * saying GFP_ATOMIC has no effect here because we did prealloc
		 * earlier, but it's good form to communicate our expectations.
		 */
		retval = flex_array_put(group, i, &ent, GFP_ATOMIC);
		BUG_ON(retval != 0);
		i++;

		if (!threadgroup)
			break;
	} while_each_thread(leader, tsk);
	rcu_read_unlock();
	/* remember the number of threads in the array for later. */
	group_size = i;
	tset.tc_array = group;
	tset.tc_array_len = group_size;

	/* methods shouldn't be called if no task is actually migrating */
	retval = 0;
	if (!group_size)
		goto out_free_group_list;

	/*
	 * step 1: check that we can legitimately attach to the cgroup.
	 */
	for_each_root_subsys(root, ss) {
		struct cgroup_subsys_state *css = cgroup_css(cgrp, ss->subsys_id);

		if (ss->can_attach) {
			retval = ss->can_attach(css, &tset);
			if (retval) {
				failed_ss = ss;
				goto out_cancel_attach;
			}
		}
	}

	/*
	 * step 2: make sure css_sets exist for all threads to be migrated.
	 * we use find_css_set, which allocates a new one if necessary.
	 */
	for (i = 0; i < group_size; i++) {
		struct css_set *old_cset;

		tc = flex_array_get(group, i);
		old_cset = task_css_set(tc->task);
		tc->cset = find_css_set(old_cset, cgrp);
		if (!tc->cset) {
			retval = -ENOMEM;
			goto out_put_css_set_refs;
		}
	}

	/*
	 * step 3: now that we're guaranteed success wrt the css_sets,
	 * proceed to move all tasks to the new cgroup.  There are no
	 * failure cases after here, so this is the commit point.
	 */
	for (i = 0; i < group_size; i++) {
		tc = flex_array_get(group, i);
		cgroup_task_migrate(tc->cgrp, tc->task, tc->cset);
	}
	/* nothing is sensitive to fork() after this point. */

	/*
	 * step 4: do subsystem attach callbacks.
	 */
	for_each_root_subsys(root, ss) {
		struct cgroup_subsys_state *css = cgroup_css(cgrp, ss->subsys_id);

		if (ss->attach)
			ss->attach(css, &tset);
	}

	/*
	 * step 5: success! and cleanup
	 */
	retval = 0;
out_put_css_set_refs:
	if (retval) {
		for (i = 0; i < group_size; i++) {
			tc = flex_array_get(group, i);
			if (!tc->cset)
				break;
			put_css_set(tc->cset);
		}
	}
out_cancel_attach:
	if (retval) {
		for_each_root_subsys(root, ss) {
			struct cgroup_subsys_state *css = cgroup_css(cgrp, ss->subsys_id);

			if (ss == failed_ss)
				break;
			if (ss->cancel_attach)
				ss->cancel_attach(css, &tset);
		}
	}
out_free_group_list:
	flex_array_free(group);
	return retval;
}

/*
 * Find the task_struct of the task to attach by vpid and pass it along to the
 * function to attach either it or all tasks in its threadgroup. Will lock
 * cgroup_mutex and threadgroup; may take task_lock of task.
 */
static int attach_task_by_pid(struct cgroup *cgrp, u64 pid, bool threadgroup)
{
	struct task_struct *tsk;
	const struct cred *cred = current_cred(), *tcred;
	int ret;

	if (!cgroup_lock_live_group(cgrp))
		return -ENODEV;

retry_find_task:
	rcu_read_lock();
	if (pid) {
		tsk = find_task_by_vpid(pid);
		if (!tsk) {
			rcu_read_unlock();
			ret = -ESRCH;
			goto out_unlock_cgroup;
		}
		/*
		 * even if we're attaching all tasks in the thread group, we
		 * only need to check permissions on one of them.
		 */
		tcred = __task_cred(tsk);
		if (!uid_eq(cred->euid, GLOBAL_ROOT_UID) &&
		    !uid_eq(cred->euid, tcred->uid) &&
		    !uid_eq(cred->euid, tcred->suid)) {
			rcu_read_unlock();
			ret = -EACCES;
			goto out_unlock_cgroup;
		}
	} else
		tsk = current;

	if (threadgroup)
		tsk = tsk->group_leader;

	/*
	 * Workqueue threads may acquire PF_NO_SETAFFINITY and become
	 * trapped in a cpuset, or RT worker may be born in a cgroup
	 * with no rt_runtime allocated.  Just say no.
	 */
	if (tsk == kthreadd_task || (tsk->flags & PF_NO_SETAFFINITY)) {
		ret = -EINVAL;
		rcu_read_unlock();
		goto out_unlock_cgroup;
	}

	get_task_struct(tsk);
	rcu_read_unlock();

	threadgroup_lock(tsk);
	if (threadgroup) {
		if (!thread_group_leader(tsk)) {
			/*
			 * a race with de_thread from another thread's exec()
			 * may strip us of our leadership, if this happens,
			 * there is no choice but to throw this task away and
			 * try again; this is
			 * "double-double-toil-and-trouble-check locking".
			 */
			threadgroup_unlock(tsk);
			put_task_struct(tsk);
			goto retry_find_task;
		}
	}

	ret = cgroup_attach_task(cgrp, tsk, threadgroup);

	threadgroup_unlock(tsk);

	put_task_struct(tsk);
out_unlock_cgroup:
	mutex_unlock(&cgroup_mutex);
	return ret;
}

/**
 * cgroup_attach_task_all - attach task 'tsk' to all cgroups of task 'from'
 * @from: attach to all cgroups of a given task
 * @tsk: the task to be attached
 */
int cgroup_attach_task_all(struct task_struct *from, struct task_struct *tsk)
{
	struct cgroupfs_root *root;
	int retval = 0;

	mutex_lock(&cgroup_mutex);
	for_each_active_root(root) {
		struct cgroup *from_cgrp = task_cgroup_from_root(from, root);

		retval = cgroup_attach_task(from_cgrp, tsk, false);
		if (retval)
			break;
	}
	mutex_unlock(&cgroup_mutex);

	return retval;
}
EXPORT_SYMBOL_GPL(cgroup_attach_task_all);
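
/*
 * Illustrative sketch (not part of the original file): a caller that
 * spawns a helper task can place it alongside its parent in every
 * active hierarchy, e.g.
 *
 *	ret = cgroup_attach_task_all(current, helper_task);
 *
 * helper_task is a hypothetical task_struct pointer.
 */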

static int cgroup_tasks_write(struct cgroup_subsys_state *css,
			      struct cftype *cft, u64 pid)
{
	return attach_task_by_pid(css->cgroup, pid, false);
}

static int cgroup_procs_write(struct cgroup_subsys_state *css,
			      struct cftype *cft, u64 tgid)
{
	return attach_task_by_pid(css->cgroup, tgid, true);
}

static int cgroup_release_agent_write(struct cgroup_subsys_state *css,
				      struct cftype *cft, const char *buffer)
{
	BUILD_BUG_ON(sizeof(css->cgroup->root->release_agent_path) < PATH_MAX);
	if (strlen(buffer) >= PATH_MAX)
		return -EINVAL;
	if (!cgroup_lock_live_group(css->cgroup))
		return -ENODEV;
	mutex_lock(&cgroup_root_mutex);
	strcpy(css->cgroup->root->release_agent_path, buffer);
	mutex_unlock(&cgroup_root_mutex);
	mutex_unlock(&cgroup_mutex);
	return 0;
}

static int cgroup_release_agent_show(struct cgroup_subsys_state *css,
				     struct cftype *cft, struct seq_file *seq)
{
	struct cgroup *cgrp = css->cgroup;

	if (!cgroup_lock_live_group(cgrp))
		return -ENODEV;
	seq_puts(seq, cgrp->root->release_agent_path);
	seq_putc(seq, '\n');
	mutex_unlock(&cgroup_mutex);
	return 0;
}

static int cgroup_sane_behavior_show(struct cgroup_subsys_state *css,
				     struct cftype *cft, struct seq_file *seq)
{
	seq_printf(seq, "%d\n", cgroup_sane_behavior(css->cgroup));
	return 0;
}

/* A buffer size big enough for numbers or short strings */
#define CGROUP_LOCAL_BUFFER_SIZE 64

static ssize_t cgroup_write_X64(struct cgroup_subsys_state *css,
				struct cftype *cft, struct file *file,
				const char __user *userbuf, size_t nbytes,
				loff_t *unused_ppos)
{
	char buffer[CGROUP_LOCAL_BUFFER_SIZE];
	int retval = 0;
	char *end;

	if (!nbytes)
		return -EINVAL;
	if (nbytes >= sizeof(buffer))
		return -E2BIG;
	if (copy_from_user(buffer, userbuf, nbytes))
		return -EFAULT;

	buffer[nbytes] = 0;     /* nul-terminate */
	if (cft->write_u64) {
		u64 val = simple_strtoull(strstrip(buffer), &end, 0);
		if (*end)
			return -EINVAL;
		retval = cft->write_u64(css, cft, val);
	} else {
		s64 val = simple_strtoll(strstrip(buffer), &end, 0);
		if (*end)
			return -EINVAL;
		retval = cft->write_s64(css, cft, val);
	}
	if (!retval)
		retval = nbytes;
	return retval;
}

static ssize_t cgroup_write_string(struct cgroup_subsys_state *css,
				   struct cftype *cft, struct file *file,
				   const char __user *userbuf, size_t nbytes,
				   loff_t *unused_ppos)
{
	char local_buffer[CGROUP_LOCAL_BUFFER_SIZE];
	int retval = 0;
	size_t max_bytes = cft->max_write_len;
	char *buffer = local_buffer;

	if (!max_bytes)
		max_bytes = sizeof(local_buffer) - 1;
	if (nbytes >= max_bytes)
		return -E2BIG;
	/* Allocate a dynamic buffer if we need one */
	if (nbytes >= sizeof(local_buffer)) {
		buffer = kmalloc(nbytes + 1, GFP_KERNEL);
		if (buffer == NULL)
			return -ENOMEM;
	}
	if (nbytes && copy_from_user(buffer, userbuf, nbytes)) {
		retval = -EFAULT;
		goto out;
	}

	buffer[nbytes] = 0;     /* nul-terminate */
	retval = cft->write_string(css, cft, strstrip(buffer));
	if (!retval)
		retval = nbytes;
out:
	if (buffer != local_buffer)
		kfree(buffer);
	return retval;
}

static ssize_t cgroup_file_write(struct file *file, const char __user *buf,
				 size_t nbytes, loff_t *ppos)
{
	struct cfent *cfe = __d_cfe(file->f_dentry);
	struct cftype *cft = __d_cft(file->f_dentry);
	struct cgroup_subsys_state *css = cfe->css;

	if (cft->write)
		return cft->write(css, cft, file, buf, nbytes, ppos);
	if (cft->write_u64 || cft->write_s64)
		return cgroup_write_X64(css, cft, file, buf, nbytes, ppos);
	if (cft->write_string)
		return cgroup_write_string(css, cft, file, buf, nbytes, ppos);
	if (cft->trigger) {
		int ret = cft->trigger(css, (unsigned int)cft->private);
		return ret ? ret : nbytes;
	}
	return -EINVAL;
}

static ssize_t cgroup_read_u64(struct cgroup_subsys_state *css,
			       struct cftype *cft, struct file *file,
			       char __user *buf, size_t nbytes, loff_t *ppos)
{
	char tmp[CGROUP_LOCAL_BUFFER_SIZE];
	u64 val = cft->read_u64(css, cft);
	int len = sprintf(tmp, "%llu\n", (unsigned long long) val);

	return simple_read_from_buffer(buf, nbytes, ppos, tmp, len);
}

static ssize_t cgroup_read_s64(struct cgroup_subsys_state *css,
			       struct cftype *cft, struct file *file,
			       char __user *buf, size_t nbytes, loff_t *ppos)
{
	char tmp[CGROUP_LOCAL_BUFFER_SIZE];
	s64 val = cft->read_s64(css, cft);
	int len = sprintf(tmp, "%lld\n", (long long) val);

	return simple_read_from_buffer(buf, nbytes, ppos, tmp, len);
}

static ssize_t cgroup_file_read(struct file *file, char __user *buf,
				size_t nbytes, loff_t *ppos)
{
	struct cfent *cfe = __d_cfe(file->f_dentry);
	struct cftype *cft = __d_cft(file->f_dentry);
	struct cgroup_subsys_state *css = cfe->css;

	if (cft->read)
		return cft->read(css, cft, file, buf, nbytes, ppos);
	if (cft->read_u64)
		return cgroup_read_u64(css, cft, file, buf, nbytes, ppos);
	if (cft->read_s64)
		return cgroup_read_s64(css, cft, file, buf, nbytes, ppos);
	return -EINVAL;
}

/*
 * seqfile ops/methods for returning structured data. Currently just
 * supports string->u64 maps, but can be extended in future.
 */

static int cgroup_map_add(struct cgroup_map_cb *cb, const char *key, u64 value)
{
	struct seq_file *sf = cb->state;
	return seq_printf(sf, "%s %llu\n", key, (unsigned long long)value);
}

static int cgroup_seqfile_show(struct seq_file *m, void *arg)
{
	struct cfent *cfe = m->private;
	struct cftype *cft = cfe->type;
	struct cgroup_subsys_state *css = cfe->css;

	if (cft->read_map) {
		struct cgroup_map_cb cb = {
			.fill = cgroup_map_add,
			.state = m,
		};
		return cft->read_map(css, cft, &cb);
	}
	return cft->read_seq_string(css, cft, m);
}

static const struct file_operations cgroup_seqfile_operations = {
	.read = seq_read,
	.write = cgroup_file_write,
	.llseek = seq_lseek,
	.release = single_release,
};

static int cgroup_file_open(struct inode *inode, struct file *file)
{
	struct cfent *cfe = __d_cfe(file->f_dentry);
	struct cftype *cft = __d_cft(file->f_dentry);
	struct cgroup *cgrp = __d_cgrp(cfe->dentry->d_parent);
	struct cgroup_subsys_state *css;
	int err;

	err = generic_file_open(inode, file);
	if (err)
		return err;

	/*
	 * If the file belongs to a subsystem, pin the css.  Will be
	 * unpinned either on open failure or release.  This ensures that
	 * @css stays alive for all file operations.
	 */
	rcu_read_lock();
	if (cft->ss) {
		css = cgroup_css(cgrp, cft->ss->subsys_id);
		if (!css_tryget(css))
			css = NULL;
	} else {
		css = &cgrp->dummy_css;
	}
	rcu_read_unlock();

	if (!css)
		return -ENODEV;

	/*
	 * @cfe->css is used by read/write/close to determine the
	 * associated css.  @file->private_data would be a better place but
	 * that's already used by seqfile.  Multiple accessors may use it
	 * simultaneously which is okay as the association never changes.
	 */
	WARN_ON_ONCE(cfe->css && cfe->css != css);
	cfe->css = css;

	if (cft->read_map || cft->read_seq_string) {
		file->f_op = &cgroup_seqfile_operations;
		err = single_open(file, cgroup_seqfile_show, cfe);
	} else if (cft->open) {
		err = cft->open(inode, file);
	}

	if (css->ss && err)
		css_put(css);
	return err;
}

static int cgroup_file_release(struct inode *inode, struct file *file)
{
	struct cfent *cfe = __d_cfe(file->f_dentry);
	struct cftype *cft = __d_cft(file->f_dentry);
	struct cgroup_subsys_state *css = cfe->css;
	int ret = 0;

	if (cft->release)
		ret = cft->release(inode, file);
	if (css->ss)
		css_put(css);
	return ret;
}

/*
 * cgroup_rename - Only allow simple rename of directories in place.
 */
static int cgroup_rename(struct inode *old_dir, struct dentry *old_dentry,
			    struct inode *new_dir, struct dentry *new_dentry)
{
	int ret;
	struct cgroup_name *name, *old_name;
	struct cgroup *cgrp;

	/*
	 * It's convenient to use the parent dir's i_mutex to protect
	 * cgrp->name.
	 */
	lockdep_assert_held(&old_dir->i_mutex);

	if (!S_ISDIR(old_dentry->d_inode->i_mode))
		return -ENOTDIR;
	if (new_dentry->d_inode)
		return -EEXIST;
	if (old_dir != new_dir)
		return -EIO;

	cgrp = __d_cgrp(old_dentry);

	/*
	 * This isn't a proper migration and its usefulness is very
	 * limited.  Disallow if sane_behavior.
	 */
	if (cgroup_sane_behavior(cgrp))
		return -EPERM;

	name = cgroup_alloc_name(new_dentry);
	if (!name)
		return -ENOMEM;

	ret = simple_rename(old_dir, old_dentry, new_dir, new_dentry);
	if (ret) {
		kfree(name);
		return ret;
	}

	old_name = rcu_dereference_protected(cgrp->name, true);
	rcu_assign_pointer(cgrp->name, name);

	kfree_rcu(old_name, rcu_head);
	return 0;
}

static struct simple_xattrs *__d_xattrs(struct dentry *dentry)
{
	if (S_ISDIR(dentry->d_inode->i_mode))
		return &__d_cgrp(dentry)->xattrs;
	else
		return &__d_cfe(dentry)->xattrs;
}

static inline int xattr_enabled(struct dentry *dentry)
{
	struct cgroupfs_root *root = dentry->d_sb->s_fs_info;
	return root->flags & CGRP_ROOT_XATTR;
}

static bool is_valid_xattr(const char *name)
{
	if (!strncmp(name, XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN) ||
	    !strncmp(name, XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN))
		return true;
	return false;
}

static int cgroup_setxattr(struct dentry *dentry, const char *name,
			   const void *val, size_t size, int flags)
{
	if (!xattr_enabled(dentry))
		return -EOPNOTSUPP;
	if (!is_valid_xattr(name))
		return -EINVAL;
	return simple_xattr_set(__d_xattrs(dentry), name, val, size, flags);
}

static int cgroup_removexattr(struct dentry *dentry, const char *name)
{
	if (!xattr_enabled(dentry))
		return -EOPNOTSUPP;
	if (!is_valid_xattr(name))
		return -EINVAL;
	return simple_xattr_remove(__d_xattrs(dentry), name);
}

static ssize_t cgroup_getxattr(struct dentry *dentry, const char *name,
			       void *buf, size_t size)
{
	if (!xattr_enabled(dentry))
		return -EOPNOTSUPP;
	if (!is_valid_xattr(name))
		return -EINVAL;
	return simple_xattr_get(__d_xattrs(dentry), name, buf, size);
}

static ssize_t cgroup_listxattr(struct dentry *dentry, char *buf, size_t size)
{
	if (!xattr_enabled(dentry))
		return -EOPNOTSUPP;
	return simple_xattr_list(__d_xattrs(dentry), buf, size);
}

static const struct file_operations cgroup_file_operations = {
	.read = cgroup_file_read,
	.write = cgroup_file_write,
	.llseek = generic_file_llseek,
	.open = cgroup_file_open,
	.release = cgroup_file_release,
};

static const struct inode_operations cgroup_file_inode_operations = {
	.setxattr = cgroup_setxattr,
	.getxattr = cgroup_getxattr,
	.listxattr = cgroup_listxattr,
	.removexattr = cgroup_removexattr,
};

static const struct inode_operations cgroup_dir_inode_operations = {
	.lookup = cgroup_lookup,
	.mkdir = cgroup_mkdir,
	.rmdir = cgroup_rmdir,
	.rename = cgroup_rename,
	.setxattr = cgroup_setxattr,
	.getxattr = cgroup_getxattr,
	.listxattr = cgroup_listxattr,
	.removexattr = cgroup_removexattr,
};

static struct dentry *cgroup_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags)
{
	if (dentry->d_name.len > NAME_MAX)
		return ERR_PTR(-ENAMETOOLONG);
	d_add(dentry, NULL);
	return NULL;
}

/*
 * Check if a file is a control file
 */
static inline struct cftype *__file_cft(struct file *file)
{
	if (file_inode(file)->i_fop != &cgroup_file_operations)
		return ERR_PTR(-EINVAL);
	return __d_cft(file->f_dentry);
}

static int cgroup_create_file(struct dentry *dentry, umode_t mode,
				struct super_block *sb)
{
	struct inode *inode;

	if (!dentry)
		return -ENOENT;
	if (dentry->d_inode)
		return -EEXIST;

	inode = cgroup_new_inode(mode, sb);
	if (!inode)
		return -ENOMEM;

	if (S_ISDIR(mode)) {
		inode->i_op = &cgroup_dir_inode_operations;
		inode->i_fop = &simple_dir_operations;

		/* start off with i_nlink == 2 (for "." entry) */
		inc_nlink(inode);
		inc_nlink(dentry->d_parent->d_inode);

		/*
		 * Control reaches here with cgroup_mutex held.
		 * @inode->i_mutex should nest outside cgroup_mutex but we
		 * want to populate it immediately without releasing
		 * cgroup_mutex.  As @inode isn't visible to anyone else
		 * yet, trylock will always succeed without affecting
		 * lockdep checks.
		 */
		WARN_ON_ONCE(!mutex_trylock(&inode->i_mutex));
	} else if (S_ISREG(mode)) {
		inode->i_size = 0;
		inode->i_fop = &cgroup_file_operations;
		inode->i_op = &cgroup_file_inode_operations;
	}
	d_instantiate(dentry, inode);
	dget(dentry);	/* Extra count - pin the dentry in core */
	return 0;
}

/**
 * cgroup_file_mode - deduce file mode of a control file
 * @cft: the control file in question
 *
 * returns cft->mode if ->mode is not 0
 * returns S_IRUGO|S_IWUSR if it has both a read and a write handler
 * returns S_IRUGO if it has only a read handler
 * returns S_IWUSR if it has only a write handler
 */
static umode_t cgroup_file_mode(const struct cftype *cft)
{
	umode_t mode = 0;

	if (cft->mode)
		return cft->mode;

	if (cft->read || cft->read_u64 || cft->read_s64 ||
	    cft->read_map || cft->read_seq_string)
		mode |= S_IRUGO;

	if (cft->write || cft->write_u64 || cft->write_s64 ||
	    cft->write_string || cft->trigger)
		mode |= S_IWUSR;

	return mode;
}

static int cgroup_add_file(struct cgroup *cgrp, struct cftype *cft)
{
	struct dentry *dir = cgrp->dentry;
	struct cgroup *parent = __d_cgrp(dir);
	struct dentry *dentry;
	struct cfent *cfe;
	int error;
	umode_t mode;
	char name[MAX_CGROUP_TYPE_NAMELEN + MAX_CFTYPE_NAME + 2] = { 0 };

	if (cft->ss && !(cgrp->root->flags & CGRP_ROOT_NOPREFIX)) {
		strcpy(name, cft->ss->name);
		strcat(name, ".");
	}
	strcat(name, cft->name);

	BUG_ON(!mutex_is_locked(&dir->d_inode->i_mutex));

	cfe = kzalloc(sizeof(*cfe), GFP_KERNEL);
	if (!cfe)
		return -ENOMEM;

	dentry = lookup_one_len(name, dir, strlen(name));
	if (IS_ERR(dentry)) {
		error = PTR_ERR(dentry);
		goto out;
	}

	cfe->type = (void *)cft;
	cfe->dentry = dentry;
	dentry->d_fsdata = cfe;
	simple_xattrs_init(&cfe->xattrs);

	mode = cgroup_file_mode(cft);
	error = cgroup_create_file(dentry, mode | S_IFREG, cgrp->root->sb);
	if (!error) {
		list_add_tail(&cfe->node, &parent->files);
		cfe = NULL;
	}
	dput(dentry);
out:
	kfree(cfe);
	return error;
}

/**
 * cgroup_addrm_files - add or remove files to a cgroup directory
 * @cgrp: the target cgroup
 * @cfts: array of cftypes to be added
 * @is_add: whether to add or remove
 *
 * Depending on @is_add, add or remove files defined by @cfts on @cgrp.
 * For removals, this function never fails.  If addition fails, this
 * function doesn't remove files already added.  The caller is responsible
 * for cleaning up.
 */
static int cgroup_addrm_files(struct cgroup *cgrp, struct cftype cfts[],
			      bool is_add)
{
	struct cftype *cft;
	int ret;

	lockdep_assert_held(&cgrp->dentry->d_inode->i_mutex);
	lockdep_assert_held(&cgroup_mutex);

	for (cft = cfts; cft->name[0] != '\0'; cft++) {
		/* does cft->flags tell us to skip this file on @cgrp? */
		if ((cft->flags & CFTYPE_INSANE) && cgroup_sane_behavior(cgrp))
			continue;
		if ((cft->flags & CFTYPE_NOT_ON_ROOT) && !cgrp->parent)
			continue;
		if ((cft->flags & CFTYPE_ONLY_ON_ROOT) && cgrp->parent)
			continue;

		if (is_add) {
			ret = cgroup_add_file(cgrp, cft);
			if (ret) {
				pr_warn("cgroup_addrm_files: failed to add %s, err=%d\n",
					cft->name, ret);
				return ret;
			}
		} else {
			cgroup_rm_file(cgrp, cft);
		}
	}
	return 0;
}

static void cgroup_cfts_prepare(void)
	__acquires(&cgroup_mutex)
{
	/*
	 * Thanks to the entanglement with vfs inode locking, we can't walk
	 * the existing cgroups under cgroup_mutex and create files.
	 * Instead, we use css_for_each_descendant_pre() and drop RCU read
	 * lock before calling cgroup_addrm_files().
	 */
	mutex_lock(&cgroup_mutex);
}

static int cgroup_cfts_commit(struct cftype *cfts, bool is_add)
	__releases(&cgroup_mutex)
{
	LIST_HEAD(pending);
	struct cgroup_subsys *ss = cfts[0].ss;
	struct cgroup *root = &ss->root->top_cgroup;
	struct super_block *sb = ss->root->sb;
	struct dentry *prev = NULL;
	struct inode *inode;
	struct cgroup_subsys_state *css;
	u64 update_before;
	int ret = 0;

	/* %NULL @cfts indicates abort and don't bother if @ss isn't attached */
	if (!cfts || ss->root == &cgroup_dummy_root ||
	    !atomic_inc_not_zero(&sb->s_active)) {
		mutex_unlock(&cgroup_mutex);
		return 0;
	}

	/*
	 * All cgroups which are created after we drop cgroup_mutex will
	 * have the updated set of files, so we only need to update the
	 * cgroups created before the current @cgroup_serial_nr_next.
	 */
	update_before = cgroup_serial_nr_next;

	mutex_unlock(&cgroup_mutex);

	/* add/rm files for all cgroups created before */
	rcu_read_lock();
	css_for_each_descendant_pre(css, cgroup_css(root, ss->subsys_id)) {
		struct cgroup *cgrp = css->cgroup;

		if (cgroup_is_dead(cgrp))
			continue;

		inode = cgrp->dentry->d_inode;
		dget(cgrp->dentry);
		rcu_read_unlock();

		dput(prev);
		prev = cgrp->dentry;

		mutex_lock(&inode->i_mutex);
		mutex_lock(&cgroup_mutex);
		if (cgrp->serial_nr < update_before && !cgroup_is_dead(cgrp))
			ret = cgroup_addrm_files(cgrp, cfts, is_add);
		mutex_unlock(&cgroup_mutex);
		mutex_unlock(&inode->i_mutex);

		rcu_read_lock();
		if (ret)
			break;
	}
	rcu_read_unlock();
	dput(prev);
	deactivate_super(sb);
	return ret;
}

/**
 * cgroup_add_cftypes - add an array of cftypes to a subsystem
 * @ss: target cgroup subsystem
 * @cfts: zero-length name terminated array of cftypes
 *
 * Register @cfts to @ss.  Files described by @cfts are created for all
 * existing cgroups to which @ss is attached and all future cgroups will
 * have them too.  This function can be called anytime whether @ss is
 * attached or not.
 *
 * Returns 0 on successful registration, -errno on failure.  Note that this
 * function currently returns 0 as long as @cfts registration is successful
 * even if some file creation attempts on existing cgroups fail.
 */
int cgroup_add_cftypes(struct cgroup_subsys *ss, struct cftype *cfts)
{
	struct cftype_set *set;
	struct cftype *cft;
	int ret;

	set = kzalloc(sizeof(*set), GFP_KERNEL);
	if (!set)
		return -ENOMEM;

	for (cft = cfts; cft->name[0] != '\0'; cft++)
		cft->ss = ss;

	cgroup_cfts_prepare();
	set->cfts = cfts;
	list_add_tail(&set->node, &ss->cftsets);
	ret = cgroup_cfts_commit(cfts, true);
	if (ret)
		cgroup_rm_cftypes(cfts);
	return ret;
}
EXPORT_SYMBOL_GPL(cgroup_add_cftypes);
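
/*
 * Illustrative sketch (not part of the original file): a controller
 * would typically register its control files once at init time, e.g.
 *
 *	static struct cftype my_files[] = {
 *		{
 *			.name = "stat",
 *			.read_seq_string = my_stat_show,
 *		},
 *		{ }	<-- zero-length name terminates the array
 *	};
 *
 *	ret = cgroup_add_cftypes(&my_subsys, my_files);
 *
 * my_subsys, my_files and my_stat_show() are hypothetical names; on
 * failure the partially registered set is torn down again via
 * cgroup_rm_cftypes(), as the code above shows.
 */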

/**
 * cgroup_rm_cftypes - remove an array of cftypes from a subsystem
 * @cfts: zero-length name terminated array of cftypes
 *
 * Unregister @cfts.  Files described by @cfts are removed from all
 * existing cgroups and all future cgroups won't have them either.  This
 * function can be called anytime whether @cfts' subsys is attached or not.
 *
 * Returns 0 on successful unregistration, -ENOENT if @cfts is not
 * registered.
 */
int cgroup_rm_cftypes(struct cftype *cfts)
{
	struct cftype_set *set;

	if (!cfts || !cfts[0].ss)
		return -ENOENT;

	cgroup_cfts_prepare();

	list_for_each_entry(set, &cfts[0].ss->cftsets, node) {
		if (set->cfts == cfts) {
			list_del(&set->node);
			kfree(set);
			cgroup_cfts_commit(cfts, false);
			return 0;
		}
	}

	cgroup_cfts_commit(NULL, false);
	return -ENOENT;
}

/**
 * cgroup_task_count - count the number of tasks in a cgroup.
 * @cgrp: the cgroup in question
 *
 * Return the number of tasks in the cgroup.
 */
int cgroup_task_count(const struct cgroup *cgrp)
{
	int count = 0;
	struct cgrp_cset_link *link;

	read_lock(&css_set_lock);
	list_for_each_entry(link, &cgrp->cset_links, cset_link)
		count += atomic_read(&link->cset->refcount);
	read_unlock(&css_set_lock);
	return count;
}

/*
 * To reduce the fork() overhead for systems that are not actually using
 * their cgroups capability, we don't maintain the lists running through
 * each css_set to its tasks until we see the list actually used - in other
 * words after the first call to css_task_iter_start().
 */
static void cgroup_enable_task_cg_lists(void)
{
	struct task_struct *p, *g;
	write_lock(&css_set_lock);
	use_task_css_set_links = 1;
	/*
	 * We need tasklist_lock because RCU is not safe against
	 * while_each_thread(). Besides, a forking task that has passed
	 * cgroup_post_fork() without seeing use_task_css_set_links = 1
	 * is not guaranteed to have its child immediately visible in the
	 * tasklist if we walk through it with RCU.
	 */
	read_lock(&tasklist_lock);
	do_each_thread(g, p) {
		task_lock(p);
		/*
		 * We should check if the process is exiting, otherwise
		 * it will race with cgroup_exit() in that the list
		 * entry won't be deleted though the process has exited.
		 */
		if (!(p->flags & PF_EXITING) && list_empty(&p->cg_list))
			list_add(&p->cg_list, &task_css_set(p)->tasks);
		task_unlock(p);
	} while_each_thread(g, p);
	read_unlock(&tasklist_lock);
	write_unlock(&css_set_lock);
}

/**
 * css_next_child - find the next child of a given css
 * @pos_css: the current position (%NULL to initiate traversal)
 * @parent_css: css whose children to walk
 *
 * This function returns the next child of @parent_css and should be called
 * under RCU read lock.  The only requirement is that @parent_css and
 * @pos_css are accessible.  The next sibling is guaranteed to be returned
 * regardless of their states.
 */
struct cgroup_subsys_state *
css_next_child(struct cgroup_subsys_state *pos_css,
	       struct cgroup_subsys_state *parent_css)
{
	struct cgroup *pos = pos_css ? pos_css->cgroup : NULL;
	struct cgroup *cgrp = parent_css->cgroup;
	struct cgroup *next;

	WARN_ON_ONCE(!rcu_read_lock_held());

	/*
	 * @pos could already have been removed.  Once a cgroup is removed,
	 * its ->sibling.next is no longer updated when its next sibling
	 * changes.  As CGRP_DEAD assertion is serialized and happens
	 * before the cgroup is taken off the ->sibling list, if we see it
	 * unasserted, it's guaranteed that the next sibling hasn't
	 * finished its grace period even if it's already removed, and thus
	 * safe to dereference from this RCU critical section.  If
	 * ->sibling.next is inaccessible, cgroup_is_dead() is guaranteed
	 * to be visible as %true here.
	 *
	 * If @pos is dead, its next pointer can't be dereferenced;
	 * however, as each cgroup is given a monotonically increasing
	 * unique serial number and always appended to the sibling list,
	 * the next one can be found by walking the parent's children until
	 * we see a cgroup with higher serial number than @pos's.  While
	 * this path can be slower, it's taken only when either the current
	 * cgroup is removed or iteration and removal race.
	 */
	if (!pos) {
		next = list_entry_rcu(cgrp->children.next, struct cgroup, sibling);
	} else if (likely(!cgroup_is_dead(pos))) {
		next = list_entry_rcu(pos->sibling.next, struct cgroup, sibling);
	} else {
		list_for_each_entry_rcu(next, &cgrp->children, sibling)
			if (next->serial_nr > pos->serial_nr)
				break;
	}

	if (&next->sibling == &cgrp->children)
		return NULL;

	if (parent_css->ss)
		return cgroup_css(next, parent_css->ss->subsys_id);
	else
		return &next->dummy_css;
}
EXPORT_SYMBOL_GPL(css_next_child);
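
/*
 * Illustrative sketch (not part of the original file): walking the
 * children of a css under RCU via the css_for_each_child() wrapper,
 * which is built on css_next_child():
 *
 *	struct cgroup_subsys_state *child;
 *
 *	rcu_read_lock();
 *	css_for_each_child(child, parent_css)
 *		inspect(child);
 *	rcu_read_unlock();
 *
 * inspect() is a hypothetical, non-sleeping helper.
 */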

/**
 * css_next_descendant_pre - find the next descendant for pre-order walk
 * @pos: the current position (%NULL to initiate traversal)
 * @root: css whose descendants to walk
 *
 * To be used by css_for_each_descendant_pre().  Find the next descendant
 * to visit for pre-order traversal of @root's descendants.  @root is
 * included in the iteration and the first node to be visited.
 *
 * While this function requires RCU read locking, it doesn't require the
 * whole traversal to be contained in a single RCU critical section.  This
 * function will return the correct next descendant as long as both @pos
 * and @root are accessible and @pos is a descendant of @root.
 */
struct cgroup_subsys_state *
css_next_descendant_pre(struct cgroup_subsys_state *pos,
			struct cgroup_subsys_state *root)
{
	struct cgroup_subsys_state *next;

	WARN_ON_ONCE(!rcu_read_lock_held());

	/* if first iteration, visit @root */
	if (!pos)
		return root;

	/* visit the first child if exists */
	next = css_next_child(NULL, pos);
	if (next)
		return next;

	/* no child, visit my or the closest ancestor's next sibling */
	while (pos != root) {
		next = css_next_child(pos, css_parent(pos));
		if (next)
			return next;
		pos = css_parent(pos);
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(css_next_descendant_pre);
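
/*
 * Illustrative sketch (not part of the original file): a pre-order
 * subtree walk via the css_for_each_descendant_pre() wrapper:
 *
 *	struct cgroup_subsys_state *pos;
 *
 *	rcu_read_lock();
 *	css_for_each_descendant_pre(pos, root_css)
 *		update_stats(pos);
 *	rcu_read_unlock();
 *
 * @root_css is visited first; update_stats() is a hypothetical,
 * non-sleeping helper.
 */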

/**
 * css_rightmost_descendant - return the rightmost descendant of a css
 * @pos: css of interest
 *
 * Return the rightmost descendant of @pos.  If there's no descendant, @pos
 * is returned.  This can be used during pre-order traversal to skip
 * subtree of @pos.
 *
 * While this function requires RCU read locking, it doesn't require the
 * whole traversal to be contained in a single RCU critical section.  This
 * function will return the correct rightmost descendant as long as @pos is
 * accessible.
 */
struct cgroup_subsys_state *
css_rightmost_descendant(struct cgroup_subsys_state *pos)
{
	struct cgroup_subsys_state *last, *tmp;

	WARN_ON_ONCE(!rcu_read_lock_held());

	do {
		last = pos;
		/* ->prev isn't RCU safe, walk ->next till the end */
		pos = NULL;
		css_for_each_child(tmp, last)
			pos = tmp;
	} while (pos);

	return last;
}
EXPORT_SYMBOL_GPL(css_rightmost_descendant);

static struct cgroup_subsys_state *
css_leftmost_descendant(struct cgroup_subsys_state *pos)
{
	struct cgroup_subsys_state *last;

	do {
		last = pos;
		pos = css_next_child(NULL, pos);
	} while (pos);

	return last;
}

/**
 * css_next_descendant_post - find the next descendant for post-order walk
 * @pos: the current position (%NULL to initiate traversal)
 * @root: css whose descendants to walk
 *
 * To be used by css_for_each_descendant_post().  Find the next descendant
 * to visit for post-order traversal of @root's descendants.  @root is
 * included in the iteration and the last node to be visited.
 *
 * While this function requires RCU read locking, it doesn't require the
 * whole traversal to be contained in a single RCU critical section.  This
 * function will return the correct next descendant as long as both @pos
 * and @root are accessible and @pos is a descendant of @root.
 */
struct cgroup_subsys_state *
css_next_descendant_post(struct cgroup_subsys_state *pos,
			 struct cgroup_subsys_state *root)
{
	struct cgroup_subsys_state *next;

	WARN_ON_ONCE(!rcu_read_lock_held());

	/* if first iteration, visit the leftmost descendant */
	if (!pos) {
		next = css_leftmost_descendant(root);
		return next != root ? next : NULL;
	}

	/* if we visited @root, we're done */
	if (pos == root)
		return NULL;

	/* if there's an unvisited sibling, visit its leftmost descendant */
	next = css_next_child(pos, css_parent(pos));
	if (next)
		return css_leftmost_descendant(next);

	/* no sibling left, visit parent */
	return css_parent(pos);
}
EXPORT_SYMBOL_GPL(css_next_descendant_post);

/**
 * css_advance_task_iter - advance a task iterator to the next css_set
 * @it: the iterator to advance
 *
 * Advance @it to the next css_set to walk.
 */
static void css_advance_task_iter(struct css_task_iter *it)
{
	struct list_head *l = it->cset_link;
	struct cgrp_cset_link *link;
	struct css_set *cset;

	/* Advance to the next non-empty css_set */
	do {
		l = l->next;
		if (l == &it->origin_css->cgroup->cset_links) {
			it->cset_link = NULL;
			return;
		}
		link = list_entry(l, struct cgrp_cset_link, cset_link);
		cset = link->cset;
	} while (list_empty(&cset->tasks));
	it->cset_link = l;
	it->task = cset->tasks.next;
}

/**
 * css_task_iter_start - initiate task iteration
 * @css: the css to walk tasks of
 * @it: the task iterator to use
 *
 * Initiate iteration through the tasks of @css.  The caller can call
 * css_task_iter_next() to walk through the tasks until the function
 * returns NULL.  On completion of iteration, css_task_iter_end() must be
 * called.
 *
 * Note that this function acquires a lock which is released when the
 * iteration finishes.  The caller can't sleep while iteration is in
 * progress.
 */
void css_task_iter_start(struct cgroup_subsys_state *css,
			 struct css_task_iter *it)
	__acquires(css_set_lock)
{
	/*
	 * The first time anyone tries to iterate across a css, we need to
	 * enable the list linking each css_set to its tasks, and fix up
	 * all existing tasks.
	 */
	if (!use_task_css_set_links)
		cgroup_enable_task_cg_lists();

	read_lock(&css_set_lock);

	it->origin_css = css;
	it->cset_link = &css->cgroup->cset_links;

	css_advance_task_iter(it);
}

/**
 * css_task_iter_next - return the next task for the iterator
 * @it: the task iterator being iterated
 *
 * The "next" function for task iteration.  @it should have been
 * initialized via css_task_iter_start().  Returns NULL when the iteration
 * reaches the end.
 */
struct task_struct *css_task_iter_next(struct css_task_iter *it)
{
	struct task_struct *res;
	struct list_head *l = it->task;
	struct cgrp_cset_link *link;

	/* If the iterator cg is NULL, we have no tasks */
	if (!it->cset_link)
		return NULL;
	res = list_entry(l, struct task_struct, cg_list);
	/* Advance iterator to find next entry */
	l = l->next;
	link = list_entry(it->cset_link, struct cgrp_cset_link, cset_link);
	if (l == &link->cset->tasks) {
		/*
		 * We reached the end of this task list - move on to the
		 * next cgrp_cset_link.
		 */
		css_advance_task_iter(it);
	} else {
		it->task = l;
	}
	return res;
}

/**
 * css_task_iter_end - finish task iteration
 * @it: the task iterator to finish
 *
 * Finish task iteration started by css_task_iter_start().
 */
void css_task_iter_end(struct css_task_iter *it)
	__releases(css_set_lock)
{
	read_unlock(&css_set_lock);
}
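
/*
 * Illustrative sketch (not part of the original file): the three
 * css_task_iter_*() calls are always used together, e.g.
 *
 *	struct css_task_iter it;
 *	struct task_struct *task;
 *	int count = 0;
 *
 *	css_task_iter_start(css, &it);
 *	while ((task = css_task_iter_next(&it)))
 *		count++;
 *	css_task_iter_end(&it);
 *
 * css_set_lock is read-held from start to end, so the loop body must
 * not sleep; count is a hypothetical local.
 */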

static inline int started_after_time(struct task_struct *t1,
				     struct timespec *time,
				     struct task_struct *t2)
{
	int start_diff = timespec_compare(&t1->start_time, time);
	if (start_diff > 0) {
		return 1;
	} else if (start_diff < 0) {
		return 0;
	} else {
		/*
		 * Arbitrarily, if two processes started at the same
		 * time, we'll say that the lower pointer value
		 * started first. Note that t2 may have exited by now
		 * so this may not be a valid pointer any longer, but
		 * that's fine - it still serves to distinguish
		 * between two tasks started (effectively) simultaneously.
		 */
		return t1 > t2;
	}
}

/*
 * This function is a callback from heap_insert() and is used to order
 * the heap.
 * In this case we order the heap in descending task start time.
 */
static inline int started_after(void *p1, void *p2)
{
	struct task_struct *t1 = p1;
	struct task_struct *t2 = p2;
	return started_after_time(t1, &t2->start_time, t2);
}

/**
 * css_scan_tasks - iterate through all the tasks in a css
 * @css: the css to iterate tasks of
 * @test: optional test callback
 * @process: process callback
 * @data: data passed to @test and @process
 * @heap: optional pre-allocated heap used for task iteration
 *
 * Iterate through all the tasks in @css, calling @test for each, and if it
 * returns %true, call @process for it also.
 *
 * @test may be NULL, meaning always true (select all tasks), which
 * effectively duplicates css_task_iter_{start,next,end}() but does not
 * lock css_set_lock for the call to @process.
 *
 * It is guaranteed that @process will act on every task that is a member
 * of @css for the duration of this call.  This function may or may not
 * call @process for tasks that exit or move to a different css during the
 * call, or are forked or move into the css during the call.
 *
 * Note that @test may be called with locks held, and may in some
 * situations be called multiple times for the same task, so it should be
 * cheap.
 *
 * If @heap is non-NULL, a heap has been pre-allocated and will be used for
 * heap operations (and its "gt" member will be overwritten), else a
 * temporary heap will be used (allocation of which may cause this function
 * to fail).
 */
int css_scan_tasks(struct cgroup_subsys_state *css,
		   bool (*test)(struct task_struct *, void *),
		   void (*process)(struct task_struct *, void *),
		   void *data, struct ptr_heap *heap)
3394 3395
{
	int retval, i;
3396
	struct css_task_iter it;
3397 3398 3399 3400 3401 3402
	struct task_struct *p, *dropped;
	/* Never dereference latest_task, since it's not refcounted */
	struct task_struct *latest_task = NULL;
	struct ptr_heap tmp_heap;
	struct timespec latest_time = { 0, 0 };

T
Tejun Heo 已提交
3403
	if (heap) {
3404 3405 3406 3407 3408 3409 3410 3411 3412 3413 3414 3415 3416
		/* The caller supplied our heap and pre-allocated its memory */
		heap->gt = &started_after;
	} else {
		/* We need to allocate our own heap memory */
		heap = &tmp_heap;
		retval = heap_init(heap, PAGE_SIZE, GFP_KERNEL, &started_after);
		if (retval)
			/* cannot allocate the heap */
			return retval;
	}

 again:
	/*
	 * Scan tasks in the css, using the @test callback to determine
	 * which are of interest, and invoking @process callback on the
	 * ones which need an update.  Since we don't want to hold any
	 * locks during the task updates, gather tasks to be processed in a
	 * heap structure.  The heap is sorted by descending task start
	 * time.  If the statically-sized heap fills up, we overflow tasks
	 * that started later, and in future iterations only consider tasks
	 * that started after the latest task in the previous pass. This
	 * guarantees forward progress and that we don't miss any tasks.
	 */
	heap->size = 0;
	css_task_iter_start(css, &it);
	while ((p = css_task_iter_next(&it))) {
		/*
		 * Only affect tasks that qualify per the caller's callback,
		 * if one was provided.
		 */
		if (test && !test(p, data))
			continue;
		/*
		 * Only process tasks that started after the last task
		 * we processed
		 */
		if (!started_after_time(p, &latest_time, latest_task))
			continue;
		dropped = heap_insert(heap, p);
		if (dropped == NULL) {
			/*
			 * The new task was inserted; the heap wasn't
			 * previously full
			 */
			get_task_struct(p);
		} else if (dropped != p) {
			/*
			 * The new task was inserted, and pushed out a
			 * different task
			 */
			get_task_struct(p);
			put_task_struct(dropped);
		}
		/*
		 * Else the new task was newer than anything already in
		 * the heap and wasn't inserted
		 */
	}
	css_task_iter_end(&it);

	if (heap->size) {
		for (i = 0; i < heap->size; i++) {
			struct task_struct *q = heap->ptrs[i];
			if (i == 0) {
				latest_time = q->start_time;
				latest_task = q;
			}
			/* Process the task per the caller's callback */
			process(q, data);
			put_task_struct(q);
		}
		/*
		 * If we had to process any tasks at all, scan again
		 * in case some of them were in the middle of forking
		 * children that didn't get processed.
		 * Not the most efficient way to do it, but it avoids
		 * having to take callback_mutex in the fork path
		 */
		goto again;
	}
	if (heap == &tmp_heap)
		heap_free(&tmp_heap);
	return 0;
}
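
/*
 * Illustrative sketch (not part of the original source): a caller would
 * pair a cheap @test with a heavier @process.  The callbacks below are
 * hypothetical, e.g. renicing every task currently in a css:
 *
 *	static bool test_needs_renice(struct task_struct *p, void *data)
 *	{
 *		return task_nice(p) != *(int *)data;	// cheap; may run locked
 *	}
 *
 *	static void process_renice(struct task_struct *p, void *data)
 *	{
 *		set_user_nice(p, *(int *)data);		// runs unlocked
 *	}
 *
 *	int nice = 5;
 *	css_scan_tasks(css, test_needs_renice, process_renice, &nice, NULL);
 *
 * Passing a NULL @test, as cgroup_transfer_tasks() below does, selects
 * every task in the css.
 */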

static void cgroup_transfer_one_task(struct task_struct *task, void *data)
{
	struct cgroup *new_cgroup = data;

	mutex_lock(&cgroup_mutex);
	cgroup_attach_task(new_cgroup, task, false);
	mutex_unlock(&cgroup_mutex);
}

/**
 * cgroup_transfer_tasks - move tasks from one cgroup to another
 * @to: cgroup to which the tasks will be moved
 * @from: cgroup in which the tasks currently reside
 */
int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from)
{
	return css_scan_tasks(&from->dummy_css, NULL, cgroup_transfer_one_task,
			      to, NULL);
}
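
/*
 * Illustrative note (not part of the original source): a typical use is a
 * controller evacuating a cgroup, e.g. moving everything in an emptied
 * cgroup up to its parent:
 *
 *	ret = cgroup_transfer_tasks(cgrp->parent, cgrp);
 *
 * With a NULL @test callback, every task currently in @from is attached
 * to @to, one task at a time under cgroup_mutex.
 */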

/*
 * Stuff for reading the 'tasks'/'procs' files.
 *
 * Reading this file can return large amounts of data if a cgroup has
 * *lots* of attached tasks. So it may need several calls to read(),
 * but we cannot guarantee that the information we produce is correct
 * unless we produce it entirely atomically.
 *
 */

/* which pidlist file are we talking about? */
enum cgroup_filetype {
	CGROUP_FILE_PROCS,
	CGROUP_FILE_TASKS,
};

/*
 * A pidlist is a list of pids that virtually represents the contents of one
 * of the cgroup files ("procs" or "tasks"). We keep a list of such pidlists,
 * a pair (one each for procs, tasks) for each pid namespace that's relevant
 * to the cgroup.
 */
struct cgroup_pidlist {
	/*
	 * used to find which pidlist is wanted. doesn't change as long as
	 * this particular list stays in the list.
	*/
	struct { enum cgroup_filetype type; struct pid_namespace *ns; } key;
	/* array of xids */
	pid_t *list;
	/* how many elements the above list has */
	int length;
	/* how many files are using the current array */
	int use_count;
	/* each of these stored in a list by its cgroup */
	struct list_head links;
	/* pointer to the cgroup we belong to, for list removal purposes */
	struct cgroup *owner;
	/* protects the other fields */
	struct rw_semaphore rwsem;
};

/*
 * The following two functions "fix" the issue where there are more pids
 * than kmalloc will give memory for; in such cases, we use vmalloc/vfree.
 * TODO: replace with a kernel-wide solution to this problem
 */
#define PIDLIST_TOO_LARGE(c) ((c) * sizeof(pid_t) > (PAGE_SIZE * 2))
static void *pidlist_allocate(int count)
{
	if (PIDLIST_TOO_LARGE(count))
		return vmalloc(count * sizeof(pid_t));
	else
		return kmalloc(count * sizeof(pid_t), GFP_KERNEL);
}
static void pidlist_free(void *p)
{
	if (is_vmalloc_addr(p))
		vfree(p);
	else
		kfree(p);
}

/*
 * pidlist_uniq - given a kmalloc()ed list, strip out all duplicate entries
 * Returns the number of unique elements.
 */
static int pidlist_uniq(pid_t *list, int length)
{
	int src, dest = 1;

	/*
	 * we presume the 0th element is unique, so src starts at 1. trivial
	 * edge cases first; no work needs to be done for either
	 */
	if (length == 0 || length == 1)
		return length;
	/* src and dest walk down the list; dest counts unique elements */
	for (src = 1; src < length; src++) {
		/* find next unique element */
		while (list[src] == list[src-1]) {
			src++;
			if (src == length)
				goto after;
		}
		/* dest always points to where the next unique element goes */
		list[dest] = list[src];
		dest++;
	}
after:
	return dest;
}
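
/*
 * Illustrative example (not part of the original source): pidlist_uniq()
 * expects an already-sorted list and compacts it in place, e.g.
 *
 *	pid_t list[] = { 3, 3, 7, 9, 9, 9 };
 *	int n = pidlist_uniq(list, 6);	// list begins { 3, 7, 9, ... }, n == 3
 *
 * Entries past the returned length are left in place but are no longer
 * part of the logical list.
 */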

static int cmppid(const void *a, const void *b)
{
	return *(pid_t *)a - *(pid_t *)b;
}

/*
 * find the appropriate pidlist for our purpose (given procs vs tasks)
 * returns with the lock on that pidlist already held, and takes care
 * of the use count, or returns NULL with no locks held if we're out of
 * memory.
 */
static struct cgroup_pidlist *cgroup_pidlist_find(struct cgroup *cgrp,
						  enum cgroup_filetype type)
{
	struct cgroup_pidlist *l;
	/* don't need task_nsproxy() if we're looking at ourselves */
	struct pid_namespace *ns = task_active_pid_ns(current);

	/*
	 * We can't drop the pidlist_mutex before taking the l->rwsem in case
	 * the last ref-holder is trying to remove l from the list at the same
	 * time. Holding the pidlist_mutex precludes somebody taking whichever
	 * list we find out from under us - compare cgroup_release_pid_array().
	 */
	mutex_lock(&cgrp->pidlist_mutex);
	list_for_each_entry(l, &cgrp->pidlists, links) {
		if (l->key.type == type && l->key.ns == ns) {
			/* make sure l doesn't vanish out from under us */
			down_write(&l->rwsem);
			mutex_unlock(&cgrp->pidlist_mutex);
			return l;
		}
	}
	/* entry not found; create a new one */
	l = kzalloc(sizeof(struct cgroup_pidlist), GFP_KERNEL);
	if (!l) {
		mutex_unlock(&cgrp->pidlist_mutex);
		return l;
	}
	init_rwsem(&l->rwsem);
	down_write(&l->rwsem);
	l->key.type = type;
	l->key.ns = get_pid_ns(ns);
	l->owner = cgrp;
	list_add(&l->links, &cgrp->pidlists);
	mutex_unlock(&cgrp->pidlist_mutex);
	return l;
}

/*
 * Load a cgroup's pidarray with either procs' tgids or tasks' pids
 */
static int pidlist_array_load(struct cgroup *cgrp, enum cgroup_filetype type,
			      struct cgroup_pidlist **lp)
{
	pid_t *array;
	int length;
	int pid, n = 0; /* used for populating the array */
	struct css_task_iter it;
	struct task_struct *tsk;
	struct cgroup_pidlist *l;

	/*
	 * If cgroup gets more users after we read count, we won't have
	 * enough space - tough.  This race is indistinguishable to the
	 * caller from the case that the additional cgroup users didn't
	 * show up until sometime later on.
	 */
	length = cgroup_task_count(cgrp);
	array = pidlist_allocate(length);
	if (!array)
		return -ENOMEM;
	/* now, populate the array */
	css_task_iter_start(&cgrp->dummy_css, &it);
	while ((tsk = css_task_iter_next(&it))) {
		if (unlikely(n == length))
			break;
		/* get tgid or pid for procs or tasks file respectively */
		if (type == CGROUP_FILE_PROCS)
			pid = task_tgid_vnr(tsk);
		else
			pid = task_pid_vnr(tsk);
		if (pid > 0) /* make sure to only use valid results */
			array[n++] = pid;
	}
	css_task_iter_end(&it);
	length = n;
	/* now sort & (if procs) strip out duplicates */
	sort(array, length, sizeof(pid_t), cmppid, NULL);
	if (type == CGROUP_FILE_PROCS)
		length = pidlist_uniq(array, length);
	l = cgroup_pidlist_find(cgrp, type);
	if (!l) {
		pidlist_free(array);
		return -ENOMEM;
	}
	/* store array, freeing old if necessary - lock already held */
	pidlist_free(l->list);
	l->list = array;
	l->length = length;
	l->use_count++;
	up_write(&l->rwsem);
	*lp = l;
	return 0;
}

/**
 * cgroupstats_build - build and fill cgroupstats
 * @stats: cgroupstats to fill information into
 * @dentry: A dentry entry belonging to the cgroup for which stats have
 * been requested.
 *
 * Build and fill cgroupstats so that taskstats can export it to user
 * space.
 */
int cgroupstats_build(struct cgroupstats *stats, struct dentry *dentry)
{
	int ret = -EINVAL;
	struct cgroup *cgrp;
	struct css_task_iter it;
	struct task_struct *tsk;

	/*
	 * Validate dentry by checking the superblock operations,
	 * and make sure it's a directory.
	 */
	if (dentry->d_sb->s_op != &cgroup_ops ||
	    !S_ISDIR(dentry->d_inode->i_mode))
		goto err;

	ret = 0;
	cgrp = dentry->d_fsdata;

	css_task_iter_start(&cgrp->dummy_css, &it);
	while ((tsk = css_task_iter_next(&it))) {
		switch (tsk->state) {
		case TASK_RUNNING:
			stats->nr_running++;
			break;
		case TASK_INTERRUPTIBLE:
			stats->nr_sleeping++;
			break;
		case TASK_UNINTERRUPTIBLE:
			stats->nr_uninterruptible++;
			break;
		case TASK_STOPPED:
			stats->nr_stopped++;
			break;
		default:
			if (delayacct_is_task_waiting_on_io(tsk))
				stats->nr_io_wait++;
			break;
		}
	}
	css_task_iter_end(&it);

err:
	return ret;
}

/*
 * seq_file methods for the tasks/procs files. The seq_file position is the
 * next pid to display; the seq_file iterator is a pointer to the pid
 * in the cgroup->l->list array.
 */

static void *cgroup_pidlist_start(struct seq_file *s, loff_t *pos)
{
	/*
	 * Initially we receive a position value that corresponds to
	 * one more than the last pid shown (or 0 on the first call or
	 * after a seek to the start). Use a binary-search to find the
	 * next pid to display, if any
	 */
	struct cgroup_pidlist *l = s->private;
	int index = 0, pid = *pos;
	int *iter;

	down_read(&l->rwsem);
	if (pid) {
		int end = l->length;

		while (index < end) {
			int mid = (index + end) / 2;
			if (l->list[mid] == pid) {
				index = mid;
				break;
			} else if (l->list[mid] <= pid)
				index = mid + 1;
			else
				end = mid;
		}
	}
	/* If we're off the end of the array, we're done */
	if (index >= l->length)
		return NULL;
	/* Update the abstract position to be the actual pid that we found */
	iter = l->list + index;
	*pos = *iter;
	return iter;
}
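
/*
 * Illustrative example (not part of the original source): if l->list is
 * { 3, 7, 9 } and the previous read() stopped after showing pid 7, the
 * saved position is 8 (last pid shown plus one).  The binary search above
 * then lands on 9, *pos is rewritten to 9, and iteration resumes there.
 */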

static void cgroup_pidlist_stop(struct seq_file *s, void *v)
{
	struct cgroup_pidlist *l = s->private;
	up_read(&l->rwsem);
}

static void *cgroup_pidlist_next(struct seq_file *s, void *v, loff_t *pos)
{
	struct cgroup_pidlist *l = s->private;
	pid_t *p = v;
	pid_t *end = l->list + l->length;
	/*
	 * Advance to the next pid in the array. If this goes off the
	 * end, we're done
	 */
	p++;
	if (p >= end) {
		return NULL;
	} else {
		*pos = *p;
		return p;
	}
}

static int cgroup_pidlist_show(struct seq_file *s, void *v)
{
	return seq_printf(s, "%d\n", *(int *)v);
}

/*
 * seq_operations functions for iterating on pidlists through seq_file -
 * independent of whether it's tasks or procs
 */
static const struct seq_operations cgroup_pidlist_seq_operations = {
	.start = cgroup_pidlist_start,
	.stop = cgroup_pidlist_stop,
	.next = cgroup_pidlist_next,
	.show = cgroup_pidlist_show,
};

static void cgroup_release_pid_array(struct cgroup_pidlist *l)
{
	/*
	 * the case where we're the last user of this particular pidlist will
	 * have us remove it from the cgroup's list, which entails taking the
	 * mutex. since in cgroup_pidlist_find() the pidlist's rwsem depends
	 * on cgroup->pidlist_mutex, we have to take pidlist_mutex first.
	 */
	mutex_lock(&l->owner->pidlist_mutex);
	down_write(&l->rwsem);
	BUG_ON(!l->use_count);
	if (!--l->use_count) {
		/* we're the last user if refcount is 0; remove and free */
		list_del(&l->links);
		mutex_unlock(&l->owner->pidlist_mutex);
		pidlist_free(l->list);
		put_pid_ns(l->key.ns);
		up_write(&l->rwsem);
		kfree(l);
		return;
	}
	mutex_unlock(&l->owner->pidlist_mutex);
	up_write(&l->rwsem);
}

static int cgroup_pidlist_release(struct inode *inode, struct file *file)
{
	struct cgroup_pidlist *l;
	if (!(file->f_mode & FMODE_READ))
		return 0;
	/*
	 * the seq_file will only be initialized if the file was opened for
	 * reading; hence we check if it's not null only in that case.
	 */
	l = ((struct seq_file *)file->private_data)->private;
	cgroup_release_pid_array(l);
	return seq_release(inode, file);
}

static const struct file_operations cgroup_pidlist_operations = {
	.read = seq_read,
	.llseek = seq_lseek,
	.write = cgroup_file_write,
	.release = cgroup_pidlist_release,
};

/*
 * The following functions handle opens on a file that displays a pidlist
 * (tasks or procs). Prepare an array of the process/thread IDs of whoever's
 * in the cgroup.
 */
/* helper function for the two below it */
static int cgroup_pidlist_open(struct file *file, enum cgroup_filetype type)
{
	struct cgroup *cgrp = __d_cgrp(file->f_dentry->d_parent);
	struct cgroup_pidlist *l;
	int retval;

	/* Nothing to do for write-only files */
	if (!(file->f_mode & FMODE_READ))
		return 0;

	/* have the array populated */
	retval = pidlist_array_load(cgrp, type, &l);
	if (retval)
		return retval;
	/* configure file information */
	file->f_op = &cgroup_pidlist_operations;

	retval = seq_open(file, &cgroup_pidlist_seq_operations);
	if (retval) {
		cgroup_release_pid_array(l);
		return retval;
	}
	((struct seq_file *)file->private_data)->private = l;
	return 0;
}
static int cgroup_tasks_open(struct inode *unused, struct file *file)
{
	return cgroup_pidlist_open(file, CGROUP_FILE_TASKS);
}
static int cgroup_procs_open(struct inode *unused, struct file *file)
{
	return cgroup_pidlist_open(file, CGROUP_FILE_PROCS);
}

static u64 cgroup_read_notify_on_release(struct cgroup_subsys_state *css,
					 struct cftype *cft)
{
	return notify_on_release(css->cgroup);
}

static int cgroup_write_notify_on_release(struct cgroup_subsys_state *css,
					  struct cftype *cft, u64 val)
{
	clear_bit(CGRP_RELEASABLE, &css->cgroup->flags);
	if (val)
		set_bit(CGRP_NOTIFY_ON_RELEASE, &css->cgroup->flags);
	else
		clear_bit(CGRP_NOTIFY_ON_RELEASE, &css->cgroup->flags);
	return 0;
}

/*
 * When dput() is called asynchronously, if umount has been done and
 * then deactivate_super() in cgroup_free_fn() kills the superblock,
 * there's a small window that vfs will see the root dentry with non-zero
 * refcnt and trigger BUG().
 *
 * That's why we hold a reference before dput() and drop it right after.
 */
static void cgroup_dput(struct cgroup *cgrp)
{
	struct super_block *sb = cgrp->root->sb;

	atomic_inc(&sb->s_active);
	dput(cgrp->dentry);
	deactivate_super(sb);
}

/*
 * Unregister event and free resources.
 *
 * Gets called from workqueue.
 */
static void cgroup_event_remove(struct work_struct *work)
{
	struct cgroup_event *event = container_of(work, struct cgroup_event,
			remove);
	struct cgroup_subsys_state *css = event->css;
	struct cgroup *cgrp = css->cgroup;

	remove_wait_queue(event->wqh, &event->wait);

	event->cft->unregister_event(css, event->cft, event->eventfd);

	/* Notify userspace the event is going away. */
	eventfd_signal(event->eventfd, 1);

	eventfd_ctx_put(event->eventfd);
	kfree(event);
	cgroup_dput(cgrp);
}

/*
 * Gets called on POLLHUP on eventfd when user closes it.
 *
 * Called with wqh->lock held and interrupts disabled.
 */
static int cgroup_event_wake(wait_queue_t *wait, unsigned mode,
		int sync, void *key)
{
	struct cgroup_event *event = container_of(wait,
			struct cgroup_event, wait);
	struct cgroup *cgrp = event->css->cgroup;
	unsigned long flags = (unsigned long)key;

	if (flags & POLLHUP) {
		/*
		 * If the event has been detached at cgroup removal, we
		 * can simply return knowing the other side will cleanup
		 * for us.
		 *
		 * We can't race against event freeing since the other
		 * side will require wqh->lock via remove_wait_queue(),
		 * which we hold.
		 */
		spin_lock(&cgrp->event_list_lock);
		if (!list_empty(&event->list)) {
			list_del_init(&event->list);
			/*
			 * We are in atomic context, but cgroup_event_remove()
			 * may sleep, so we have to call it in workqueue.
			 */
			schedule_work(&event->remove);
		}
		spin_unlock(&cgrp->event_list_lock);
	}

	return 0;
}

static void cgroup_event_ptable_queue_proc(struct file *file,
		wait_queue_head_t *wqh, poll_table *pt)
{
	struct cgroup_event *event = container_of(pt,
			struct cgroup_event, pt);

	event->wqh = wqh;
	add_wait_queue(wqh, &event->wait);
}

/*
 * Parse input and register new cgroup event handler.
 *
 * Input must be in format '<event_fd> <control_fd> <args>'.
 * Interpretation of args is defined by control file implementation.
 */
static int cgroup_write_event_control(struct cgroup_subsys_state *dummy_css,
				      struct cftype *cft, const char *buffer)
{
	struct cgroup *cgrp = dummy_css->cgroup;
	struct cgroup_event *event;
	struct cgroup *cgrp_cfile;
	unsigned int efd, cfd;
	struct file *efile;
	struct file *cfile;
	char *endp;
	int ret;

	efd = simple_strtoul(buffer, &endp, 10);
	if (*endp != ' ')
		return -EINVAL;
	buffer = endp + 1;

	cfd = simple_strtoul(buffer, &endp, 10);
	if ((*endp != ' ') && (*endp != '\0'))
		return -EINVAL;
	buffer = endp + 1;

	event = kzalloc(sizeof(*event), GFP_KERNEL);
	if (!event)
		return -ENOMEM;

	INIT_LIST_HEAD(&event->list);
	init_poll_funcptr(&event->pt, cgroup_event_ptable_queue_proc);
	init_waitqueue_func_entry(&event->wait, cgroup_event_wake);
	INIT_WORK(&event->remove, cgroup_event_remove);

	efile = eventfd_fget(efd);
	if (IS_ERR(efile)) {
		ret = PTR_ERR(efile);
		goto out_kfree;
	}

	event->eventfd = eventfd_ctx_fileget(efile);
	if (IS_ERR(event->eventfd)) {
		ret = PTR_ERR(event->eventfd);
		goto out_put_efile;
	}

	cfile = fget(cfd);
	if (!cfile) {
		ret = -EBADF;
		goto out_put_eventfd;
	}

	/* the process needs read permission on the control file */
	/* AV: shouldn't we check that it's been opened for read instead? */
	ret = inode_permission(file_inode(cfile), MAY_READ);
	if (ret < 0)
		goto out_put_cfile;

	event->cft = __file_cft(cfile);
	if (IS_ERR(event->cft)) {
		ret = PTR_ERR(event->cft);
		goto out_put_cfile;
	}

	if (!event->cft->ss) {
		ret = -EBADF;
		goto out_put_cfile;
	}

	/* determine the css of @cfile and associate @event with it */
	rcu_read_lock();

	ret = -EINVAL;
	event->css = cgroup_css(cgrp, event->cft->ss->subsys_id);
	if (event->css)
		ret = 0;

	rcu_read_unlock();
	if (ret)
		goto out_put_cfile;

	/*
	 * The file to be monitored must be in the same cgroup as
	 * cgroup.event_control is.
	 */
	cgrp_cfile = __d_cgrp(cfile->f_dentry->d_parent);
	if (cgrp_cfile != cgrp) {
		ret = -EINVAL;
		goto out_put_cfile;
	}

	if (!event->cft->register_event || !event->cft->unregister_event) {
		ret = -EINVAL;
		goto out_put_cfile;
	}

	ret = event->cft->register_event(event->css, event->cft,
			event->eventfd, buffer);
	if (ret)
		goto out_put_cfile;

	efile->f_op->poll(efile, &event->pt);

	/*
	 * Events should be removed after rmdir of cgroup directory, but before
	 * destroying subsystem state objects. Let's take reference to cgroup
	 * directory dentry to do that.
	 */
	dget(cgrp->dentry);

	spin_lock(&cgrp->event_list_lock);
	list_add(&event->list, &cgrp->event_list);
	spin_unlock(&cgrp->event_list_lock);

	fput(cfile);
	fput(efile);

	return 0;

out_put_cfile:
	fput(cfile);
out_put_eventfd:
	eventfd_ctx_put(event->eventfd);
out_put_efile:
	fput(efile);
out_kfree:
	kfree(event);

	return ret;
}
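
/*
 * Illustrative sketch (not part of the original source): from userspace,
 * registration in the "<event_fd> <control_fd> <args>" format parsed above
 * looks roughly like this (error handling omitted; paths and the threshold
 * argument are examples only):
 *
 *	uint64_t counter;
 *	char buf[64];
 *	int efd = eventfd(0, 0);
 *	int cfd = open("/sys/fs/cgroup/memory/foo/memory.usage_in_bytes",
 *		       O_RDONLY);
 *	int ecd = open("/sys/fs/cgroup/memory/foo/cgroup.event_control",
 *		       O_WRONLY);
 *
 *	snprintf(buf, sizeof(buf), "%d %d %s", efd, cfd, "1000000");
 *	write(ecd, buf, strlen(buf));	// arm the event
 *	read(efd, &counter, 8);		// blocks until the event fires
 *
 * How <args> is interpreted is entirely up to the control file's
 * register_event() implementation.
 */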

static u64 cgroup_clone_children_read(struct cgroup_subsys_state *css,
				      struct cftype *cft)
{
	return test_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags);
}

static int cgroup_clone_children_write(struct cgroup_subsys_state *css,
				       struct cftype *cft, u64 val)
{
	if (val)
		set_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags);
	else
		clear_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags);
	return 0;
}

static struct cftype cgroup_base_files[] = {
	{
		.name = "cgroup.procs",
		.open = cgroup_procs_open,
		.write_u64 = cgroup_procs_write,
		.release = cgroup_pidlist_release,
		.mode = S_IRUGO | S_IWUSR,
	},
	{
		.name = "cgroup.event_control",
		.write_string = cgroup_write_event_control,
		.mode = S_IWUGO,
	},
	{
		.name = "cgroup.clone_children",
		.flags = CFTYPE_INSANE,
		.read_u64 = cgroup_clone_children_read,
		.write_u64 = cgroup_clone_children_write,
	},
	{
		.name = "cgroup.sane_behavior",
		.flags = CFTYPE_ONLY_ON_ROOT,
		.read_seq_string = cgroup_sane_behavior_show,
	},

	/*
	 * Historical crazy stuff.  These don't have "cgroup."  prefix and
	 * don't exist if sane_behavior.  If you're depending on these, be
	 * prepared to be burned.
	 */
	{
		.name = "tasks",
		.flags = CFTYPE_INSANE,		/* use "procs" instead */
		.open = cgroup_tasks_open,
		.write_u64 = cgroup_tasks_write,
		.release = cgroup_pidlist_release,
		.mode = S_IRUGO | S_IWUSR,
	},
	{
		.name = "notify_on_release",
		.flags = CFTYPE_INSANE,
		.read_u64 = cgroup_read_notify_on_release,
		.write_u64 = cgroup_write_notify_on_release,
	},
	{
		.name = "release_agent",
		.flags = CFTYPE_INSANE | CFTYPE_ONLY_ON_ROOT,
		.read_seq_string = cgroup_release_agent_show,
		.write_string = cgroup_release_agent_write,
		.max_write_len = PATH_MAX,
	},
	{ }	/* terminate */
};

/**
 * cgroup_populate_dir - create subsys files in a cgroup directory
 * @cgrp: target cgroup
 * @subsys_mask: mask of the subsystem ids whose files should be added
 *
 * On failure, no file is added.
 */
static int cgroup_populate_dir(struct cgroup *cgrp, unsigned long subsys_mask)
{
	struct cgroup_subsys *ss;
	int i, ret = 0;

	/* process cftsets of each subsystem */
	for_each_subsys(ss, i) {
		struct cftype_set *set;

		if (!test_bit(i, &subsys_mask))
			continue;

		list_for_each_entry(set, &ss->cftsets, node) {
			ret = cgroup_addrm_files(cgrp, set->cfts, true);
			if (ret < 0)
				goto err;
		}
	}

	/* This cgroup is ready now */
	for_each_root_subsys(cgrp->root, ss) {
		struct cgroup_subsys_state *css = cgroup_css(cgrp, ss->subsys_id);
		struct css_id *id = rcu_dereference_protected(css->id, true);

		/*
		 * Update id->css pointer and make this css visible from
		 * CSS ID functions. This pointer will be dereferenced
		 * from RCU-read-side without locks.
		 */
		if (id)
			rcu_assign_pointer(id->css, css);
	}

	return 0;
err:
	cgroup_clear_dir(cgrp, subsys_mask);
	return ret;
}

/*
 * css destruction is four-stage process.
 *
 * 1. Destruction starts.  Killing of the percpu_ref is initiated.
 *    Implemented in kill_css().
 *
 * 2. When the percpu_ref is confirmed to be visible as killed on all CPUs
 *    and thus css_tryget() is guaranteed to fail, the css can be offlined
 *    by invoking offline_css().  After offlining, the base ref is put.
 *    Implemented in css_killed_work_fn().
 *
 * 3. When the percpu_ref reaches zero, the only possible remaining
 *    accessors are inside RCU read sections.  css_release() schedules the
 *    RCU callback.
 *
 * 4. After the grace period, the css can be freed.  Implemented in
 *    css_free_work_fn().
 *
 * It is actually hairier because both step 2 and 4 require process context
 * and thus involve punting to css->destroy_work adding two additional
 * steps to the already complex sequence.
 */
static void css_free_work_fn(struct work_struct *work)
{
	struct cgroup_subsys_state *css =
		container_of(work, struct cgroup_subsys_state, destroy_work);
	struct cgroup *cgrp = css->cgroup;

	if (css->parent)
		css_put(css->parent);

	css->ss->css_free(css);
	cgroup_dput(cgrp);
}

static void css_free_rcu_fn(struct rcu_head *rcu_head)
{
	struct cgroup_subsys_state *css =
		container_of(rcu_head, struct cgroup_subsys_state, rcu_head);

	/*
	 * css holds an extra ref to @cgrp->dentry which is put on the last
	 * css_put().  dput() requires process context which we don't have.
	 */
	INIT_WORK(&css->destroy_work, css_free_work_fn);
	schedule_work(&css->destroy_work);
}

static void css_release(struct percpu_ref *ref)
{
	struct cgroup_subsys_state *css =
		container_of(ref, struct cgroup_subsys_state, refcnt);

	call_rcu(&css->rcu_head, css_free_rcu_fn);
}

static void init_css(struct cgroup_subsys_state *css, struct cgroup_subsys *ss,
		     struct cgroup *cgrp)
{
	css->cgroup = cgrp;
	css->ss = ss;
	css->flags = 0;
	css->id = NULL;

	if (cgrp->parent)
		css->parent = cgroup_css(cgrp->parent, ss->subsys_id);
	else
		css->flags |= CSS_ROOT;

	BUG_ON(cgroup_css(cgrp, ss->subsys_id));
}

/* invoke ->css_online() on a new CSS and mark it online if successful */
static int online_css(struct cgroup_subsys_state *css)
{
	struct cgroup_subsys *ss = css->ss;
	int ret = 0;

	lockdep_assert_held(&cgroup_mutex);

	if (ss->css_online)
		ret = ss->css_online(css);
	if (!ret) {
		css->flags |= CSS_ONLINE;
		css->cgroup->nr_css++;
		rcu_assign_pointer(css->cgroup->subsys[ss->subsys_id], css);
	}
	return ret;
}

/* if the CSS is online, invoke ->css_offline() on it and mark it offline */
static void offline_css(struct cgroup_subsys_state *css)
{
	struct cgroup_subsys *ss = css->ss;

	lockdep_assert_held(&cgroup_mutex);

	if (!(css->flags & CSS_ONLINE))
		return;

	if (ss->css_offline)
		ss->css_offline(css);

	css->flags &= ~CSS_ONLINE;
	css->cgroup->nr_css--;
	RCU_INIT_POINTER(css->cgroup->subsys[ss->subsys_id], css);
}

/*
 * cgroup_create - create a cgroup
 * @parent: cgroup that will be parent of the new cgroup
 * @dentry: dentry of the new cgroup
 * @mode: mode to set on new inode
 *
 * Must be called with the mutex on the parent inode held
 */
static long cgroup_create(struct cgroup *parent, struct dentry *dentry,
			     umode_t mode)
{
	struct cgroup_subsys_state *css_ar[CGROUP_SUBSYS_COUNT] = { };
	struct cgroup *cgrp;
	struct cgroup_name *name;
	struct cgroupfs_root *root = parent->root;
	int err = 0;
	struct cgroup_subsys *ss;
	struct super_block *sb = root->sb;

	/* allocate the cgroup and its ID, 0 is reserved for the root */
	cgrp = kzalloc(sizeof(*cgrp), GFP_KERNEL);
	if (!cgrp)
		return -ENOMEM;

	name = cgroup_alloc_name(dentry);
	if (!name)
		goto err_free_cgrp;
	rcu_assign_pointer(cgrp->name, name);

	/*
	 * Temporarily set the pointer to NULL, so idr_find() won't return
	 * a half-baked cgroup.
	 */
	cgrp->id = idr_alloc(&root->cgroup_idr, NULL, 1, 0, GFP_KERNEL);
	if (cgrp->id < 0)
		goto err_free_name;

	/*
	 * Only live parents can have children.  Note that the liveness
	 * check isn't strictly necessary because cgroup_mkdir() and
	 * cgroup_rmdir() are fully synchronized by i_mutex; however, do it
	 * anyway so that locking is contained inside cgroup proper and we
	 * don't get nasty surprises if we ever grow another caller.
	 */
	if (!cgroup_lock_live_group(parent)) {
		err = -ENODEV;
		goto err_free_id;
	}

	/* Grab a reference on the superblock so the hierarchy doesn't
	 * get deleted on unmount if there are child cgroups.  This
	 * can be done outside cgroup_mutex, since the sb can't
	 * disappear while someone has an open control file on the
	 * fs */
	atomic_inc(&sb->s_active);

	init_cgroup_housekeeping(cgrp);

	dentry->d_fsdata = cgrp;
	cgrp->dentry = dentry;

	cgrp->parent = parent;
	cgrp->dummy_css.parent = &parent->dummy_css;
	cgrp->root = parent->root;

	if (notify_on_release(parent))
		set_bit(CGRP_NOTIFY_ON_RELEASE, &cgrp->flags);

	if (test_bit(CGRP_CPUSET_CLONE_CHILDREN, &parent->flags))
		set_bit(CGRP_CPUSET_CLONE_CHILDREN, &cgrp->flags);

	for_each_root_subsys(root, ss) {
		struct cgroup_subsys_state *css;

		css = ss->css_alloc(cgroup_css(parent, ss->subsys_id));
		if (IS_ERR(css)) {
			err = PTR_ERR(css);
			goto err_free_all;
		}
		css_ar[ss->subsys_id] = css;

		err = percpu_ref_init(&css->refcnt, css_release);
		if (err)
			goto err_free_all;

		init_css(css, ss, cgrp);

		if (ss->use_id) {
			err = alloc_css_id(css);
			if (err)
				goto err_free_all;
		}
	}

	/*
	 * Create directory.  cgroup_create_file() returns with the new
	 * directory locked on success so that it can be populated without
	 * dropping cgroup_mutex.
	 */
	err = cgroup_create_file(dentry, S_IFDIR | mode, sb);
	if (err < 0)
		goto err_free_all;
	lockdep_assert_held(&dentry->d_inode->i_mutex);

	cgrp->serial_nr = cgroup_serial_nr_next++;

	/* allocation complete, commit to creation */
	list_add_tail_rcu(&cgrp->sibling, &cgrp->parent->children);
	root->number_of_cgroups++;

	/* each css holds a ref to the cgroup's dentry and the parent css */
	for_each_root_subsys(root, ss) {
		struct cgroup_subsys_state *css = css_ar[ss->subsys_id];

		dget(dentry);
		css_get(css->parent);
	}

	/* hold a ref to the parent's dentry */
	dget(parent->dentry);

	/* creation succeeded, notify subsystems */
	for_each_root_subsys(root, ss) {
		struct cgroup_subsys_state *css = css_ar[ss->subsys_id];

		err = online_css(css);
		if (err)
			goto err_destroy;

		if (ss->broken_hierarchy && !ss->warned_broken_hierarchy &&
		    parent->parent) {
			pr_warning("cgroup: %s (%d) created nested cgroup for controller \"%s\" which has incomplete hierarchy support. Nested cgroups may change behavior in the future.\n",
				   current->comm, current->pid, ss->name);
			if (!strcmp(ss->name, "memory"))
				pr_warning("cgroup: \"memory\" requires setting use_hierarchy to 1 on the root.\n");
			ss->warned_broken_hierarchy = true;
		}
	}

	idr_replace(&root->cgroup_idr, cgrp, cgrp->id);

	err = cgroup_addrm_files(cgrp, cgroup_base_files, true);
	if (err)
		goto err_destroy;

	err = cgroup_populate_dir(cgrp, root->subsys_mask);
	if (err)
		goto err_destroy;

	mutex_unlock(&cgroup_mutex);
	mutex_unlock(&cgrp->dentry->d_inode->i_mutex);

	return 0;

err_free_all:
	for_each_root_subsys(root, ss) {
		struct cgroup_subsys_state *css = css_ar[ss->subsys_id];

		if (css) {
			percpu_ref_cancel_init(&css->refcnt);
			ss->css_free(css);
		}
	}
	mutex_unlock(&cgroup_mutex);
	/* Release the reference count that we took on the superblock */
	deactivate_super(sb);
err_free_id:
	idr_remove(&root->cgroup_idr, cgrp->id);
err_free_name:
	kfree(rcu_dereference_raw(cgrp->name));
err_free_cgrp:
	kfree(cgrp);
	return err;

err_destroy:
	cgroup_destroy_locked(cgrp);
	mutex_unlock(&cgroup_mutex);
	mutex_unlock(&dentry->d_inode->i_mutex);
	return err;
}

static int cgroup_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
{
	struct cgroup *c_parent = dentry->d_parent->d_fsdata;

	/* the vfs holds inode->i_mutex already */
	return cgroup_create(c_parent, dentry, mode | S_IFDIR);
}

/*
 * This is called when the refcnt of a css is confirmed to be killed.
 * css_tryget() is now guaranteed to fail.
 */
static void css_killed_work_fn(struct work_struct *work)
{
	struct cgroup_subsys_state *css =
		container_of(work, struct cgroup_subsys_state, destroy_work);
	struct cgroup *cgrp = css->cgroup;

	mutex_lock(&cgroup_mutex);

	/*
	 * css_tryget() is guaranteed to fail now.  Tell subsystems to
	 * initiate destruction.
	 */
	offline_css(css);

	/*
	 * If @cgrp is marked dead, it's waiting for refs of all css's to
	 * be disabled before proceeding to the second phase of cgroup
	 * destruction.  If we are the last one, kick it off.
	 */
	if (!cgrp->nr_css && cgroup_is_dead(cgrp))
		cgroup_destroy_css_killed(cgrp);

	mutex_unlock(&cgroup_mutex);

	/*
	 * Put the css refs from kill_css().  Each css holds an extra
	 * reference to the cgroup's dentry and cgroup removal proceeds
	 * regardless of css refs.  On the last put of each css, whenever
	 * that may be, the extra dentry ref is put so that dentry
	 * destruction happens only after all css's are released.
	 */
	css_put(css);
}

/* css kill confirmation processing requires process context, bounce */
static void css_killed_ref_fn(struct percpu_ref *ref)
{
	struct cgroup_subsys_state *css =
		container_of(ref, struct cgroup_subsys_state, refcnt);

	INIT_WORK(&css->destroy_work, css_killed_work_fn);
	schedule_work(&css->destroy_work);
}

/**
 * kill_css - destroy a css
 * @css: css to destroy
 *
 * This function initiates destruction of @css by removing cgroup interface
 * files and putting its base reference.  ->css_offline() will be invoked
 * asynchronously once css_tryget() is guaranteed to fail and when the
 * reference count reaches zero, @css will be released.
 */
static void kill_css(struct cgroup_subsys_state *css)
{
	cgroup_clear_dir(css->cgroup, 1 << css->ss->subsys_id);

	/*
	 * Killing would put the base ref, but we need to keep it alive
	 * until after ->css_offline().
	 */
	css_get(css);

	/*
	 * cgroup core guarantees that, by the time ->css_offline() is
	 * invoked, no new css reference will be given out via
	 * css_tryget().  We can't simply call percpu_ref_kill() and
	 * proceed to offlining css's because percpu_ref_kill() doesn't
	 * guarantee that the ref is seen as killed on all CPUs on return.
	 *
	 * Use percpu_ref_kill_and_confirm() to get notifications as each
	 * css is confirmed to be seen as killed on all CPUs.
	 */
	percpu_ref_kill_and_confirm(&css->refcnt, css_killed_ref_fn);
}

/**
 * cgroup_destroy_locked - the first stage of cgroup destruction
 * @cgrp: cgroup to be destroyed
 *
 * css's make use of percpu refcnts whose killing latency shouldn't be
 * exposed to userland and are RCU protected.  Also, cgroup core needs to
 * guarantee that css_tryget() won't succeed by the time ->css_offline() is
 * invoked.  To satisfy all the requirements, destruction is implemented in
 * the following two steps.
 *
 * s1. Verify @cgrp can be destroyed and mark it dying.  Remove all
 *     userland visible parts and start killing the percpu refcnts of
 *     css's.  Set up so that the next stage will be kicked off once all
 *     the percpu refcnts are confirmed to be killed.
 *
 * s2. Invoke ->css_offline(), mark the cgroup dead and proceed with the
 *     rest of destruction.  Once all cgroup references are gone, the
 *     cgroup is RCU-freed.
 *
 * This function implements s1.  After this step, @cgrp is gone as far as
 * the userland is concerned and a new cgroup with the same name may be
 * created.  As cgroup doesn't care about the names internally, this
 * doesn't cause any problem.
 */
static int cgroup_destroy_locked(struct cgroup *cgrp)
	__releases(&cgroup_mutex) __acquires(&cgroup_mutex)
{
	struct dentry *d = cgrp->dentry;
	struct cgroup_event *event, *tmp;
	struct cgroup_subsys *ss;
	bool empty;

	lockdep_assert_held(&d->d_inode->i_mutex);
	lockdep_assert_held(&cgroup_mutex);

	/*
	 * css_set_lock synchronizes access to ->cset_links and prevents
	 * @cgrp from being removed while __put_css_set() is in progress.
	 */
	read_lock(&css_set_lock);
	empty = list_empty(&cgrp->cset_links) && list_empty(&cgrp->children);
	read_unlock(&css_set_lock);
	if (!empty)
		return -EBUSY;

	/*
	 * Initiate massacre of all css's.  cgroup_destroy_css_killed()
	 * will be invoked to perform the rest of destruction once the
	 * percpu refs of all css's are confirmed to be killed.
	 */
	for_each_root_subsys(cgrp->root, ss)
		kill_css(cgroup_css(cgrp, ss->subsys_id));

	/*
	 * Mark @cgrp dead.  This prevents further task migration and child
	 * creation by disabling cgroup_lock_live_group().  Note that
	 * CGRP_DEAD assertion is depended upon by css_next_child() to
	 * resume iteration after dropping RCU read lock.  See
	 * css_next_child() for details.
	 */
	set_bit(CGRP_DEAD, &cgrp->flags);

	/* CGRP_DEAD is set, remove from ->release_list for the last time */
	raw_spin_lock(&release_list_lock);
	if (!list_empty(&cgrp->release_list))
		list_del_init(&cgrp->release_list);
	raw_spin_unlock(&release_list_lock);

	/*
	 * If @cgrp has css's attached, the second stage of cgroup
	 * destruction is kicked off from css_killed_work_fn() after the
	 * refs of all attached css's are killed.  If @cgrp doesn't have
	 * any css, we kick it off here.
	 */
	if (!cgrp->nr_css)
		cgroup_destroy_css_killed(cgrp);

	/*
	 * Clear the base files and remove @cgrp directory.  The removal
	 * puts the base ref but we aren't quite done with @cgrp yet, so
	 * hold onto it.
	 */
	cgroup_addrm_files(cgrp, cgroup_base_files, false);
	dget(d);
	cgroup_d_remove_dir(d);

	/*
	 * Unregister events and notify userspace.
	 * Notify userspace about cgroup removing only after rmdir of cgroup
	 * directory to avoid race between userspace and kernelspace.
	 */
	spin_lock(&cgrp->event_list_lock);
	list_for_each_entry_safe(event, tmp, &cgrp->event_list, list) {
		list_del_init(&event->list);
		schedule_work(&event->remove);
	}
	spin_unlock(&cgrp->event_list_lock);

	return 0;
}

/**
 * cgroup_destroy_css_killed - the second step of cgroup destruction
 * @cgrp: the cgroup being destroyed
 *
 * This function is invoked for a cgroup which is being destroyed after
 * all css's are offlined and performs the rest of destruction.  This is
 * the second step of destruction described in the comment above
 * cgroup_destroy_locked().
 */
static void cgroup_destroy_css_killed(struct cgroup *cgrp)
{
	struct cgroup *parent = cgrp->parent;
	struct dentry *d = cgrp->dentry;

	lockdep_assert_held(&cgroup_mutex);

	/* delete this cgroup from parent->children */
	list_del_rcu(&cgrp->sibling);

	/*
	 * We should remove the cgroup object from idr before its grace
	 * period starts, so we won't be looking up a cgroup while the
	 * cgroup is being freed.
	 */
	idr_remove(&cgrp->root->cgroup_idr, cgrp->id);
	cgrp->id = -1;

	dput(d);

	set_bit(CGRP_RELEASABLE, &parent->flags);
	check_for_release(parent);
}

static int cgroup_rmdir(struct inode *unused_dir, struct dentry *dentry)
{
	int ret;

	mutex_lock(&cgroup_mutex);
	ret = cgroup_destroy_locked(dentry->d_fsdata);
	mutex_unlock(&cgroup_mutex);

	return ret;
}

static void __init_or_module cgroup_init_cftsets(struct cgroup_subsys *ss)
{
	INIT_LIST_HEAD(&ss->cftsets);

	/*
	 * base_cftset is embedded in subsys itself, no need to worry about
	 * deregistration.
	 */
	if (ss->base_cftypes) {
		struct cftype *cft;

		for (cft = ss->base_cftypes; cft->name[0] != '\0'; cft++)
			cft->ss = ss;

		ss->base_cftset.cfts = ss->base_cftypes;
		list_add_tail(&ss->base_cftset.node, &ss->cftsets);
	}
}

static void __init cgroup_init_subsys(struct cgroup_subsys *ss)
{
	struct cgroup_subsys_state *css;

	printk(KERN_INFO "Initializing cgroup subsys %s\n", ss->name);

	mutex_lock(&cgroup_mutex);

	/* init base cftset */
	cgroup_init_cftsets(ss);

	/* Create the top cgroup state for this subsystem */
	list_add(&ss->sibling, &cgroup_dummy_root.subsys_list);
	ss->root = &cgroup_dummy_root;
	css = ss->css_alloc(cgroup_css(cgroup_dummy_top, ss->subsys_id));
	/* We don't handle early failures gracefully */
	BUG_ON(IS_ERR(css));
	init_css(css, ss, cgroup_dummy_top);

	/* Update the init_css_set to contain a subsys
	 * pointer to this state - since the subsystem is
	 * newly registered, all tasks and hence the
	 * init_css_set is in the subsystem's top cgroup. */
	init_css_set.subsys[ss->subsys_id] = css;

	need_forkexit_callback |= ss->fork || ss->exit;

	/* At system boot, before all subsystems have been
	 * registered, no tasks have been forked, so we don't
	 * need to invoke fork callbacks here. */
	BUG_ON(!list_empty(&init_task.tasks));

	BUG_ON(online_css(css));

	mutex_unlock(&cgroup_mutex);

	/* this function shouldn't be used with modular subsystems, since they
	 * need to register a subsys_id, among other things */
	BUG_ON(ss->module);
}

/**
 * cgroup_load_subsys: load and register a modular subsystem at runtime
 * @ss: the subsystem to load
 *
 * This function should be called in a modular subsystem's initcall. If the
 * subsystem is built as a module, it will be assigned a new subsys_id and set
 * up for use. If the subsystem is built-in anyway, work is delegated to the
 * simpler cgroup_init_subsys.
 */
int __init_or_module cgroup_load_subsys(struct cgroup_subsys *ss)
{
	struct cgroup_subsys_state *css;
	int i, ret;
	struct hlist_node *tmp;
	struct css_set *cset;
	unsigned long key;

	/* check name and function validity */
	if (ss->name == NULL || strlen(ss->name) > MAX_CGROUP_TYPE_NAMELEN ||
	    ss->css_alloc == NULL || ss->css_free == NULL)
		return -EINVAL;

	/*
	 * we don't support callbacks in modular subsystems. this check is
	 * before the ss->module check for consistency; a subsystem that could
	 * be a module should still have no callbacks even if the user isn't
	 * compiling it as one.
	 */
	if (ss->fork || ss->exit)
		return -EINVAL;

	/*
	 * an optionally modular subsystem is built-in: we want to do nothing,
	 * since cgroup_init_subsys will have already taken care of it.
	 */
	if (ss->module == NULL) {
		/* a sanity check */
		BUG_ON(cgroup_subsys[ss->subsys_id] != ss);
		return 0;
	}

	/* init base cftset */
	cgroup_init_cftsets(ss);

	mutex_lock(&cgroup_mutex);
	cgroup_subsys[ss->subsys_id] = ss;

	/*
	 * no ss->css_alloc seems to need anything important in the ss
	 * struct, so this can happen first (i.e. before the dummy root
	 * attachment).
	 */
	css = ss->css_alloc(cgroup_css(cgroup_dummy_top, ss->subsys_id));
	if (IS_ERR(css)) {
		/* failure case - need to deassign the cgroup_subsys[] slot. */
		cgroup_subsys[ss->subsys_id] = NULL;
		mutex_unlock(&cgroup_mutex);
		return PTR_ERR(css);
	}

	list_add(&ss->sibling, &cgroup_dummy_root.subsys_list);
	ss->root = &cgroup_dummy_root;

	/* our new subsystem will be attached to the dummy hierarchy. */
	init_css(css, ss, cgroup_dummy_top);
	/* init_idr must be after init_css() because it sets css->id. */
	if (ss->use_id) {
		ret = cgroup_init_idr(ss, css);
		if (ret)
			goto err_unload;
	}

	/*
	 * Now we need to entangle the css into the existing css_sets. unlike
	 * in cgroup_init_subsys, there are now multiple css_sets, so each one
	 * will need a new pointer to it; done by iterating the css_set_table.
	 * furthermore, modifying the existing css_sets will corrupt the hash
	 * table state, so each changed css_set will need its hash recomputed.
	 * this is all done under the css_set_lock.
	 */
	write_lock(&css_set_lock);
	hash_for_each_safe(css_set_table, i, tmp, cset, hlist) {
		/* skip entries that we already rehashed */
		if (cset->subsys[ss->subsys_id])
			continue;
		/* remove existing entry */
		hash_del(&cset->hlist);
		/* set new value */
		cset->subsys[ss->subsys_id] = css;
		/* recompute hash and restore entry */
		key = css_set_hash(cset->subsys);
		hash_add(css_set_table, &cset->hlist, key);
	}
	write_unlock(&css_set_lock);

	ret = online_css(css);
	if (ret)
		goto err_unload;

	/* success! */
	mutex_unlock(&cgroup_mutex);
	return 0;

err_unload:
	mutex_unlock(&cgroup_mutex);
	/* @ss can't be mounted here as try_module_get() would fail */
	cgroup_unload_subsys(ss);
	return ret;
}
EXPORT_SYMBOL_GPL(cgroup_load_subsys);

/**
 * cgroup_unload_subsys: unload a modular subsystem
 * @ss: the subsystem to unload
 *
 * This function should be called in a modular subsystem's exitcall. When this
 * function is invoked, the refcount on the subsystem's module will be 0, so
 * the subsystem will not be attached to any hierarchy.
 */
void cgroup_unload_subsys(struct cgroup_subsys *ss)
{
	struct cgrp_cset_link *link;

	BUG_ON(ss->module == NULL);

	/*
	 * we shouldn't be called if the subsystem is in use, and the use of
	 * try_module_get() in rebind_subsystems() should ensure that it
	 * doesn't start being used while we're killing it off.
	 */
	BUG_ON(ss->root != &cgroup_dummy_root);

	mutex_lock(&cgroup_mutex);

	offline_css(cgroup_css(cgroup_dummy_top, ss->subsys_id));

	if (ss->use_id)
		idr_destroy(&ss->idr);

	/* deassign the subsys_id */
	cgroup_subsys[ss->subsys_id] = NULL;

	/* remove subsystem from the dummy root's list of subsystems */
	list_del_init(&ss->sibling);

	/*
	 * disentangle the css from all css_sets attached to the dummy
	 * top. as in loading, we need to pay our respects to the hashtable
	 * gods.
	 */
	write_lock(&css_set_lock);
	list_for_each_entry(link, &cgroup_dummy_top->cset_links, cset_link) {
		struct css_set *cset = link->cset;
		unsigned long key;

		hash_del(&cset->hlist);
		cset->subsys[ss->subsys_id] = NULL;
		key = css_set_hash(cset->subsys);
		hash_add(css_set_table, &cset->hlist, key);
	}
	write_unlock(&css_set_lock);

	/*
	 * remove subsystem's css from the cgroup_dummy_top and free it -
	 * need to free before marking as null because ss->css_free needs
	 * the cgrp->subsys pointer to find its state. note that this
	 * also takes care of freeing the css_id.
	 */
	ss->css_free(cgroup_css(cgroup_dummy_top, ss->subsys_id));
	RCU_INIT_POINTER(cgroup_dummy_top->subsys[ss->subsys_id], NULL);

	mutex_unlock(&cgroup_mutex);
}
EXPORT_SYMBOL_GPL(cgroup_unload_subsys);
/**
 * cgroup_init_early - cgroup initialization at system boot
 *
 * Initialize cgroups at system boot, and initialize any
 * subsystems that request early init.
5049 5050 5051
 */
int __init cgroup_init_early(void)
{
5052
	struct cgroup_subsys *ss;
5053
	int i;
5054

5055
	atomic_set(&init_css_set.refcount, 1);
5056
	INIT_LIST_HEAD(&init_css_set.cgrp_links);
5057
	INIT_LIST_HEAD(&init_css_set.tasks);
5058
	INIT_HLIST_NODE(&init_css_set.hlist);
5059
	css_set_count = 1;
5060 5061
	init_cgroup_root(&cgroup_dummy_root);
	cgroup_root_count = 1;
5062
	RCU_INIT_POINTER(init_task.cgroups, &init_css_set);
5063

5064
	init_cgrp_cset_link.cset = &init_css_set;
5065 5066
	init_cgrp_cset_link.cgrp = cgroup_dummy_top;
	list_add(&init_cgrp_cset_link.cset_link, &cgroup_dummy_top->cset_links);
5067
	list_add(&init_cgrp_cset_link.cgrp_link, &init_css_set.cgrp_links);
5068

5069 5070
	/* at bootup time, we don't worry about modular subsystems */
	for_each_builtin_subsys(ss, i) {
5071 5072
		BUG_ON(!ss->name);
		BUG_ON(strlen(ss->name) > MAX_CGROUP_TYPE_NAMELEN);
5073 5074
		BUG_ON(!ss->css_alloc);
		BUG_ON(!ss->css_free);
5075
		if (ss->subsys_id != i) {
D
5077 5078 5079 5080 5081 5082 5083 5084 5085 5086 5087
			       ss->name, ss->subsys_id);
			BUG();
		}

		if (ss->early_init)
			cgroup_init_subsys(ss);
	}
	return 0;
}

/**
 * cgroup_init - cgroup initialization
 *
 * Register cgroup filesystem and /proc file, and initialize
 * any subsystems that didn't request early init.
5092 5093 5094
 */
int __init cgroup_init(void)
{
5095
	struct cgroup_subsys *ss;
5096
	unsigned long key;
5097
	int i, err;
5098 5099 5100 5101

	err = bdi_init(&cgroup_backing_dev_info);
	if (err)
		return err;
5102

5103
	for_each_builtin_subsys(ss, i) {
5104 5105
		if (!ss->early_init)
			cgroup_init_subsys(ss);
K
5107
			cgroup_init_idr(ss, init_css_set.subsys[ss->subsys_id]);
5108 5109
	}

5110
	/* allocate id for the dummy hierarchy */
T
	mutex_lock(&cgroup_root_mutex);

5114 5115 5116 5117
	/* Add init_css_set to the hash table */
	key = css_set_hash(init_css_set.subsys);
	hash_add(css_set_table, &init_css_set.hlist, key);

5118
	BUG_ON(cgroup_init_root_id(&cgroup_dummy_root, 0, 1));
5119

5120 5121 5122 5123
	err = idr_alloc(&cgroup_dummy_root.cgroup_idr, cgroup_dummy_top,
			0, 1, GFP_KERNEL);
	BUG_ON(err < 0);

T
	mutex_unlock(&cgroup_mutex);

5127 5128 5129 5130 5131 5132
	cgroup_kobj = kobject_create_and_add("cgroup", fs_kobj);
	if (!cgroup_kobj) {
		err = -ENOMEM;
		goto out;
	}

5133
	err = register_filesystem(&cgroup_fs_type);
5134 5135
	if (err < 0) {
		kobject_put(cgroup_kobj);
5136
		goto out;
5137
	}
5138

L
Li Zefan 已提交
5139
	proc_create("cgroups", 0, NULL, &proc_cgroupstats_operations);
5140

5141
out:
5142 5143 5144
	if (err)
		bdi_destroy(&cgroup_backing_dev_info);

5145 5146
	return err;
}
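
/*
 * Illustrative usage (an assumption, not part of this file): once
 * cgroup_init() has registered the "cgroup" filesystem, userspace can
 * mount a hierarchy with chosen subsystems, e.g.:
 *
 *	mount -t cgroup -o cpuset none /sys/fs/cgroup/cpuset
 *
 * The mount point is an arbitrary example; any empty directory works.
 */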

/*
 * proc_cgroup_show()
 *  - Print task's cgroup paths into seq_file, one line for each hierarchy
 *  - Used for /proc/<pid>/cgroup.
 *  - No need to task_lock(tsk) on this tsk->cgroup reference, as it
 *    doesn't really matter if tsk->cgroup changes after we read it,
 *    and we take cgroup_mutex, keeping cgroup_attach_task() from changing it
 *    anyway.  No need to check that tsk->cgroup != NULL, thanks to
 *    the_top_cgroup_hack in cgroup_exit(), which sets an exiting task's
 *    cgroup to top_cgroup.
 */
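
/*
 * For illustration (values are made up): a line emitted below for a
 * hierarchy carrying the cpu and cpuacct subsystems might read
 *
 *	3:cpuacct,cpu:/user/1000
 *
 * i.e. "<hierarchy id>:<subsystems>[,name=<name>]:<cgroup path>".
 */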

/* TODO: Use a proper seq_file iterator */
int proc_cgroup_show(struct seq_file *m, void *v)
{
	struct pid *pid;
	struct task_struct *tsk;
	char *buf;
	int retval;
	struct cgroupfs_root *root;

	retval = -ENOMEM;
	buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!buf)
		goto out;

	retval = -ESRCH;
	pid = m->private;
	tsk = get_pid_task(pid, PIDTYPE_PID);
	if (!tsk)
		goto out_free;

	retval = 0;

	mutex_lock(&cgroup_mutex);

	for_each_active_root(root) {
		struct cgroup_subsys *ss;
		struct cgroup *cgrp;
		int count = 0;

		seq_printf(m, "%d:", root->hierarchy_id);
		for_each_root_subsys(root, ss)
			seq_printf(m, "%s%s", count++ ? "," : "", ss->name);
		if (strlen(root->name))
			seq_printf(m, "%sname=%s", count ? "," : "",
				   root->name);
		seq_putc(m, ':');
		cgrp = task_cgroup_from_root(tsk, root);
		retval = cgroup_path(cgrp, buf, PAGE_SIZE);
		if (retval < 0)
			goto out_unlock;
		seq_puts(m, buf);
		seq_putc(m, '\n');
	}

out_unlock:
	mutex_unlock(&cgroup_mutex);
	put_task_struct(tsk);
out_free:
	kfree(buf);
out:
	return retval;
}

/* Display information about each subsystem and each hierarchy */
static int proc_cgroupstats_show(struct seq_file *m, void *v)
{
	struct cgroup_subsys *ss;
	int i;

	seq_puts(m, "#subsys_name\thierarchy\tnum_cgroups\tenabled\n");
	/*
	 * ideally we don't want subsystems moving around while we do this.
	 * cgroup_mutex is also necessary to guarantee an atomic snapshot of
	 * subsys/hierarchy state.
	 */
	mutex_lock(&cgroup_mutex);

	for_each_subsys(ss, i)
		seq_printf(m, "%s\t%d\t%d\t%d\n",
			   ss->name, ss->root->hierarchy_id,
			   ss->root->number_of_cgroups, !ss->disabled);

	mutex_unlock(&cgroup_mutex);
	return 0;
}
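
/*
 * Example /proc/cgroups output (illustrative values only), as produced
 * by the format strings above:
 *
 *	#subsys_name	hierarchy	num_cgroups	enabled
 *	cpuset	2	4	1
 *	cpu	3	1	1
 */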

static int cgroupstats_open(struct inode *inode, struct file *file)
{
	return single_open(file, proc_cgroupstats_show, NULL);
}

static const struct file_operations proc_cgroupstats_operations = {
	.open = cgroupstats_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

/**
 * cgroup_fork - attach newly forked task to its parent's cgroup.
 * @child: pointer to task_struct of the just-forked child process.
 *
 * Description: A task inherits its parent's cgroup at fork().
 *
 * A pointer to the shared css_set was automatically copied in
 * fork.c by dup_task_struct().  However, we ignore that copy, since
 * it was not made under the protection of RCU or cgroup_mutex, so
 * might no longer be a valid cgroup pointer.  cgroup_attach_task() might
 * have already changed current->cgroups, allowing the previously
 * referenced cgroup to be removed and freed.
 *
 * At the point that cgroup_fork() is called, 'current' is the parent
 * task, and the passed argument 'child' points to the child task.
 */
void cgroup_fork(struct task_struct *child)
{
	task_lock(current);
	get_css_set(task_css_set(current));
	child->cgroups = current->cgroups;
	task_unlock(current);
	INIT_LIST_HEAD(&child->cg_list);
}

/**
 * cgroup_post_fork - called on a new task after adding it to the task list
 * @child: the task in question
 *
 * Adds the task to the list running through its css_set if necessary and
 * calls the subsystem fork() callbacks.  Has to be after the task is
 * visible on the task list in case we race with the first call to
 * cgroup_task_iter_start() - to guarantee that the new task ends up on its
 * list.
 */
void cgroup_post_fork(struct task_struct *child)
{
	struct cgroup_subsys *ss;
	int i;

	/*
	 * use_task_css_set_links is set to 1 before we walk the tasklist
	 * under the tasklist_lock and we read it here after we added the child
	 * to the tasklist under the tasklist_lock as well. If the child wasn't
	 * yet in the tasklist when we walked through it from
	 * cgroup_enable_task_cg_lists(), then the use_task_css_set_links value
	 * should be visible now due to the paired locking and barriers implied
	 * by LOCK/UNLOCK: it is written before the tasklist_lock unlock
	 * in cgroup_enable_task_cg_lists() and read here after the tasklist_lock
	 * lock on fork.
	 */
	if (use_task_css_set_links) {
		write_lock(&css_set_lock);
		task_lock(child);
		if (list_empty(&child->cg_list))
			list_add(&child->cg_list, &task_css_set(child)->tasks);
		task_unlock(child);
		write_unlock(&css_set_lock);
	}

	/*
	 * Call ss->fork().  This must happen after @child is linked on
	 * css_set; otherwise, @child might change state between ->fork()
	 * and addition to css_set.
	 */
	if (need_forkexit_callback) {
		/*
		 * fork/exit callbacks are supported only for builtin
		 * subsystems, and the builtin section of the subsys
		 * array is immutable, so we don't need to lock the
		 * subsys array here. On the other hand, the modular section
		 * of the array can be freed at module unload, so we
		 * can't touch that.
		 */
		for_each_builtin_subsys(ss, i)
			if (ss->fork)
				ss->fork(child);
	}
}

/**
 * cgroup_exit - detach cgroup from exiting task
 * @tsk: pointer to task_struct of exiting process
 * @run_callbacks: run exit callbacks?
 *
 * Description: Detach cgroup from @tsk and release it.
 *
 * Note that cgroups marked notify_on_release force every task in
 * them to take the global cgroup_mutex mutex when exiting.
 * This could impact scaling on very large systems.  Be reluctant to
 * use notify_on_release cgroups where very high task exit scaling
 * is required on large systems.
 *
 * the_top_cgroup_hack:
 *
 *    Set the exiting task's cgroup to the root cgroup (top_cgroup).
 *
 *    We call cgroup_exit() while the task is still competent to
 *    handle notify_on_release(), then leave the task attached to the
 *    root cgroup in each hierarchy for the remainder of its exit.
 *
 *    To do this properly, we would increment the reference count on
 *    top_cgroup, and near the very end of the kernel/exit.c do_exit()
 *    code we would add a second cgroup function call, to drop that
 *    reference.  This would just create an unnecessary hot spot on
 *    the top_cgroup reference count, to no avail.
 *
 *    Normally, holding a reference to a cgroup without bumping its
 *    count is unsafe.  The cgroup could go away, or someone could
 *    attach us to a different cgroup, decrementing the count on
 *    the first cgroup that we never incremented.  But in this case,
 *    top_cgroup isn't going away, and either the task has PF_EXITING
 *    set, which wards off any cgroup_attach_task() attempts, or the
 *    task is a failed fork, never visible to cgroup_attach_task().
 */
void cgroup_exit(struct task_struct *tsk, int run_callbacks)
{
	struct cgroup_subsys *ss;
	struct css_set *cset;
	int i;

	/*
	 * Unlink from the css_set task list if necessary.
	 * Optimistically check cg_list before taking
	 * css_set_lock.
	 */
	if (!list_empty(&tsk->cg_list)) {
		write_lock(&css_set_lock);
		if (!list_empty(&tsk->cg_list))
			list_del_init(&tsk->cg_list);
		write_unlock(&css_set_lock);
	}

	/* Reassign the task to the init_css_set. */
	task_lock(tsk);
	cset = task_css_set(tsk);
	RCU_INIT_POINTER(tsk->cgroups, &init_css_set);

	if (run_callbacks && need_forkexit_callback) {
		/*
		 * fork/exit callbacks are supported only for builtin
		 * subsystems, see cgroup_post_fork() for details.
		 */
		for_each_builtin_subsys(ss, i) {
			if (ss->exit) {
				struct cgroup_subsys_state *old_css = cset->subsys[i];
				struct cgroup_subsys_state *css = task_css(tsk, i);

				ss->exit(css, old_css, tsk);
			}
		}
	}
	task_unlock(tsk);

	put_css_set_taskexit(cset);
}

static void check_for_release(struct cgroup *cgrp)
{
	if (cgroup_is_releasable(cgrp) &&
	    list_empty(&cgrp->cset_links) && list_empty(&cgrp->children)) {
		/*
		 * Control Group is currently removable. If it's not
		 * already queued for a userspace notification, queue
		 * it now.
		 */
		int need_schedule_work = 0;

		raw_spin_lock(&release_list_lock);
		if (!cgroup_is_dead(cgrp) &&
		    list_empty(&cgrp->release_list)) {
			list_add(&cgrp->release_list, &release_list);
			need_schedule_work = 1;
		}
		raw_spin_unlock(&release_list_lock);
		if (need_schedule_work)
			schedule_work(&release_agent_work);
	}
}

/*
 * Notify userspace when a cgroup is released, by running the
 * configured release agent with the name of the cgroup (path
 * relative to the root of cgroup file system) as the argument.
 *
 * Most likely, this user command will try to rmdir this cgroup.
 *
 * This races with the possibility that some other task will be
 * attached to this cgroup before it is removed, or that some other
 * user task will 'mkdir' a child cgroup of this cgroup.  That's ok.
 * The presumed 'rmdir' will fail quietly if this cgroup is no longer
 * unused, and this cgroup will be reprieved from its death sentence,
 * to continue to serve a useful existence.  Next time it's released,
 * we will get notified again, if it still has 'notify_on_release' set.
 *
 * The final arg to call_usermodehelper() is UMH_WAIT_EXEC, which
 * means only wait until the task is successfully execve()'d.  The
 * separate release agent task is forked by call_usermodehelper(),
 * then control in this thread returns here, without waiting for the
 * release agent task.  We don't bother to wait because the caller of
 * this routine has no use for the exit status of the release agent
 * task, so no sense holding our caller up for that.
 */
static void cgroup_release_agent(struct work_struct *work)
{
	BUG_ON(work != &release_agent_work);
	mutex_lock(&cgroup_mutex);
	raw_spin_lock(&release_list_lock);
	while (!list_empty(&release_list)) {
		char *argv[3], *envp[3];
		int i;
		char *pathbuf = NULL, *agentbuf = NULL;
		struct cgroup *cgrp = list_entry(release_list.next,
						    struct cgroup,
						    release_list);
		list_del_init(&cgrp->release_list);
		raw_spin_unlock(&release_list_lock);
		pathbuf = kmalloc(PAGE_SIZE, GFP_KERNEL);
		if (!pathbuf)
			goto continue_free;
		if (cgroup_path(cgrp, pathbuf, PAGE_SIZE) < 0)
			goto continue_free;
		agentbuf = kstrdup(cgrp->root->release_agent_path, GFP_KERNEL);
		if (!agentbuf)
			goto continue_free;

		i = 0;
		argv[i++] = agentbuf;
		argv[i++] = pathbuf;
		argv[i] = NULL;

		i = 0;
		/* minimal command environment */
		envp[i++] = "HOME=/";
		envp[i++] = "PATH=/sbin:/bin:/usr/sbin:/usr/bin";
		envp[i] = NULL;

		/*
		 * Drop the lock while we invoke the usermode helper,
		 * since the exec could involve hitting disk and hence
		 * be a slow process.
		 */
		mutex_unlock(&cgroup_mutex);
		call_usermodehelper(argv[0], argv, envp, UMH_WAIT_EXEC);
		mutex_lock(&cgroup_mutex);
 continue_free:
		kfree(pathbuf);
		kfree(agentbuf);
		raw_spin_lock(&release_list_lock);
	}
	raw_spin_unlock(&release_list_lock);
	mutex_unlock(&cgroup_mutex);
}
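
/*
 * Illustrative example (an assumption, not from this file): a minimal
 * release agent can be a shell script; it is invoked with the cgroup's
 * path, relative to the hierarchy root, as its single argument:
 *
 *	#!/bin/sh
 *	# /sys/fs/cgroup is an assumed mount point for the hierarchy
 *	rmdir "/sys/fs/cgroup$1"
 */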

static int __init cgroup_disable(char *str)
{
	struct cgroup_subsys *ss;
	char *token;
	int i;

	while ((token = strsep(&str, ",")) != NULL) {
		if (!*token)
			continue;

		/*
		 * cgroup_disable, being at boot time, can't know about
		 * module subsystems, so we don't worry about them.
		 */
		for_each_builtin_subsys(ss, i) {
			if (!strcmp(token, ss->name)) {
				ss->disabled = 1;
				printk(KERN_INFO "Disabling %s control group"
					" subsystem\n", ss->name);
				break;
			}
		}
	}
	return 1;
}
__setup("cgroup_disable=", cgroup_disable);
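
/*
 * Example: booting with "cgroup_disable=memory" on the kernel command
 * line keeps the memory controller off every hierarchy; several
 * subsystem names may be given, separated by commas.
 */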

/*
 * Functions for CSS ID.
 */

/* to get an ID other than 0, this should be called when !cgroup_is_dead() */
unsigned short css_id(struct cgroup_subsys_state *css)
{
	struct css_id *cssid;

	/*
	 * This css_id() can return a correct value when someone has a
	 * refcnt on this css or this is under rcu_read_lock(). Once
	 * css->id is allocated, it's unchanged until freed.
	 */
	cssid = rcu_dereference_raw(css->id);

	if (cssid)
		return cssid->id;
	return 0;
}
EXPORT_SYMBOL_GPL(css_id);

/**
 * css_is_ancestor - test "root" css is an ancestor of "child"
 * @child: the css to be tested.
 * @root: the css supposed to be an ancestor of the child.
 *
 * Returns true if "root" is an ancestor of "child" in its hierarchy. Because
 * this function reads css->id, the caller must hold rcu_read_lock().
 * But, considering usual usage, the csses should be valid objects after test.
 * Assuming that the caller will do some action to the child if this
 * returns true, the caller must take "child"'s reference count.
 * If "child" is a valid object and this returns true, "root" is valid, too.
 */

bool css_is_ancestor(struct cgroup_subsys_state *child,
		    const struct cgroup_subsys_state *root)
{
	struct css_id *child_id;
	struct css_id *root_id;

	child_id = rcu_dereference(child->id);
	if (!child_id)
		return false;
	root_id = rcu_dereference(root->id);
	if (!root_id)
		return false;
	if (child_id->depth < root_id->depth)
		return false;
	if (child_id->stack[root_id->depth] != root_id->id)
		return false;
	return true;
}
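
/*
 * Note on the encoding checked above: each css_id records the ids of
 * all of its ancestors in ->stack[], indexed by depth.  With made-up
 * ids, if the root's css_id has stack {7} at depth 0 and the child's
 * has {7, 4, 9} at depth 2, then child_id->stack[root_id->depth] ==
 * root_id->id and root is an ancestor.
 */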

void free_css_id(struct cgroup_subsys *ss, struct cgroup_subsys_state *css)
{
	struct css_id *id = rcu_dereference_protected(css->id, true);

	/* When this is called before css_id initialization, id can be NULL */
	if (!id)
		return;

	BUG_ON(!ss->use_id);

	rcu_assign_pointer(id->css, NULL);
	rcu_assign_pointer(css->id, NULL);
	spin_lock(&ss->id_lock);
	idr_remove(&ss->idr, id->id);
	spin_unlock(&ss->id_lock);
	kfree_rcu(id, rcu_head);
}
EXPORT_SYMBOL_GPL(free_css_id);

/*
 * This is called by init or create(). Then, calls to this function are
 * always serialized (by cgroup_mutex at create()).
 */

static struct css_id *get_new_cssid(struct cgroup_subsys *ss, int depth)
{
	struct css_id *newid;
	int ret, size;

	BUG_ON(!ss->use_id);

	size = sizeof(*newid) + sizeof(unsigned short) * (depth + 1);
	newid = kzalloc(size, GFP_KERNEL);
	if (!newid)
		return ERR_PTR(-ENOMEM);

	idr_preload(GFP_KERNEL);
	spin_lock(&ss->id_lock);
	/* Don't use 0; this allocates an ID in the range 1-65535 */
	ret = idr_alloc(&ss->idr, newid, 1, CSS_ID_MAX + 1, GFP_NOWAIT);
	spin_unlock(&ss->id_lock);
	idr_preload_end();

	/* Returns an error when there is no free space for a new ID. */
	if (ret < 0)
		goto err_out;

	newid->id = ret;
	newid->depth = depth;
	return newid;
err_out:
	kfree(newid);
	return ERR_PTR(ret);
}
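
/*
 * A note on the pattern above: idr_preload() pre-allocates idr memory
 * with GFP_KERNEL while sleeping is still allowed, so the following
 * idr_alloc() can run under ss->id_lock with GFP_NOWAIT and still be
 * expected to succeed.
 */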

static int __init_or_module cgroup_init_idr(struct cgroup_subsys *ss,
					    struct cgroup_subsys_state *rootcss)
{
	struct css_id *newid;

	spin_lock_init(&ss->id_lock);
	idr_init(&ss->idr);

	newid = get_new_cssid(ss, 0);
	if (IS_ERR(newid))
		return PTR_ERR(newid);

	newid->stack[0] = newid->id;
	RCU_INIT_POINTER(newid->css, rootcss);
	RCU_INIT_POINTER(rootcss->id, newid);
	return 0;
}

static int alloc_css_id(struct cgroup_subsys_state *child_css)
{
	struct cgroup_subsys_state *parent_css = css_parent(child_css);
	struct css_id *child_id, *parent_id;
	int i, depth;

	parent_id = rcu_dereference_protected(parent_css->id, true);
	depth = parent_id->depth + 1;

	child_id = get_new_cssid(child_css->ss, depth);
	if (IS_ERR(child_id))
		return PTR_ERR(child_id);

	for (i = 0; i < depth; i++)
		child_id->stack[i] = parent_id->stack[i];
	child_id->stack[depth] = child_id->id;
	/*
	 * child_id->css pointer will be set after this cgroup is
	 * available; see cgroup_populate_dir().
	 */
	rcu_assign_pointer(child_css->id, child_id);

	return 0;
}

/**
 * css_lookup - lookup css by id
 * @ss: cgroup subsys to be looked into.
 * @id: the id
 *
 * Returns a pointer to the cgroup_subsys_state if there is a valid one
 * with @id, NULL if not.  Should be called under rcu_read_lock().
 */
struct cgroup_subsys_state *css_lookup(struct cgroup_subsys *ss, int id)
{
	struct css_id *cssid = NULL;

	BUG_ON(!ss->use_id);
	cssid = idr_find(&ss->idr, id);

	if (unlikely(!cssid))
		return NULL;

	return rcu_dereference(cssid->css);
}
EXPORT_SYMBOL_GPL(css_lookup);

/**
 * cgroup_css_from_dir - get corresponding css from file open on cgroup dir
 * @f: directory file of interest
 * @id: subsystem id of interest
 *
 * Must be called under RCU read lock.  The caller is responsible for
 * pinning the returned css if it needs to be accessed outside the RCU
 * critical section.
 */
struct cgroup_subsys_state *cgroup_css_from_dir(struct file *f, int id)
{
	struct cgroup *cgrp;
	struct inode *inode;
	struct cgroup_subsys_state *css;

	WARN_ON_ONCE(!rcu_read_lock_held());

	inode = file_inode(f);
	/* check in cgroup filesystem dir */
	if (inode->i_op != &cgroup_dir_inode_operations)
		return ERR_PTR(-EBADF);

	if (id < 0 || id >= CGROUP_SUBSYS_COUNT)
		return ERR_PTR(-EINVAL);

	/* get cgroup */
	cgrp = __d_cgrp(f->f_dentry);
	css = cgroup_css(cgrp, id);
	return css ? css : ERR_PTR(-ENOENT);
}

/**
 * css_from_id - lookup css by id
 * @id: the cgroup id
 * @ss: cgroup subsys to be looked into
 *
 * Returns the css if there's valid one with @id, otherwise returns NULL.
 * Should be called under rcu_read_lock().
 */
struct cgroup_subsys_state *css_from_id(int id, struct cgroup_subsys *ss)
{
	struct cgroup *cgrp;

	rcu_lockdep_assert(rcu_read_lock_held() ||
			   lockdep_is_held(&cgroup_mutex),
			   "css_from_id() needs proper protection");

	cgrp = idr_find(&ss->root->cgroup_idr, id);
	if (cgrp)
		return cgroup_css(cgrp, ss->subsys_id);
	return NULL;
}

#ifdef CONFIG_CGROUP_DEBUG
static struct cgroup_subsys_state *
debug_css_alloc(struct cgroup_subsys_state *parent_css)
{
	struct cgroup_subsys_state *css = kzalloc(sizeof(*css), GFP_KERNEL);

	if (!css)
		return ERR_PTR(-ENOMEM);

	return css;
}

static void debug_css_free(struct cgroup_subsys_state *css)
{
	kfree(css);
}

static u64 debug_taskcount_read(struct cgroup_subsys_state *css,
				struct cftype *cft)
{
	return cgroup_task_count(css->cgroup);
}

static u64 current_css_set_read(struct cgroup_subsys_state *css,
				struct cftype *cft)
{
	return (u64)(unsigned long)current->cgroups;
}

static u64 current_css_set_refcount_read(struct cgroup_subsys_state *css,
					 struct cftype *cft)
{
	u64 count;

	rcu_read_lock();
	count = atomic_read(&task_css_set(current)->refcount);
	rcu_read_unlock();
	return count;
}

static int current_css_set_cg_links_read(struct cgroup_subsys_state *css,
					 struct cftype *cft,
					 struct seq_file *seq)
{
	struct cgrp_cset_link *link;
	struct css_set *cset;

	read_lock(&css_set_lock);
	rcu_read_lock();
	cset = rcu_dereference(current->cgroups);
	list_for_each_entry(link, &cset->cgrp_links, cgrp_link) {
		struct cgroup *c = link->cgrp;
		const char *name;

		if (c->dentry)
			name = c->dentry->d_name.name;
		else
			name = "?";
		seq_printf(seq, "Root %d group %s\n",
			   c->root->hierarchy_id, name);
	}
	rcu_read_unlock();
	read_unlock(&css_set_lock);
	return 0;
}

#define MAX_TASKS_SHOWN_PER_CSS 25
static int cgroup_css_links_read(struct cgroup_subsys_state *css,
				 struct cftype *cft, struct seq_file *seq)
{
	struct cgrp_cset_link *link;

	read_lock(&css_set_lock);
	list_for_each_entry(link, &css->cgroup->cset_links, cset_link) {
		struct css_set *cset = link->cset;
		struct task_struct *task;
		int count = 0;
		seq_printf(seq, "css_set %p\n", cset);
		list_for_each_entry(task, &cset->tasks, cg_list) {
			if (count++ > MAX_TASKS_SHOWN_PER_CSS) {
				seq_puts(seq, "  ...\n");
				break;
			} else {
				seq_printf(seq, "  task %d\n",
					   task_pid_vnr(task));
			}
		}
	}
	read_unlock(&css_set_lock);
	return 0;
}

static u64 releasable_read(struct cgroup_subsys_state *css, struct cftype *cft)
{
	return test_bit(CGRP_RELEASABLE, &css->cgroup->flags);
}

static struct cftype debug_files[] =  {
	{
		.name = "taskcount",
		.read_u64 = debug_taskcount_read,
	},

	{
		.name = "current_css_set",
		.read_u64 = current_css_set_read,
	},

	{
		.name = "current_css_set_refcount",
		.read_u64 = current_css_set_refcount_read,
	},

	{
		.name = "current_css_set_cg_links",
		.read_seq_string = current_css_set_cg_links_read,
	},

	{
		.name = "cgroup_css_links",
		.read_seq_string = cgroup_css_links_read,
	},

	{
		.name = "releasable",
		.read_u64 = releasable_read,
	},

	{ }	/* terminate */
};
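
/*
 * When the debug subsystem is mounted, the entries above appear as
 * per-cgroup control files prefixed with the subsystem name, e.g.
 * (assuming a hierarchy mounted at /sys/fs/cgroup/debug):
 *
 *	cat /sys/fs/cgroup/debug/debug.taskcount
 */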

struct cgroup_subsys debug_subsys = {
	.name = "debug",
	.css_alloc = debug_css_alloc,
	.css_free = debug_css_free,
	.subsys_id = debug_subsys_id,
	.base_cftypes = debug_files,
};
#endif /* CONFIG_CGROUP_DEBUG */