/*
 *  Generic process-grouping system.
 *
 *  Based originally on the cpuset system, extracted by Paul Menage
 *  Copyright (C) 2006 Google, Inc
 *
 *  Notifications support
 *  Copyright (C) 2009 Nokia Corporation
 *  Author: Kirill A. Shutemov
 *
 *  Copyright notices from the original cpuset code:
 *  --------------------------------------------------
 *  Copyright (C) 2003 BULL SA.
 *  Copyright (C) 2004-2006 Silicon Graphics, Inc.
 *
 *  Portions derived from Patrick Mochel's sysfs code.
 *  sysfs is Copyright (c) 2001-3 Patrick Mochel
 *
 *  2003-10-10 Written by Simon Derr.
 *  2003-10-22 Updates by Stephen Hemminger.
 *  2004 May-July Rework by Paul Jackson.
 *  ---------------------------------------------------
 *
 *  This file is subject to the terms and conditions of the GNU General Public
 *  License.  See the file COPYING in the main directory of the Linux
 *  distribution for more details.
 */

#include <linux/cgroup.h>
#include <linux/cred.h>
#include <linux/ctype.h>
#include <linux/errno.h>
#include <linux/init_task.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/mount.h>
#include <linux/pagemap.h>
#include <linux/proc_fs.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
#include <linux/backing-dev.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/magic.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/sort.h>
#include <linux/kmod.h>
#include <linux/module.h>
#include <linux/delayacct.h>
#include <linux/cgroupstats.h>
#include <linux/hashtable.h>
#include <linux/namei.h>
#include <linux/pid_namespace.h>
#include <linux/idr.h>
#include <linux/vmalloc.h> /* TODO: replace with more sophisticated array */
#include <linux/eventfd.h>
#include <linux/poll.h>
#include <linux/flex_array.h> /* used in cgroup_attach_task */
#include <linux/kthread.h>

#include <linux/atomic.h>

/*
 * cgroup_mutex is the master lock.  Any modification to cgroup or its
 * hierarchy must be performed while holding it.
 *
 * cgroup_root_mutex nests inside cgroup_mutex and should be held to modify
 * cgroupfs_root of any cgroup hierarchy - subsys list, flags,
 * release_agent_path and so on.  Modifying requires both cgroup_mutex and
 * cgroup_root_mutex.  Readers can acquire either of the two.  This is to
 * break the following locking order cycle.
 *
 *  A. cgroup_mutex -> cred_guard_mutex -> s_type->i_mutex_key -> namespace_sem
 *  B. namespace_sem -> cgroup_mutex
 *
 * B happens only through cgroup_show_options() and using cgroup_root_mutex
 * breaks it.
 */
#ifdef CONFIG_PROVE_RCU
DEFINE_MUTEX(cgroup_mutex);
EXPORT_SYMBOL_GPL(cgroup_mutex);	/* only for lockdep */
#else
static DEFINE_MUTEX(cgroup_mutex);
#endif

static DEFINE_MUTEX(cgroup_root_mutex);

/*
 * Generate an array of cgroup subsystem pointers. At boot time, this is
 * populated with the built in subsystems, and modular subsystems are
 * registered after that. The mutable section of this array is protected by
 * cgroup_mutex.
 */
#define SUBSYS(_x) [_x ## _subsys_id] = &_x ## _subsys,
#define IS_SUBSYS_ENABLED(option) IS_BUILTIN(option)
static struct cgroup_subsys *cgroup_subsys[CGROUP_SUBSYS_COUNT] = {
#include <linux/cgroup_subsys.h>
};

/*
 * The dummy hierarchy, reserved for the subsystems that are otherwise
 * unattached - it never has more than a single cgroup, and all tasks are
 * part of that cgroup.
 */
static struct cgroupfs_root cgroup_dummy_root;

/* dummy_top is a shorthand for the dummy hierarchy's top cgroup */
static struct cgroup * const cgroup_dummy_top = &cgroup_dummy_root.top_cgroup;

/*
 * cgroupfs file entry, pointed to from leaf dentry->d_fsdata.
 */
struct cfent {
	struct list_head		node;
	struct dentry			*dentry;
	struct cftype			*type;

	/* file xattrs */
	struct simple_xattrs		xattrs;
};

/*
 * CSS ID -- ID per subsys's Cgroup Subsys State (CSS).  Used only when
 * cgroup_subsys->use_id != 0.
 */
#define CSS_ID_MAX	(65535)
struct css_id {
	/*
	 * The css to which this ID points. This pointer is set to valid value
	 * after cgroup is populated. If cgroup is removed, this will be NULL.
	 * This pointer is expected to be RCU-safe because destroy()
	 * is called after synchronize_rcu(). But for safe use, css_tryget()
	 * should be used for avoiding race.
	 */
	struct cgroup_subsys_state __rcu *css;
	/*
	 * ID of this css.
	 */
	unsigned short id;
	/*
	 * Depth in hierarchy which this ID belongs to.
	 */
	unsigned short depth;
	/*
	 * ID is freed by RCU. (and lookup routine is RCU safe.)
	 */
	struct rcu_head rcu_head;
	/*
	 * Hierarchy of CSS ID belongs to.
	 */
	unsigned short stack[0]; /* Array of Length (depth+1) */
};

/*
 * cgroup_event represents events which userspace wants to receive.
 */
struct cgroup_event {
	/*
	 * Cgroup which the event belongs to.
	 */
	struct cgroup *cgrp;
	/*
	 * Control file with which the event is associated.
	 */
	struct cftype *cft;
	/*
	 * eventfd to signal userspace about the event.
	 */
	struct eventfd_ctx *eventfd;
	/*
	 * Each of these is stored in a list by the cgroup.
	 */
	struct list_head list;
	/*
	 * All fields below are needed to unregister the event when
	 * userspace closes the eventfd.
	 */
	poll_table pt;
	wait_queue_head_t *wqh;
	wait_queue_t wait;
	struct work_struct remove;
};

/* The list of hierarchy roots */

static LIST_HEAD(cgroup_roots);
static int cgroup_root_count;

/*
 * Hierarchy ID allocation and mapping.  It follows the same exclusion
 * rules as other root ops - both cgroup_mutex and cgroup_root_mutex for
 * writes, either one for reads.
 */
static DEFINE_IDR(cgroup_hierarchy_idr);

static struct cgroup_name root_cgroup_name = { .name = "/" };

/*
 * Assign a monotonically increasing serial number to cgroups.  It
 * guarantees cgroups with bigger numbers are newer than those with smaller
 * numbers.  Also, as cgroups are always appended to the parent's
 * ->children list, it guarantees that sibling cgroups are always sorted in
 * the ascending serial number order on the list.  Protected by
 * cgroup_mutex.
 */
static u64 cgroup_serial_nr_next = 1;

/* This flag indicates whether tasks in the fork and exit paths should
 * check for fork/exit handlers to call. This avoids us having to do
 * extra work in the fork/exit path if none of the subsystems need to
 * be called.
 */
static int need_forkexit_callback __read_mostly;

static struct cftype cgroup_base_files[];

static void cgroup_offline_fn(struct work_struct *work);
static int cgroup_destroy_locked(struct cgroup *cgrp);
static int cgroup_addrm_files(struct cgroup *cgrp, struct cftype cfts[],
			      bool is_add);

/* convenient tests for these bits */
static inline bool cgroup_is_dead(const struct cgroup *cgrp)
{
	return test_bit(CGRP_DEAD, &cgrp->flags);
}

/**
 * cgroup_is_descendant - test ancestry
 * @cgrp: the cgroup to be tested
 * @ancestor: possible ancestor of @cgrp
 *
 * Test whether @cgrp is a descendant of @ancestor.  It also returns %true
 * if @cgrp == @ancestor.  This function is safe to call as long as @cgrp
 * and @ancestor are accessible.
 */
bool cgroup_is_descendant(struct cgroup *cgrp, struct cgroup *ancestor)
{
	while (cgrp) {
		if (cgrp == ancestor)
			return true;
		cgrp = cgrp->parent;
	}
	return false;
}
EXPORT_SYMBOL_GPL(cgroup_is_descendant);
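
/*
 * Usage sketch (illustrative, not part of the original file): callers
 * typically use cgroup_is_descendant() for containment checks, e.g.
 *
 *	if (cgroup_is_descendant(task_cgrp, limit_cgrp))
 *		...task_cgrp is limit_cgrp itself or somewhere below it...
 *
 * where task_cgrp/limit_cgrp are hypothetical names.  The walk touches
 * only ->parent pointers, so no locking is needed beyond keeping both
 * cgroups accessible.
 */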

static int cgroup_is_releasable(const struct cgroup *cgrp)
{
	const int bits =
		(1 << CGRP_RELEASABLE) |
		(1 << CGRP_NOTIFY_ON_RELEASE);
	return (cgrp->flags & bits) == bits;
}

static int notify_on_release(const struct cgroup *cgrp)
{
	return test_bit(CGRP_NOTIFY_ON_RELEASE, &cgrp->flags);
}

/**
 * for_each_subsys - iterate all loaded cgroup subsystems
 * @ss: the iteration cursor
 * @i: the index of @ss, CGROUP_SUBSYS_COUNT after reaching the end
 *
 * Should be called under cgroup_mutex.
 */
#define for_each_subsys(ss, i)						\
	for ((i) = 0; (i) < CGROUP_SUBSYS_COUNT; (i)++)			\
		if (({ lockdep_assert_held(&cgroup_mutex);		\
		       !((ss) = cgroup_subsys[i]); })) { }		\
		else

/**
 * for_each_builtin_subsys - iterate all built-in cgroup subsystems
 * @ss: the iteration cursor
 * @i: the index of @ss, CGROUP_BUILTIN_SUBSYS_COUNT after reaching the end
 *
 * Built-in subsystems are always present and iteration itself doesn't
 * require any synchronization.
 */
#define for_each_builtin_subsys(ss, i)					\
	for ((i) = 0; (i) < CGROUP_BUILTIN_SUBSYS_COUNT &&		\
	     (((ss) = cgroup_subsys[i]) || true); (i)++)
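
/*
 * Usage sketch (illustrative, not part of the original file): a typical
 * walk over the loaded subsystems.  Per the contract above, the caller
 * must hold cgroup_mutex; for_each_builtin_subsys() needs no locking.
 *
 *	struct cgroup_subsys *ss;
 *	int i;
 *
 *	for_each_subsys(ss, i)
 *		pr_debug("cgroup: subsys %d is %s\n", i, ss->name);
 */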

/* iterate each subsystem attached to a hierarchy */
#define for_each_root_subsys(root, ss)					\
	list_for_each_entry((ss), &(root)->subsys_list, sibling)

/* iterate across the active hierarchies */
#define for_each_active_root(root)					\
	list_for_each_entry((root), &cgroup_roots, root_list)

static inline struct cgroup *__d_cgrp(struct dentry *dentry)
{
	return dentry->d_fsdata;
}

static inline struct cfent *__d_cfe(struct dentry *dentry)
{
	return dentry->d_fsdata;
}

static inline struct cftype *__d_cft(struct dentry *dentry)
{
	return __d_cfe(dentry)->type;
}

/**
 * cgroup_lock_live_group - take cgroup_mutex and check that cgrp is alive.
 * @cgrp: the cgroup to be checked for liveness
 *
 * On success, returns true; the mutex should be later unlocked.  On
 * failure returns false with no lock held.
 */
static bool cgroup_lock_live_group(struct cgroup *cgrp)
{
	mutex_lock(&cgroup_mutex);
	if (cgroup_is_dead(cgrp)) {
		mutex_unlock(&cgroup_mutex);
		return false;
	}
	return true;
}
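
/*
 * Usage sketch (illustrative, not part of the original file): callers
 * typically bracket modifications like this, bailing out with -ENODEV
 * when the group has already been removed:
 *
 *	if (!cgroup_lock_live_group(cgrp))
 *		return -ENODEV;
 *	...modify the live cgroup...
 *	mutex_unlock(&cgroup_mutex);
 */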

/* the list of cgroups eligible for automatic release. Protected by
 * release_list_lock */
static LIST_HEAD(release_list);
static DEFINE_RAW_SPINLOCK(release_list_lock);
static void cgroup_release_agent(struct work_struct *work);
static DECLARE_WORK(release_agent_work, cgroup_release_agent);
static void check_for_release(struct cgroup *cgrp);

/*
 * A cgroup can be associated with multiple css_sets as different tasks may
 * belong to different cgroups on different hierarchies.  In the other
 * direction, a css_set is naturally associated with multiple cgroups.
 * This M:N relationship is represented by the following link structure
 * which exists for each association and allows traversing the associations
 * from both sides.
 */
struct cgrp_cset_link {
	/* the cgroup and css_set this link associates */
	struct cgroup		*cgrp;
	struct css_set		*cset;

	/* list of cgrp_cset_links anchored at cgrp->cset_links */
	struct list_head	cset_link;

	/* list of cgrp_cset_links anchored at css_set->cgrp_links */
	struct list_head	cgrp_link;
};

/* The default css_set - used by init and its children prior to any
 * hierarchies being mounted. It contains a pointer to the root state
 * for each subsystem. Also used to anchor the list of css_sets. Not
 * reference-counted, to improve performance when child cgroups
 * haven't been created.
 */

static struct css_set init_css_set;
static struct cgrp_cset_link init_cgrp_cset_link;

static int cgroup_init_idr(struct cgroup_subsys *ss,
			   struct cgroup_subsys_state *css);

/*
 * css_set_lock protects the list of css_set objects, and the chain of
 * tasks off each css_set.  Nests outside task->alloc_lock due to
 * cgroup_task_iter_start().
 */
static DEFINE_RWLOCK(css_set_lock);
static int css_set_count;

/*
 * hash table for cgroup groups. This improves the performance of finding
 * an existing css_set. This hash doesn't (currently) take into
 * account cgroups in empty hierarchies.
 */
#define CSS_SET_HASH_BITS	7
static DEFINE_HASHTABLE(css_set_table, CSS_SET_HASH_BITS);

static unsigned long css_set_hash(struct cgroup_subsys_state *css[])
{
	unsigned long key = 0UL;
	struct cgroup_subsys *ss;
	int i;

	for_each_subsys(ss, i)
		key += (unsigned long)css[i];
	key = (key >> 16) ^ key;

	return key;
}
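
/*
 * Usage sketch (illustrative, not part of the original file): the key
 * computed above is what the lookup and insertion paths later in this
 * file feed to the hashtable helpers:
 *
 *	unsigned long key = css_set_hash(cset->subsys);
 *
 *	hash_add(css_set_table, &cset->hlist, key);
 *	hash_for_each_possible(css_set_table, cset, hlist, key)
 *		...compare and reuse a matching css_set...
 */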

/*
 * We don't maintain the lists running through each css_set to its task
 * until after the first call to cgroup_task_iter_start().  This reduces
 * the fork()/exit() overhead for people who have cgroups compiled into
 * their kernel but not actually in use.
 */
static int use_task_css_set_links __read_mostly;

static void __put_css_set(struct css_set *cset, int taskexit)
{
	struct cgrp_cset_link *link, *tmp_link;

	/*
	 * Ensure that the refcount doesn't hit zero while any readers
	 * can see it. Similar to atomic_dec_and_lock(), but for an
	 * rwlock
	 */
	if (atomic_add_unless(&cset->refcount, -1, 1))
		return;
	write_lock(&css_set_lock);
	if (!atomic_dec_and_test(&cset->refcount)) {
		write_unlock(&css_set_lock);
		return;
	}

	/* This css_set is dead. unlink it and release cgroup refcounts */
	hash_del(&cset->hlist);
	css_set_count--;

	list_for_each_entry_safe(link, tmp_link, &cset->cgrp_links, cgrp_link) {
		struct cgroup *cgrp = link->cgrp;

		list_del(&link->cset_link);
		list_del(&link->cgrp_link);

		/* @cgrp can't go away while we're holding css_set_lock */
		if (list_empty(&cgrp->cset_links) && notify_on_release(cgrp)) {
			if (taskexit)
				set_bit(CGRP_RELEASABLE, &cgrp->flags);
			check_for_release(cgrp);
		}

		kfree(link);
	}

	write_unlock(&css_set_lock);
	kfree_rcu(cset, rcu_head);
}

/*
 * refcounted get/put for css_set objects
 */
static inline void get_css_set(struct css_set *cset)
{
	atomic_inc(&cset->refcount);
}

static inline void put_css_set(struct css_set *cset)
{
	__put_css_set(cset, 0);
}

static inline void put_css_set_taskexit(struct css_set *cset)
{
	__put_css_set(cset, 1);
}

/**
 * compare_css_sets - helper function for find_existing_css_set().
 * @cset: candidate css_set being tested
 * @old_cset: existing css_set for a task
 * @new_cgrp: cgroup that's being entered by the task
 * @template: desired set of css pointers in css_set (pre-calculated)
 *
 * Returns true if "cset" matches "old_cset" except for the hierarchy
 * which "new_cgrp" belongs to, for which it should match "new_cgrp".
 */
static bool compare_css_sets(struct css_set *cset,
			     struct css_set *old_cset,
			     struct cgroup *new_cgrp,
			     struct cgroup_subsys_state *template[])
{
	struct list_head *l1, *l2;

	if (memcmp(template, cset->subsys, sizeof(cset->subsys))) {
		/* Not all subsystems matched */
		return false;
	}

	/*
	 * Compare cgroup pointers in order to distinguish between
	 * different cgroups in hierarchies with no subsystems. We
	 * could get by with just this check alone (and skip the
	 * memcmp above) but on most setups the memcmp check will
	 * avoid the need for this more expensive check on almost all
	 * candidates.
	 */

	l1 = &cset->cgrp_links;
	l2 = &old_cset->cgrp_links;
	while (1) {
		struct cgrp_cset_link *link1, *link2;
		struct cgroup *cgrp1, *cgrp2;

		l1 = l1->next;
		l2 = l2->next;
		/* See if we reached the end - both lists are equal length. */
		if (l1 == &cset->cgrp_links) {
			BUG_ON(l2 != &old_cset->cgrp_links);
			break;
		} else {
			BUG_ON(l2 == &old_cset->cgrp_links);
		}
		/* Locate the cgroups associated with these links. */
		link1 = list_entry(l1, struct cgrp_cset_link, cgrp_link);
		link2 = list_entry(l2, struct cgrp_cset_link, cgrp_link);
		cgrp1 = link1->cgrp;
		cgrp2 = link2->cgrp;
		/* Hierarchies should be linked in the same order. */
		BUG_ON(cgrp1->root != cgrp2->root);

		/*
		 * If this hierarchy is the hierarchy of the cgroup
		 * that's changing, then we need to check that this
		 * css_set points to the new cgroup; if it's any other
		 * hierarchy, then this css_set should point to the
		 * same cgroup as the old css_set.
		 */
		if (cgrp1->root == new_cgrp->root) {
			if (cgrp1 != new_cgrp)
				return false;
		} else {
			if (cgrp1 != cgrp2)
				return false;
		}
	}
	return true;
}

/**
 * find_existing_css_set - init css array and find the matching css_set
 * @old_cset: the css_set that we're using before the cgroup transition
 * @cgrp: the cgroup that we're moving into
 * @template: out param for the new set of csses, should be clear on entry
 */
static struct css_set *find_existing_css_set(struct css_set *old_cset,
					struct cgroup *cgrp,
					struct cgroup_subsys_state *template[])
{
	struct cgroupfs_root *root = cgrp->root;
	struct cgroup_subsys *ss;
	struct css_set *cset;
	unsigned long key;
	int i;

	/*
	 * Build the set of subsystem state objects that we want to see in the
	 * new css_set. while subsystems can change globally, the entries here
	 * won't change, so no need for locking.
	 */
	for_each_subsys(ss, i) {
		if (root->subsys_mask & (1UL << i)) {
			/* Subsystem is in this hierarchy. So we want
			 * the subsystem state from the new
			 * cgroup */
			template[i] = cgrp->subsys[i];
		} else {
			/* Subsystem is not in this hierarchy, so we
			 * don't want to change the subsystem state */
			template[i] = old_cset->subsys[i];
		}
	}

	key = css_set_hash(template);
	hash_for_each_possible(css_set_table, cset, hlist, key) {
		if (!compare_css_sets(cset, old_cset, cgrp, template))
			continue;

		/* This css_set matches what we need */
		return cset;
	}

	/* No existing cgroup group matched */
	return NULL;
}

static void free_cgrp_cset_links(struct list_head *links_to_free)
{
	struct cgrp_cset_link *link, *tmp_link;

	list_for_each_entry_safe(link, tmp_link, links_to_free, cset_link) {
		list_del(&link->cset_link);
		kfree(link);
	}
}

/**
 * allocate_cgrp_cset_links - allocate cgrp_cset_links
 * @count: the number of links to allocate
 * @tmp_links: list_head the allocated links are put on
 *
 * Allocate @count cgrp_cset_link structures and chain them on @tmp_links
 * through ->cset_link.  Returns 0 on success or -errno.
 */
static int allocate_cgrp_cset_links(int count, struct list_head *tmp_links)
{
	struct cgrp_cset_link *link;
	int i;

	INIT_LIST_HEAD(tmp_links);

	for (i = 0; i < count; i++) {
		link = kzalloc(sizeof(*link), GFP_KERNEL);
		if (!link) {
			free_cgrp_cset_links(tmp_links);
			return -ENOMEM;
		}
		list_add(&link->cset_link, tmp_links);
	}
	return 0;
}

/**
 * link_css_set - a helper function to link a css_set to a cgroup
 * @tmp_links: cgrp_cset_link objects allocated by allocate_cgrp_cset_links()
 * @cset: the css_set to be linked
 * @cgrp: the destination cgroup
 */
static void link_css_set(struct list_head *tmp_links, struct css_set *cset,
			 struct cgroup *cgrp)
{
	struct cgrp_cset_link *link;

	BUG_ON(list_empty(tmp_links));
	link = list_first_entry(tmp_links, struct cgrp_cset_link, cset_link);
	link->cset = cset;
	link->cgrp = cgrp;
	list_move(&link->cset_link, &cgrp->cset_links);
	/*
	 * Always add links to the tail of the list so that the list
	 * is sorted by order of hierarchy creation
	 */
	list_add_tail(&link->cgrp_link, &cset->cgrp_links);
}

/**
 * find_css_set - return a new css_set with one cgroup updated
 * @old_cset: the baseline css_set
 * @cgrp: the cgroup to be updated
 *
 * Return a new css_set that's equivalent to @old_cset, but with @cgrp
 * substituted into the appropriate hierarchy.
 */
static struct css_set *find_css_set(struct css_set *old_cset,
				    struct cgroup *cgrp)
{
	struct cgroup_subsys_state *template[CGROUP_SUBSYS_COUNT] = { };
	struct css_set *cset;
	struct list_head tmp_links;
	struct cgrp_cset_link *link;
	unsigned long key;

	lockdep_assert_held(&cgroup_mutex);

	/* First see if we already have a cgroup group that matches
	 * the desired set */
	read_lock(&css_set_lock);
	cset = find_existing_css_set(old_cset, cgrp, template);
	if (cset)
		get_css_set(cset);
	read_unlock(&css_set_lock);

	if (cset)
		return cset;

	cset = kzalloc(sizeof(*cset), GFP_KERNEL);
	if (!cset)
		return NULL;

	/* Allocate all the cgrp_cset_link objects that we'll need */
	if (allocate_cgrp_cset_links(cgroup_root_count, &tmp_links) < 0) {
		kfree(cset);
		return NULL;
	}

	atomic_set(&cset->refcount, 1);
	INIT_LIST_HEAD(&cset->cgrp_links);
	INIT_LIST_HEAD(&cset->tasks);
	INIT_HLIST_NODE(&cset->hlist);

	/* Copy the set of subsystem state objects generated in
	 * find_existing_css_set() */
	memcpy(cset->subsys, template, sizeof(cset->subsys));

	write_lock(&css_set_lock);
	/* Add reference counts and links from the new css_set. */
	list_for_each_entry(link, &old_cset->cgrp_links, cgrp_link) {
		struct cgroup *c = link->cgrp;

		if (c->root == cgrp->root)
			c = cgrp;
		link_css_set(&tmp_links, cset, c);
	}

	BUG_ON(!list_empty(&tmp_links));

	css_set_count++;

	/* Add this cgroup group to the hash table */
	key = css_set_hash(cset->subsys);
	hash_add(css_set_table, &cset->hlist, key);

	write_unlock(&css_set_lock);

	return cset;
}
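
/*
 * Flow note (illustrative, not part of the original file): find_css_set()
 * shows the standard life cycle of the link helpers above --
 * allocate_cgrp_cset_links() fills a temporary list, each link_css_set()
 * call consumes one entry from it, and free_cgrp_cset_links() releases
 * whatever is left over on the error paths.
 */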

/*
 * Return the cgroup for "task" from the given hierarchy. Must be
 * called with cgroup_mutex held.
 */
static struct cgroup *task_cgroup_from_root(struct task_struct *task,
					    struct cgroupfs_root *root)
{
	struct css_set *cset;
	struct cgroup *res = NULL;

	BUG_ON(!mutex_is_locked(&cgroup_mutex));
	read_lock(&css_set_lock);
	/*
	 * No need to lock the task - since we hold cgroup_mutex the
	 * task can't change groups, so the only thing that can happen
	 * is that it exits and its css is set back to init_css_set.
	 */
	cset = task_css_set(task);
	if (cset == &init_css_set) {
		res = &root->top_cgroup;
	} else {
		struct cgrp_cset_link *link;

		list_for_each_entry(link, &cset->cgrp_links, cgrp_link) {
			struct cgroup *c = link->cgrp;

			if (c->root == root) {
				res = c;
				break;
			}
		}
	}
	read_unlock(&css_set_lock);
	BUG_ON(!res);
	return res;
}

/*
 * There is one global cgroup mutex. We also require taking
 * task_lock() when dereferencing a task's cgroup subsys pointers.
 * See "The task_lock() exception", at the end of this comment.
 *
 * A task must hold cgroup_mutex to modify cgroups.
 *
 * Any task can increment and decrement the count field without lock.
 * So in general, code holding cgroup_mutex can't rely on the count
 * field not changing.  However, if the count goes to zero, then only
 * cgroup_attach_task() can increment it again.  Because a count of zero
 * means that no tasks are currently attached, therefore there is no
 * way a task attached to that cgroup can fork (the other way to
 * increment the count).  So code holding cgroup_mutex can safely
 * assume that if the count is zero, it will stay zero. Similarly, if
 * a task holds cgroup_mutex on a cgroup with zero count, it
 * knows that the cgroup won't be removed, as cgroup_rmdir()
 * needs that mutex.
 *
 * The fork and exit callbacks cgroup_fork() and cgroup_exit(), don't
 * (usually) take cgroup_mutex.  These are the two most performance
 * critical pieces of code here.  The exception occurs on cgroup_exit(),
 * when a task in a notify_on_release cgroup exits.  Then cgroup_mutex
 * is taken, and if the cgroup count is zero, a usermode call is made
 * to the release agent with the name of the cgroup (path relative to
 * the root of cgroup file system) as the argument.
 *
 * A cgroup can only be deleted if both its 'count' of using tasks
 * is zero, and its list of 'children' cgroups is empty.  Since all
 * tasks in the system use _some_ cgroup, and since there is always at
 * least one task in the system (init, pid == 1), therefore, top_cgroup
 * always has either children cgroups and/or using tasks.  So we don't
 * need a special hack to ensure that top_cgroup cannot be deleted.
 *
 *	The task_lock() exception
 *
 * The need for this exception arises from the action of
 * cgroup_attach_task(), which overwrites one task's cgroup pointer with
 * another.  It does so using cgroup_mutex, however there are
 * several performance critical places that need to reference
 * task->cgroup without the expense of grabbing a system global
 * mutex.  Therefore except as noted below, when dereferencing or, as
 * in cgroup_attach_task(), modifying a task's cgroup pointer we use
 * task_lock(), which acts on a spinlock (task->alloc_lock) already in
 * the task_struct routinely used for such matters.
 *
 * P.S.  One more locking exception.  RCU is used to guard the
 * update of a task's cgroup pointer by cgroup_attach_task()
 */

/*
 * A couple of forward declarations required, due to cyclic reference loop:
 * cgroup_mkdir -> cgroup_create -> cgroup_populate_dir ->
 * cgroup_add_file -> cgroup_create_file -> cgroup_dir_inode_operations
 * -> cgroup_mkdir.
 */

static int cgroup_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode);
static struct dentry *cgroup_lookup(struct inode *, struct dentry *, unsigned int);
static int cgroup_rmdir(struct inode *unused_dir, struct dentry *dentry);
static int cgroup_populate_dir(struct cgroup *cgrp, unsigned long subsys_mask);
static const struct inode_operations cgroup_dir_inode_operations;
static const struct file_operations proc_cgroupstats_operations;

static struct backing_dev_info cgroup_backing_dev_info = {
	.name		= "cgroup",
	.capabilities	= BDI_CAP_NO_ACCT_AND_WRITEBACK,
};

static int alloc_css_id(struct cgroup_subsys *ss,
			struct cgroup *parent, struct cgroup *child);

static struct inode *cgroup_new_inode(umode_t mode, struct super_block *sb)
{
	struct inode *inode = new_inode(sb);

	if (inode) {
		inode->i_ino = get_next_ino();
		inode->i_mode = mode;
		inode->i_uid = current_fsuid();
		inode->i_gid = current_fsgid();
		inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
		inode->i_mapping->backing_dev_info = &cgroup_backing_dev_info;
	}
	return inode;
}

static struct cgroup_name *cgroup_alloc_name(struct dentry *dentry)
{
	struct cgroup_name *name;

	name = kmalloc(sizeof(*name) + dentry->d_name.len + 1, GFP_KERNEL);
	if (!name)
		return NULL;
	strcpy(name->name, dentry->d_name.name);
	return name;
}

static void cgroup_free_fn(struct work_struct *work)
{
	struct cgroup *cgrp = container_of(work, struct cgroup, destroy_work);
	struct cgroup_subsys *ss;

	mutex_lock(&cgroup_mutex);
	/*
	 * Release the subsystem state objects.
	 */
	for_each_root_subsys(cgrp->root, ss) {
		struct cgroup_subsys_state *css = cgrp->subsys[ss->subsys_id];

		ss->css_free(css);
	}

	cgrp->root->number_of_cgroups--;
	mutex_unlock(&cgroup_mutex);

	/*
	 * We get a ref to the parent's dentry, and put the ref when
	 * this cgroup is being freed, so it's guaranteed that the
	 * parent won't be destroyed before its children.
	 */
	dput(cgrp->parent->dentry);

	/*
	 * Drop the active superblock reference that we took when we
	 * created the cgroup. This will free cgrp->root, if we are
	 * holding the last reference to @sb.
	 */
	deactivate_super(cgrp->root->sb);

	/*
	 * if we're getting rid of the cgroup, refcount should ensure
	 * that there are no pidlists left.
	 */
	BUG_ON(!list_empty(&cgrp->pidlists));

	simple_xattrs_free(&cgrp->xattrs);

	kfree(rcu_dereference_raw(cgrp->name));
	kfree(cgrp);
}

static void cgroup_free_rcu(struct rcu_head *head)
{
	struct cgroup *cgrp = container_of(head, struct cgroup, rcu_head);

	INIT_WORK(&cgrp->destroy_work, cgroup_free_fn);
	schedule_work(&cgrp->destroy_work);
}

static void cgroup_diput(struct dentry *dentry, struct inode *inode)
{
	/* is dentry a directory ? if so, kfree() associated cgroup */
	if (S_ISDIR(inode->i_mode)) {
		struct cgroup *cgrp = dentry->d_fsdata;

		BUG_ON(!(cgroup_is_dead(cgrp)));
		call_rcu(&cgrp->rcu_head, cgroup_free_rcu);
	} else {
		struct cfent *cfe = __d_cfe(dentry);
		struct cgroup *cgrp = dentry->d_parent->d_fsdata;

		WARN_ONCE(!list_empty(&cfe->node) &&
			  cgrp != &cgrp->root->top_cgroup,
			  "cfe still linked for %s\n", cfe->type->name);
		simple_xattrs_free(&cfe->xattrs);
		kfree(cfe);
	}
	iput(inode);
}

static int cgroup_delete(const struct dentry *d)
{
	return 1;
}

static void remove_dir(struct dentry *d)
{
	struct dentry *parent = dget(d->d_parent);

	d_delete(d);
	simple_rmdir(parent->d_inode, d);
	dput(parent);
}

static void cgroup_rm_file(struct cgroup *cgrp, const struct cftype *cft)
{
	struct cfent *cfe;

	lockdep_assert_held(&cgrp->dentry->d_inode->i_mutex);
	lockdep_assert_held(&cgroup_mutex);

	/*
	 * If we're doing cleanup due to failure of cgroup_create(),
	 * the corresponding @cfe may not exist.
	 */
	list_for_each_entry(cfe, &cgrp->files, node) {
		struct dentry *d = cfe->dentry;

		if (cft && cfe->type != cft)
			continue;

		dget(d);
		d_delete(d);
		simple_unlink(cgrp->dentry->d_inode, d);
		list_del_init(&cfe->node);
		dput(d);

		break;
	}
}

/**
 * cgroup_clear_dir - remove subsys files in a cgroup directory
 * @cgrp: target cgroup
 * @subsys_mask: mask of the subsystem ids whose files should be removed
 */
static void cgroup_clear_dir(struct cgroup *cgrp, unsigned long subsys_mask)
{
	struct cgroup_subsys *ss;
	int i;

	for_each_subsys(ss, i) {
		struct cftype_set *set;

		if (!test_bit(i, &subsys_mask))
			continue;
		list_for_each_entry(set, &ss->cftsets, node)
			cgroup_addrm_files(cgrp, set->cfts, false);
	}
}

/*
 * NOTE : the dentry must have been dget()'ed
 */
static void cgroup_d_remove_dir(struct dentry *dentry)
{
	struct dentry *parent;

	parent = dentry->d_parent;
	spin_lock(&parent->d_lock);
	spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
	list_del_init(&dentry->d_u.d_child);
	spin_unlock(&dentry->d_lock);
	spin_unlock(&parent->d_lock);
	remove_dir(dentry);
}

/*
 * Call with cgroup_mutex held. Drops reference counts on modules, including
 * any duplicate ones that parse_cgroupfs_options took. If this function
 * returns an error, no reference counts are touched.
 */
static int rebind_subsystems(struct cgroupfs_root *root,
			     unsigned long added_mask, unsigned removed_mask)
{
	struct cgroup *cgrp = &root->top_cgroup;
	struct cgroup_subsys *ss;
	unsigned long pinned = 0;
	int i, ret;

	BUG_ON(!mutex_is_locked(&cgroup_mutex));
	BUG_ON(!mutex_is_locked(&cgroup_root_mutex));

	/* Check that any added subsystems are currently free */
	for_each_subsys(ss, i) {
		if (!(added_mask & (1 << i)))
			continue;

		/* is the subsystem mounted elsewhere? */
		if (ss->root != &cgroup_dummy_root) {
			ret = -EBUSY;
			goto out_put;
		}

		/* pin the module */
		if (!try_module_get(ss->module)) {
			ret = -ENOENT;
			goto out_put;
		}
		pinned |= 1 << i;
	}

	/* subsys could be missing if unloaded between parsing and here */
	if (added_mask != pinned) {
		ret = -ENOENT;
		goto out_put;
	}

	ret = cgroup_populate_dir(cgrp, added_mask);
	if (ret)
		goto out_put;

	/*
	 * Nothing can fail from this point on.  Remove files for the
	 * removed subsystems and rebind each subsystem.
	 */
	cgroup_clear_dir(cgrp, removed_mask);

	for_each_subsys(ss, i) {
		unsigned long bit = 1UL << i;

		if (bit & added_mask) {
			/* We're binding this subsystem to this hierarchy */
			BUG_ON(cgrp->subsys[i]);
			BUG_ON(!cgroup_dummy_top->subsys[i]);
			BUG_ON(cgroup_dummy_top->subsys[i]->cgroup != cgroup_dummy_top);

			cgrp->subsys[i] = cgroup_dummy_top->subsys[i];
			cgrp->subsys[i]->cgroup = cgrp;
			list_move(&ss->sibling, &root->subsys_list);
			ss->root = root;
			if (ss->bind)
				ss->bind(cgrp->subsys[i]);

			/* refcount was already taken, and we're keeping it */
			root->subsys_mask |= bit;
		} else if (bit & removed_mask) {
			/* We're removing this subsystem */
			BUG_ON(cgrp->subsys[i] != cgroup_dummy_top->subsys[i]);
			BUG_ON(cgrp->subsys[i]->cgroup != cgrp);

			if (ss->bind)
				ss->bind(cgroup_dummy_top->subsys[i]);
			cgroup_dummy_top->subsys[i]->cgroup = cgroup_dummy_top;
			cgrp->subsys[i] = NULL;
			cgroup_subsys[i]->root = &cgroup_dummy_root;
			list_move(&ss->sibling, &cgroup_dummy_root.subsys_list);

			/* subsystem is now free - drop reference on module */
			module_put(ss->module);
			root->subsys_mask &= ~bit;
		}
	}

	/*
	 * Mark that @root has finished binding subsystems.  @root->subsys_mask
	 * now matches the bound subsystems.
	 */
	root->flags |= CGRP_ROOT_SUBSYS_BOUND;

	return 0;

out_put:
	for_each_subsys(ss, i)
		if (pinned & (1 << i))
			module_put(ss->module);
	return ret;
}

static int cgroup_show_options(struct seq_file *seq, struct dentry *dentry)
{
	struct cgroupfs_root *root = dentry->d_sb->s_fs_info;
	struct cgroup_subsys *ss;

	mutex_lock(&cgroup_root_mutex);
	for_each_root_subsys(root, ss)
		seq_printf(seq, ",%s", ss->name);
	if (root->flags & CGRP_ROOT_SANE_BEHAVIOR)
		seq_puts(seq, ",sane_behavior");
	if (root->flags & CGRP_ROOT_NOPREFIX)
		seq_puts(seq, ",noprefix");
	if (root->flags & CGRP_ROOT_XATTR)
		seq_puts(seq, ",xattr");
	if (strlen(root->release_agent_path))
		seq_printf(seq, ",release_agent=%s", root->release_agent_path);
	if (test_bit(CGRP_CPUSET_CLONE_CHILDREN, &root->top_cgroup.flags))
		seq_puts(seq, ",clone_children");
	if (strlen(root->name))
		seq_printf(seq, ",name=%s", root->name);
	mutex_unlock(&cgroup_root_mutex);
	return 0;
}

struct cgroup_sb_opts {
	unsigned long subsys_mask;
	unsigned long flags;
	char *release_agent;
	bool cpuset_clone_children;
	char *name;
	/* User explicitly requested empty subsystem */
	bool none;

	struct cgroupfs_root *new_root;

};

/*
 * Convert a hierarchy specifier into a bitmask of subsystems and
 * flags. Call with cgroup_mutex held to protect the cgroup_subsys[]
 * array. This function takes refcounts on subsystems to be used, unless it
 * returns error, in which case no refcounts are taken.
 */
static int parse_cgroupfs_options(char *data, struct cgroup_sb_opts *opts)
{
	char *token, *o = data;
	bool all_ss = false, one_ss = false;
	unsigned long mask = (unsigned long)-1;
	struct cgroup_subsys *ss;
	int i;

	BUG_ON(!mutex_is_locked(&cgroup_mutex));

#ifdef CONFIG_CPUSETS
	mask = ~(1UL << cpuset_subsys_id);
#endif

	memset(opts, 0, sizeof(*opts));

	while ((token = strsep(&o, ",")) != NULL) {
		if (!*token)
			return -EINVAL;
		if (!strcmp(token, "none")) {
			/* Explicitly have no subsystems */
			opts->none = true;
			continue;
		}
		if (!strcmp(token, "all")) {
			/* Mutually exclusive option 'all' + subsystem name */
			if (one_ss)
				return -EINVAL;
			all_ss = true;
			continue;
		}
		if (!strcmp(token, "__DEVEL__sane_behavior")) {
			opts->flags |= CGRP_ROOT_SANE_BEHAVIOR;
			continue;
		}
		if (!strcmp(token, "noprefix")) {
			opts->flags |= CGRP_ROOT_NOPREFIX;
			continue;
		}
		if (!strcmp(token, "clone_children")) {
			opts->cpuset_clone_children = true;
			continue;
		}
		if (!strcmp(token, "xattr")) {
			opts->flags |= CGRP_ROOT_XATTR;
			continue;
		}
		if (!strncmp(token, "release_agent=", 14)) {
			/* Specifying two release agents is forbidden */
			if (opts->release_agent)
				return -EINVAL;
			opts->release_agent =
				kstrndup(token + 14, PATH_MAX - 1, GFP_KERNEL);
			if (!opts->release_agent)
				return -ENOMEM;
			continue;
		}
		if (!strncmp(token, "name=", 5)) {
			const char *name = token + 5;
			/* Can't specify an empty name */
			if (!strlen(name))
				return -EINVAL;
			/* Must match [\w.-]+ */
			for (i = 0; i < strlen(name); i++) {
				char c = name[i];
				if (isalnum(c))
					continue;
				if ((c == '.') || (c == '-') || (c == '_'))
					continue;
				return -EINVAL;
			}
			/* Specifying two names is forbidden */
			if (opts->name)
				return -EINVAL;
			opts->name = kstrndup(name,
					      MAX_CGROUP_ROOT_NAMELEN - 1,
					      GFP_KERNEL);
			if (!opts->name)
				return -ENOMEM;

			continue;
		}

		for_each_subsys(ss, i) {
			if (strcmp(token, ss->name))
				continue;
			if (ss->disabled)
				continue;

			/* Mutually exclusive option 'all' + subsystem name */
			if (all_ss)
				return -EINVAL;
			set_bit(i, &opts->subsys_mask);
			one_ss = true;

			break;
		}
		if (i == CGROUP_SUBSYS_COUNT)
			return -ENOENT;
	}

	/*
	 * If the 'all' option was specified select all the subsystems,
	 * otherwise if 'none', 'name=' and a subsystem name options
	 * were not specified, let's default to 'all'
	 */
	if (all_ss || (!one_ss && !opts->none && !opts->name))
		for_each_subsys(ss, i)
			if (!ss->disabled)
				set_bit(i, &opts->subsys_mask);

	/* Consistency checks */

	if (opts->flags & CGRP_ROOT_SANE_BEHAVIOR) {
		pr_warning("cgroup: sane_behavior: this is still under development and its behaviors will change, proceed at your own risk\n");

		if (opts->flags & CGRP_ROOT_NOPREFIX) {
			pr_err("cgroup: sane_behavior: noprefix is not allowed\n");
			return -EINVAL;
		}

		if (opts->cpuset_clone_children) {
			pr_err("cgroup: sane_behavior: clone_children is not allowed\n");
			return -EINVAL;
		}
	}

	/*
	 * Option noprefix was introduced just for backward compatibility
	 * with the old cpuset, so we allow noprefix only if mounting just
	 * the cpuset subsystem.
	 */
	if ((opts->flags & CGRP_ROOT_NOPREFIX) && (opts->subsys_mask & mask))
		return -EINVAL;


	/* Can't specify "none" and some subsystems */
	if (opts->subsys_mask && opts->none)
		return -EINVAL;

	/*
	 * We either have to specify by name or by subsystems. (So all
	 * empty hierarchies must have a name).
	 */
	if (!opts->subsys_mask && !opts->name)
		return -EINVAL;

	return 0;
}
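
/*
 * Usage sketch (illustrative, not part of the original file): the option
 * string parsed above is what mount(8) passes via -o, e.g.:
 *
 *	# mount -t cgroup -o cpu,cpuacct,name=mygrp none /tmp/cg
 *	# mount -t cgroup -o none,name=myhier none /tmp/named
 *
 * The first selects the cpu and cpuacct subsystems; the second creates a
 * named hierarchy with no subsystems bound ("none" plus "name=").
 */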

static int cgroup_remount(struct super_block *sb, int *flags, char *data)
{
	int ret = 0;
	struct cgroupfs_root *root = sb->s_fs_info;
	struct cgroup *cgrp = &root->top_cgroup;
	struct cgroup_sb_opts opts;
	unsigned long added_mask, removed_mask;

	if (root->flags & CGRP_ROOT_SANE_BEHAVIOR) {
		pr_err("cgroup: sane_behavior: remount is not allowed\n");
		return -EINVAL;
	}

	mutex_lock(&cgrp->dentry->d_inode->i_mutex);
	mutex_lock(&cgroup_mutex);
	mutex_lock(&cgroup_root_mutex);

	/* See what subsystems are wanted */
	ret = parse_cgroupfs_options(data, &opts);
	if (ret)
		goto out_unlock;

	if (opts.subsys_mask != root->subsys_mask || opts.release_agent)
		pr_warning("cgroup: option changes via remount are deprecated (pid=%d comm=%s)\n",
			   task_tgid_nr(current), current->comm);

	added_mask = opts.subsys_mask & ~root->subsys_mask;
	removed_mask = root->subsys_mask & ~opts.subsys_mask;

	/* Don't allow flags or name to change at remount */
	if (((opts.flags ^ root->flags) & CGRP_ROOT_OPTION_MASK) ||
	    (opts.name && strcmp(opts.name, root->name))) {
		pr_err("cgroup: option or name mismatch, new: 0x%lx \"%s\", old: 0x%lx \"%s\"\n",
		       opts.flags & CGRP_ROOT_OPTION_MASK, opts.name ?: "",
		       root->flags & CGRP_ROOT_OPTION_MASK, root->name);
		ret = -EINVAL;
		goto out_unlock;
	}

	/* remounting is not allowed for populated hierarchies */
	if (root->number_of_cgroups > 1) {
		ret = -EBUSY;
		goto out_unlock;
	}

	ret = rebind_subsystems(root, added_mask, removed_mask);
	if (ret)
		goto out_unlock;

	if (opts.release_agent)
		strcpy(root->release_agent_path, opts.release_agent);
 out_unlock:
	kfree(opts.release_agent);
	kfree(opts.name);
	mutex_unlock(&cgroup_root_mutex);
	mutex_unlock(&cgroup_mutex);
	mutex_unlock(&cgrp->dentry->d_inode->i_mutex);
	return ret;
}

static const struct super_operations cgroup_ops = {
	.statfs = simple_statfs,
	.drop_inode = generic_delete_inode,
	.show_options = cgroup_show_options,
	.remount_fs = cgroup_remount,
};

static void init_cgroup_housekeeping(struct cgroup *cgrp)
{
	INIT_LIST_HEAD(&cgrp->sibling);
	INIT_LIST_HEAD(&cgrp->children);
	INIT_LIST_HEAD(&cgrp->files);
	INIT_LIST_HEAD(&cgrp->cset_links);
	INIT_LIST_HEAD(&cgrp->release_list);
	INIT_LIST_HEAD(&cgrp->pidlists);
	mutex_init(&cgrp->pidlist_mutex);
	cgrp->dummy_css.cgroup = cgrp;
	INIT_LIST_HEAD(&cgrp->event_list);
	spin_lock_init(&cgrp->event_list_lock);
	simple_xattrs_init(&cgrp->xattrs);
}

static void init_cgroup_root(struct cgroupfs_root *root)
{
	struct cgroup *cgrp = &root->top_cgroup;

	INIT_LIST_HEAD(&root->subsys_list);
	INIT_LIST_HEAD(&root->root_list);
	root->number_of_cgroups = 1;
	cgrp->root = root;
	RCU_INIT_POINTER(cgrp->name, &root_cgroup_name);
	init_cgroup_housekeeping(cgrp);
	idr_init(&root->cgroup_idr);
}

static int cgroup_init_root_id(struct cgroupfs_root *root, int start, int end)
{
	int id;

	lockdep_assert_held(&cgroup_mutex);
	lockdep_assert_held(&cgroup_root_mutex);

	id = idr_alloc_cyclic(&cgroup_hierarchy_idr, root, start, end,
			      GFP_KERNEL);
	if (id < 0)
		return id;

	root->hierarchy_id = id;
	return 0;
}

static void cgroup_exit_root_id(struct cgroupfs_root *root)
{
	lockdep_assert_held(&cgroup_mutex);
	lockdep_assert_held(&cgroup_root_mutex);

	if (root->hierarchy_id) {
		idr_remove(&cgroup_hierarchy_idr, root->hierarchy_id);
		root->hierarchy_id = 0;
	}
}

static int cgroup_test_super(struct super_block *sb, void *data)
{
	struct cgroup_sb_opts *opts = data;
	struct cgroupfs_root *root = sb->s_fs_info;

	/* If we asked for a name then it must match */
	if (opts->name && strcmp(opts->name, root->name))
		return 0;

	/*
	 * If we asked for subsystems (or explicitly for no
	 * subsystems) then they must match
	 */
	if ((opts->subsys_mask || opts->none)
	    && (opts->subsys_mask != root->subsys_mask))
		return 0;

	return 1;
}

static struct cgroupfs_root *cgroup_root_from_opts(struct cgroup_sb_opts *opts)
{
	struct cgroupfs_root *root;

	if (!opts->subsys_mask && !opts->none)
		return NULL;

	root = kzalloc(sizeof(*root), GFP_KERNEL);
	if (!root)
		return ERR_PTR(-ENOMEM);

	init_cgroup_root(root);

	/*
	 * We need to set @root->subsys_mask now so that @root can be
	 * matched by cgroup_test_super() before it finishes
	 * initialization; otherwise, competing mounts with the same
	 * options may try to bind the same subsystems instead of waiting
	 * for the first one leading to unexpected mount errors.
	 * SUBSYS_BOUND will be set once actual binding is complete.
	 */
	root->subsys_mask = opts->subsys_mask;
	root->flags = opts->flags;
	if (opts->release_agent)
		strcpy(root->release_agent_path, opts->release_agent);
	if (opts->name)
		strcpy(root->name, opts->name);
	if (opts->cpuset_clone_children)
		set_bit(CGRP_CPUSET_CLONE_CHILDREN, &root->top_cgroup.flags);
	return root;
}

static void cgroup_free_root(struct cgroupfs_root *root)
{
	if (root) {
		/* hierarchy ID should already have been released */
		WARN_ON_ONCE(root->hierarchy_id);

		idr_destroy(&root->cgroup_idr);
		kfree(root);
	}
}

static int cgroup_set_super(struct super_block *sb, void *data)
{
	int ret;
	struct cgroup_sb_opts *opts = data;

	/* If we don't have a new root, we can't set up a new sb */
	if (!opts->new_root)
		return -EINVAL;

	BUG_ON(!opts->subsys_mask && !opts->none);

	ret = set_anon_super(sb, NULL);
	if (ret)
		return ret;

	sb->s_fs_info = opts->new_root;
	opts->new_root->sb = sb;

	sb->s_blocksize = PAGE_CACHE_SIZE;
	sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
	sb->s_magic = CGROUP_SUPER_MAGIC;
	sb->s_op = &cgroup_ops;

	return 0;
}

static int cgroup_get_rootdir(struct super_block *sb)
{
	static const struct dentry_operations cgroup_dops = {
		.d_iput = cgroup_diput,
		.d_delete = cgroup_delete,
	};

	struct inode *inode =
		cgroup_new_inode(S_IFDIR | S_IRUGO | S_IXUGO | S_IWUSR, sb);

	if (!inode)
		return -ENOMEM;

	inode->i_fop = &simple_dir_operations;
	inode->i_op = &cgroup_dir_inode_operations;
	/* directories start off with i_nlink == 2 (for "." entry) */
	inc_nlink(inode);
	sb->s_root = d_make_root(inode);
	if (!sb->s_root)
		return -ENOMEM;
	/* for everything else we want ->d_op set */
	sb->s_d_op = &cgroup_dops;
	return 0;
}

static struct dentry *cgroup_mount(struct file_system_type *fs_type,
			 int flags, const char *unused_dev_name,
			 void *data)
{
	struct cgroup_sb_opts opts;
	struct cgroupfs_root *root;
	int ret = 0;
	struct super_block *sb;
	struct cgroupfs_root *new_root;
	struct list_head tmp_links;
	struct inode *inode;
	const struct cred *cred;

	/* First find the desired set of subsystems */
	mutex_lock(&cgroup_mutex);
	ret = parse_cgroupfs_options(data, &opts);
	mutex_unlock(&cgroup_mutex);
	if (ret)
		goto out_err;

	/*
	 * Allocate a new cgroup root. We may not need it if we're
	 * reusing an existing hierarchy.
	 */
	new_root = cgroup_root_from_opts(&opts);
	if (IS_ERR(new_root)) {
		ret = PTR_ERR(new_root);
		goto out_err;
	}
	opts.new_root = new_root;

	/* Locate an existing or new sb for this hierarchy */
	sb = sget(fs_type, cgroup_test_super, cgroup_set_super, 0, &opts);
	if (IS_ERR(sb)) {
		ret = PTR_ERR(sb);
		cgroup_free_root(opts.new_root);
		goto out_err;
	}

	root = sb->s_fs_info;
	BUG_ON(!root);
	if (root == opts.new_root) {
		/* We used the new root structure, so this is a new hierarchy */
		struct cgroup *root_cgrp = &root->top_cgroup;
		struct cgroupfs_root *existing_root;
		int i;
		struct css_set *cset;

		BUG_ON(sb->s_root != NULL);

		ret = cgroup_get_rootdir(sb);
		if (ret)
			goto drop_new_super;
		inode = sb->s_root->d_inode;

		mutex_lock(&inode->i_mutex);
		mutex_lock(&cgroup_mutex);
		mutex_lock(&cgroup_root_mutex);

		root_cgrp->id = idr_alloc(&root->cgroup_idr, root_cgrp,
					   0, 1, GFP_KERNEL);
		if (root_cgrp->id < 0)
			goto unlock_drop;

		/* Check for name clashes with existing mounts */
		ret = -EBUSY;
		if (strlen(root->name))
			for_each_active_root(existing_root)
				if (!strcmp(existing_root->name, root->name))
					goto unlock_drop;

		/*
		 * We're accessing css_set_count without locking
		 * css_set_lock here, but that's OK - it can only be
		 * increased by someone holding cgroup_lock, and
		 * that's us. The worst that can happen is that we
		 * have some link structures left over
		 */
		ret = allocate_cgrp_cset_links(css_set_count, &tmp_links);
		if (ret)
			goto unlock_drop;

		/* ID 0 is reserved for dummy root, 1 for unified hierarchy */
		ret = cgroup_init_root_id(root, 2, 0);
		if (ret)
			goto unlock_drop;

		sb->s_root->d_fsdata = root_cgrp;
		root_cgrp->dentry = sb->s_root;

		/*
		 * We're inside get_sb() and will call lookup_one_len() to
		 * create the root files, which doesn't work if SELinux is
		 * in use.  The following cred dancing somehow works around
		 * it.  See 2ce9738ba ("cgroupfs: use init_cred when
		 * populating new cgroupfs mount") for more details.
		 */
		cred = override_creds(&init_cred);

		ret = cgroup_addrm_files(root_cgrp, cgroup_base_files, true);
		if (ret)
			goto rm_base_files;

		ret = rebind_subsystems(root, root->subsys_mask, 0);
		if (ret)
			goto rm_base_files;

		revert_creds(cred);

		/*
		 * There must be no failure case after here, since rebinding
		 * takes care of subsystems' refcounts, which are explicitly
		 * dropped in the failure exit path.
		 */

		list_add(&root->root_list, &cgroup_roots);
		cgroup_root_count++;

		/* Link the top cgroup in this hierarchy into all
		 * the css_set objects */
		write_lock(&css_set_lock);
		hash_for_each(css_set_table, i, cset, hlist)
			link_css_set(&tmp_links, cset, root_cgrp);
		write_unlock(&css_set_lock);

		free_cgrp_cset_links(&tmp_links);

		BUG_ON(!list_empty(&root_cgrp->children));
		BUG_ON(root->number_of_cgroups != 1);

		mutex_unlock(&cgroup_root_mutex);
		mutex_unlock(&cgroup_mutex);
		mutex_unlock(&inode->i_mutex);
	} else {
		/*
		 * We re-used an existing hierarchy - the new root (if
		 * any) is not needed
		 */
		cgroup_free_root(opts.new_root);

		if ((root->flags ^ opts.flags) & CGRP_ROOT_OPTION_MASK) {
			if ((root->flags | opts.flags) & CGRP_ROOT_SANE_BEHAVIOR) {
				pr_err("cgroup: sane_behavior: new mount options should match the existing superblock\n");
				ret = -EINVAL;
				goto drop_new_super;
			} else {
				pr_warning("cgroup: new mount options do not match the existing superblock, will be ignored\n");
			}
		}
	}

	kfree(opts.release_agent);
	kfree(opts.name);
	return dget(sb->s_root);

 rm_base_files:
	free_cgrp_cset_links(&tmp_links);
	cgroup_addrm_files(&root->top_cgroup, cgroup_base_files, false);
	revert_creds(cred);
 unlock_drop:
	cgroup_exit_root_id(root);
	mutex_unlock(&cgroup_root_mutex);
	mutex_unlock(&cgroup_mutex);
	mutex_unlock(&inode->i_mutex);
 drop_new_super:
	deactivate_locked_super(sb);
 out_err:
	kfree(opts.release_agent);
	kfree(opts.name);
	return ERR_PTR(ret);
}

static void cgroup_kill_sb(struct super_block *sb) {
	struct cgroupfs_root *root = sb->s_fs_info;
	struct cgroup *cgrp = &root->top_cgroup;
	struct cgrp_cset_link *link, *tmp_link;
	int ret;

	BUG_ON(!root);

	BUG_ON(root->number_of_cgroups != 1);
	BUG_ON(!list_empty(&cgrp->children));

	mutex_lock(&cgrp->dentry->d_inode->i_mutex);
	mutex_lock(&cgroup_mutex);
	mutex_lock(&cgroup_root_mutex);

	/* Rebind all subsystems back to the default hierarchy */
	if (root->flags & CGRP_ROOT_SUBSYS_BOUND) {
		ret = rebind_subsystems(root, 0, root->subsys_mask);
		/* Shouldn't be able to fail ... */
		BUG_ON(ret);
	}

	/*
	 * Release all the links from cset_links to this hierarchy's
	 * root cgroup
	 */
	write_lock(&css_set_lock);

	list_for_each_entry_safe(link, tmp_link, &cgrp->cset_links, cset_link) {
		list_del(&link->cset_link);
		list_del(&link->cgrp_link);
		kfree(link);
	}
	write_unlock(&css_set_lock);

	if (!list_empty(&root->root_list)) {
		list_del(&root->root_list);
		cgroup_root_count--;
	}

	cgroup_exit_root_id(root);

	mutex_unlock(&cgroup_root_mutex);
	mutex_unlock(&cgroup_mutex);
	mutex_unlock(&cgrp->dentry->d_inode->i_mutex);

	simple_xattrs_free(&cgrp->xattrs);

	kill_litter_super(sb);
	cgroup_free_root(root);
}

static struct file_system_type cgroup_fs_type = {
	.name = "cgroup",
	.mount = cgroup_mount,
	.kill_sb = cgroup_kill_sb,
};

static struct kobject *cgroup_kobj;

/**
 * cgroup_path - generate the path of a cgroup
 * @cgrp: the cgroup in question
 * @buf: the buffer to write the path into
 * @buflen: the length of the buffer
 *
 * Writes path of cgroup into buf.  Returns 0 on success, -errno on error.
 *
 * We can't generate cgroup path using dentry->d_name, as accessing
 * dentry->name must be protected by irq-unsafe dentry->d_lock or parent
 * inode's i_mutex, while on the other hand cgroup_path() can be called
 * with some irq-safe spinlocks held.
 */
int cgroup_path(const struct cgroup *cgrp, char *buf, int buflen)
{
	int ret = -ENAMETOOLONG;
	char *start;

	if (!cgrp->parent) {
		if (strlcpy(buf, "/", buflen) >= buflen)
			return -ENAMETOOLONG;
		return 0;
	}

	start = buf + buflen - 1;
	*start = '\0';

	rcu_read_lock();
	do {
		const char *name = cgroup_name(cgrp);
		int len;

		len = strlen(name);
		if ((start -= len) < buf)
			goto out;
		memcpy(start, name, len);

		if (--start < buf)
			goto out;
		*start = '/';

		cgrp = cgrp->parent;
	} while (cgrp->parent);
	ret = 0;
	memmove(buf, start, buf + buflen - start);
out:
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(cgroup_path);
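
/*
 * Usage sketch (illustrative, not part of the original file): printing
 * the path of a known-live cgroup; @cgrp and the pr_info() consumer are
 * hypothetical.
 *
 *	char buf[PATH_MAX];
 *
 *	if (!cgroup_path(cgrp, buf, PATH_MAX))
 *		pr_info("cgroup path: %s\n", buf);
 */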

/**
 * task_cgroup_path - cgroup path of a task in the first cgroup hierarchy
 * @task: target task
 * @buf: the buffer to write the path into
 * @buflen: the length of the buffer
 *
 * Determine @task's cgroup on the first (the one with the lowest non-zero
 * hierarchy_id) cgroup hierarchy and copy its path into @buf.  This
 * function grabs cgroup_mutex and shouldn't be used inside locks used by
 * cgroup controller callbacks.
 *
 * Returns 0 on success, fails with -%ENAMETOOLONG if @buflen is too short.
 */
int task_cgroup_path(struct task_struct *task, char *buf, size_t buflen)
{
	struct cgroupfs_root *root;
	struct cgroup *cgrp;
	int hierarchy_id = 1, ret = 0;

	if (buflen < 2)
		return -ENAMETOOLONG;

	mutex_lock(&cgroup_mutex);

	root = idr_get_next(&cgroup_hierarchy_idr, &hierarchy_id);

	if (root) {
		cgrp = task_cgroup_from_root(task, root);
		ret = cgroup_path(cgrp, buf, buflen);
	} else {
		/* if no hierarchy exists, everyone is in "/" */
		memcpy(buf, "/", 2);
	}

	mutex_unlock(&cgroup_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(task_cgroup_path);

/*
 * Control Group taskset
 */
struct task_and_cgroup {
	struct task_struct	*task;
	struct cgroup		*cgrp;
	struct css_set		*cset;
};

struct cgroup_taskset {
	struct task_and_cgroup	single;
	struct flex_array	*tc_array;
	int			tc_array_len;
	int			idx;
	struct cgroup		*cur_cgrp;
};

/**
 * cgroup_taskset_first - reset taskset and return the first task
 * @tset: taskset of interest
 *
 * @tset iteration is initialized and the first task is returned.
 */
struct task_struct *cgroup_taskset_first(struct cgroup_taskset *tset)
{
	if (tset->tc_array) {
		tset->idx = 0;
		return cgroup_taskset_next(tset);
	} else {
		tset->cur_cgrp = tset->single.cgrp;
		return tset->single.task;
	}
}
EXPORT_SYMBOL_GPL(cgroup_taskset_first);

/**
 * cgroup_taskset_next - iterate to the next task in taskset
 * @tset: taskset of interest
 *
 * Return the next task in @tset.  Iteration must have been initialized
 * with cgroup_taskset_first().
 */
struct task_struct *cgroup_taskset_next(struct cgroup_taskset *tset)
{
	struct task_and_cgroup *tc;

	if (!tset->tc_array || tset->idx >= tset->tc_array_len)
		return NULL;

	tc = flex_array_get(tset->tc_array, tset->idx++);
	tset->cur_cgrp = tc->cgrp;
	return tc->task;
}
EXPORT_SYMBOL_GPL(cgroup_taskset_next);

/**
 * cgroup_taskset_cur_cgroup - return the matching cgroup for the current task
 * @tset: taskset of interest
 *
 * Return the cgroup for the current (last returned) task of @tset.  This
 * function must be preceded by either cgroup_taskset_first() or
 * cgroup_taskset_next().
 */
struct cgroup *cgroup_taskset_cur_cgroup(struct cgroup_taskset *tset)
{
	return tset->cur_cgrp;
}
EXPORT_SYMBOL_GPL(cgroup_taskset_cur_cgroup);

/**
 * cgroup_taskset_size - return the number of tasks in taskset
 * @tset: taskset of interest
 */
int cgroup_taskset_size(struct cgroup_taskset *tset)
{
	return tset->tc_array ? tset->tc_array_len : 1;
}
EXPORT_SYMBOL_GPL(cgroup_taskset_size);
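
/*
 * Usage sketch (illustrative): how a controller callback would walk a
 * taskset handed to it by the core; @tset comes from ->can_attach() or
 * ->attach(), and handle_one() is hypothetical.
 *
 *	struct task_struct *task;
 *
 *	for (task = cgroup_taskset_first(tset); task;
 *	     task = cgroup_taskset_next(tset))
 *		handle_one(task, cgroup_taskset_cur_cgroup(tset));
 */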


/*
 * cgroup_task_migrate - move a task from one cgroup to another.
 *
 * Must be called with cgroup_mutex and threadgroup locked.
 */
static void cgroup_task_migrate(struct cgroup *old_cgrp,
				struct task_struct *tsk,
				struct css_set *new_cset)
{
	struct css_set *old_cset;

	/*
	 * We are synchronized through threadgroup_lock() against PF_EXITING
	 * setting such that we can't race against cgroup_exit() changing the
	 * css_set to init_css_set and dropping the old one.
	 */
	WARN_ON_ONCE(tsk->flags & PF_EXITING);
	old_cset = task_css_set(tsk);

	task_lock(tsk);
	rcu_assign_pointer(tsk->cgroups, new_cset);
	task_unlock(tsk);

	/* Update the css_set linked lists if we're using them */
	write_lock(&css_set_lock);
	if (!list_empty(&tsk->cg_list))
		list_move(&tsk->cg_list, &new_cset->tasks);
	write_unlock(&css_set_lock);

	/*
	 * We just gained a reference on old_cset by taking it from the
	 * task. As trading it for new_cset is protected by cgroup_mutex,
	 * we're safe to drop it here; it will be freed under RCU.
	 */
	set_bit(CGRP_RELEASABLE, &old_cgrp->flags);
	put_css_set(old_cset);
}

/**
 * cgroup_attach_task - attach a task or a whole threadgroup to a cgroup
 * @cgrp: the cgroup to attach to
 * @tsk: the task or the leader of the threadgroup to be attached
 * @threadgroup: attach the whole threadgroup?
 *
 * Call holding cgroup_mutex and the group_rwsem of the leader. Will take
 * task_lock of @tsk or each thread in the threadgroup individually in turn.
 */
static int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk,
			      bool threadgroup)
{
	int retval, i, group_size;
	struct cgroup_subsys *ss, *failed_ss = NULL;
	struct cgroupfs_root *root = cgrp->root;
	/* threadgroup list cursor and array */
	struct task_struct *leader = tsk;
	struct task_and_cgroup *tc;
	struct flex_array *group;
	struct cgroup_taskset tset = { };

	/*
	 * step 0: in order to do expensive, possibly blocking operations for
	 * every thread, we cannot iterate the thread group list, since it needs
	 * rcu or tasklist locked. instead, build an array of all threads in the
	 * group - group_rwsem prevents new threads from appearing, and if
	 * threads exit, this will just be an over-estimate.
	 */
	if (threadgroup)
		group_size = get_nr_threads(tsk);
	else
		group_size = 1;
	/* flex_array supports very large thread-groups better than kmalloc. */
	group = flex_array_alloc(sizeof(*tc), group_size, GFP_KERNEL);
	if (!group)
		return -ENOMEM;
	/* pre-allocate to guarantee space while iterating in rcu read-side. */
	retval = flex_array_prealloc(group, 0, group_size, GFP_KERNEL);
	if (retval)
		goto out_free_group_list;

	i = 0;
	/*
	 * Prevent freeing of tasks while we take a snapshot. Tasks that are
	 * already PF_EXITING could be freed from underneath us unless we
	 * take an rcu_read_lock.
	 */
	rcu_read_lock();
	do {
		struct task_and_cgroup ent;

		/* @tsk either already exited or can't exit until the end */
		if (tsk->flags & PF_EXITING)
			continue;

		/* as per above, nr_threads may decrease, but not increase. */
		BUG_ON(i >= group_size);
		ent.task = tsk;
		ent.cgrp = task_cgroup_from_root(tsk, root);
		/* nothing to do if this task is already in the cgroup */
		if (ent.cgrp == cgrp)
			continue;
		/*
		 * saying GFP_ATOMIC has no effect here because we did prealloc
		 * earlier, but it's good form to communicate our expectations.
		 */
		retval = flex_array_put(group, i, &ent, GFP_ATOMIC);
		BUG_ON(retval != 0);
		i++;

		if (!threadgroup)
			break;
	} while_each_thread(leader, tsk);
	rcu_read_unlock();
	/* remember the number of threads in the array for later. */
	group_size = i;
	tset.tc_array = group;
	tset.tc_array_len = group_size;

	/* methods shouldn't be called if no task is actually migrating */
	retval = 0;
	if (!group_size)
		goto out_free_group_list;

	/*
	 * step 1: check that we can legitimately attach to the cgroup.
	 */
	for_each_root_subsys(root, ss) {
		struct cgroup_subsys_state *css = cgrp->subsys[ss->subsys_id];

		if (ss->can_attach) {
			retval = ss->can_attach(css, &tset);
			if (retval) {
				failed_ss = ss;
				goto out_cancel_attach;
			}
		}
	}

	/*
	 * step 2: make sure css_sets exist for all threads to be migrated.
	 * we use find_css_set, which allocates a new one if necessary.
	 */
	for (i = 0; i < group_size; i++) {
		struct css_set *old_cset;

		tc = flex_array_get(group, i);
		old_cset = task_css_set(tc->task);
		tc->cset = find_css_set(old_cset, cgrp);
		if (!tc->cset) {
			retval = -ENOMEM;
			goto out_put_css_set_refs;
		}
	}

	/*
	 * step 3: now that we're guaranteed success wrt the css_sets,
	 * proceed to move all tasks to the new cgroup.  There are no
	 * failure cases after here, so this is the commit point.
	 */
	for (i = 0; i < group_size; i++) {
		tc = flex_array_get(group, i);
		cgroup_task_migrate(tc->cgrp, tc->task, tc->cset);
	}
	/* nothing is sensitive to fork() after this point. */

	/*
	 * step 4: do subsystem attach callbacks.
	 */
	for_each_root_subsys(root, ss) {
		struct cgroup_subsys_state *css = cgrp->subsys[ss->subsys_id];

		if (ss->attach)
			ss->attach(css, &tset);
	}

	/*
	 * step 5: success! and cleanup
	 */
	retval = 0;
out_put_css_set_refs:
	if (retval) {
		for (i = 0; i < group_size; i++) {
			tc = flex_array_get(group, i);
			if (!tc->cset)
				break;
			put_css_set(tc->cset);
		}
	}
out_cancel_attach:
	if (retval) {
		for_each_root_subsys(root, ss) {
			struct cgroup_subsys_state *css = cgrp->subsys[ss->subsys_id];

			if (ss == failed_ss)
				break;
			if (ss->cancel_attach)
				ss->cancel_attach(css, &tset);
		}
	}
out_free_group_list:
	flex_array_free(group);
	return retval;
}

/*
 * Find the task_struct of the task to attach by vpid and pass it along to the
 * function to attach either it or all tasks in its threadgroup. Will lock
 * cgroup_mutex and threadgroup; may take task_lock of task.
 */
static int attach_task_by_pid(struct cgroup *cgrp, u64 pid, bool threadgroup)
{
	struct task_struct *tsk;
	const struct cred *cred = current_cred(), *tcred;
	int ret;

	if (!cgroup_lock_live_group(cgrp))
		return -ENODEV;

retry_find_task:
	rcu_read_lock();
	if (pid) {
		tsk = find_task_by_vpid(pid);
		if (!tsk) {
			rcu_read_unlock();
			ret = -ESRCH;
			goto out_unlock_cgroup;
		}
		/*
		 * even if we're attaching all tasks in the thread group, we
		 * only need to check permissions on one of them.
		 */
		tcred = __task_cred(tsk);
		if (!uid_eq(cred->euid, GLOBAL_ROOT_UID) &&
		    !uid_eq(cred->euid, tcred->uid) &&
		    !uid_eq(cred->euid, tcred->suid)) {
			rcu_read_unlock();
			ret = -EACCES;
			goto out_unlock_cgroup;
		}
	} else
		tsk = current;

	if (threadgroup)
		tsk = tsk->group_leader;

	/*
	 * Workqueue threads may acquire PF_NO_SETAFFINITY and become
	 * trapped in a cpuset, or RT worker may be born in a cgroup
	 * with no rt_runtime allocated.  Just say no.
	 */
	if (tsk == kthreadd_task || (tsk->flags & PF_NO_SETAFFINITY)) {
		ret = -EINVAL;
		rcu_read_unlock();
		goto out_unlock_cgroup;
	}

	get_task_struct(tsk);
	rcu_read_unlock();

	threadgroup_lock(tsk);
	if (threadgroup) {
		if (!thread_group_leader(tsk)) {
			/*
			 * a race with de_thread from another thread's exec()
			 * may strip us of our leadership, if this happens,
			 * there is no choice but to throw this task away and
			 * try again; this is
			 * "double-double-toil-and-trouble-check locking".
			 */
			threadgroup_unlock(tsk);
			put_task_struct(tsk);
			goto retry_find_task;
		}
	}

	ret = cgroup_attach_task(cgrp, tsk, threadgroup);

	threadgroup_unlock(tsk);

	put_task_struct(tsk);
out_unlock_cgroup:
	mutex_unlock(&cgroup_mutex);
	return ret;
}
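
/*
 * Userspace view (illustrative): writing a tgid to "cgroup.procs" ends up
 * here with @threadgroup == true, while a pid written to "tasks" arrives
 * with @threadgroup == false; the mount point below is only an example.
 *
 *	echo $TGID > /sys/fs/cgroup/<hierarchy>/<group>/cgroup.procs
 */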

/**
 * cgroup_attach_task_all - attach task 'tsk' to all cgroups of task 'from'
 * @from: attach to all cgroups of a given task
 * @tsk: the task to be attached
 */
int cgroup_attach_task_all(struct task_struct *from, struct task_struct *tsk)
{
	struct cgroupfs_root *root;
	int retval = 0;

	mutex_lock(&cgroup_mutex);
	for_each_active_root(root) {
		struct cgroup *from_cgrp = task_cgroup_from_root(from, root);

		retval = cgroup_attach_task(from_cgrp, tsk, false);
		if (retval)
			break;
	}
	mutex_unlock(&cgroup_mutex);

	return retval;
}
EXPORT_SYMBOL_GPL(cgroup_attach_task_all);

static int cgroup_tasks_write(struct cgroup_subsys_state *css,
			      struct cftype *cft, u64 pid)
{
	return attach_task_by_pid(css->cgroup, pid, false);
}

static int cgroup_procs_write(struct cgroup_subsys_state *css,
			      struct cftype *cft, u64 tgid)
{
	return attach_task_by_pid(css->cgroup, tgid, true);
}

static int cgroup_release_agent_write(struct cgroup_subsys_state *css,
				      struct cftype *cft, const char *buffer)
{
	BUILD_BUG_ON(sizeof(css->cgroup->root->release_agent_path) < PATH_MAX);
	if (strlen(buffer) >= PATH_MAX)
		return -EINVAL;
	if (!cgroup_lock_live_group(css->cgroup))
		return -ENODEV;
	mutex_lock(&cgroup_root_mutex);
	strcpy(css->cgroup->root->release_agent_path, buffer);
	mutex_unlock(&cgroup_root_mutex);
	mutex_unlock(&cgroup_mutex);
	return 0;
}
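
/*
 * Userspace view (illustrative): the release agent is configured per
 * hierarchy by writing an absolute path (shorter than PATH_MAX) into the
 * "release_agent" control file; the paths below are examples only.
 *
 *	echo /sbin/my_release_agent > /sys/fs/cgroup/<hierarchy>/release_agent
 */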

static int cgroup_release_agent_show(struct cgroup_subsys_state *css,
				     struct cftype *cft, struct seq_file *seq)
{
	struct cgroup *cgrp = css->cgroup;

	if (!cgroup_lock_live_group(cgrp))
		return -ENODEV;
	seq_puts(seq, cgrp->root->release_agent_path);
	seq_putc(seq, '\n');
	mutex_unlock(&cgroup_mutex);
	return 0;
}

static int cgroup_sane_behavior_show(struct cgroup_subsys_state *css,
				     struct cftype *cft, struct seq_file *seq)
{
	seq_printf(seq, "%d\n", cgroup_sane_behavior(css->cgroup));
	return 0;
}

/* return the css for the given cgroup file */
static struct cgroup_subsys_state *cgroup_file_css(struct cfent *cfe)
{
	struct cftype *cft = cfe->type;
	struct cgroup *cgrp = __d_cgrp(cfe->dentry->d_parent);

	if (cft->ss)
		return cgrp->subsys[cft->ss->subsys_id];
	return &cgrp->dummy_css;
}

/* A buffer size big enough for numbers or short strings */
#define CGROUP_LOCAL_BUFFER_SIZE 64

static ssize_t cgroup_write_X64(struct cgroup_subsys_state *css,
				struct cftype *cft, struct file *file,
				const char __user *userbuf, size_t nbytes,
				loff_t *unused_ppos)
{
	char buffer[CGROUP_LOCAL_BUFFER_SIZE];
	int retval = 0;
	char *end;

	if (!nbytes)
		return -EINVAL;
	if (nbytes >= sizeof(buffer))
		return -E2BIG;
	if (copy_from_user(buffer, userbuf, nbytes))
		return -EFAULT;

	buffer[nbytes] = 0;     /* nul-terminate */
	if (cft->write_u64) {
		u64 val = simple_strtoull(strstrip(buffer), &end, 0);
		if (*end)
			return -EINVAL;
		retval = cft->write_u64(css, cft, val);
	} else {
		s64 val = simple_strtoll(strstrip(buffer), &end, 0);
		if (*end)
			return -EINVAL;
		retval = cft->write_s64(css, cft, val);
	}
	if (!retval)
		retval = nbytes;
	return retval;
}

static ssize_t cgroup_write_string(struct cgroup_subsys_state *css,
				   struct cftype *cft, struct file *file,
				   const char __user *userbuf, size_t nbytes,
				   loff_t *unused_ppos)
{
	char local_buffer[CGROUP_LOCAL_BUFFER_SIZE];
	int retval = 0;
	size_t max_bytes = cft->max_write_len;
	char *buffer = local_buffer;

	if (!max_bytes)
		max_bytes = sizeof(local_buffer) - 1;
	if (nbytes >= max_bytes)
		return -E2BIG;
	/* Allocate a dynamic buffer if we need one */
	if (nbytes >= sizeof(local_buffer)) {
		buffer = kmalloc(nbytes + 1, GFP_KERNEL);
		if (buffer == NULL)
			return -ENOMEM;
	}
	if (nbytes && copy_from_user(buffer, userbuf, nbytes)) {
		retval = -EFAULT;
		goto out;
	}

	buffer[nbytes] = 0;     /* nul-terminate */
	retval = cft->write_string(css, cft, strstrip(buffer));
	if (!retval)
		retval = nbytes;
out:
	if (buffer != local_buffer)
		kfree(buffer);
	return retval;
}

static ssize_t cgroup_file_write(struct file *file, const char __user *buf,
				 size_t nbytes, loff_t *ppos)
{
	struct cfent *cfe = __d_cfe(file->f_dentry);
	struct cftype *cft = __d_cft(file->f_dentry);
	struct cgroup_subsys_state *css = cgroup_file_css(cfe);

	if (cft->write)
		return cft->write(css, cft, file, buf, nbytes, ppos);
	if (cft->write_u64 || cft->write_s64)
		return cgroup_write_X64(css, cft, file, buf, nbytes, ppos);
	if (cft->write_string)
		return cgroup_write_string(css, cft, file, buf, nbytes, ppos);
	if (cft->trigger) {
		int ret = cft->trigger(css, (unsigned int)cft->private);
		return ret ? ret : nbytes;
	}
	return -EINVAL;
}

static ssize_t cgroup_read_u64(struct cgroup_subsys_state *css,
			       struct cftype *cft, struct file *file,
			       char __user *buf, size_t nbytes, loff_t *ppos)
{
	char tmp[CGROUP_LOCAL_BUFFER_SIZE];
	u64 val = cft->read_u64(css, cft);
	int len = sprintf(tmp, "%llu\n", (unsigned long long) val);

	return simple_read_from_buffer(buf, nbytes, ppos, tmp, len);
}

static ssize_t cgroup_read_s64(struct cgroup_subsys_state *css,
			       struct cftype *cft, struct file *file,
			       char __user *buf, size_t nbytes, loff_t *ppos)
{
	char tmp[CGROUP_LOCAL_BUFFER_SIZE];
	s64 val = cft->read_s64(css, cft);
	int len = sprintf(tmp, "%lld\n", (long long) val);

	return simple_read_from_buffer(buf, nbytes, ppos, tmp, len);
}

static ssize_t cgroup_file_read(struct file *file, char __user *buf,
				size_t nbytes, loff_t *ppos)
{
	struct cfent *cfe = __d_cfe(file->f_dentry);
	struct cftype *cft = __d_cft(file->f_dentry);
	struct cgroup_subsys_state *css = cgroup_file_css(cfe);

	if (cft->read)
		return cft->read(css, cft, file, buf, nbytes, ppos);
	if (cft->read_u64)
		return cgroup_read_u64(css, cft, file, buf, nbytes, ppos);
	if (cft->read_s64)
		return cgroup_read_s64(css, cft, file, buf, nbytes, ppos);
	return -EINVAL;
}

/*
 * seqfile ops/methods for returning structured data. Currently just
 * supports string->u64 maps, but can be extended in future.
 */

static int cgroup_map_add(struct cgroup_map_cb *cb, const char *key, u64 value)
{
	struct seq_file *sf = cb->state;
	return seq_printf(sf, "%s %llu\n", key, (unsigned long long)value);
}

static int cgroup_seqfile_show(struct seq_file *m, void *arg)
{
	struct cfent *cfe = m->private;
	struct cftype *cft = cfe->type;
	struct cgroup_subsys_state *css = cgroup_file_css(cfe);

	if (cft->read_map) {
		struct cgroup_map_cb cb = {
			.fill = cgroup_map_add,
			.state = m,
		};
		return cft->read_map(css, cft, &cb);
	}
	return cft->read_seq_string(css, cft, m);
}

static const struct file_operations cgroup_seqfile_operations = {
	.read = seq_read,
	.write = cgroup_file_write,
	.llseek = seq_lseek,
	.release = single_release,
};

static int cgroup_file_open(struct inode *inode, struct file *file)
{
	struct cfent *cfe = __d_cfe(file->f_dentry);
	struct cftype *cft = __d_cft(file->f_dentry);
	struct cgroup_subsys_state *css = cgroup_file_css(cfe);
	int err;

	err = generic_file_open(inode, file);
	if (err)
		return err;

	/*
	 * If the file belongs to a subsystem, pin the css.  Will be
	 * unpinned either on open failure or release.  This ensures that
	 * @css stays alive for all file operations.
	 */
	if (css->ss && !css_tryget(css))
		return -ENODEV;

	if (cft->read_map || cft->read_seq_string) {
		file->f_op = &cgroup_seqfile_operations;
		err = single_open(file, cgroup_seqfile_show, cfe);
	} else if (cft->open) {
		err = cft->open(inode, file);
	}

	if (css->ss && err)
		css_put(css);
	return err;
}

static int cgroup_file_release(struct inode *inode, struct file *file)
{
	struct cfent *cfe = __d_cfe(file->f_dentry);
	struct cftype *cft = __d_cft(file->f_dentry);
	struct cgroup_subsys_state *css = cgroup_file_css(cfe);
	int ret = 0;

	if (cft->release)
		ret = cft->release(inode, file);
	if (css->ss)
		css_put(css);
	return ret;
}

/*
 * cgroup_rename - Only allow simple rename of directories in place.
 */
static int cgroup_rename(struct inode *old_dir, struct dentry *old_dentry,
			    struct inode *new_dir, struct dentry *new_dentry)
{
	int ret;
	struct cgroup_name *name, *old_name;
	struct cgroup *cgrp;

	/*
	 * It's convenient to use parent dir's i_mutex to protect
	 * cgrp->name.
	 */
	lockdep_assert_held(&old_dir->i_mutex);

	if (!S_ISDIR(old_dentry->d_inode->i_mode))
		return -ENOTDIR;
	if (new_dentry->d_inode)
		return -EEXIST;
	if (old_dir != new_dir)
		return -EIO;

	cgrp = __d_cgrp(old_dentry);

	/*
	 * This isn't a proper migration and its usefulness is very
	 * limited.  Disallow if sane_behavior.
	 */
	if (cgroup_sane_behavior(cgrp))
		return -EPERM;

	name = cgroup_alloc_name(new_dentry);
	if (!name)
		return -ENOMEM;

	ret = simple_rename(old_dir, old_dentry, new_dir, new_dentry);
	if (ret) {
		kfree(name);
		return ret;
	}

	old_name = rcu_dereference_protected(cgrp->name, true);
	rcu_assign_pointer(cgrp->name, name);

	kfree_rcu(old_name, rcu_head);
	return 0;
}

static struct simple_xattrs *__d_xattrs(struct dentry *dentry)
{
	if (S_ISDIR(dentry->d_inode->i_mode))
		return &__d_cgrp(dentry)->xattrs;
	else
		return &__d_cfe(dentry)->xattrs;
}

static inline int xattr_enabled(struct dentry *dentry)
{
	struct cgroupfs_root *root = dentry->d_sb->s_fs_info;
	return root->flags & CGRP_ROOT_XATTR;
}

static bool is_valid_xattr(const char *name)
{
	if (!strncmp(name, XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN) ||
	    !strncmp(name, XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN))
		return true;
	return false;
}

static int cgroup_setxattr(struct dentry *dentry, const char *name,
			   const void *val, size_t size, int flags)
{
	if (!xattr_enabled(dentry))
		return -EOPNOTSUPP;
	if (!is_valid_xattr(name))
		return -EINVAL;
	return simple_xattr_set(__d_xattrs(dentry), name, val, size, flags);
}

static int cgroup_removexattr(struct dentry *dentry, const char *name)
{
	if (!xattr_enabled(dentry))
		return -EOPNOTSUPP;
	if (!is_valid_xattr(name))
		return -EINVAL;
	return simple_xattr_remove(__d_xattrs(dentry), name);
}

static ssize_t cgroup_getxattr(struct dentry *dentry, const char *name,
			       void *buf, size_t size)
{
	if (!xattr_enabled(dentry))
		return -EOPNOTSUPP;
	if (!is_valid_xattr(name))
		return -EINVAL;
	return simple_xattr_get(__d_xattrs(dentry), name, buf, size);
}

static ssize_t cgroup_listxattr(struct dentry *dentry, char *buf, size_t size)
{
	if (!xattr_enabled(dentry))
		return -EOPNOTSUPP;
	return simple_xattr_list(__d_xattrs(dentry), buf, size);
}

static const struct file_operations cgroup_file_operations = {
	.read = cgroup_file_read,
	.write = cgroup_file_write,
	.llseek = generic_file_llseek,
	.open = cgroup_file_open,
	.release = cgroup_file_release,
};

static const struct inode_operations cgroup_file_inode_operations = {
	.setxattr = cgroup_setxattr,
	.getxattr = cgroup_getxattr,
	.listxattr = cgroup_listxattr,
	.removexattr = cgroup_removexattr,
};

static const struct inode_operations cgroup_dir_inode_operations = {
	.lookup = cgroup_lookup,
	.mkdir = cgroup_mkdir,
	.rmdir = cgroup_rmdir,
	.rename = cgroup_rename,
	.setxattr = cgroup_setxattr,
	.getxattr = cgroup_getxattr,
	.listxattr = cgroup_listxattr,
	.removexattr = cgroup_removexattr,
};

static struct dentry *cgroup_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags)
{
	if (dentry->d_name.len > NAME_MAX)
		return ERR_PTR(-ENAMETOOLONG);
	d_add(dentry, NULL);
	return NULL;
}

/*
 * Check if a file is a control file
 */
static inline struct cftype *__file_cft(struct file *file)
{
	if (file_inode(file)->i_fop != &cgroup_file_operations)
		return ERR_PTR(-EINVAL);
	return __d_cft(file->f_dentry);
}

static int cgroup_create_file(struct dentry *dentry, umode_t mode,
				struct super_block *sb)
{
	struct inode *inode;

	if (!dentry)
		return -ENOENT;
	if (dentry->d_inode)
		return -EEXIST;

	inode = cgroup_new_inode(mode, sb);
	if (!inode)
		return -ENOMEM;

	if (S_ISDIR(mode)) {
		inode->i_op = &cgroup_dir_inode_operations;
		inode->i_fop = &simple_dir_operations;

		/* start off with i_nlink == 2 (for "." entry) */
		inc_nlink(inode);
		inc_nlink(dentry->d_parent->d_inode);

		/*
		 * Control reaches here with cgroup_mutex held.
		 * @inode->i_mutex should nest outside cgroup_mutex but we
		 * want to populate it immediately without releasing
		 * cgroup_mutex.  As @inode isn't visible to anyone else
		 * yet, trylock will always succeed without affecting
		 * lockdep checks.
		 */
		WARN_ON_ONCE(!mutex_trylock(&inode->i_mutex));
	} else if (S_ISREG(mode)) {
		inode->i_size = 0;
		inode->i_fop = &cgroup_file_operations;
		inode->i_op = &cgroup_file_inode_operations;
	}
	d_instantiate(dentry, inode);
	dget(dentry);	/* Extra count - pin the dentry in core */
	return 0;
}

/**
 * cgroup_file_mode - deduce file mode of a control file
 * @cft: the control file in question
 *
 * returns cft->mode if ->mode is not 0
 * returns S_IRUGO|S_IWUSR if it has both a read and a write handler
 * returns S_IRUGO if it has only a read handler
 * returns S_IWUSR if it has only a write handler
 */
static umode_t cgroup_file_mode(const struct cftype *cft)
{
	umode_t mode = 0;

	if (cft->mode)
		return cft->mode;

	if (cft->read || cft->read_u64 || cft->read_s64 ||
	    cft->read_map || cft->read_seq_string)
		mode |= S_IRUGO;

	if (cft->write || cft->write_u64 || cft->write_s64 ||
	    cft->write_string || cft->trigger)
		mode |= S_IWUSR;

	return mode;
}
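
/*
 * For example (illustrative): a cftype with only .read_u64 set gets
 * S_IRUGO (0444), while one with both .read_u64 and .write_u64 gets
 * S_IRUGO | S_IWUSR (0644), unless ->mode overrides it.
 */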

static int cgroup_add_file(struct cgroup *cgrp, struct cftype *cft)
{
	struct dentry *dir = cgrp->dentry;
	struct cgroup *parent = __d_cgrp(dir);
	struct dentry *dentry;
	struct cfent *cfe;
	int error;
	umode_t mode;
	char name[MAX_CGROUP_TYPE_NAMELEN + MAX_CFTYPE_NAME + 2] = { 0 };

	if (cft->ss && !(cgrp->root->flags & CGRP_ROOT_NOPREFIX)) {
		strcpy(name, cft->ss->name);
		strcat(name, ".");
	}
	strcat(name, cft->name);

	BUG_ON(!mutex_is_locked(&dir->d_inode->i_mutex));

	cfe = kzalloc(sizeof(*cfe), GFP_KERNEL);
	if (!cfe)
		return -ENOMEM;

	dentry = lookup_one_len(name, dir, strlen(name));
	if (IS_ERR(dentry)) {
		error = PTR_ERR(dentry);
		goto out;
	}

	cfe->type = (void *)cft;
	cfe->dentry = dentry;
	dentry->d_fsdata = cfe;
	simple_xattrs_init(&cfe->xattrs);

	mode = cgroup_file_mode(cft);
	error = cgroup_create_file(dentry, mode | S_IFREG, cgrp->root->sb);
	if (!error) {
		list_add_tail(&cfe->node, &parent->files);
		cfe = NULL;
	}
	dput(dentry);
out:
	kfree(cfe);
	return error;
}

/**
 * cgroup_addrm_files - add or remove files to a cgroup directory
 * @cgrp: the target cgroup
 * @cfts: array of cftypes to be added
 * @is_add: whether to add or remove
 *
 * Depending on @is_add, add or remove files defined by @cfts on @cgrp.
 * For removals, this function never fails.  If addition fails, this
 * function doesn't remove files already added.  The caller is responsible
 * for cleaning up.
 */
static int cgroup_addrm_files(struct cgroup *cgrp, struct cftype cfts[],
			      bool is_add)
{
	struct cftype *cft;
	int ret;

	lockdep_assert_held(&cgrp->dentry->d_inode->i_mutex);
	lockdep_assert_held(&cgroup_mutex);

	for (cft = cfts; cft->name[0] != '\0'; cft++) {
		/* does cft->flags tell us to skip this file on @cgrp? */
		if ((cft->flags & CFTYPE_INSANE) && cgroup_sane_behavior(cgrp))
			continue;
		if ((cft->flags & CFTYPE_NOT_ON_ROOT) && !cgrp->parent)
			continue;
		if ((cft->flags & CFTYPE_ONLY_ON_ROOT) && cgrp->parent)
			continue;

		if (is_add) {
			ret = cgroup_add_file(cgrp, cft);
			if (ret) {
				pr_warn("cgroup_addrm_files: failed to add %s, err=%d\n",
					cft->name, ret);
				return ret;
			}
		} else {
			cgroup_rm_file(cgrp, cft);
		}
	}
	return 0;
}

static void cgroup_cfts_prepare(void)
	__acquires(&cgroup_mutex)
{
	/*
	 * Thanks to the entanglement with vfs inode locking, we can't walk
	 * the existing cgroups under cgroup_mutex and create files.
	 * Instead, we use css_for_each_descendant_pre() and drop RCU read
	 * lock before calling cgroup_addrm_files().
	 */
	mutex_lock(&cgroup_mutex);
}

static int cgroup_cfts_commit(struct cftype *cfts, bool is_add)
	__releases(&cgroup_mutex)
{
	LIST_HEAD(pending);
	struct cgroup_subsys *ss = cfts[0].ss;
	struct cgroup *root = &ss->root->top_cgroup;
	struct super_block *sb = ss->root->sb;
	struct dentry *prev = NULL;
	struct inode *inode;
	struct cgroup_subsys_state *css;
	u64 update_before;
	int ret = 0;

	/* %NULL @cfts indicates abort and don't bother if @ss isn't attached */
	if (!cfts || ss->root == &cgroup_dummy_root ||
	    !atomic_inc_not_zero(&sb->s_active)) {
		mutex_unlock(&cgroup_mutex);
		return 0;
	}

	/*
	 * All cgroups which are created after we drop cgroup_mutex will
	 * have the updated set of files, so we only need to update the
	 * cgroups created before the current @cgroup_serial_nr_next.
	 */
	update_before = cgroup_serial_nr_next;

	mutex_unlock(&cgroup_mutex);

	/* @root always needs to be updated */
	inode = root->dentry->d_inode;
	mutex_lock(&inode->i_mutex);
	mutex_lock(&cgroup_mutex);
	ret = cgroup_addrm_files(root, cfts, is_add);
	mutex_unlock(&cgroup_mutex);
	mutex_unlock(&inode->i_mutex);

	if (ret)
		goto out_deact;

	/* add/rm files for all cgroups created before */
	rcu_read_lock();
	css_for_each_descendant_pre(css, cgroup_css(root, ss->subsys_id)) {
		struct cgroup *cgrp = css->cgroup;

		if (cgroup_is_dead(cgrp))
			continue;

		inode = cgrp->dentry->d_inode;
		dget(cgrp->dentry);
		rcu_read_unlock();

		dput(prev);
		prev = cgrp->dentry;

		mutex_lock(&inode->i_mutex);
		mutex_lock(&cgroup_mutex);
		if (cgrp->serial_nr < update_before && !cgroup_is_dead(cgrp))
			ret = cgroup_addrm_files(cgrp, cfts, is_add);
		mutex_unlock(&cgroup_mutex);
		mutex_unlock(&inode->i_mutex);

		rcu_read_lock();
		if (ret)
			break;
	}
	rcu_read_unlock();
	dput(prev);
out_deact:
	deactivate_super(sb);
	return ret;
}

/**
 * cgroup_add_cftypes - add an array of cftypes to a subsystem
 * @ss: target cgroup subsystem
 * @cfts: zero-length name terminated array of cftypes
 *
 * Register @cfts to @ss.  Files described by @cfts are created for all
 * existing cgroups to which @ss is attached and all future cgroups will
 * have them too.  This function can be called anytime whether @ss is
 * attached or not.
 *
 * Returns 0 on successful registration, -errno on failure.  Note that this
 * function currently returns 0 as long as @cfts registration is successful
 * even if some file creation attempts on existing cgroups fail.
 */
int cgroup_add_cftypes(struct cgroup_subsys *ss, struct cftype *cfts)
{
	struct cftype_set *set;
	struct cftype *cft;
	int ret;

	set = kzalloc(sizeof(*set), GFP_KERNEL);
	if (!set)
		return -ENOMEM;

	for (cft = cfts; cft->name[0] != '\0'; cft++)
		cft->ss = ss;

	cgroup_cfts_prepare();
	set->cfts = cfts;
	list_add_tail(&set->node, &ss->cftsets);
	ret = cgroup_cfts_commit(cfts, true);
	if (ret)
		cgroup_rm_cftypes(cfts);
	return ret;
}
EXPORT_SYMBOL_GPL(cgroup_add_cftypes);
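
/*
 * Usage sketch (illustrative): a subsystem registering an extra control
 * file; demo_subsys and demo_read_u64() are hypothetical.
 *
 *	static struct cftype demo_files[] = {
 *		{
 *			.name = "stat",
 *			.read_u64 = demo_read_u64,
 *		},
 *		{ }	// zero-length name terminates the array
 *	};
 *
 *	ret = cgroup_add_cftypes(&demo_subsys, demo_files);
 */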

/**
 * cgroup_rm_cftypes - remove an array of cftypes from a subsystem
 * @cfts: zero-length name terminated array of cftypes
 *
 * Unregister @cfts.  Files described by @cfts are removed from all
 * existing cgroups and all future cgroups won't have them either.  This
 * function can be called anytime whether @cfts' subsys is attached or not.
 *
 * Returns 0 on successful unregistration, -ENOENT if @cfts is not
 * registered.
 */
int cgroup_rm_cftypes(struct cftype *cfts)
{
	struct cftype_set *set;

	if (!cfts || !cfts[0].ss)
		return -ENOENT;

	cgroup_cfts_prepare();

	list_for_each_entry(set, &cfts[0].ss->cftsets, node) {
		if (set->cfts == cfts) {
			list_del(&set->node);
			kfree(set);
			cgroup_cfts_commit(cfts, false);
			return 0;
		}
	}

	cgroup_cfts_commit(NULL, false);
	return -ENOENT;
}

/**
 * cgroup_task_count - count the number of tasks in a cgroup.
 * @cgrp: the cgroup in question
 *
 * Return the number of tasks in the cgroup.
 */
int cgroup_task_count(const struct cgroup *cgrp)
{
	int count = 0;
	struct cgrp_cset_link *link;

	read_lock(&css_set_lock);
	list_for_each_entry(link, &cgrp->cset_links, cset_link)
		count += atomic_read(&link->cset->refcount);
	read_unlock(&css_set_lock);
	return count;
}

/*
 * To reduce the fork() overhead for systems that are not actually using
 * their cgroups capability, we don't maintain the lists running through
 * each css_set to its tasks until we see the list actually used - in other
 * words after the first call to cgroup_task_iter_start().
 */
static void cgroup_enable_task_cg_lists(void)
{
	struct task_struct *p, *g;
	write_lock(&css_set_lock);
	use_task_css_set_links = 1;
	/*
	 * We need tasklist_lock because RCU is not safe against
	 * while_each_thread(). Besides, a forking task that has passed
	 * cgroup_post_fork() without seeing use_task_css_set_links = 1
	 * is not guaranteed to have its child immediately visible in the
	 * tasklist if we walk through it with RCU.
	 */
	read_lock(&tasklist_lock);
	do_each_thread(g, p) {
		task_lock(p);
		/*
		 * We should check if the process is exiting, otherwise
		 * it will race with cgroup_exit() in that the list
		 * entry won't be deleted though the process has exited.
		 */
		if (!(p->flags & PF_EXITING) && list_empty(&p->cg_list))
			list_add(&p->cg_list, &task_css_set(p)->tasks);
		task_unlock(p);
	} while_each_thread(g, p);
	read_unlock(&tasklist_lock);
	write_unlock(&css_set_lock);
}

/**
 * css_next_child - find the next child of a given css
 * @pos_css: the current position (%NULL to initiate traversal)
 * @parent_css: css whose children to walk
 *
 * This function returns the next child of @parent_css and should be called
 * under RCU read lock.  The only requirement is that @parent_css and
 * @pos_css are accessible.  The next sibling is guaranteed to be returned
 * regardless of their states.
 */
struct cgroup_subsys_state *
css_next_child(struct cgroup_subsys_state *pos_css,
	       struct cgroup_subsys_state *parent_css)
{
	struct cgroup *pos = pos_css ? pos_css->cgroup : NULL;
	struct cgroup *cgrp = parent_css->cgroup;
	struct cgroup *next;

	WARN_ON_ONCE(!rcu_read_lock_held());

	/*
	 * @pos could already have been removed.  Once a cgroup is removed,
	 * its ->sibling.next is no longer updated when its next sibling
	 * changes.  As CGRP_DEAD assertion is serialized and happens
	 * before the cgroup is taken off the ->sibling list, if we see it
	 * unasserted, it's guaranteed that the next sibling hasn't
	 * finished its grace period even if it's already removed, and thus
	 * safe to dereference from this RCU critical section.  If
	 * ->sibling.next is inaccessible, cgroup_is_dead() is guaranteed
	 * to be visible as %true here.
	 *
	 * If @pos is dead, its next pointer can't be dereferenced;
	 * however, as each cgroup is given a monotonically increasing
	 * unique serial number and always appended to the sibling list,
	 * the next one can be found by walking the parent's children until
	 * we see a cgroup with higher serial number than @pos's.  While
	 * this path can be slower, it's taken only when either the current
	 * cgroup is removed or iteration and removal race.
	 */
	if (!pos) {
		next = list_entry_rcu(cgrp->children.next, struct cgroup, sibling);
	} else if (likely(!cgroup_is_dead(pos))) {
		next = list_entry_rcu(pos->sibling.next, struct cgroup, sibling);
	} else {
		list_for_each_entry_rcu(next, &cgrp->children, sibling)
			if (next->serial_nr > pos->serial_nr)
				break;
	}

	if (&next->sibling == &cgrp->children)
		return NULL;

	if (parent_css->ss)
		return cgroup_css(next, parent_css->ss->subsys_id);
	else
		return &next->dummy_css;
}
EXPORT_SYMBOL_GPL(css_next_child);

/**
 * css_next_descendant_pre - find the next descendant for pre-order walk
 * @pos: the current position (%NULL to initiate traversal)
 * @root: css whose descendants to walk
 *
 * To be used by css_for_each_descendant_pre().  Find the next descendant
 * to visit for pre-order traversal of @root's descendants.
 *
 * While this function requires RCU read locking, it doesn't require the
 * whole traversal to be contained in a single RCU critical section.  This
 * function will return the correct next descendant as long as both @pos
 * and @root are accessible and @pos is a descendant of @root.
 */
struct cgroup_subsys_state *
css_next_descendant_pre(struct cgroup_subsys_state *pos,
			struct cgroup_subsys_state *root)
{
	struct cgroup_subsys_state *next;

	WARN_ON_ONCE(!rcu_read_lock_held());

	/* if first iteration, pretend we just visited @root */
	if (!pos)
		pos = root;

	/* visit the first child if exists */
	next = css_next_child(NULL, pos);
	if (next)
		return next;

	/* no child, visit my or the closest ancestor's next sibling */
	while (pos != root) {
		next = css_next_child(pos, css_parent(pos));
		if (next)
			return next;
		pos = css_parent(pos);
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(css_next_descendant_pre);
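
/*
 * Usage sketch (illustrative): pre-order walk over all descendants of
 * @root_css; visit() is hypothetical and the walk runs under the RCU
 * read lock as required above.
 *
 *	struct cgroup_subsys_state *pos;
 *
 *	rcu_read_lock();
 *	css_for_each_descendant_pre(pos, root_css)
 *		visit(pos);
 *	rcu_read_unlock();
 */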

/**
 * css_rightmost_descendant - return the rightmost descendant of a css
 * @pos: css of interest
 *
 * Return the rightmost descendant of @pos.  If there's no descendant, @pos
 * is returned.  This can be used during pre-order traversal to skip
 * subtree of @pos.
 *
 * While this function requires RCU read locking, it doesn't require the
 * whole traversal to be contained in a single RCU critical section.  This
 * function will return the correct rightmost descendant as long as @pos is
 * accessible.
 */
struct cgroup_subsys_state *
css_rightmost_descendant(struct cgroup_subsys_state *pos)
{
	struct cgroup_subsys_state *last, *tmp;

	WARN_ON_ONCE(!rcu_read_lock_held());

	do {
		last = pos;
		/* ->prev isn't RCU safe, walk ->next till the end */
		pos = NULL;
		css_for_each_child(tmp, last)
			pos = tmp;
	} while (pos);

	return last;
}
EXPORT_SYMBOL_GPL(css_rightmost_descendant);

static struct cgroup_subsys_state *
css_leftmost_descendant(struct cgroup_subsys_state *pos)
{
	struct cgroup_subsys_state *last;

	do {
		last = pos;
		pos = css_next_child(NULL, pos);
	} while (pos);

	return last;
}

/**
 * css_next_descendant_post - find the next descendant for post-order walk
 * @pos: the current position (%NULL to initiate traversal)
 * @root: css whose descendants to walk
 *
 * To be used by css_for_each_descendant_post().  Find the next descendant
 * to visit for post-order traversal of @root's descendants.
 *
 * While this function requires RCU read locking, it doesn't require the
 * whole traversal to be contained in a single RCU critical section.  This
 * function will return the correct next descendant as long as both @pos
 * and @root are accessible and @pos is a descendant of @root.
 */
struct cgroup_subsys_state *
css_next_descendant_post(struct cgroup_subsys_state *pos,
			 struct cgroup_subsys_state *root)
{
	struct cgroup_subsys_state *next;

	WARN_ON_ONCE(!rcu_read_lock_held());

	/* if first iteration, visit the leftmost descendant */
	if (!pos) {
		next = css_leftmost_descendant(root);
		return next != root ? next : NULL;
	}

	/* if there's an unvisited sibling, visit its leftmost descendant */
	next = css_next_child(pos, css_parent(pos));
	if (next)
		return css_leftmost_descendant(next);

	/* no sibling left, visit parent */
	next = css_parent(pos);
	return next != root ? next : NULL;
}
EXPORT_SYMBOL_GPL(css_next_descendant_post);
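
/*
 * Usage sketch (illustrative): the post-order variant visits children
 * before their parents, which suits teardown; visit() is hypothetical.
 *
 *	struct cgroup_subsys_state *pos;
 *
 *	rcu_read_lock();
 *	css_for_each_descendant_post(pos, root_css)
 *		visit(pos);
 *	rcu_read_unlock();
 */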

/**
 * cgroup_advance_task_iter - advance a task iterator to the next css_set
 * @it: the iterator to advance
 *
 * Advance @it to the next css_set to walk.
 */
static void cgroup_advance_task_iter(struct cgroup_task_iter *it)
{
	struct list_head *l = it->cset_link;
	struct cgrp_cset_link *link;
	struct css_set *cset;

	/* Advance to the next non-empty css_set */
	do {
		l = l->next;
		if (l == &it->origin_cgrp->cset_links) {
			it->cset_link = NULL;
			return;
		}
		link = list_entry(l, struct cgrp_cset_link, cset_link);
		cset = link->cset;
	} while (list_empty(&cset->tasks));
	it->cset_link = l;
	it->task = cset->tasks.next;
}

/**
 * cgroup_task_iter_start - initiate task iteration
 * @cgrp: the cgroup to walk tasks of
 * @it: the task iterator to use
 *
 * Initiate iteration through the tasks of @cgrp.  The caller can call
 * cgroup_task_iter_next() to walk through the tasks until the function
 * returns NULL.  On completion of iteration, cgroup_task_iter_end() must
 * be called.
 *
 * Note that this function acquires a lock which is released when the
 * iteration finishes.  The caller can't sleep while iteration is in
 * progress.
 */
void cgroup_task_iter_start(struct cgroup *cgrp, struct cgroup_task_iter *it)
	__acquires(css_set_lock)
{
	/*
	 * The first time anyone tries to iterate across a cgroup,
	 * we need to enable the list linking each css_set to its
	 * tasks, and fix up all existing tasks.
	 */
	if (!use_task_css_set_links)
		cgroup_enable_task_cg_lists();

	read_lock(&css_set_lock);

	it->origin_cgrp = cgrp;
	it->cset_link = &cgrp->cset_links;

	cgroup_advance_task_iter(it);
}

/**
 * cgroup_task_iter_next - return the next task for the iterator
 * @it: the task iterator being iterated
 *
 * The "next" function for task iteration.  @it should have been
 * initialized via cgroup_task_iter_start().  Returns NULL when the
 * iteration reaches the end.
 */
struct task_struct *cgroup_task_iter_next(struct cgroup_task_iter *it)
{
	struct task_struct *res;
	struct list_head *l = it->task;
	struct cgrp_cset_link *link;

	/* If the iterator's cset_link is NULL, we have no tasks */
	if (!it->cset_link)
		return NULL;
	res = list_entry(l, struct task_struct, cg_list);
	/* Advance iterator to find next entry */
	l = l->next;
	link = list_entry(it->cset_link, struct cgrp_cset_link, cset_link);
	if (l == &link->cset->tasks) {
		/*
		 * We reached the end of this task list - move on to the
		 * next cgrp_cset_link.
		 */
		cgroup_advance_task_iter(it);
	} else {
		it->task = l;
	}
	return res;
}

/**
 * cgroup_task_iter_end - finish task iteration
 * @it: the task iterator to finish
 *
 * Finish task iteration started by cgroup_task_iter_start().
 */
void cgroup_task_iter_end(struct cgroup_task_iter *it)
	__releases(css_set_lock)
{
	read_unlock(&css_set_lock);
}
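
/*
 * Usage sketch (illustrative): counting the tasks of @cgrp with the
 * iterator; css_set_lock is read-held between _start() and _end(), so
 * the loop body must not sleep.
 *
 *	struct cgroup_task_iter it;
 *	struct task_struct *task;
 *	int count = 0;
 *
 *	cgroup_task_iter_start(cgrp, &it);
 *	while ((task = cgroup_task_iter_next(&it)))
 *		count++;
 *	cgroup_task_iter_end(&it);
 */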

static inline int started_after_time(struct task_struct *t1,
				     struct timespec *time,
				     struct task_struct *t2)
{
	int start_diff = timespec_compare(&t1->start_time, time);
	if (start_diff > 0) {
		return 1;
	} else if (start_diff < 0) {
		return 0;
	} else {
		/*
		 * Arbitrarily, if two processes started at the same
		 * time, we'll say that the lower pointer value
		 * started first. Note that t2 may have exited by now
		 * so this may not be a valid pointer any longer, but
		 * that's fine - it still serves to distinguish
		 * between two tasks started (effectively) simultaneously.
		 */
		return t1 > t2;
	}
}

/*
 * This function is a callback from heap_insert() and is used to order
 * the heap.
 * In this case we order the heap in descending task start time.
 */
static inline int started_after(void *p1, void *p2)
{
	struct task_struct *t1 = p1;
	struct task_struct *t2 = p2;
	return started_after_time(t1, &t2->start_time, t2);
}

/**
 * cgroup_scan_tasks - iterate through all the tasks in a cgroup
 * @cgrp: the cgroup to iterate tasks of
 * @test: optional test callback
 * @process: process callback
 * @data: data passed to @test and @process
 * @heap: optional pre-allocated heap used for task iteration
 *
 * Iterate through all the tasks in a cgroup, calling @test for each, and
 * if it returns %true, call @process for it also.
 *
 * @test may be NULL, meaning always true (select all tasks), which
 * effectively duplicates cgroup_task_iter_{start,next,end}() but does not
 * lock css_set_lock for the call to @process.
 *
 * It is guaranteed that @process will act on every task that is a member
 * of @cgrp for the duration of this call.  This function may or may not
 * call @process for tasks that exit or move to a different cgroup during
 * the call, or are forked or move into the cgroup during the call.
 *
 * Note that @test may be called with locks held, and may in some
 * situations be called multiple times for the same task, so it should be
 * cheap.
 *
 * If @heap is non-NULL, a heap has been pre-allocated and will be used for
 * heap operations (and its "gt" member will be overwritten), else a
 * temporary heap will be used (allocation of which may cause this function
 * to fail).
 */
int cgroup_scan_tasks(struct cgroup *cgrp,
		      bool (*test)(struct task_struct *, void *),
		      void (*process)(struct task_struct *, void *),
		      void *data, struct ptr_heap *heap)
{
	int retval, i;
	struct cgroup_task_iter it;
	struct task_struct *p, *dropped;
	/* Never dereference latest_task, since it's not refcounted */
	struct task_struct *latest_task = NULL;
	struct ptr_heap tmp_heap;
	struct timespec latest_time = { 0, 0 };

	if (heap) {
		/* The caller supplied our heap and pre-allocated its memory */
		heap->gt = &started_after;
	} else {
		/* We need to allocate our own heap memory */
		heap = &tmp_heap;
		retval = heap_init(heap, PAGE_SIZE, GFP_KERNEL, &started_after);
		if (retval)
			/* cannot allocate the heap */
			return retval;
	}

 again:
	/*
	 * Scan tasks in the cgroup, using the @test callback to determine
	 * which are of interest, and invoking @process callback on the
	 * ones which need an update.  Since we don't want to hold any
	 * locks during the task updates, gather tasks to be processed in a
	 * heap structure.  The heap is sorted by descending task start
	 * time.  If the statically-sized heap fills up, we overflow tasks
	 * that started later, and in future iterations only consider tasks
	 * that started after the latest task in the previous pass. This
	 * guarantees forward progress and that we don't miss any tasks.
	 */
	heap->size = 0;
	cgroup_task_iter_start(cgrp, &it);
	while ((p = cgroup_task_iter_next(&it))) {
		/*
		 * Only affect tasks that qualify per the caller's callback,
		 * if he provided one
		 */
		if (test && !test(p, data))
			continue;
		/*
		 * Only process tasks that started after the last task
		 * we processed
		 */
		if (!started_after_time(p, &latest_time, latest_task))
			continue;
		dropped = heap_insert(heap, p);
		if (dropped == NULL) {
			/*
			 * The new task was inserted; the heap wasn't
			 * previously full
			 */
			get_task_struct(p);
		} else if (dropped != p) {
			/*
			 * The new task was inserted, and pushed out a
			 * different task
			 */
			get_task_struct(p);
			put_task_struct(dropped);
		}
		/*
		 * Else the new task was newer than anything already in
		 * the heap and wasn't inserted
		 */
	}
3445
	cgroup_task_iter_end(&it);
3446 3447 3448

	if (heap->size) {
		for (i = 0; i < heap->size; i++) {
3449
			struct task_struct *q = heap->ptrs[i];
3450
			if (i == 0) {
3451 3452
				latest_time = q->start_time;
				latest_task = q;
3453 3454
			}
			/* Process the task per the caller's callback */
T
Tejun Heo 已提交
3455
			process(q, data);
3456
			put_task_struct(q);
3457 3458 3459 3460 3461 3462 3463 3464 3465 3466 3467 3468 3469 3470 3471
		}
		/*
		 * If we had to process any tasks at all, scan again
		 * in case some of them were in the middle of forking
		 * children that didn't get processed.
		 * Not the most efficient way to do it, but it avoids
		 * having to take callback_mutex in the fork path
		 */
		goto again;
	}
	if (heap == &tmp_heap)
		heap_free(&tmp_heap);
	return 0;
}
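
/*
 * Illustrative usage sketch (not part of this file): a hypothetical
 * caller that wants to reset the nice value of every negatively-niced
 * task in @cgrp could drive cgroup_scan_tasks() as below.
 * is_negatively_niced() and reset_nice() are made-up helper names:
 *
 *	static bool is_negatively_niced(struct task_struct *p, void *data)
 *	{
 *		return task_nice(p) < 0;
 *	}
 *
 *	static void reset_nice(struct task_struct *p, void *data)
 *	{
 *		set_user_nice(p, 0);
 *	}
 *
 *	ret = cgroup_scan_tasks(cgrp, is_negatively_niced, reset_nice,
 *				NULL, NULL);
 */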

static void cgroup_transfer_one_task(struct task_struct *task, void *data)
{
	struct cgroup *new_cgroup = data;

	mutex_lock(&cgroup_mutex);
	cgroup_attach_task(new_cgroup, task, false);
	mutex_unlock(&cgroup_mutex);
}

/**
 * cgroup_transfer_tasks - move tasks from one cgroup to another
 * @to: cgroup to which the tasks will be moved
 * @from: cgroup in which the tasks currently reside
 */
int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from)
{
	return cgroup_scan_tasks(from, NULL, cgroup_transfer_one_task, to, NULL);
}
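
/*
 * Illustrative call (hypothetical cgroup pointers): draining a child
 * cgroup into its parent before tearing it down could look like:
 *
 *	ret = cgroup_transfer_tasks(parent_cgrp, doomed_cgrp);
 */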

/*
 * Stuff for reading the 'tasks'/'procs' files.
 *
 * Reading this file can return large amounts of data if a cgroup has
 * *lots* of attached tasks. So it may need several calls to read(),
 * but we cannot guarantee that the information we produce is correct
 * unless we produce it entirely atomically.
 */

/* which pidlist file are we talking about? */
enum cgroup_filetype {
	CGROUP_FILE_PROCS,
	CGROUP_FILE_TASKS,
};

/*
 * A pidlist is a list of pids that virtually represents the contents of one
 * of the cgroup files ("procs" or "tasks"). We keep a list of such pidlists,
 * a pair (one each for procs, tasks) for each pid namespace that's relevant
 * to the cgroup.
 */
struct cgroup_pidlist {
	/*
	 * used to find which pidlist is wanted. doesn't change as long as
	 * this particular list stays in the list.
	 */
	struct { enum cgroup_filetype type; struct pid_namespace *ns; } key;
	/* array of xids (pids for the tasks file, tgids for procs) */
	pid_t *list;
	/* how many elements the above list has */
	int length;
	/* how many files are using the current array */
	int use_count;
	/* each of these stored in a list by its cgroup */
	struct list_head links;
	/* pointer to the cgroup we belong to, for list removal purposes */
	struct cgroup *owner;
	/* protects the other fields */
	struct rw_semaphore rwsem;
};

/*
 * The following two functions "fix" the issue where there are more pids
 * than kmalloc will give memory for; in such cases, we use vmalloc/vfree.
 * TODO: replace with a kernel-wide solution to this problem
 */
#define PIDLIST_TOO_LARGE(c) ((c) * sizeof(pid_t) > (PAGE_SIZE * 2))
static void *pidlist_allocate(int count)
{
	if (PIDLIST_TOO_LARGE(count))
		return vmalloc(count * sizeof(pid_t));
	else
		return kmalloc(count * sizeof(pid_t), GFP_KERNEL);
}
static void pidlist_free(void *p)
{
	if (is_vmalloc_addr(p))
		vfree(p);
	else
		kfree(p);
}
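
/*
 * Worked example (illustrative; assumes 4 KiB pages and a 4-byte
 * pid_t): PIDLIST_TOO_LARGE(c) trips once c * 4 bytes exceeds
 * PAGE_SIZE * 2 = 8192 bytes, i.e. at 2049 or more pids.  So a
 * 2048-pid cgroup is still served by kmalloc(), while a 5000-pid
 * cgroup falls back to vmalloc().
 */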

/*
 * pidlist_uniq - given a sorted pid list, strip out all duplicate
 * entries.  Returns the number of unique elements.
 */
static int pidlist_uniq(pid_t *list, int length)
{
	int src, dest = 1;

	/*
	 * we presume the 0th element is unique, so src starts at 1. trivial
	 * edge cases first; no work needs to be done for either
	 */
	if (length == 0 || length == 1)
		return length;
	/* src and dest walk down the list; dest counts unique elements */
	for (src = 1; src < length; src++) {
		/* find next unique element */
		while (list[src] == list[src-1]) {
			src++;
			if (src == length)
				goto after;
		}
		/* dest always points to where the next unique element goes */
		list[dest] = list[src];
		dest++;
	}
after:
	return dest;
}
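
/*
 * Example (illustrative): for the sorted input {3, 3, 7, 9, 9, 9},
 * pidlist_uniq() compacts the array in place to {3, 7, 9, ...} and
 * returns 3; entries past the returned count are stale.
 */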

static int cmppid(const void *a, const void *b)
{
	return *(pid_t *)a - *(pid_t *)b;
}

/*
 * find the appropriate pidlist for our purpose (given procs vs tasks)
 * returns with the lock on that pidlist already held, and takes care
 * of the use count, or returns NULL with no locks held if we're out of
 * memory.
 */
static struct cgroup_pidlist *cgroup_pidlist_find(struct cgroup *cgrp,
						  enum cgroup_filetype type)
{
	struct cgroup_pidlist *l;
	/* don't need task_nsproxy() if we're looking at ourself */
	struct pid_namespace *ns = task_active_pid_ns(current);

	/*
	 * We can't drop the pidlist_mutex before taking the l->rwsem in case
	 * the last ref-holder is trying to remove l from the list at the same
	 * time. Holding the pidlist_mutex precludes somebody taking whichever
	 * list we find out from under us - compare cgroup_release_pid_array().
	 */
	mutex_lock(&cgrp->pidlist_mutex);
	list_for_each_entry(l, &cgrp->pidlists, links) {
		if (l->key.type == type && l->key.ns == ns) {
			/* make sure l doesn't vanish out from under us */
			down_write(&l->rwsem);
			mutex_unlock(&cgrp->pidlist_mutex);
			return l;
		}
	}
	/* entry not found; create a new one */
	l = kzalloc(sizeof(struct cgroup_pidlist), GFP_KERNEL);
	if (!l) {
		mutex_unlock(&cgrp->pidlist_mutex);
		return l;
	}
	init_rwsem(&l->rwsem);
	down_write(&l->rwsem);
	l->key.type = type;
	l->key.ns = get_pid_ns(ns);
	l->owner = cgrp;
	list_add(&l->links, &cgrp->pidlists);
	mutex_unlock(&cgrp->pidlist_mutex);
	return l;
}

/*
 * Load a cgroup's pidarray with either procs' tgids or tasks' pids
 */
static int pidlist_array_load(struct cgroup *cgrp, enum cgroup_filetype type,
			      struct cgroup_pidlist **lp)
{
	pid_t *array;
	int length;
	int pid, n = 0; /* used for populating the array */
	struct cgroup_task_iter it;
	struct task_struct *tsk;
	struct cgroup_pidlist *l;

	/*
	 * If cgroup gets more users after we read count, we won't have
	 * enough space - tough.  This race is indistinguishable to the
	 * caller from the case that the additional cgroup users didn't
	 * show up until sometime later on.
	 */
	length = cgroup_task_count(cgrp);
	array = pidlist_allocate(length);
	if (!array)
		return -ENOMEM;
	/* now, populate the array */
	cgroup_task_iter_start(cgrp, &it);
	while ((tsk = cgroup_task_iter_next(&it))) {
		if (unlikely(n == length))
			break;
		/* get tgid or pid for procs or tasks file respectively */
		if (type == CGROUP_FILE_PROCS)
			pid = task_tgid_vnr(tsk);
		else
			pid = task_pid_vnr(tsk);
		if (pid > 0) /* make sure to only use valid results */
			array[n++] = pid;
	}
	cgroup_task_iter_end(&it);
	length = n;
	/* now sort & (if procs) strip out duplicates */
	sort(array, length, sizeof(pid_t), cmppid, NULL);
	if (type == CGROUP_FILE_PROCS)
		length = pidlist_uniq(array, length);
	l = cgroup_pidlist_find(cgrp, type);
	if (!l) {
		pidlist_free(array);
		return -ENOMEM;
	}
	/* store array, freeing old if necessary - lock already held */
	pidlist_free(l->list);
	l->list = array;
	l->length = length;
	l->use_count++;
	up_write(&l->rwsem);
	*lp = l;
	return 0;
}

/**
 * cgroupstats_build - build and fill cgroupstats
 * @stats: cgroupstats to fill information into
 * @dentry: A dentry entry belonging to the cgroup for which stats have
 * been requested.
 *
 * Build and fill cgroupstats so that taskstats can export it to user
 * space.
 */
int cgroupstats_build(struct cgroupstats *stats, struct dentry *dentry)
{
	int ret = -EINVAL;
	struct cgroup *cgrp;
	struct cgroup_task_iter it;
	struct task_struct *tsk;

	/*
	 * Validate dentry by checking the superblock operations,
	 * and make sure it's a directory.
	 */
	if (dentry->d_sb->s_op != &cgroup_ops ||
	    !S_ISDIR(dentry->d_inode->i_mode))
		goto err;

	ret = 0;
	cgrp = dentry->d_fsdata;

	cgroup_task_iter_start(cgrp, &it);
	while ((tsk = cgroup_task_iter_next(&it))) {
		switch (tsk->state) {
		case TASK_RUNNING:
			stats->nr_running++;
			break;
		case TASK_INTERRUPTIBLE:
			stats->nr_sleeping++;
			break;
		case TASK_UNINTERRUPTIBLE:
			stats->nr_uninterruptible++;
			break;
		case TASK_STOPPED:
			stats->nr_stopped++;
			break;
		default:
			if (delayacct_is_task_waiting_on_io(tsk))
				stats->nr_io_wait++;
			break;
		}
	}
	cgroup_task_iter_end(&it);

err:
	return ret;
}

/*
 * seq_file methods for the tasks/procs files. The seq_file position is the
 * next pid to display; the seq_file iterator is a pointer to the pid
 * in the cgroup->l->list array.
 */

static void *cgroup_pidlist_start(struct seq_file *s, loff_t *pos)
{
	/*
	 * Initially we receive a position value that corresponds to
	 * one more than the last pid shown (or 0 on the first call or
	 * after a seek to the start). Use a binary-search to find the
	 * next pid to display, if any
	 */
	struct cgroup_pidlist *l = s->private;
	int index = 0, pid = *pos;
	int *iter;

	down_read(&l->rwsem);
	if (pid) {
		int end = l->length;

		while (index < end) {
			int mid = (index + end) / 2;
			if (l->list[mid] == pid) {
				index = mid;
				break;
			} else if (l->list[mid] <= pid)
				index = mid + 1;
			else
				end = mid;
		}
	}
	/* If we're off the end of the array, we're done */
	if (index >= l->length)
		return NULL;
	/* Update the abstract position to be the actual pid that we found */
	iter = l->list + index;
	*pos = *iter;
	return iter;
}

static void cgroup_pidlist_stop(struct seq_file *s, void *v)
{
	struct cgroup_pidlist *l = s->private;
	up_read(&l->rwsem);
}

static void *cgroup_pidlist_next(struct seq_file *s, void *v, loff_t *pos)
{
	struct cgroup_pidlist *l = s->private;
	pid_t *p = v;
	pid_t *end = l->list + l->length;
	/*
	 * Advance to the next pid in the array. If this goes off the
	 * end, we're done
	 */
	p++;
	if (p >= end) {
		return NULL;
	} else {
		*pos = *p;
		return p;
	}
}

static int cgroup_pidlist_show(struct seq_file *s, void *v)
{
	return seq_printf(s, "%d\n", *(int *)v);
}

/*
 * seq_operations functions for iterating on pidlists through seq_file -
 * independent of whether it's tasks or procs
 */
static const struct seq_operations cgroup_pidlist_seq_operations = {
	.start = cgroup_pidlist_start,
	.stop = cgroup_pidlist_stop,
	.next = cgroup_pidlist_next,
	.show = cgroup_pidlist_show,
};
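
/*
 * Illustrative walk-through (not executable as-is): a single read() on
 * a pidlist file drives the seq_file core roughly as follows, with v
 * pointing into l->list throughout:
 *
 *	v = cgroup_pidlist_start(s, &pos);	// binary-search to pos
 *	while (v) {
 *		cgroup_pidlist_show(s, v);	// emit "%d\n"
 *		v = cgroup_pidlist_next(s, v, &pos);
 *	}
 *	cgroup_pidlist_stop(s, v);		// drops l->rwsem
 */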

static void cgroup_release_pid_array(struct cgroup_pidlist *l)
{
	/*
	 * the case where we're the last user of this particular pidlist will
	 * have us remove it from the cgroup's list, which entails taking the
	 * mutex. since in cgroup_pidlist_find() the pidlist's rwsem depends
	 * on cgroup->pidlist_mutex, we have to take pidlist_mutex first.
	 */
	mutex_lock(&l->owner->pidlist_mutex);
	down_write(&l->rwsem);
	BUG_ON(!l->use_count);
	if (!--l->use_count) {
		/* we're the last user if refcount is 0; remove and free */
		list_del(&l->links);
		mutex_unlock(&l->owner->pidlist_mutex);
		pidlist_free(l->list);
		put_pid_ns(l->key.ns);
		up_write(&l->rwsem);
		kfree(l);
		return;
	}
	mutex_unlock(&l->owner->pidlist_mutex);
	up_write(&l->rwsem);
}

static int cgroup_pidlist_release(struct inode *inode, struct file *file)
{
	struct cgroup_pidlist *l;
	if (!(file->f_mode & FMODE_READ))
		return 0;
	/*
	 * the seq_file will only be initialized if the file was opened for
	 * reading; hence we check if it's not null only in that case.
	 */
	l = ((struct seq_file *)file->private_data)->private;
	cgroup_release_pid_array(l);
	return seq_release(inode, file);
}

static const struct file_operations cgroup_pidlist_operations = {
	.read = seq_read,
	.llseek = seq_lseek,
	.write = cgroup_file_write,
	.release = cgroup_pidlist_release,
};

/*
 * The following functions handle opens on a file that displays a pidlist
 * (tasks or procs). Prepare an array of the process/thread IDs of whoever's
 * in the cgroup.
 */

/* helper function for the two below it */
static int cgroup_pidlist_open(struct file *file, enum cgroup_filetype type)
{
	struct cgroup *cgrp = __d_cgrp(file->f_dentry->d_parent);
	struct cgroup_pidlist *l;
	int retval;

	/* Nothing to do for write-only files */
	if (!(file->f_mode & FMODE_READ))
		return 0;

	/* have the array populated */
	retval = pidlist_array_load(cgrp, type, &l);
	if (retval)
		return retval;
	/* configure file information */
	file->f_op = &cgroup_pidlist_operations;

	retval = seq_open(file, &cgroup_pidlist_seq_operations);
	if (retval) {
		cgroup_release_pid_array(l);
		return retval;
	}
	((struct seq_file *)file->private_data)->private = l;
	return 0;
}
static int cgroup_tasks_open(struct inode *unused, struct file *file)
{
	return cgroup_pidlist_open(file, CGROUP_FILE_TASKS);
}
static int cgroup_procs_open(struct inode *unused, struct file *file)
{
	return cgroup_pidlist_open(file, CGROUP_FILE_PROCS);
}

static u64 cgroup_read_notify_on_release(struct cgroup_subsys_state *css,
					 struct cftype *cft)
{
	return notify_on_release(css->cgroup);
}

static int cgroup_write_notify_on_release(struct cgroup_subsys_state *css,
					  struct cftype *cft, u64 val)
{
	clear_bit(CGRP_RELEASABLE, &css->cgroup->flags);
	if (val)
		set_bit(CGRP_NOTIFY_ON_RELEASE, &css->cgroup->flags);
	else
		clear_bit(CGRP_NOTIFY_ON_RELEASE, &css->cgroup->flags);
	return 0;
}

/*
 * When dput() is called asynchronously, if umount has been done and
 * then deactivate_super() in cgroup_free_fn() kills the superblock,
 * there's a small window that vfs will see the root dentry with non-zero
 * refcnt and trigger BUG().
 *
 * That's why we hold a reference before dput() and drop it right after.
 */
static void cgroup_dput(struct cgroup *cgrp)
{
	struct super_block *sb = cgrp->root->sb;

	atomic_inc(&sb->s_active);
	dput(cgrp->dentry);
	deactivate_super(sb);
}

/*
 * Unregister event and free resources.
 *
 * Gets called from workqueue.
 */
static void cgroup_event_remove(struct work_struct *work)
{
	struct cgroup_event *event = container_of(work, struct cgroup_event,
			remove);
	struct cgroup *cgrp = event->cgrp;

	remove_wait_queue(event->wqh, &event->wait);

	event->cft->unregister_event(cgrp, event->cft, event->eventfd);

	/* Notify userspace the event is going away. */
	eventfd_signal(event->eventfd, 1);

	eventfd_ctx_put(event->eventfd);
	kfree(event);
	cgroup_dput(cgrp);
}

/*
 * Gets called on POLLHUP on eventfd when user closes it.
 *
 * Called with wqh->lock held and interrupts disabled.
 */
static int cgroup_event_wake(wait_queue_t *wait, unsigned mode,
		int sync, void *key)
{
	struct cgroup_event *event = container_of(wait,
			struct cgroup_event, wait);
	struct cgroup *cgrp = event->cgrp;
	unsigned long flags = (unsigned long)key;

	if (flags & POLLHUP) {
		/*
		 * If the event has been detached at cgroup removal, we
		 * can simply return knowing the other side will cleanup
		 * for us.
		 *
		 * We can't race against event freeing since the other
		 * side will require wqh->lock via remove_wait_queue(),
		 * which we hold.
		 */
		spin_lock(&cgrp->event_list_lock);
		if (!list_empty(&event->list)) {
			list_del_init(&event->list);
			/*
			 * We are in atomic context, but cgroup_event_remove()
			 * may sleep, so we have to call it in workqueue.
			 */
			schedule_work(&event->remove);
		}
		spin_unlock(&cgrp->event_list_lock);
	}

	return 0;
}

static void cgroup_event_ptable_queue_proc(struct file *file,
		wait_queue_head_t *wqh, poll_table *pt)
{
	struct cgroup_event *event = container_of(pt,
			struct cgroup_event, pt);

	event->wqh = wqh;
	add_wait_queue(wqh, &event->wait);
}

/*
 * Parse input and register new cgroup event handler.
 *
 * Input must be in format '<event_fd> <control_fd> <args>'.
 * Interpretation of args is defined by control file implementation.
 */
static int cgroup_write_event_control(struct cgroup_subsys_state *css,
				      struct cftype *cft, const char *buffer)
{
	struct cgroup *cgrp = css->cgroup;
	struct cgroup_event *event;
	struct cgroup *cgrp_cfile;
	unsigned int efd, cfd;
	struct file *efile;
	struct file *cfile;
	char *endp;
	int ret;

	efd = simple_strtoul(buffer, &endp, 10);
	if (*endp != ' ')
		return -EINVAL;
	buffer = endp + 1;

	cfd = simple_strtoul(buffer, &endp, 10);
	if ((*endp != ' ') && (*endp != '\0'))
		return -EINVAL;
	buffer = endp + 1;

	event = kzalloc(sizeof(*event), GFP_KERNEL);
	if (!event)
		return -ENOMEM;
	event->cgrp = cgrp;
	INIT_LIST_HEAD(&event->list);
	init_poll_funcptr(&event->pt, cgroup_event_ptable_queue_proc);
	init_waitqueue_func_entry(&event->wait, cgroup_event_wake);
	INIT_WORK(&event->remove, cgroup_event_remove);

	efile = eventfd_fget(efd);
	if (IS_ERR(efile)) {
		ret = PTR_ERR(efile);
		goto out_kfree;
	}

	event->eventfd = eventfd_ctx_fileget(efile);
	if (IS_ERR(event->eventfd)) {
		ret = PTR_ERR(event->eventfd);
		goto out_put_efile;
	}

	cfile = fget(cfd);
	if (!cfile) {
		ret = -EBADF;
		goto out_put_eventfd;
	}

	/* the process needs read permission on the control file */
	/* AV: shouldn't we check that it's been opened for read instead? */
	ret = inode_permission(file_inode(cfile), MAY_READ);
	if (ret < 0)
		goto out_put_cfile;

	event->cft = __file_cft(cfile);
	if (IS_ERR(event->cft)) {
		ret = PTR_ERR(event->cft);
		goto out_put_cfile;
	}

	/*
	 * The file to be monitored must be in the same cgroup as
	 * cgroup.event_control is.
	 */
	cgrp_cfile = __d_cgrp(cfile->f_dentry->d_parent);
	if (cgrp_cfile != cgrp) {
		ret = -EINVAL;
		goto out_put_cfile;
	}

	if (!event->cft->register_event || !event->cft->unregister_event) {
		ret = -EINVAL;
		goto out_put_cfile;
	}

	ret = event->cft->register_event(cgrp, event->cft,
			event->eventfd, buffer);
	if (ret)
		goto out_put_cfile;

	efile->f_op->poll(efile, &event->pt);

	/*
	 * Events should be removed after rmdir of cgroup directory, but before
	 * destroying subsystem state objects. Let's take reference to cgroup
	 * directory dentry to do that.
	 */
	dget(cgrp->dentry);

	spin_lock(&cgrp->event_list_lock);
	list_add(&event->list, &cgrp->event_list);
	spin_unlock(&cgrp->event_list_lock);

	fput(cfile);
	fput(efile);

	return 0;

out_put_cfile:
	fput(cfile);
out_put_eventfd:
	eventfd_ctx_put(event->eventfd);
out_put_efile:
	fput(efile);
out_kfree:
	kfree(event);

	return ret;
}
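
/*
 * Userspace-side sketch (illustrative; the mount paths and the memory
 * controller's threshold-event support are assumptions, error handling
 * elided).  Arming a usage-threshold event through cgroup.event_control:
 *
 *	int efd = eventfd(0, 0);
 *	int cfd = open("/sys/fs/cgroup/memory/foo/memory.usage_in_bytes",
 *		       O_RDONLY);
 *	int ctl = open("/sys/fs/cgroup/memory/foo/cgroup.event_control",
 *		       O_WRONLY);
 *	char buf[64];
 *	uint64_t ticks;
 *
 *	snprintf(buf, sizeof(buf), "%d %d 1048576", efd, cfd);
 *	write(ctl, buf, strlen(buf));	// "<event_fd> <control_fd> <args>"
 *	read(efd, &ticks, sizeof(ticks));	// blocks until event fires
 */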

static u64 cgroup_clone_children_read(struct cgroup_subsys_state *css,
				      struct cftype *cft)
{
	return test_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags);
}

static int cgroup_clone_children_write(struct cgroup_subsys_state *css,
				       struct cftype *cft, u64 val)
{
	if (val)
		set_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags);
	else
		clear_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags);
	return 0;
}

static struct cftype cgroup_base_files[] = {
	{
		.name = "cgroup.procs",
		.open = cgroup_procs_open,
		.write_u64 = cgroup_procs_write,
		.release = cgroup_pidlist_release,
		.mode = S_IRUGO | S_IWUSR,
	},
	{
		.name = "cgroup.event_control",
		.write_string = cgroup_write_event_control,
		.mode = S_IWUGO,
	},
	{
		.name = "cgroup.clone_children",
		.flags = CFTYPE_INSANE,
		.read_u64 = cgroup_clone_children_read,
		.write_u64 = cgroup_clone_children_write,
	},
	{
		.name = "cgroup.sane_behavior",
		.flags = CFTYPE_ONLY_ON_ROOT,
		.read_seq_string = cgroup_sane_behavior_show,
	},

	/*
	 * Historical crazy stuff.  These don't have "cgroup."  prefix and
	 * don't exist if sane_behavior.  If you're depending on these, be
	 * prepared to be burned.
	 */
	{
		.name = "tasks",
		.flags = CFTYPE_INSANE,		/* use "procs" instead */
		.open = cgroup_tasks_open,
		.write_u64 = cgroup_tasks_write,
		.release = cgroup_pidlist_release,
		.mode = S_IRUGO | S_IWUSR,
	},
	{
		.name = "notify_on_release",
		.flags = CFTYPE_INSANE,
		.read_u64 = cgroup_read_notify_on_release,
		.write_u64 = cgroup_write_notify_on_release,
	},
	{
		.name = "release_agent",
		.flags = CFTYPE_INSANE | CFTYPE_ONLY_ON_ROOT,
		.read_seq_string = cgroup_release_agent_show,
		.write_string = cgroup_release_agent_write,
		.max_write_len = PATH_MAX,
	},
	{ }	/* terminate */
};
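
/*
 * For reference (illustrative): given the flags above, a child
 * directory on a default (non-sane_behavior) hierarchy ends up with
 * cgroup.procs, cgroup.event_control, cgroup.clone_children, tasks and
 * notify_on_release, while cgroup.sane_behavior and release_agent
 * appear only on the root.  Under sane_behavior the CFTYPE_INSANE
 * entries are omitted entirely.
 */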

/**
 * cgroup_populate_dir - create subsys files in a cgroup directory
 * @cgrp: target cgroup
 * @subsys_mask: mask of the subsystem ids whose files should be added
 *
 * On failure, no file is added.
 */
static int cgroup_populate_dir(struct cgroup *cgrp, unsigned long subsys_mask)
{
	struct cgroup_subsys *ss;
	int i, ret = 0;

	/* process cftsets of each subsystem */
	for_each_subsys(ss, i) {
		struct cftype_set *set;

		if (!test_bit(i, &subsys_mask))
			continue;

		list_for_each_entry(set, &ss->cftsets, node) {
			ret = cgroup_addrm_files(cgrp, set->cfts, true);
			if (ret < 0)
				goto err;
		}
	}

	/* This cgroup is ready now */
	for_each_root_subsys(cgrp->root, ss) {
		struct cgroup_subsys_state *css = cgrp->subsys[ss->subsys_id];
		struct css_id *id = rcu_dereference_protected(css->id, true);

		/*
		 * Update id->css pointer and make this css visible from
		 * CSS ID functions. This pointer will be dereferenced
		 * from RCU-read-side without locks.
		 */
		if (id)
			rcu_assign_pointer(id->css, css);
	}

	return 0;
err:
	cgroup_clear_dir(cgrp, subsys_mask);
	return ret;
}

static void css_dput_fn(struct work_struct *work)
{
	struct cgroup_subsys_state *css =
		container_of(work, struct cgroup_subsys_state, dput_work);

	cgroup_dput(css->cgroup);
}

static void css_release(struct percpu_ref *ref)
{
	struct cgroup_subsys_state *css =
		container_of(ref, struct cgroup_subsys_state, refcnt);

	schedule_work(&css->dput_work);
}

static void init_cgroup_css(struct cgroup_subsys_state *css,
			       struct cgroup_subsys *ss,
			       struct cgroup *cgrp)
{
	css->cgroup = cgrp;
	css->ss = ss;
	css->flags = 0;
	css->id = NULL;
	if (cgrp == cgroup_dummy_top)
		css->flags |= CSS_ROOT;
	BUG_ON(cgrp->subsys[ss->subsys_id]);
	cgrp->subsys[ss->subsys_id] = css;

	/*
	 * css holds an extra ref to @cgrp->dentry which is put on the last
	 * css_put().  dput() requires process context, which css_put() may
	 * be called without.  @css->dput_work will be used to invoke
	 * dput() asynchronously from css_put().
	 */
	INIT_WORK(&css->dput_work, css_dput_fn);
}

/* invoke ->css_online() on a new CSS and mark it online if successful */
static int online_css(struct cgroup_subsys *ss, struct cgroup *cgrp)
{
	struct cgroup_subsys_state *css = cgrp->subsys[ss->subsys_id];
	int ret = 0;

	lockdep_assert_held(&cgroup_mutex);

	if (ss->css_online)
		ret = ss->css_online(css);
	if (!ret)
		css->flags |= CSS_ONLINE;
	return ret;
}

/* if the CSS is online, invoke ->css_offline() on it and mark it offline */
static void offline_css(struct cgroup_subsys *ss, struct cgroup *cgrp)
{
	struct cgroup_subsys_state *css = cgrp->subsys[ss->subsys_id];

	lockdep_assert_held(&cgroup_mutex);

	if (!(css->flags & CSS_ONLINE))
		return;

	if (ss->css_offline)
		ss->css_offline(css);

	css->flags &= ~CSS_ONLINE;
}

/*
 * cgroup_create - create a cgroup
 * @parent: cgroup that will be parent of the new cgroup
 * @dentry: dentry of the new cgroup
 * @mode: mode to set on new inode
 *
 * Must be called with the mutex on the parent inode held
 */
static long cgroup_create(struct cgroup *parent, struct dentry *dentry,
			     umode_t mode)
{
	struct cgroup *cgrp;
	struct cgroup_name *name;
	struct cgroupfs_root *root = parent->root;
	int err = 0;
	struct cgroup_subsys *ss;
	struct super_block *sb = root->sb;

	/* allocate the cgroup and its ID, 0 is reserved for the root */
	cgrp = kzalloc(sizeof(*cgrp), GFP_KERNEL);
	if (!cgrp)
		return -ENOMEM;

	name = cgroup_alloc_name(dentry);
	if (!name) {
		err = -ENOMEM;
		goto err_free_cgrp;
	}
	rcu_assign_pointer(cgrp->name, name);

	/*
	 * Temporarily set the pointer to NULL, so idr_find() won't return
	 * a half-baked cgroup.
	 */
	cgrp->id = idr_alloc(&root->cgroup_idr, NULL, 1, 0, GFP_KERNEL);
	if (cgrp->id < 0) {
		err = -ENOMEM;
		goto err_free_name;
	}

	/*
	 * Only live parents can have children.  Note that the liveliness
	 * check isn't strictly necessary because cgroup_mkdir() and
	 * cgroup_rmdir() are fully synchronized by i_mutex; however, do it
	 * anyway so that locking is contained inside cgroup proper and we
	 * don't get nasty surprises if we ever grow another caller.
	 */
	if (!cgroup_lock_live_group(parent)) {
		err = -ENODEV;
		goto err_free_id;
	}

	/* Grab a reference on the superblock so the hierarchy doesn't
	 * get deleted on unmount if there are child cgroups.  This
	 * can be done outside cgroup_mutex, since the sb can't
	 * disappear while someone has an open control file on the
	 * fs */
	atomic_inc(&sb->s_active);

	init_cgroup_housekeeping(cgrp);

	dentry->d_fsdata = cgrp;
	cgrp->dentry = dentry;

	cgrp->parent = parent;
	cgrp->root = parent->root;

	if (notify_on_release(parent))
		set_bit(CGRP_NOTIFY_ON_RELEASE, &cgrp->flags);

	if (test_bit(CGRP_CPUSET_CLONE_CHILDREN, &parent->flags))
		set_bit(CGRP_CPUSET_CLONE_CHILDREN, &cgrp->flags);

	for_each_root_subsys(root, ss) {
		struct cgroup_subsys_state *css;

		css = ss->css_alloc(parent->subsys[ss->subsys_id]);
		if (IS_ERR(css)) {
			err = PTR_ERR(css);
			goto err_free_all;
		}

		err = percpu_ref_init(&css->refcnt, css_release);
		if (err) {
			ss->css_free(css);
			goto err_free_all;
		}

		init_cgroup_css(css, ss, cgrp);

		if (ss->use_id) {
			err = alloc_css_id(ss, parent, cgrp);
			if (err)
				goto err_free_all;
		}
	}

	/*
	 * Create directory.  cgroup_create_file() returns with the new
	 * directory locked on success so that it can be populated without
	 * dropping cgroup_mutex.
	 */
	err = cgroup_create_file(dentry, S_IFDIR | mode, sb);
	if (err < 0)
		goto err_free_all;
	lockdep_assert_held(&dentry->d_inode->i_mutex);

	cgrp->serial_nr = cgroup_serial_nr_next++;

	/* allocation complete, commit to creation */
	list_add_tail_rcu(&cgrp->sibling, &cgrp->parent->children);
	root->number_of_cgroups++;

	/* each css holds a ref to the cgroup's dentry */
	for_each_root_subsys(root, ss)
		dget(dentry);

	/* hold a ref to the parent's dentry */
	dget(parent->dentry);

	/* creation succeeded, notify subsystems */
	for_each_root_subsys(root, ss) {
		err = online_css(ss, cgrp);
		if (err)
			goto err_destroy;

		if (ss->broken_hierarchy && !ss->warned_broken_hierarchy &&
		    parent->parent) {
			pr_warning("cgroup: %s (%d) created nested cgroup for controller \"%s\" which has incomplete hierarchy support. Nested cgroups may change behavior in the future.\n",
				   current->comm, current->pid, ss->name);
			if (!strcmp(ss->name, "memory"))
				pr_warning("cgroup: \"memory\" requires setting use_hierarchy to 1 on the root.\n");
			ss->warned_broken_hierarchy = true;
		}
	}

	idr_replace(&root->cgroup_idr, cgrp, cgrp->id);

	err = cgroup_addrm_files(cgrp, cgroup_base_files, true);
	if (err)
		goto err_destroy;

	err = cgroup_populate_dir(cgrp, root->subsys_mask);
	if (err)
		goto err_destroy;

	mutex_unlock(&cgroup_mutex);
	mutex_unlock(&cgrp->dentry->d_inode->i_mutex);

	return 0;

err_free_all:
	for_each_root_subsys(root, ss) {
		struct cgroup_subsys_state *css = cgrp->subsys[ss->subsys_id];

		if (css) {
			percpu_ref_cancel_init(&css->refcnt);
			ss->css_free(css);
		}
	}
	mutex_unlock(&cgroup_mutex);
	/* Release the reference count that we took on the superblock */
	deactivate_super(sb);
err_free_id:
	idr_remove(&root->cgroup_idr, cgrp->id);
err_free_name:
	kfree(rcu_dereference_raw(cgrp->name));
err_free_cgrp:
	kfree(cgrp);
	return err;

err_destroy:
	cgroup_destroy_locked(cgrp);
	mutex_unlock(&cgroup_mutex);
	mutex_unlock(&dentry->d_inode->i_mutex);
	return err;
}

static int cgroup_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
{
	struct cgroup *c_parent = dentry->d_parent->d_fsdata;

	/* the vfs holds inode->i_mutex already */
	return cgroup_create(c_parent, dentry, mode | S_IFDIR);
}

static void cgroup_css_killed(struct cgroup *cgrp)
{
	if (!atomic_dec_and_test(&cgrp->css_kill_cnt))
		return;

	/* percpu ref's of all css's are killed, kick off the next step */
	INIT_WORK(&cgrp->destroy_work, cgroup_offline_fn);
	schedule_work(&cgrp->destroy_work);
}

static void css_ref_killed_fn(struct percpu_ref *ref)
{
	struct cgroup_subsys_state *css =
		container_of(ref, struct cgroup_subsys_state, refcnt);

	cgroup_css_killed(css->cgroup);
}

/**
 * cgroup_destroy_locked - the first stage of cgroup destruction
 * @cgrp: cgroup to be destroyed
 *
 * css's make use of percpu refcnts whose killing latency shouldn't be
 * exposed to userland and are RCU protected.  Also, cgroup core needs to
 * guarantee that css_tryget() won't succeed by the time ->css_offline() is
 * invoked.  To satisfy all the requirements, destruction is implemented in
 * the following two steps.
 *
 * s1. Verify @cgrp can be destroyed and mark it dying.  Remove all
 *     userland visible parts and start killing the percpu refcnts of
 *     css's.  Set up so that the next stage will be kicked off once all
 *     the percpu refcnts are confirmed to be killed.
 *
 * s2. Invoke ->css_offline(), mark the cgroup dead and proceed with the
 *     rest of destruction.  Once all cgroup references are gone, the
 *     cgroup is RCU-freed.
 *
 * This function implements s1.  After this step, @cgrp is gone as far as
 * the userland is concerned and a new cgroup with the same name may be
 * created.  As cgroup doesn't care about the names internally, this
 * doesn't cause any problem.
 */
static int cgroup_destroy_locked(struct cgroup *cgrp)
	__releases(&cgroup_mutex) __acquires(&cgroup_mutex)
{
	struct dentry *d = cgrp->dentry;
	struct cgroup_event *event, *tmp;
	struct cgroup_subsys *ss;
	bool empty;

	lockdep_assert_held(&d->d_inode->i_mutex);
	lockdep_assert_held(&cgroup_mutex);

	/*
	 * css_set_lock synchronizes access to ->cset_links and prevents
	 * @cgrp from being removed while __put_css_set() is in progress.
	 */
	read_lock(&css_set_lock);
	empty = list_empty(&cgrp->cset_links) && list_empty(&cgrp->children);
	read_unlock(&css_set_lock);
	if (!empty)
		return -EBUSY;

	/*
	 * Block new css_tryget() by killing css refcnts.  cgroup core
	 * guarantees that, by the time ->css_offline() is invoked, no new
	 * css reference will be given out via css_tryget().  We can't
	 * simply call percpu_ref_kill() and proceed to offlining css's
	 * because percpu_ref_kill() doesn't guarantee that the ref is seen
	 * as killed on all CPUs on return.
	 *
	 * Use percpu_ref_kill_and_confirm() to get notifications as each
	 * css is confirmed to be seen as killed on all CPUs.  The
	 * notification callback keeps track of the number of css's to be
	 * killed and schedules cgroup_offline_fn() to perform the rest of
	 * destruction once the percpu refs of all css's are confirmed to
	 * be killed.
	 */
	atomic_set(&cgrp->css_kill_cnt, 1);
	for_each_root_subsys(cgrp->root, ss) {
		struct cgroup_subsys_state *css = cgrp->subsys[ss->subsys_id];

		/*
		 * Killing would put the base ref, but we need to keep it
		 * alive until after ->css_offline.
		 */
		percpu_ref_get(&css->refcnt);

		atomic_inc(&cgrp->css_kill_cnt);
		percpu_ref_kill_and_confirm(&css->refcnt, css_ref_killed_fn);
	}
	cgroup_css_killed(cgrp);

	/*
	 * Mark @cgrp dead.  This prevents further task migration and child
	 * creation by disabling cgroup_lock_live_group().  Note that
	 * CGRP_DEAD assertion is depended upon by css_next_child() to
	 * resume iteration after dropping RCU read lock.  See
	 * css_next_child() for details.
	 */
	set_bit(CGRP_DEAD, &cgrp->flags);

	/* CGRP_DEAD is set, remove from ->release_list for the last time */
	raw_spin_lock(&release_list_lock);
	if (!list_empty(&cgrp->release_list))
		list_del_init(&cgrp->release_list);
	raw_spin_unlock(&release_list_lock);

	/*
	 * Clear and remove @cgrp directory.  The removal puts the base ref
	 * but we aren't quite done with @cgrp yet, so hold onto it.
	 */
	cgroup_clear_dir(cgrp, cgrp->root->subsys_mask);
	cgroup_addrm_files(cgrp, cgroup_base_files, false);
	dget(d);
	cgroup_d_remove_dir(d);

	/*
	 * Unregister events and notify userspace.
	 * Notify userspace about cgroup removing only after rmdir of cgroup
	 * directory to avoid race between userspace and kernelspace.
	 */
	spin_lock(&cgrp->event_list_lock);
	list_for_each_entry_safe(event, tmp, &cgrp->event_list, list) {
		list_del_init(&event->list);
		schedule_work(&event->remove);
	}
	spin_unlock(&cgrp->event_list_lock);

	return 0;
}

/**
 * cgroup_offline_fn - the second step of cgroup destruction
 * @work: cgroup->destroy_free_work
 *
 * This function is invoked from a work item for a cgroup which is being
 * destroyed after the percpu refcnts of all css's are guaranteed to be
 * seen as killed on all CPUs, and performs the rest of destruction.  This
 * is the second step of destruction described in the comment above
 * cgroup_destroy_locked().
 */
static void cgroup_offline_fn(struct work_struct *work)
{
	struct cgroup *cgrp = container_of(work, struct cgroup, destroy_work);
	struct cgroup *parent = cgrp->parent;
	struct dentry *d = cgrp->dentry;
	struct cgroup_subsys *ss;

	mutex_lock(&cgroup_mutex);

	/*
	 * css_tryget() is guaranteed to fail now.  Tell subsystems to
	 * initiate destruction.
	 */
	for_each_root_subsys(cgrp->root, ss)
		offline_css(ss, cgrp);

	/*
	 * Put the css refs from cgroup_destroy_locked().  Each css holds
	 * an extra reference to the cgroup's dentry and cgroup removal
	 * proceeds regardless of css refs.  On the last put of each css,
	 * whenever that may be, the extra dentry ref is put so that dentry
	 * destruction happens only after all css's are released.
	 */
	for_each_root_subsys(cgrp->root, ss)
		css_put(cgrp->subsys[ss->subsys_id]);

	/* delete this cgroup from parent->children */
	list_del_rcu(&cgrp->sibling);

	/*
	 * We should remove the cgroup object from idr before its grace
	 * period starts, so we won't be looking up a cgroup while the
	 * cgroup is being freed.
	 */
	idr_remove(&cgrp->root->cgroup_idr, cgrp->id);
	cgrp->id = -1;

	dput(d);

	set_bit(CGRP_RELEASABLE, &parent->flags);
	check_for_release(parent);

	mutex_unlock(&cgroup_mutex);
}

static int cgroup_rmdir(struct inode *unused_dir, struct dentry *dentry)
{
	int ret;

	mutex_lock(&cgroup_mutex);
	ret = cgroup_destroy_locked(dentry->d_fsdata);
	mutex_unlock(&cgroup_mutex);

	return ret;
}

static void __init_or_module cgroup_init_cftsets(struct cgroup_subsys *ss)
{
	INIT_LIST_HEAD(&ss->cftsets);

	/*
	 * base_cftset is embedded in subsys itself, no need to worry about
	 * deregistration.
	 */
	if (ss->base_cftypes) {
		struct cftype *cft;

		for (cft = ss->base_cftypes; cft->name[0] != '\0'; cft++)
			cft->ss = ss;

		ss->base_cftset.cfts = ss->base_cftypes;
		list_add_tail(&ss->base_cftset.node, &ss->cftsets);
	}
}

static void __init cgroup_init_subsys(struct cgroup_subsys *ss)
{
	struct cgroup_subsys_state *css;

	printk(KERN_INFO "Initializing cgroup subsys %s\n", ss->name);

	mutex_lock(&cgroup_mutex);

	/* init base cftset */
	cgroup_init_cftsets(ss);

	/* Create the top cgroup state for this subsystem */
	list_add(&ss->sibling, &cgroup_dummy_root.subsys_list);
	ss->root = &cgroup_dummy_root;
	css = ss->css_alloc(cgroup_dummy_top->subsys[ss->subsys_id]);
	/* We don't handle early failures gracefully */
	BUG_ON(IS_ERR(css));
	init_cgroup_css(css, ss, cgroup_dummy_top);

	/* Update the init_css_set to contain a subsys
	 * pointer to this state - since the subsystem is
	 * newly registered, all tasks and hence the
	 * init_css_set is in the subsystem's top cgroup. */
	init_css_set.subsys[ss->subsys_id] = css;

	need_forkexit_callback |= ss->fork || ss->exit;

	/* At system boot, before all subsystems have been
	 * registered, no tasks have been forked, so we don't
	 * need to invoke fork callbacks here. */
	BUG_ON(!list_empty(&init_task.tasks));

	BUG_ON(online_css(ss, cgroup_dummy_top));

	mutex_unlock(&cgroup_mutex);

	/* this function shouldn't be used with modular subsystems, since they
	 * need to register a subsys_id, among other things */
	BUG_ON(ss->module);
}

/**
 * cgroup_load_subsys: load and register a modular subsystem at runtime
 * @ss: the subsystem to load
 *
 * This function should be called in a modular subsystem's initcall. If the
 * subsystem is built as a module, it will be assigned a new subsys_id and set
 * up for use. If the subsystem is built-in anyway, work is delegated to the
 * simpler cgroup_init_subsys.
 */
int __init_or_module cgroup_load_subsys(struct cgroup_subsys *ss)
{
	struct cgroup_subsys_state *css;
	int i, ret;
	struct hlist_node *tmp;
	struct css_set *cset;
	unsigned long key;

	/* check name and function validity */
	if (ss->name == NULL || strlen(ss->name) > MAX_CGROUP_TYPE_NAMELEN ||
	    ss->css_alloc == NULL || ss->css_free == NULL)
		return -EINVAL;

	/*
	 * we don't support callbacks in modular subsystems. this check is
	 * before the ss->module check for consistency; a subsystem that could
	 * be a module should still have no callbacks even if the user isn't
	 * compiling it as one.
	 */
	if (ss->fork || ss->exit)
		return -EINVAL;

	/*
	 * an optionally modular subsystem is built-in: we want to do nothing,
	 * since cgroup_init_subsys will have already taken care of it.
	 */
	if (ss->module == NULL) {
		/* a sanity check */
		BUG_ON(cgroup_subsys[ss->subsys_id] != ss);
		return 0;
	}

	/* init base cftset */
	cgroup_init_cftsets(ss);

	mutex_lock(&cgroup_mutex);
	cgroup_subsys[ss->subsys_id] = ss;

	/*
	 * no ss->css_alloc seems to need anything important in the ss
	 * struct, so this can happen first (i.e. before the dummy root
	 * attachment).
	 */
	css = ss->css_alloc(cgroup_dummy_top->subsys[ss->subsys_id]);
	if (IS_ERR(css)) {
		/* failure case - need to deassign the cgroup_subsys[] slot. */
		cgroup_subsys[ss->subsys_id] = NULL;
		mutex_unlock(&cgroup_mutex);
		return PTR_ERR(css);
	}

	list_add(&ss->sibling, &cgroup_dummy_root.subsys_list);
	ss->root = &cgroup_dummy_root;

	/* our new subsystem will be attached to the dummy hierarchy. */
	init_cgroup_css(css, ss, cgroup_dummy_top);
	/* init_idr must be after init_cgroup_css because it sets css->id. */
	if (ss->use_id) {
		ret = cgroup_init_idr(ss, css);
		if (ret)
			goto err_unload;
	}

	/*
	 * Now we need to entangle the css into the existing css_sets. unlike
	 * in cgroup_init_subsys, there are now multiple css_sets, so each one
	 * will need a new pointer to it; done by iterating the css_set_table.
	 * furthermore, modifying the existing css_sets will corrupt the hash
	 * table state, so each changed css_set will need its hash recomputed.
	 * this is all done under the css_set_lock.
	 */
	write_lock(&css_set_lock);
	hash_for_each_safe(css_set_table, i, tmp, cset, hlist) {
		/* skip entries that we already rehashed */
		if (cset->subsys[ss->subsys_id])
			continue;
		/* remove existing entry */
		hash_del(&cset->hlist);
		/* set new value */
		cset->subsys[ss->subsys_id] = css;
		/* recompute hash and restore entry */
		key = css_set_hash(cset->subsys);
		hash_add(css_set_table, &cset->hlist, key);
	}
	write_unlock(&css_set_lock);

	ret = online_css(ss, cgroup_dummy_top);
	if (ret)
		goto err_unload;

	/* success! */
	mutex_unlock(&cgroup_mutex);
	return 0;

err_unload:
	mutex_unlock(&cgroup_mutex);
	/* @ss can't be mounted here as try_module_get() would fail */
	cgroup_unload_subsys(ss);
	return ret;
}
EXPORT_SYMBOL_GPL(cgroup_load_subsys);
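
/*
 * Minimal initcall sketch for a modular subsystem (illustrative;
 * "mod", mod_subsys_id, mod_css_alloc() and mod_css_free() are made-up
 * names, not part of this file):
 *
 *	struct cgroup_subsys mod_subsys = {
 *		.name = "mod",
 *		.subsys_id = mod_subsys_id,
 *		.css_alloc = mod_css_alloc,
 *		.css_free = mod_css_free,
 *		.module = THIS_MODULE,
 *	};
 *
 *	static int __init mod_init(void)
 *	{
 *		return cgroup_load_subsys(&mod_subsys);
 *	}
 *	module_init(mod_init);
 */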

/**
 * cgroup_unload_subsys: unload a modular subsystem
 * @ss: the subsystem to unload
 *
 * This function should be called in a modular subsystem's exitcall. When this
 * function is invoked, the refcount on the subsystem's module will be 0, so
 * the subsystem will not be attached to any hierarchy.
 */
void cgroup_unload_subsys(struct cgroup_subsys *ss)
{
	struct cgrp_cset_link *link;

	BUG_ON(ss->module == NULL);

	/*
	 * we shouldn't be called if the subsystem is in use, and the use of
	 * try_module_get() in rebind_subsystems() should ensure that it
	 * doesn't start being used while we're killing it off.
	 */
	BUG_ON(ss->root != &cgroup_dummy_root);

	mutex_lock(&cgroup_mutex);

	offline_css(ss, cgroup_dummy_top);

	if (ss->use_id)
		idr_destroy(&ss->idr);

	/* deassign the subsys_id */
	cgroup_subsys[ss->subsys_id] = NULL;

	/* remove subsystem from the dummy root's list of subsystems */
	list_del_init(&ss->sibling);

	/*
	 * disentangle the css from all css_sets attached to the dummy
	 * top. as in loading, we need to pay our respects to the hashtable
	 * gods.
	 */
	write_lock(&css_set_lock);
	list_for_each_entry(link, &cgroup_dummy_top->cset_links, cset_link) {
		struct css_set *cset = link->cset;
		unsigned long key;

		hash_del(&cset->hlist);
		cset->subsys[ss->subsys_id] = NULL;
		key = css_set_hash(cset->subsys);
		hash_add(css_set_table, &cset->hlist, key);
	}
	write_unlock(&css_set_lock);

	/*
	 * remove subsystem's css from the cgroup_dummy_top and free it -
	 * need to free before marking as null because ss->css_free needs
	 * the cgrp->subsys pointer to find their state. note that this
	 * also takes care of freeing the css_id.
	 */
	ss->css_free(cgroup_dummy_top->subsys[ss->subsys_id]);
	cgroup_dummy_top->subsys[ss->subsys_id] = NULL;

	mutex_unlock(&cgroup_mutex);
}
EXPORT_SYMBOL_GPL(cgroup_unload_subsys);
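
/*
 * Matching exitcall for the sketch above (illustrative):
 *
 *	static void __exit mod_exit(void)
 *	{
 *		cgroup_unload_subsys(&mod_subsys);
 *	}
 *	module_exit(mod_exit);
 */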

/**
 * cgroup_init_early - cgroup initialization at system boot
 *
 * Initialize cgroups at system boot, and initialize any
 * subsystems that request early init.
 */
int __init cgroup_init_early(void)
{
	struct cgroup_subsys *ss;
	int i;

	atomic_set(&init_css_set.refcount, 1);
	INIT_LIST_HEAD(&init_css_set.cgrp_links);
	INIT_LIST_HEAD(&init_css_set.tasks);
	INIT_HLIST_NODE(&init_css_set.hlist);
	css_set_count = 1;
	init_cgroup_root(&cgroup_dummy_root);
	cgroup_root_count = 1;
	RCU_INIT_POINTER(init_task.cgroups, &init_css_set);

	init_cgrp_cset_link.cset = &init_css_set;
	init_cgrp_cset_link.cgrp = cgroup_dummy_top;
	list_add(&init_cgrp_cset_link.cset_link, &cgroup_dummy_top->cset_links);
	list_add(&init_cgrp_cset_link.cgrp_link, &init_css_set.cgrp_links);

	/* at bootup time, we don't worry about modular subsystems */
	for_each_builtin_subsys(ss, i) {
		BUG_ON(!ss->name);
		BUG_ON(strlen(ss->name) > MAX_CGROUP_TYPE_NAMELEN);
		BUG_ON(!ss->css_alloc);
		BUG_ON(!ss->css_free);
		if (ss->subsys_id != i) {
			printk(KERN_ERR "cgroup: Subsys %s id == %d\n",
			       ss->name, ss->subsys_id);
			BUG();
		}

		if (ss->early_init)
			cgroup_init_subsys(ss);
	}
	return 0;
}

/**
 * cgroup_init - cgroup initialization
 *
 * Register cgroup filesystem and /proc file, and initialize
 * any subsystems that didn't request early init.
 */
int __init cgroup_init(void)
{
	struct cgroup_subsys *ss;
	unsigned long key;
	int i, err;

	err = bdi_init(&cgroup_backing_dev_info);
	if (err)
		return err;

	for_each_builtin_subsys(ss, i) {
		if (!ss->early_init)
			cgroup_init_subsys(ss);
		if (ss->use_id)
			cgroup_init_idr(ss, init_css_set.subsys[ss->subsys_id]);
	}

	/* allocate id for the dummy hierarchy */
	mutex_lock(&cgroup_mutex);
	mutex_lock(&cgroup_root_mutex);

	/* Add init_css_set to the hash table */
	key = css_set_hash(init_css_set.subsys);
	hash_add(css_set_table, &init_css_set.hlist, key);

	BUG_ON(cgroup_init_root_id(&cgroup_dummy_root, 0, 1));

	err = idr_alloc(&cgroup_dummy_root.cgroup_idr, cgroup_dummy_top,
			0, 1, GFP_KERNEL);
	BUG_ON(err < 0);

	mutex_unlock(&cgroup_root_mutex);
	mutex_unlock(&cgroup_mutex);

	cgroup_kobj = kobject_create_and_add("cgroup", fs_kobj);
	if (!cgroup_kobj) {
		err = -ENOMEM;
		goto out;
	}

	err = register_filesystem(&cgroup_fs_type);
	if (err < 0) {
		kobject_put(cgroup_kobj);
		goto out;
	}

	proc_create("cgroups", 0, NULL, &proc_cgroupstats_operations);

out:
	if (err)
		bdi_destroy(&cgroup_backing_dev_info);

	return err;
}

/*
 * proc_cgroup_show()
 *  - Print task's cgroup paths into seq_file, one line for each hierarchy
 *  - Used for /proc/<pid>/cgroup.
 *  - No need to task_lock(tsk) on this tsk->cgroup reference, as it
 *    doesn't really matter if tsk->cgroup changes after we read it,
 *    and we take cgroup_mutex, keeping cgroup_attach_task() from changing it
 *    anyway.  No need to check that tsk->cgroup != NULL, thanks to
 *    the_top_cgroup_hack in cgroup_exit(), which sets an exiting task's
 *    cgroup to top_cgroup.
 */

/* TODO: Use a proper seq_file iterator */
int proc_cgroup_show(struct seq_file *m, void *v)
{
	struct pid *pid;
	struct task_struct *tsk;
	char *buf;
	int retval;
	struct cgroupfs_root *root;

	retval = -ENOMEM;
	buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!buf)
		goto out;

	retval = -ESRCH;
	pid = m->private;
	tsk = get_pid_task(pid, PIDTYPE_PID);
	if (!tsk)
		goto out_free;

	retval = 0;

	mutex_lock(&cgroup_mutex);

	for_each_active_root(root) {
		struct cgroup_subsys *ss;
		struct cgroup *cgrp;
		int count = 0;

		seq_printf(m, "%d:", root->hierarchy_id);
		for_each_root_subsys(root, ss)
			seq_printf(m, "%s%s", count++ ? "," : "", ss->name);
		if (strlen(root->name))
			seq_printf(m, "%sname=%s", count ? "," : "",
				   root->name);
		seq_putc(m, ':');
		cgrp = task_cgroup_from_root(tsk, root);
		retval = cgroup_path(cgrp, buf, PAGE_SIZE);
		if (retval < 0)
			goto out_unlock;
		seq_puts(m, buf);
		seq_putc(m, '\n');
	}

out_unlock:
	mutex_unlock(&cgroup_mutex);
	put_task_struct(tsk);
out_free:
	kfree(buf);
out:
	return retval;
}
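
/*
 * Illustrative /proc/<pid>/cgroup output (hierarchy IDs, subsystem
 * sets and paths vary by configuration):
 *
 *	3:cpu,cpuacct:/user/alice
 *	2:memory:/user/alice
 *	1:name=systemd:/user/alice/session-1
 */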

/* Display information about each subsystem and each hierarchy */
static int proc_cgroupstats_show(struct seq_file *m, void *v)
{
	struct cgroup_subsys *ss;
	int i;

	seq_puts(m, "#subsys_name\thierarchy\tnum_cgroups\tenabled\n");
	/*
	 * ideally we don't want subsystems moving around while we do this.
	 * cgroup_mutex is also necessary to guarantee an atomic snapshot of
	 * subsys/hierarchy state.
	 */
	mutex_lock(&cgroup_mutex);

	for_each_subsys(ss, i)
		seq_printf(m, "%s\t%d\t%d\t%d\n",
			   ss->name, ss->root->hierarchy_id,
			   ss->root->number_of_cgroups, !ss->disabled);

	mutex_unlock(&cgroup_mutex);
	return 0;
}
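
/*
 * Illustrative /proc/cgroups output (tab-separated; values vary):
 *
 *	#subsys_name	hierarchy	num_cgroups	enabled
 *	cpu		3		12		1
 *	memory		2		40		1
 */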

static int cgroupstats_open(struct inode *inode, struct file *file)
{
	return single_open(file, proc_cgroupstats_show, NULL);
}

static const struct file_operations proc_cgroupstats_operations = {
	.open = cgroupstats_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

/**
 * cgroup_fork - attach newly forked task to its parent's cgroup.
 * @child: pointer to task_struct of the newly forked child process.
 *
 * Description: A task inherits its parent's cgroup at fork().
 *
 * A pointer to the shared css_set was automatically copied in
 * fork.c by dup_task_struct().  However, we ignore that copy, since
 * it was not made under the protection of RCU or cgroup_mutex, so
 * might no longer be a valid cgroup pointer.  cgroup_attach_task() might
 * have already changed current->cgroups, allowing the previously
 * referenced css_set to be removed and freed.
 *
 * At the point that cgroup_fork() is called, 'current' is the parent
 * task, and the passed argument 'child' points to the child task.
 */
void cgroup_fork(struct task_struct *child)
{
	task_lock(current);
	get_css_set(task_css_set(current));
	child->cgroups = current->cgroups;
	task_unlock(current);
	INIT_LIST_HEAD(&child->cg_list);
}

/**
 * cgroup_post_fork - called on a new task after adding it to the task list
 * @child: the task in question
 *
 * Adds the task to the list running through its css_set if necessary and
 * calls the subsystem fork() callbacks.  Has to be after the task is
 * visible on the task list in case we race with the first call to
 * cgroup_task_iter_start() - to guarantee that the new task ends up on its
 * list.
 */
void cgroup_post_fork(struct task_struct *child)
{
	struct cgroup_subsys *ss;
	int i;

	/*
	 * use_task_css_set_links is set to 1 before we walk the tasklist
	 * under the tasklist_lock, and we read it here after we added the
	 * child to the tasklist under the tasklist_lock as well.  If the
	 * child wasn't yet in the tasklist when we walked through it from
	 * cgroup_enable_task_cg_lists(), then the use_task_css_set_links
	 * value should be visible now due to the paired locking and barriers
	 * implied by LOCK/UNLOCK: it is written before the tasklist_lock
	 * unlock in cgroup_enable_task_cg_lists() and read here after the
	 * tasklist_lock lock on fork.
	 */
	if (use_task_css_set_links) {
		write_lock(&css_set_lock);
		task_lock(child);
		if (list_empty(&child->cg_list))
			list_add(&child->cg_list, &task_css_set(child)->tasks);
		task_unlock(child);
		write_unlock(&css_set_lock);
	}

	/*
	 * Call ss->fork().  This must happen after @child is linked on
	 * css_set; otherwise, @child might change state between ->fork()
	 * and addition to css_set.
	 */
	if (need_forkexit_callback) {
		/*
		 * fork/exit callbacks are supported only for builtin
		 * subsystems, and the builtin section of the subsys
		 * array is immutable, so we don't need to lock the
		 * subsys array here. On the other hand, modular section
		 * of the array can be freed at module unload, so we
		 * can't touch that.
		 */
		for_each_builtin_subsys(ss, i)
			if (ss->fork)
				ss->fork(child);
	}
}

/**
 * cgroup_exit - detach cgroup from exiting task
 * @tsk: pointer to task_struct of exiting process
 * @run_callbacks: run exit callbacks?
 *
 * Description: Detach cgroup from @tsk and release it.
 *
 * Note that cgroups marked notify_on_release force every task in
 * them to take the global cgroup_mutex mutex when exiting.
 * This could impact scaling on very large systems.  Be reluctant to
 * use notify_on_release cgroups where very high task exit scaling
 * is required on large systems.
 *
 * the_top_cgroup_hack:
 *
 *    Set the exiting task's cgroup to the root cgroup (top_cgroup).
 *
 *    We call cgroup_exit() while the task is still competent to
 *    handle notify_on_release(), then leave the task attached to the
 *    root cgroup in each hierarchy for the remainder of its exit.
 *
 *    To do this properly, we would increment the reference count on
 *    top_cgroup, and near the very end of the kernel/exit.c do_exit()
 *    code we would add a second cgroup function call, to drop that
 *    reference.  This would just create an unnecessary hot spot on
 *    the top_cgroup reference count, to no avail.
 *
 *    Normally, holding a reference to a cgroup without bumping its
 *    count is unsafe.  The cgroup could go away, or someone could
 *    attach us to a different cgroup, decrementing the count on
 *    the first cgroup that we never incremented.  But in this case,
 *    top_cgroup isn't going away, and either the task has PF_EXITING set,
 *    which wards off any cgroup_attach_task() attempts, or the task is a
 *    failed fork, never visible to cgroup_attach_task().
 */
void cgroup_exit(struct task_struct *tsk, int run_callbacks)
{
	struct cgroup_subsys *ss;
	struct css_set *cset;
	int i;

	/*
	 * Unlink from the css_set task list if necessary.
	 * Optimistically check cg_list before taking
	 * css_set_lock
	 */
	if (!list_empty(&tsk->cg_list)) {
		write_lock(&css_set_lock);
		if (!list_empty(&tsk->cg_list))
			list_del_init(&tsk->cg_list);
		write_unlock(&css_set_lock);
	}

	/* Reassign the task to the init_css_set. */
	task_lock(tsk);
	cset = task_css_set(tsk);
	RCU_INIT_POINTER(tsk->cgroups, &init_css_set);

	if (run_callbacks && need_forkexit_callback) {
		/*
		 * fork/exit callbacks are supported only for builtin
		 * subsystems, see cgroup_post_fork() for details.
		 */
		for_each_builtin_subsys(ss, i) {
			if (ss->exit) {
				struct cgroup_subsys_state *old_css = cset->subsys[i];
				struct cgroup_subsys_state *css = task_css(tsk, i);

				ss->exit(css, old_css, tsk);
			}
		}
	}
	task_unlock(tsk);

	put_css_set_taskexit(cset);
}

static void check_for_release(struct cgroup *cgrp)
{
	if (cgroup_is_releasable(cgrp) &&
	    list_empty(&cgrp->cset_links) && list_empty(&cgrp->children)) {
		/*
		 * Control Group is currently removable. If it's not
		 * already queued for a userspace notification, queue
		 * it now.
		 */
		int need_schedule_work = 0;

		raw_spin_lock(&release_list_lock);
		if (!cgroup_is_dead(cgrp) &&
		    list_empty(&cgrp->release_list)) {
			list_add(&cgrp->release_list, &release_list);
5311 5312
			need_schedule_work = 1;
		}
		raw_spin_unlock(&release_list_lock);
		if (need_schedule_work)
			schedule_work(&release_agent_work);
	}
}

/*
 * Notify userspace when a cgroup is released, by running the
 * configured release agent with the name of the cgroup (path
 * relative to the root of cgroup file system) as the argument.
 *
 * Most likely, this user command will try to rmdir this cgroup.
 *
 * This races with the possibility that some other task will be
 * attached to this cgroup before it is removed, or that some other
 * user task will 'mkdir' a child cgroup of this cgroup.  That's ok.
 * The presumed 'rmdir' will fail quietly if this cgroup is no longer
 * unused, and this cgroup will be reprieved from its death sentence,
 * to continue to serve a useful existence.  Next time it's released,
 * we will get notified again, if it still has 'notify_on_release' set.
 *
 * The final arg to call_usermodehelper() is UMH_WAIT_EXEC, which
 * means only wait until the task is successfully execve()'d.  The
 * separate release agent task is forked by call_usermodehelper(),
 * then control in this thread returns here, without waiting for the
 * release agent task.  We don't bother to wait because the caller of
 * this routine has no use for the exit status of the release agent
 * task, so no sense holding our caller up for that.
 */
static void cgroup_release_agent(struct work_struct *work)
{
	BUG_ON(work != &release_agent_work);
	mutex_lock(&cgroup_mutex);
	raw_spin_lock(&release_list_lock);
	while (!list_empty(&release_list)) {
		char *argv[3], *envp[3];
		int i;
		char *pathbuf = NULL, *agentbuf = NULL;
		struct cgroup *cgrp = list_entry(release_list.next,
						    struct cgroup,
						    release_list);
		list_del_init(&cgrp->release_list);
		raw_spin_unlock(&release_list_lock);
		pathbuf = kmalloc(PAGE_SIZE, GFP_KERNEL);
		if (!pathbuf)
			goto continue_free;
		if (cgroup_path(cgrp, pathbuf, PAGE_SIZE) < 0)
			goto continue_free;
		agentbuf = kstrdup(cgrp->root->release_agent_path, GFP_KERNEL);
		if (!agentbuf)
			goto continue_free;

		i = 0;
		argv[i++] = agentbuf;
		argv[i++] = pathbuf;
		argv[i] = NULL;

		i = 0;
		/* minimal command environment */
		envp[i++] = "HOME=/";
		envp[i++] = "PATH=/sbin:/bin:/usr/sbin:/usr/bin";
		envp[i] = NULL;

		/* Drop the lock while we invoke the usermode helper,
		 * since the exec could involve hitting disk and hence
		 * be a slow process */
		mutex_unlock(&cgroup_mutex);
		call_usermodehelper(argv[0], argv, envp, UMH_WAIT_EXEC);
		mutex_lock(&cgroup_mutex);
 continue_free:
		kfree(pathbuf);
		kfree(agentbuf);
		raw_spin_lock(&release_list_lock);
	}
	raw_spin_unlock(&release_list_lock);
	mutex_unlock(&cgroup_mutex);
}
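
/*
 * Illustrative example (an assumption, not part of this file): a minimal
 * release agent could be a shell script that removes the now-empty group,
 * where $1 is the cgroup path passed as argv[1] above:
 *
 *	#!/bin/sh
 *	rmdir "/sys/fs/cgroup/cpuset$1"
 *
 * assuming the hierarchy is mounted on /sys/fs/cgroup/cpuset and the agent
 * was registered via the "release_agent" mount option or control file.
 */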

static int __init cgroup_disable(char *str)
{
	struct cgroup_subsys *ss;
	char *token;
	int i;

	while ((token = strsep(&str, ",")) != NULL) {
		if (!*token)
			continue;
		/*
		 * cgroup_disable, being at boot time, can't know about
		 * module subsystems, so we don't worry about them.
		 */
		for_each_builtin_subsys(ss, i) {
			if (!strcmp(token, ss->name)) {
				ss->disabled = 1;
				printk(KERN_INFO "Disabling %s control group"
					" subsystem\n", ss->name);
				break;
			}
		}
	}
	return 1;
}
__setup("cgroup_disable=", cgroup_disable);
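
/*
 * Usage example (illustrative): booting with "cgroup_disable=memory,cpuset"
 * marks the built-in memory and cpuset controllers disabled before any
 * hierarchy is mounted.
 */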

/*
 * Functions for CSS ID.
 */

/* to get ID other than 0, this should be called when !cgroup_is_dead() */
unsigned short css_id(struct cgroup_subsys_state *css)
{
	struct css_id *cssid;

	/*
	 * This css_id() can return the correct value when someone has a
	 * refcnt on this css or this is under rcu_read_lock().  Once css->id
	 * is allocated, it's unchanged until freed.
	 */
	cssid = rcu_dereference_raw(css->id);

	if (cssid)
		return cssid->id;
	return 0;
}
EXPORT_SYMBOL_GPL(css_id);

/**
 * css_is_ancestor - test whether "root" css is an ancestor of "child"
 * @child: the css to be tested.
 * @root: the css supposed to be an ancestor of the child.
 *
 * Returns true if "root" is an ancestor of "child" in its hierarchy. Because
 * this function reads css->id, the caller must hold rcu_read_lock().
 * But, considering usual usage, the csses should be valid objects after the
 * test.  Assuming that the caller will do some action to the child if this
 * returns true, the caller must take "child"'s reference count.
 * If "child" is a valid object and this returns true, "root" is valid, too.
 */

bool css_is_ancestor(struct cgroup_subsys_state *child,
		    const struct cgroup_subsys_state *root)
{
	struct css_id *child_id;
	struct css_id *root_id;

	child_id  = rcu_dereference(child->id);
	if (!child_id)
		return false;
	root_id = rcu_dereference(root->id);
	if (!root_id)
		return false;
	if (child_id->depth < root_id->depth)
		return false;
	if (child_id->stack[root_id->depth] != root_id->id)
		return false;
	return true;
}
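
/*
 * Usage sketch (an assumption, not code from this file): callers pin the
 * child and wrap the test in an RCU read-side critical section, e.g.
 *
 *	rcu_read_lock();
 *	ret = css_is_ancestor(task_css(task, mem_cgroup_subsys_id),
 *			      &root_memcg->css);
 *	rcu_read_unlock();
 *
 * where "task" and "root_memcg" are hypothetical locals of the caller.
 */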

void free_css_id(struct cgroup_subsys *ss, struct cgroup_subsys_state *css)
{
	struct css_id *id = rcu_dereference_protected(css->id, true);

	/* When this is called before css_id initialization, id can be NULL */
	if (!id)
		return;

	BUG_ON(!ss->use_id);

	rcu_assign_pointer(id->css, NULL);
	rcu_assign_pointer(css->id, NULL);
	spin_lock(&ss->id_lock);
	idr_remove(&ss->idr, id->id);
	spin_unlock(&ss->id_lock);
	kfree_rcu(id, rcu_head);
}
EXPORT_SYMBOL_GPL(free_css_id);

/*
 * This is called by init or create(). Then, calls to this function are
 * always serialized (by cgroup_mutex at create()).
 */

static struct css_id *get_new_cssid(struct cgroup_subsys *ss, int depth)
{
	struct css_id *newid;
	int ret, size;

	BUG_ON(!ss->use_id);

	size = sizeof(*newid) + sizeof(unsigned short) * (depth + 1);
	newid = kzalloc(size, GFP_KERNEL);
	if (!newid)
		return ERR_PTR(-ENOMEM);

	idr_preload(GFP_KERNEL);
	spin_lock(&ss->id_lock);
	/* Don't use 0; allocate an ID in the range 1-65535 */
	ret = idr_alloc(&ss->idr, newid, 1, CSS_ID_MAX + 1, GFP_NOWAIT);
	spin_unlock(&ss->id_lock);
	idr_preload_end();

	/* Returns an error when there is no free space for a new ID. */
	if (ret < 0)
		goto err_out;

	newid->id = ret;
	newid->depth = depth;
	return newid;
err_out:
	kfree(newid);
	return ERR_PTR(ret);

}

static int __init_or_module cgroup_init_idr(struct cgroup_subsys *ss,
					    struct cgroup_subsys_state *rootcss)
{
	struct css_id *newid;

	spin_lock_init(&ss->id_lock);
	idr_init(&ss->idr);

	newid = get_new_cssid(ss, 0);
	if (IS_ERR(newid))
		return PTR_ERR(newid);

	newid->stack[0] = newid->id;
	RCU_INIT_POINTER(newid->css, rootcss);
	RCU_INIT_POINTER(rootcss->id, newid);
	return 0;
}

static int alloc_css_id(struct cgroup_subsys *ss, struct cgroup *parent,
			struct cgroup *child)
{
	int subsys_id, i, depth = 0;
	struct cgroup_subsys_state *parent_css, *child_css;
	struct css_id *child_id, *parent_id;

	subsys_id = ss->subsys_id;
	parent_css = parent->subsys[subsys_id];
	child_css = child->subsys[subsys_id];
	parent_id = rcu_dereference_protected(parent_css->id, true);
	depth = parent_id->depth + 1;

	child_id = get_new_cssid(ss, depth);
	if (IS_ERR(child_id))
		return PTR_ERR(child_id);

	for (i = 0; i < depth; i++)
		child_id->stack[i] = parent_id->stack[i];
	child_id->stack[depth] = child_id->id;
	/*
	 * child_id->css pointer will be set after this cgroup is available;
	 * see cgroup_populate_dir().
	 */
	rcu_assign_pointer(child_css->id, child_id);

	return 0;
}
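
/*
 * Worked example (illustrative): if the root css has id 1 and a child at
 * depth 1 is assigned id 7, the child's stack is {1, 7}.  A grandchild that
 * gets id 9 then has stack = {1, 7, 9}, which is why css_is_ancestor() only
 * needs to check child_id->stack[root_id->depth] == root_id->id.
 */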

/**
 * css_lookup - lookup css by id
 * @ss: cgroup subsys to be looked into.
 * @id: the id
 *
 * Returns a pointer to the cgroup_subsys_state if there is a valid one with
 * the given id, or NULL if not.  Should be called under rcu_read_lock().
 */
struct cgroup_subsys_state *css_lookup(struct cgroup_subsys *ss, int id)
{
	struct css_id *cssid = NULL;

	BUG_ON(!ss->use_id);
	cssid = idr_find(&ss->idr, id);

	if (unlikely(!cssid))
		return NULL;

	return rcu_dereference(cssid->css);
}
EXPORT_SYMBOL_GPL(css_lookup);

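/*
 * Usage sketch (an assumption, not code from this file): an id previously
 * obtained via css_id() can be mapped back under RCU protection, e.g.
 *
 *	rcu_read_lock();
 *	css = css_lookup(&mem_cgroup_subsys, stored_id);
 *	if (css)
 *		do_something(css);
 *	rcu_read_unlock();
 *
 * where "stored_id" and do_something() are hypothetical.
 */
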
/*
 * get corresponding css from file open on cgroupfs directory
 */
struct cgroup_subsys_state *cgroup_css_from_dir(struct file *f, int id)
{
	struct cgroup *cgrp;
	struct inode *inode;
	struct cgroup_subsys_state *css;

	inode = file_inode(f);
	/* check in cgroup filesystem dir */
	if (inode->i_op != &cgroup_dir_inode_operations)
		return ERR_PTR(-EBADF);

	if (id < 0 || id >= CGROUP_SUBSYS_COUNT)
		return ERR_PTR(-EINVAL);

	/* get cgroup */
	cgrp = __d_cgrp(f->f_dentry);
	css = cgrp->subsys[id];
	return css ? css : ERR_PTR(-ENOENT);
}

#ifdef CONFIG_CGROUP_DEBUG
static struct cgroup_subsys_state *
debug_css_alloc(struct cgroup_subsys_state *parent_css)
{
	struct cgroup_subsys_state *css = kzalloc(sizeof(*css), GFP_KERNEL);

	if (!css)
		return ERR_PTR(-ENOMEM);

	return css;
}

static void debug_css_free(struct cgroup_subsys_state *css)
{
	kfree(css);
}

static u64 debug_taskcount_read(struct cgroup_subsys_state *css,
				struct cftype *cft)
{
	return cgroup_task_count(css->cgroup);
}

static u64 current_css_set_read(struct cgroup_subsys_state *css,
				struct cftype *cft)
{
	return (u64)(unsigned long)current->cgroups;
}

static u64 current_css_set_refcount_read(struct cgroup_subsys_state *css,
					 struct cftype *cft)
{
	u64 count;

	rcu_read_lock();
	count = atomic_read(&task_css_set(current)->refcount);
	rcu_read_unlock();
	return count;
}

static int current_css_set_cg_links_read(struct cgroup_subsys_state *css,
					 struct cftype *cft,
					 struct seq_file *seq)
{
	struct cgrp_cset_link *link;
	struct css_set *cset;

	read_lock(&css_set_lock);
	rcu_read_lock();
	cset = rcu_dereference(current->cgroups);
	list_for_each_entry(link, &cset->cgrp_links, cgrp_link) {
		struct cgroup *c = link->cgrp;
		const char *name;

		if (c->dentry)
			name = c->dentry->d_name.name;
		else
			name = "?";
		seq_printf(seq, "Root %d group %s\n",
			   c->root->hierarchy_id, name);
	}
	rcu_read_unlock();
	read_unlock(&css_set_lock);
	return 0;
}

#define MAX_TASKS_SHOWN_PER_CSS 25
static int cgroup_css_links_read(struct cgroup_subsys_state *css,
				 struct cftype *cft, struct seq_file *seq)
{
	struct cgrp_cset_link *link;

	read_lock(&css_set_lock);
	list_for_each_entry(link, &css->cgroup->cset_links, cset_link) {
		struct css_set *cset = link->cset;
		struct task_struct *task;
		int count = 0;
		seq_printf(seq, "css_set %p\n", cset);
		list_for_each_entry(task, &cset->tasks, cg_list) {
			if (count++ > MAX_TASKS_SHOWN_PER_CSS) {
				seq_puts(seq, "  ...\n");
				break;
			} else {
				seq_printf(seq, "  task %d\n",
					   task_pid_vnr(task));
			}
		}
	}
	read_unlock(&css_set_lock);
	return 0;
}

static u64 releasable_read(struct cgroup_subsys_state *css, struct cftype *cft)
{
	return test_bit(CGRP_RELEASABLE, &css->cgroup->flags);
}

static struct cftype debug_files[] =  {
	{
		.name = "taskcount",
		.read_u64 = debug_taskcount_read,
	},

	{
		.name = "current_css_set",
		.read_u64 = current_css_set_read,
	},

	{
		.name = "current_css_set_refcount",
		.read_u64 = current_css_set_refcount_read,
	},

	{
		.name = "current_css_set_cg_links",
		.read_seq_string = current_css_set_cg_links_read,
	},

	{
		.name = "cgroup_css_links",
		.read_seq_string = cgroup_css_links_read,
	},

	{
		.name = "releasable",
		.read_u64 = releasable_read,
	},

	{ }	/* terminate */
};

struct cgroup_subsys debug_subsys = {
	.name = "debug",
	.css_alloc = debug_css_alloc,
	.css_free = debug_css_free,
	.subsys_id = debug_subsys_id,
	.base_cftypes = debug_files,
};
#endif /* CONFIG_CGROUP_DEBUG */